date_collected (string) | repo_name (string, 6–116 chars) | file_name (string, 2–220 chars) | file_contents (string, 13–357k chars) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | rgalindo-wl/langchain_capstone_project | src~components.py | import openai
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFDirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from src.settings import OPENAI_API_KEY, configure_logger, template
from src.utils import CommaSeparatedListOutputParser
logger = configure_logger("Main components")
openai.api_key = OPENAI_API_KEY
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_query = ""
human_message_prompt = HumanMessagePromptTemplate.from_template(human_query)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
chain = LLMChain(
llm=ChatOpenAI(), prompt=chat_prompt, output_parser=CommaSeparatedListOutputParser()
)
def run_all(human_query: str, path: str):
human_message_prompt = HumanMessagePromptTemplate.from_template(human_query)
logger.info("Created human prompt")
loader = PyPDFDirectoryLoader(path)
logger.info("Created loader")
index = VectorstoreIndexCreator().from_loaders([loader])
logger.info("Created index")
logger.info(f"{human_message_prompt}")
response = index.query(human_query)
# qa_chain = RetrievalQA.from_chain_type(
# chain, retriever=index.vectorstore.as_retriever(), return_source_documents=True
# )
logger.info(response)
return response
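# A minimal usage sketch (the query and directory below are hypothetical, assuming PDFs exist there):
#   answer = run_all("What are the main topics covered in these documents?", "data/pdfs/")
# VectorstoreIndexCreator builds an in-memory vector index over the loaded PDFs and
# index.query() answers the question with a retrieval-augmented LLM call.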
| [
"[PLACEHOLDER, PLACEHOLDER]"
] |
2024-01-10 | kyouyap/streamlit_sample | 03_summary.py | import streamlit as st
from streamlit_chat import message
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage
)
from langchain.callbacks import get_openai_callback
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
def init_page():
st.set_page_config(
page_title="Website Summarizer",
page_icon="🤗"
)
st.header("Website Summarizer 🤗")
st.sidebar.title("Options")
def init_messages():
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button or "messages" not in st.session_state:
st.session_state.messages = [
SystemMessage(content="You are a helpful assistant.")
]
st.session_state.costs = []
def select_model():
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-4"))
if model == "GPT-3.5":
model_name = "gpt-3.5-turbo"
else:
model_name = "gpt-4"
return ChatOpenAI(temperature=0, model_name=model_name)
def get_url_input():
url = st.text_input("URL: ", key="input")
return url
def validate_url(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
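# validate_url only checks that both a scheme and a network location are present:
# "https://example.com" passes, while "example.com" (no scheme) or "not a url" fail.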
def get_content(url):
try:
with st.spinner("Fetching Content ..."):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
# fetch text from main (change the below code to filter page)
if soup.main:
return soup.main.get_text()
elif soup.article:
return soup.article.get_text()
else:
return soup.body.get_text()
except Exception:
st.write('Something went wrong while fetching the page content.')
return None
def build_prompt(content, n_chars=300):
return f"""以下はとある。Webページのコンテンツである。内容を{n_chars}程度でわかりやすく要約してください。
========
{content[:1000]}
========
日本語で書いてね!
"""
def get_answer(llm, messages):
with get_openai_callback() as cb:
answer = llm(messages)
return answer.content, cb.total_cost
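# get_openai_callback tracks the token usage of every OpenAI call made inside the
# context manager, so cb.total_cost here is the estimated USD cost of this single request.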
def main():
init_page()
llm = select_model()
init_messages()
container = st.container()
response_container = st.container()
with container:
url = get_url_input()
is_valid_url = validate_url(url)
if not is_valid_url:
st.write('Please input a valid URL')
answer = None
else:
content = get_content(url)
if content:
prompt = build_prompt(content)
st.session_state.messages.append(HumanMessage(content=prompt))
with st.spinner("ChatGPT is typing ..."):
answer, cost = get_answer(llm, st.session_state.messages)
st.session_state.costs.append(cost)
else:
answer = None
if answer:
with response_container:
st.markdown("## Summary")
st.write(answer)
st.markdown("---")
st.markdown("## Original Text")
st.write(content)
costs = st.session_state.get('costs', [])
st.sidebar.markdown("## Costs")
st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")
for cost in costs:
st.sidebar.markdown(f"- ${cost:.5f}")
if __name__ == '__main__':
main() | [
"You are a helpful assistant."
] |
2024-01-10 | kyouyap/streamlit_sample | 02_ai_chat.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import StreamlitCallbackHandler
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage
)
from langchain.callbacks import get_openai_callback
from forex_python.converter import CurrencyRates
import datetime
from token_cost_process import TokenCostProcess,CostCalcAsyncHandler
def init_page():
st.set_page_config(
page_title="My Great ChatGPT",
page_icon="🤗"
)
st.header("My Great ChatGPT 🤗")
st.sidebar.title("Options")
def init_messages():
init_content=f"""
You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture. Knowledge cutoff: 2021-09. Current date: {datetime.datetime.now().strftime("%Y-%m-%d")}.
"""
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button or "messages" not in st.session_state:
st.session_state.messages = [
SystemMessage(content=init_content)
]
st.session_state.costs = []
def select_model():
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-4"))
if model == "GPT-3.5":
model_name = "gpt-3.5-turbo"
else:
model_name = "gpt-4"
# Add a slider to the sidebar so the temperature can be chosen in the range 0 to 2
# (default 0.0; the step size in the code below is 0.01)
temperature = st.sidebar.slider("Temperature:", min_value=0.0, max_value=2.0, value=0.0, step=0.01)
st.session_state.model_name = model_name
return ChatOpenAI(temperature=temperature, model_name=model_name, streaming=True)
def show_massages(messages_container):
messages = st.session_state.get('messages', [])
with messages_container:
for message in messages:
if isinstance(message, AIMessage):
with st.chat_message('assistant'):
st.markdown(message.content)
elif isinstance(message, HumanMessage):
with st.chat_message('user'):
st.markdown(message.content)
else: # isinstance(message, SystemMessage):
st.write(f"System message: {message.content}")
def place_input_form(input_container, messages_container, llm):
messages = st.session_state.get('messages', [])
with input_container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_area(label='Message: ', key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
# Runs when something has been entered and the Submit button is pressed
st.session_state.messages.append(HumanMessage(content=user_input))
with st.spinner("ChatGPT is typing ..."):
response = llm(messages)
st.session_state.messages.append(AIMessage(content=response.content))
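# Note on the flow: `messages` aliases st.session_state.messages, so the HumanMessage
# appended above is included in the llm(...) call; the AIMessage appended here is then
# rendered by show_massages() later in the same script run.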
def main():
init_page()
llm = select_model()
init_messages()
messages_container = st.container() # container for the chat messages
input_container = st.container() # container for the input form
place_input_form(input_container, messages_container, llm)
show_massages(messages_container)
# Watch for user input
if __name__ == '__main__':
main() | [] |
2024-01-10 | kyouyap/streamlit_sample | 00_test.py | import streamlit as st
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage
)
def init_page():
st.set_page_config(
page_title="My Great ChatGPT",
page_icon="🤗"
)
st.header("My Great ChatGPT 🤗")
st.sidebar.title("Options")
def init_messages():
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button or "messages" not in st.session_state:
st.session_state.messages = [
SystemMessage(content="You are a helpful assistant.")
]
st.session_state.costs = []
def select_model():
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-4"))
if model == "GPT-3.5":
model_name = "gpt-3.5-turbo"
else:
model_name = "gpt-4"
# Add a slider to the sidebar so the temperature can be chosen in the range 0 to 2
# (default 0.0; the step size in the code below is 0.01)
temperature = st.sidebar.slider("Temperature:", min_value=0.0, max_value=2.0, value=0.0, step=0.01)
return ChatOpenAI(temperature=temperature, model_name=model_name, streaming=True)
def main():
init_page()
llm = select_model()
init_messages()
# Watch for user input
user_input = st.text_input("聞きたいことを入力してね!")
messages = st.session_state.get('messages', [])
for message in messages:
if isinstance(message, AIMessage):
with st.chat_message('assistant'):
st.markdown(message.content)
elif isinstance(message, HumanMessage):
with st.chat_message('user'):
st.markdown(message.content)
else: # isinstance(message, SystemMessage):
st.write(f"System message: {message.content}")
if user_input:
st.session_state.messages.append(HumanMessage(content=user_input))
st.chat_message("user").markdown(user_input)
with st.chat_message("assistant"):
st_callback = StreamlitCallbackHandler(st.container())
response = llm(messages, callbacks=[st_callback])
st.session_state.messages.append(AIMessage(content=response.content))
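# StreamlitCallbackHandler receives the callbacks from this LLM call (the model was
# created with streaming=True) and renders them live inside the provided container,
# so the reply appears incrementally rather than only after the call returns.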
if __name__ == '__main__':
main()
| [
"You are a helpful assistant."
] |
2024-01-10 | kyouyap/streamlit_sample | 08_docment_chat.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage, HumanMessage, AIMessage
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
import datetime
import dotenv
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from langchain.vectorstores import Qdrant
from langchain.embeddings import OpenAIEmbeddings
import os
import openai
import glob
from typing import Any
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
CharacterTextSplitter,
)
class JapaneseCharacterTextSplitter(RecursiveCharacterTextSplitter):
def __init__(self, **kwargs: Any):
separators = ["\n\n", "\n", "。", "、", " ", ""]
super().__init__(separators=separators, **kwargs)
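# The default RecursiveCharacterTextSplitter separators are tuned for English; this
# subclass adds Japanese sentence/clause delimiters ("。", "、") so chunks are cut at
# natural boundaries in Japanese text.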
def init():
dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# TODO: add more initialization here as needed
def load_qdrant():
"""
Load the Qdrant vector store.
"""
QDRANT_PATH, COLLECTION_NAME = os.getenv("QDRANT_PATH"), os.getenv(
"COLLECTION_NAME"
)
client = QdrantClient(path=QDRANT_PATH)
# Get all existing collection names
collections = client.get_collections().collections
collection_names = [collection.name for collection in collections]
# Create the collection if it does not exist yet
if COLLECTION_NAME not in collection_names:
# The collection does not exist, so create a new one
client.create_collection(
collection_name=COLLECTION_NAME,
vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
)
print("collection created")
return Qdrant(
client=client, collection_name=COLLECTION_NAME, embeddings=OpenAIEmbeddings()
)
def init_page():
st.set_page_config(page_title="My Great ChatGPT", page_icon="🤗")
st.header("My Great ChatGPT 🤗")
st.sidebar.title("Options")
def init_messages():
init_content = f"""
You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture. Knowledge cutoff: 2021-09. Current date: {datetime.datetime.now().strftime("%Y-%m-%d")}.
"""
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button or "messages" not in st.session_state:
st.session_state.messages = [SystemMessage(content=init_content)]
st.session_state.costs = []
def select_model():
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-4"))
if model == "GPT-3.5":
model_name = "gpt-3.5-turbo"
else:
model_name = "gpt-4"
# Add a slider to the sidebar so the temperature can be chosen in the range 0 to 2
# (default 0.0; the step size in the code below is 0.01)
temperature = st.sidebar.slider(
"Temperature:", min_value=0.0, max_value=2.0, value=0.0, step=0.01
)
st.session_state.model_name = model_name
return ChatOpenAI(temperature=temperature, model_name=model_name, streaming=True)
def show_massages(messages_container):
messages = st.session_state.get("messages", [])
with messages_container:
for message in messages:
if isinstance(message, AIMessage):
with st.chat_message("assistant"):
st.markdown(message.content)
elif isinstance(message, HumanMessage):
with st.chat_message("user"):
st.markdown(message.content)
else: # isinstance(message, SystemMessage):
st.write(f"System message: {message.content}")
def build_qa_model(llm): # noqa
"""
Build the question-answering model.
"""
qdrant = load_qdrant()
retriever = qdrant.as_retriever()
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Answer in Japanese:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs=chain_type_kwargs,
)
return qa
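# chain_type="stuff" simply "stuffs" every retrieved chunk into the {context} slot of
# the prompt above in a single LLM call; it is the simplest RetrievalQA mode but assumes
# the retrieved documents fit within the model's context window.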
def query_search(query): # noqa
"""
Search the vector store for documents similar to the query.
"""
qdrant = load_qdrant()
print("query_search")
# qdrant.similarity_search_with_score("「フリーランスのリモートワークの実態」について教えて。", k=2)
docs = qdrant.similarity_search_with_score(query, k=2)
return docs
def place_input_form(input_container, messages_container, llm):
messages = st.session_state.get("messages", [])
with input_container:
with st.form(key="my_form", clear_on_submit=True):
user_input = st.text_area(label="Message: ", key="input")
submit_button = st.form_submit_button(label="Send")
if submit_button and user_input:
# Runs when something has been entered and the Submit button is pressed
st.session_state.messages.append(HumanMessage(content=user_input))
with st.spinner("ChatGPT is typing ..."):
response = build_qa_model(llm).run(user_input)
st.session_state.messages.append(AIMessage(content=response))
st.session_state.messages = st.session_state.messages[-3:]
def build_vector_store():
qdrant = load_qdrant()
text_files = glob.glob("documents/*.txt", recursive=True)
print(text_files)
docs = []
for text_file in text_files:
with open(text_file) as f:
text = f.read()
text_splitter = CharacterTextSplitter(
separator="\n\n", # 文章を分割する文字列
chunk_size=1800, # チャンクの文字数
chunk_overlap=0, # チャンク間で重複させる文字数
)
split_texts = text_splitter.split_text(text)
docs.extend([Document(page_content=split_text) for split_text in split_texts])
qdrant.add_documents(docs)
print(docs)
def document_to_vector():
st.write("docment配下のファイルをベクトル化します")
submit_button = st.button(label="To vector")
if submit_button:
load_qdrant()
build_vector_store()
def chat_with_gpt():
llm = select_model()
messages_container = st.container()
input_container = st.container()
place_input_form(input_container, messages_container, llm)
show_massages(messages_container)
def main():
init()
init_page()
init_messages()
selection = st.sidebar.radio("Go to", ["Document to vector", "Chat"])
if selection == "Document to vector":
document_to_vector()
else:
chat_with_gpt()
if __name__ == "__main__":
main()
| [
"context",
"question",
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n {context}\n\n Question: {question}\n Answer in Japanese:"
] |
2024-01-10 | kyouyap/streamlit_sample | 06_PDF_chat.py | from glob import glob
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
from PyPDF2 import PdfReader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Qdrant
from langchain.chains import RetrievalQA
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
QDRANT_PATH = "./local_qdrant"
COLLECTION_NAME = "my_collection_2"
def init_page():
"""
Initialize the Streamlit page.
"""
st.set_page_config(page_title="Ask My PDF(s)", page_icon="🤗")
st.sidebar.title("Nav")
st.session_state.costs = []
def select_model():
"""
Select the model from the sidebar.
"""
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-3.5-16k", "GPT-4"))
if model == "GPT-3.5":
st.session_state.model_name = "gpt-3.5-turbo"
elif model == "GPT-3.5":
st.session_state.model_name = "gpt-3.5-turbo-16k"
else:
st.session_state.model_name = "gpt-4"
# 300: tokens reserved for the instructions other than the main text (same below)
st.session_state.max_token = (
OpenAI.modelname_to_contextsize(st.session_state.model_name) - 300
)
return ChatOpenAI(temperature=0, model_name=st.session_state.model_name)
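# Rough sketch of the budget above (assuming LangChain reports a 4,096-token context
# for gpt-3.5-turbo): max_token = 4096 - 300 = 3796 tokens remain for the document text
# itself, with 300 reserved for the surrounding instructions.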
def get_pdf_text():
"""
Extract the text from an uploaded PDF file.
"""
uploaded_file = st.file_uploader(label="Upload your PDF here😇", type="pdf")
if uploaded_file:
pdf_reader = PdfReader(uploaded_file)
text = "\n\n".join([page.extract_text() for page in pdf_reader.pages])
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
model_name="text-embedding-ada-002",
# The right chunk size depends on the PDF being queried, so it needs tuning:
# if it is too large, answers cannot draw on information from many different places;
# if it is too small, a single chunk does not hold enough context.
chunk_size=500,
chunk_overlap=0,
)
return text_splitter.split_text(text)
else:
return None
def load_qdrant():
"""
Load the Qdrant vector store.
"""
client = QdrantClient(path=QDRANT_PATH)
# Get all existing collection names
collections = client.get_collections().collections
collection_names = [collection.name for collection in collections]
# Create the collection if it does not exist yet
if COLLECTION_NAME not in collection_names:
# The collection does not exist, so create a new one
client.create_collection(
collection_name=COLLECTION_NAME,
vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
)
print("collection created")
return Qdrant(
client=client, collection_name=COLLECTION_NAME, embeddings=OpenAIEmbeddings()
)
def build_vector_store(pdf_text):
"""
Build the vector store from the PDF text.
"""
qdrant = load_qdrant()
qdrant.add_texts(pdf_text)
def build_qa_model(llm): # noqa
"""
Build the question-answering model.
"""
qdrant = load_qdrant()
retriever = qdrant.as_retriever(
# "mmr", "similarity_score_threshold" などもある
search_type="similarity",
# 文書を何個取得するか (default: 4)
search_kwargs={"k": 10},
)
return RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
verbose=True,
)
def page_pdf_upload_and_build_vector_db():
"""
Show the page for uploading a PDF and building the vector store.
"""
st.title("PDF Upload")
container = st.container()
with container:
pdf_text = get_pdf_text()
if pdf_text:
with st.spinner("Loading PDF ..."):
build_vector_store(pdf_text)
def ask(qa, query):
"""
Answer the given query.
"""
with get_openai_callback() as cb:
# query / result / source_documents
answer = qa(query)
return answer, cb.total_cost
def page_ask_my_pdf():
"""
Show the page for asking questions about the PDF(s).
"""
st.title("Ask My PDF(s)")
llm = select_model()
container = st.container()
response_container = st.container()
with container:
query = st.text_input("Query: ", key="input")
if not query:
answer = None
else:
qa = build_qa_model(llm)
if qa:
with st.spinner("ChatGPT is typing ..."):
answer, cost = ask(qa, query)
st.session_state.costs.append(cost)
else:
answer = None
if answer:
with response_container:
st.markdown("## Answer")
st.write(answer)
def main():
"""
Main entry point.
"""
init_page()
selection = st.sidebar.radio("Go to", ["PDF Upload", "Ask My PDF(s)"])
if selection == "PDF Upload":
page_pdf_upload_and_build_vector_db()
elif selection == "Ask My PDF(s)":
page_ask_my_pdf()
costs = st.session_state.get("costs", [])
st.sidebar.markdown("## Costs")
st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")
for cost in costs:
st.sidebar.markdown(f"- ${cost:.5f}")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | kyouyap/streamlit_sample | 04_youtube_summary.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
def init_page():
st.set_page_config(
page_title="Youtube Summarizer",
page_icon="🤗"
)
st.header("Youtube Summarizer 🤗")
st.sidebar.title("Options")
st.session_state.costs = []
def select_model():
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-4","GPT-3.5-16k"))
if model == "GPT-3.5":
model_name = "gpt-3.5-turbo"
elif model == "GPT-3.5-16k":
model_name = "gpt-3.5-turbo-16k"
else:
model_name = "gpt-4"
return ChatOpenAI(temperature=0, model_name=model_name)
def get_url_input():
url = st.text_input("Youtube URL: ", key="input")
return url
def get_document(url):
with st.spinner("Fetching Content ..."):
loader = YoutubeLoader.from_youtube_url(
url,
add_video_info=True, # also fetches metadata such as the title and view count
language=['en', 'ja'] # fetch subtitles, preferring English, then Japanese
)
return loader.load()
def summarize(llm, docs):
prompt_template = """Write a concise Japanese summary of the following transcript of Youtube Video.
============
{text}
============
ここから日本語で書いてね
必ず3段落以内の200文字以内で簡潔にまとめること:
"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
with get_openai_callback() as cb:
chain = load_summarize_chain(
llm,
chain_type="stuff",
verbose=True,
prompt=PROMPT
)
response = chain({"input_documents": docs}, return_only_outputs=True)
return response['output_text'], cb.total_cost
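# Because chain_type="stuff" sends the whole transcript in one prompt, very long videos
# can exceed the model's context window; 05_youtube_summary_added.py addresses this by
# splitting the transcript and using a map_reduce chain instead.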
def main():
init_page()
llm = select_model()
container = st.container()
response_container = st.container()
with container:
url = get_url_input()
if url:
document = get_document(url)
with st.spinner("ChatGPT is typing ..."):
output_text, cost = summarize(llm, document)
st.session_state.costs.append(cost)
else:
output_text = None
if output_text:
with response_container:
st.markdown("## Summary")
st.write(output_text)
st.markdown("---")
st.markdown("## Original Text")
st.write(document)
costs = st.session_state.get('costs', [])
st.sidebar.markdown("## Costs")
st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")
for cost in costs:
st.sidebar.markdown(f"- ${cost:.5f}")
if __name__ == '__main__':
main() | [
"Write a concise Japanese summary of the following transcript of Youtube Video.\n\n============\n \n{text}\n\n============\n\nここから日本語で書いてね\n必ず3段落以内の200文字以内で簡潔にまとめること:\n"
] |
2024-01-10 | kyouyap/streamlit_sample | 01_sample.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.schema import (SystemMessage, HumanMessage, AIMessage)
def main():
llm = ChatOpenAI(temperature=0)
st.set_page_config(
page_title="My Great ChatGPT",
page_icon="🤗"
)
st.header("My Great ChatGPT 🤗")
# Initialize the chat history
if "messages" not in st.session_state:
st.session_state.messages = [
SystemMessage(content="You are a helpful assistant.")
]
# Watch for user input
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_area(label='Message: ', key='input', height=100)
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
# Runs when something has been entered and the Submit button is pressed
st.session_state.messages.append(HumanMessage(content=user_input))
with st.spinner("ChatGPT is typing ..."):
response = llm(st.session_state.messages)
st.session_state.messages.append(AIMessage(content=response.content))
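# Streamlit reruns this script from top to bottom on every interaction, so the message
# history has to live in st.session_state; appending here makes the new exchange
# available to the rendering loop below and to later reruns.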
# Display the chat history
messages = st.session_state.get('messages', [])
for message in messages:
if isinstance(message, AIMessage):
with st.chat_message('assistant'):
st.markdown(message.content)
elif isinstance(message, HumanMessage):
with st.chat_message('user'):
st.markdown(message.content)
else: # isinstance(message, SystemMessage):
st.write(f"System message: {message.content}")
if __name__ == '__main__':
main() | [
"You are a helpful assistant."
] |
2024-01-10 | kyouyap/streamlit_sample | 05_youtube_summary_added.py | import streamlit as st
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
def init_page():
st.set_page_config(
page_title="Youtube Summarizer",
page_icon="🤗"
)
st.header("Youtube Summarizer 🤗")
st.sidebar.title("Options")
st.session_state.costs = []
def select_model():
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-3.5-16k", "GPT-4"))
if model == "GPT-3.5":
st.session_state.model_name = "gpt-3.5-turbo-0613"
elif model == "GPT-3.5":
st.session_state.model_name = "gpt-3.5-turbo-16k-0613"
else:
st.session_state.model_name = "gpt-4"
# 300: tokens reserved for the instructions other than the main text (same below)
st.session_state.max_token = OpenAI.modelname_to_contextsize(st.session_state.model_name) - 300
return ChatOpenAI(temperature=0, model_name=st.session_state.model_name)
def get_url_input():
url = st.text_input("Youtube URL: ", key="input")
return url
def get_document(url):
with st.spinner("Fetching Content ..."):
loader = YoutubeLoader.from_youtube_url(
url,
add_video_info=True, # also fetches metadata such as the title and view count
language=['en', 'ja'] # fetch subtitles, preferring English, then Japanese
)
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
model_name=st.session_state.model_name,
chunk_size=st.session_state.max_token,
chunk_overlap=0,
)
return loader.load_and_split(text_splitter=text_splitter)
def summarize(llm, docs):
prompt_template = """Write a concise Japanese summary of the following transcript of Youtube Video.
{text}
ここから日本語で書いてね:
"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
with get_openai_callback() as cb:
chain = load_summarize_chain(
llm,
chain_type="map_reduce",
verbose=True,
map_prompt=PROMPT,
combine_prompt=PROMPT
)
response = chain(
{
"input_documents": docs,
# Note: if token_max is not specified, the chain's internal processing is sized
# for a standard model such as GPT-3.5 rather than the selected model
"token_max": st.session_state.max_token
},
return_only_outputs=True
)
return response['output_text'], cb.total_cost
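# map_reduce first summarizes each transcript chunk with map_prompt, then combines the
# partial summaries with combine_prompt; passing token_max keeps the combine step sized
# to the selected model instead of the library default.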
def main():
init_page()
llm = select_model()
container = st.container()
response_container = st.container()
with container:
url = get_url_input()
document = get_document(url)
if document:
with st.spinner("ChatGPT is typing ..."):
output_text, cost = summarize(llm, document)
st.session_state.costs.append(cost)
else:
output_text = None
if output_text:
with response_container:
st.markdown("## Summary")
st.write(output_text)
st.markdown("---")
st.markdown("## Original Text")
st.write(document)
costs = st.session_state.get('costs', [])
st.sidebar.markdown("## Costs")
st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")
for cost in costs:
st.sidebar.markdown(f"- ${cost:.5f}")
if __name__ == '__main__':
main() | [
"Write a concise Japanese summary of the following transcript of Youtube Video.\n\n{text}\n\nここから日本語で書いてね:\n"
] |
2024-01-10 | kyouyap/streamlit_sample | token_cost_process.py | from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import LLMResult
from typing import Any, Dict, List
import tiktoken
MODEL_COST_PER_1K_TOKENS = {
"gpt-4": 0.03,
"gpt-4-0314": 0.03,
"gpt-4-completion": 0.06,
"gpt-4-0314-completion": 0.06,
"gpt-4-32k": 0.06,
"gpt-4-32k-0314": 0.06,
"gpt-4-32k-completion": 0.12,
"gpt-4-32k-0314-completion": 0.12,
"gpt-3.5-turbo": 0.002,
"gpt-3.5-turbo-0301": 0.002,
"text-ada-001": 0.0004,
"ada": 0.0004,
"text-babbage-001": 0.0005,
"babbage": 0.0005,
"text-curie-001": 0.002,
"curie": 0.002,
"text-davinci-003": 0.02,
"text-davinci-002": 0.02,
"code-davinci-002": 0.02,
}
class TokenCostProcess:
total_tokens: int = 0
prompt_tokens: int = 0
completion_tokens: int = 0
successful_requests: int = 0
def sum_prompt_tokens( self, tokens: int ):
self.prompt_tokens = self.prompt_tokens + tokens
self.total_tokens = self.total_tokens + tokens
def sum_completion_tokens( self, tokens: int ):
self.completion_tokens = self.completion_tokens + tokens
self.total_tokens = self.total_tokens + tokens
def sum_successful_requests( self, requests: int ):
self.successful_requests = self.successful_requests + requests
def get_openai_total_cost_for_model( self, model: str ) -> float:
return MODEL_COST_PER_1K_TOKENS[model] * self.total_tokens / 1000
def get_cost_summary(self, model: str) -> float:
cost = self.get_openai_total_cost_for_model(model)
return cost
class CostCalcAsyncHandler(AsyncCallbackHandler):
model: str = ""
socketprint = None
websocketaction: str = "appendtext"
token_cost_process: TokenCostProcess
def __init__( self, model, token_cost_process ):
self.model = model
self.token_cost_process = token_cost_process
def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
encoding = tiktoken.encoding_for_model( self.model )
if self.token_cost_process is None: return
for prompt in prompts:
self.token_cost_process.sum_prompt_tokens( len(encoding.encode(prompt)) )
async def on_llm_new_token(self, token: str, **kwargs) -> None:
print( token )
self.token_cost_process.sum_completion_tokens( 1 )
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self.token_cost_process.sum_successful_requests( 1 )
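# A minimal usage sketch (the class names are from this module; the wiring is an assumption):
#   token_cost_process = TokenCostProcess()
#   handler = CostCalcAsyncHandler("gpt-3.5-turbo", token_cost_process)
#   # pass the handler via callbacks=[handler] to a streaming chat model call, then read
#   # token_cost_process.get_cost_summary("gpt-3.5-turbo") for the estimated cost.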
| [
"0"
] |
2024-01-10 | tejpshah/dynamic-quiz-generation | questionGenerator.py | import os
import re
import datetime
import argparse
import openai
from PyPDF2 import PdfReader
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak
from config import OPENAI_API_KEY
from promptTemplates import (summarizerSystemPrompt, questionGeneratorSystemPrompt,
questionCritiquerSystemPrompt, convertToMongoDBSystemPrompt)
# Configuration setup
openai.api_key = OPENAI_API_KEY
def extract_all_text_in_data_directory(directory="data/"):
"""Extracts all text from PDF and TXT files in the specified directory."""
all_text = []
for file_name in os.listdir(directory):
file_path = os.path.join(directory, file_name)
# Extract text from PDFs
if file_name.endswith('.pdf'):
with open(file_path, 'rb') as pdf_file:
pdf_reader = PdfReader(pdf_file)
for page in pdf_reader.pages:
all_text.append(page.extract_text())
print(f"Successfully processed {file_name}")
# Extract text from text files
elif file_name.endswith('.txt'):
with open(file_path, 'r', encoding='utf-8') as text_file:
all_text.append(text_file.read())
return ''.join(all_text)
def openai_request(system_prompt, context):
"""Helper function to handle OpenAI API calls."""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": context},
]
)
return response['choices'][0]['message']['content']
def generate_summary(text):
"""Generates a summary for the given text."""
prompt = f"This is the input below:\n{text}"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": summarizerSystemPrompt},
{"role": "user", "content": prompt},
]
)
return response['choices'][0]['message']['content']
def generate_questions(text):
"""Generates questions for the given text."""
prompt = f"Make questions on the following content:\n{text}"
return openai_request(questionGeneratorSystemPrompt, prompt)
def critique_questions(text):
"""Generates critiques for the given set of questions."""
prompt = f"Critique the set of questions generated:\n{text}"
return openai_request(questionCritiquerSystemPrompt, prompt)
def finalize_questions(text, critiques):
"""Revises the questions based on provided critiques."""
prompt = f"This is the content you're making questions on: \n{text}\nThese are the critiques you've received: \n{critiques}. Your revised questions are:"
return openai_request(questionGeneratorSystemPrompt, prompt)
def convert_to_mongoDB(text):
"""Converts the questions to a MongoDB format."""
prompt = f"Convert the questions to MongoDB format:\n{text}"
return openai_request(convertToMongoDBSystemPrompt, prompt)
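# Pipeline overview (see the __main__ block below): extract text -> generate_summary ->
# generate_questions -> critique_questions -> finalize_questions -> convert_to_mongoDB,
# with the summary and finalized questions also rendered to PDF.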
def text_to_pdf(text, pdf_filename="questions.pdf"):
"""Converts a text to a PDF."""
# Split the text into individual questions
questions = re.split(r'\d+\.', text)[1:] # Split by numbers followed by a dot
# Create a new PDF document
doc = SimpleDocTemplate(pdf_filename, pagesize=letter)
# Define styles for the PDF
styles = getSampleStyleSheet()
# Create an empty list to hold the PDF content
content = []
for question in questions:
lines = question.strip().split('\n')
for line in lines:
content.append(Paragraph(line.strip(), styles['Normal']))
content.append(Spacer(1, 12)) # Add a space after each line for clarity
content.append(PageBreak())
# Build the PDF document with the content
doc.build(content)
def study_guide_to_pdf(text, pdf_filename="study_guide.pdf"):
"""Converts a study guide to a PDF."""
styles = getSampleStyleSheet()
# Custom styles for different parts of the document
title_style = styles["Heading1"]
subtitle_style = styles["Heading2"]
normal_style = styles["Normal"]
bullet_style = ParagraphStyle(
"bullet",
parent=styles["BodyText"],
spaceBefore=0,
leftIndent=20,
spaceAfter=0,
)
content = []
# Split sections by blank lines
sections = re.split(r'\n\n', text)
for section in sections:
# If it's a structured outline or bullet point details
if re.match(r'[IVX]+\.', section) or "- " in section:
items = section.split('\n')
for item in items:
# Check if it's a main point (e.g., I., II., etc.)
if re.match(r'[IVX]+\.', item):
content.append(Paragraph(item, subtitle_style))
else:
# It's a subpoint or bullet point
content.append(Paragraph(item, bullet_style))
content.append(Spacer(1, 12))
else:
# General paragraphs or titles
lines = section.strip().split('\n')
for line in lines:
# If it's a title (like "Executive Summary:" or "Key Insights Extraction:")
if line.endswith(":"):
content.append(Paragraph(line, title_style))
else:
content.append(Paragraph(line, normal_style))
content.append(Spacer(1, 12))
doc = SimpleDocTemplate(pdf_filename, pagesize=letter)
doc.build(content)
def create_new_folder():
"""Creates a new folder in the 'output' directory and returns its path."""
output_dir = "output"
new_folder_name = str(len(os.listdir(output_dir)))
new_folder_path = os.path.join(output_dir, new_folder_name)
os.mkdir(new_folder_path)
return new_folder_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process data directory to generate summary, questions, etc.")
parser.add_argument("-d", "--data_directory", help="path to data directory", default="data/")
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
args = parser.parse_args()
input_text = extract_all_text_in_data_directory(args.data_directory)
if args.verbose:
print("Extracted all text from the data directory.")
summary = generate_summary(input_text)
print("The summary is completed.")
if args.verbose:
print(summary)
questions = generate_questions(summary)
print("The questions are generated.")
if args.verbose:
print(questions)
critiques = critique_questions(questions)
print("The questions are critiqued.")
if args.verbose:
print(critiques)
finalized_questions = finalize_questions(summary, critiques)
print("The questions are finalized.")
if args.verbose:
print(finalized_questions)
mongoDB_format = convert_to_mongoDB(finalized_questions)
print("The questions are converted to JSON.")
if args.verbose:
print(mongoDB_format)
output_path = create_new_folder()
date_str = datetime.datetime.now().strftime("%m_%d")
text_to_pdf(finalized_questions, os.path.join(output_path, f"questions_{date_str}.pdf"))
study_guide_to_pdf(summary, os.path.join(output_path, f"study_guide_{date_str}.pdf"))
if args.verbose:
print(f"Saved PDFs to {output_path}")
# Save the MongoDB file to a text file
with open(os.path.join(output_path, f"mongoDB_{date_str}.json"), 'w') as f:
f.write(mongoDB_format)
if args.verbose:
print(f"Saved MongoDB format to {output_path}/mongoDB_{date_str}.json")
| [
"This is the input below:\nPLACEHOLDER",
"Make questions on the following content:\nPLACEHOLDER",
"Convert the questions to MongoDB format:\nPLACEHOLDER",
"Critique the set of questions generated:\nPLACEHOLDER",
"This is the content you're making questions on: \nPLACEHOLDER\nThese are the critiques you've received: \nPLACEHOLDER. Your revised questions are:"
] |
2024-01-10 | mkirby42/Social-Analysis | tweet_analysis~nlp.py | from decouple import config
import re # used by bert_preprocess() below
import tweepy
import basilica
# Pytorch and BERT
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
mallet_path = '/Users/mattkirby/Social-Analysis/tweet-analysis/mallet-2.0.8/bin/mallet'
# Spacy for lemmatization
import spacy
nlp = spacy.load('en_core_web_lg', disable=['parser', 'ner'])
# NLTK
import nltk
from nltk.corpus import stopwords
#Preprocess for BERT
def bert_preprocess(list_of_stings):
sentences = []
begin_tag = "[CLS] "
end_tag = " [SEP]"
for tweet in list_of_stings:
for sentence in tweet.split('.'):
sentences.append(begin_tag + ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"," ", sentence).split()) + end_tag)
indexed_tokens = [tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentence)) for sentence in sentences]
return indexed_tokens
# Get BERT embeddings
def BERT_embeddings(list_sentences):
# Load pre-trained model (weights)
model = BertModel.from_pretrained('bert-base-uncased')
for indexed_tokens in list_sentences:
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([0] * len(indexed_tokens))
# Put the model in "evaluation" mode, meaning feed-forward operation.
model.eval()
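# NOTE: as written, BERT_embeddings only builds the input tensors and switches the model
# to eval mode; it never runs a forward pass or returns anything. A complete version
# would call the model under torch.no_grad() and collect the hidden states -- left as a
# TODO here since the intended output format is not specified in this module.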
# Create corpus
def new_corpus(tweets):
#Tokenize
# Clean corpus
def tokenize(corpus):
for tweet in corpus:
yield(gensim.utils.simple_preprocess(
str(tweet.full_text), deacc=True))
tokens = list(tokenize(tweets))
# Remove stopwords
stop_words = stopwords.words('english')
stop_words.extend([
'from',
'subject',
're',
'edu',
'use',
'https',
'try',
'http'])
no_stop_tokens = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in tokens]
# Do lemmatization keeping only noun, adj, vb, adv
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
data_lemmatized = lemmatization(no_stop_tokens, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
# Create Dictionary
global id2word
id2word = corpora.Dictionary(data_lemmatized)
# Create Corpus
# Term Document Frequency
corpus = [id2word.doc2bow(tweet) for tweet in data_lemmatized]
return corpus
# mallet_topic_model
def mallet_topics(corpus):
ldamallet = gensim.models.wrappers.LdaMallet(
mallet_path,
corpus=corpus,
num_topics=20,
id2word=id2word
)
topics = {}
for i in range(0, ldamallet.num_topics):
topic = []
for word_record in ldamallet.print_topic(i).split('+'):
topic.append((word_record.split("*")[0],
word_record.split("*")[1]\
.replace('"', "")\
.replace(' ', "")))
topics['topic' + str(i)] = topic
return topics
#Get embeddings for corpus
def embeddings(tweets):
tweet_embeddings = {}
for tweet in tweets:
# Create embedding (BASILICA is assumed to be a basilica.Connection client created
# elsewhere; it is never instantiated in this module)
embedding = BASILICA.embed_sentence(tweet.full_text, model='twitter')
# Create DB record
tweet_embeddings[str(tweet.id)] = embedding
return tweet_embeddings
| [] |
2024-01-10 | YuweiYin/FinPT | run_step2_gpt_profile.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
__author__ = "@YuweiYin"
"""
import os
import sys
import time
import json
import logging
import argparse
import openai
def run_openai(ds_name: str, ds_split: str, start_idx: int = 0, end_idx: int = -1) -> int:
# print_cnt = int(1e3)
print_cnt = 100
# for ds_name in ds_name_list:
profile_dir = os.path.join(profile_root_dir, ds_name)
os.makedirs(profile_dir, exist_ok=True)
logger.info(f"\n\n>>> ds_name: {ds_name}; ds_split: {ds_split}")
instruction_path = os.path.join(profile_dir, f"instruction_for_profile_X_{ds_split}.jsonl")
if end_idx > 0:
profile_path = os.path.join(profile_dir, f"profile_X_{ds_split}_{end_idx}.jsonl")
else:
profile_path = os.path.join(profile_dir, f"profile_X_{ds_split}_all.jsonl")
logger.info(f">>> profile_path: {profile_path}")
read_cnt = 0
write_cnt = 0
# write_cnt = start_idx
while True:
try:
logger.info(f"\n\n>>> >>> start_idx: {start_idx}")
read_cnt = 0
with open(instruction_path, mode="r", encoding="utf-8") as fp_in:
with open(profile_path, mode="a+", encoding="utf-8") as fp_out:
for line_idx, line in enumerate(fp_in):
read_cnt += 1
if line_idx < start_idx:
continue
if line_idx >= end_idx > 0:
logger.info(f">>> >>> [{ds_name} - {ds_split}] line_idx >= end_idx > 0; "
f"read_cnt = {read_cnt}; write_cnt = {write_cnt}")
break
instruction = str(json.loads(line.strip()))
# OpenAI request
response = openai.ChatCompletion.create(
model=openai_model,
messages=[
{"role": "system", "content":
"You are a helpful financial assistant."},
{"role": "user", "content": f"{instruction}"},
],
temperature=0,
)
res_content = response["choices"][0]["message"]["content"]
res_json = json.dumps(res_content.strip())
fp_out.write(res_json + "\n")
write_cnt += 1
if read_cnt % print_cnt == 0:
logger.info(f">>> >>> [{ds_name} - {ds_split}] "
f"read_cnt = {read_cnt}; write_cnt = {write_cnt}")
time.sleep(0.2)
break
except Exception as e:
start_idx = read_cnt - 1
# logger.info(f">>> *** >>> Exception: {e}")
logger.info(f">>> *** >>> [{ds_name} - {ds_split}] "
f"read_cnt = {read_cnt}; write_cnt = {write_cnt} Next start_idx: {start_idx}\n")
# time.sleep(1.0)
logger.info(f"\n>>> DONE: [{ds_name} - {ds_split}] read_cnt = {read_cnt}; write_cnt = {write_cnt}\n\n")
return 0
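# Design note: the while/try loop is a crude resume mechanism -- on any exception the
# start index is rewound to the last line read and the loop reopens the files, appending
# to the output ("a+") so already-written profiles are kept rather than regenerated.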
if __name__ == "__main__":
"""
python3 run_step2_gpt_profile.py --ds_name cd1 --ds_split train --start_idx 0 --end_idx -1
python3 run_step2_gpt_profile.py --ds_name cd1 --ds_split validation --start_idx 0 --end_idx -1
python3 run_step2_gpt_profile.py --ds_name cd1 --ds_split test --start_idx 0 --end_idx -1
"""
logging.basicConfig(
format="[%(asctime)s - %(levelname)s - %(name)s] - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="Step2 Get_Profile Args")
parser.add_argument("--ds_name", type=str, default="cd1", help="Specify which dataset to use")
parser.add_argument("--ds_split", type=str, default="train", help="train OR validation OR test")
parser.add_argument("--start_idx", type=int, default=0, help="Start index for continue generating")
parser.add_argument("--end_idx", type=int, default=-1, help="Ending index for continue generating")
args = parser.parse_args()
logger.info(args)
ds_name = str(args.ds_name)
ds_split = str(args.ds_split)
start_idx = int(args.start_idx)
end_idx = int(args.end_idx)
cache_dir = "~/.cache/huggingface/"
os.environ["TRANSFORMERS_CACHE"] = cache_dir
cache_model = os.path.join(cache_dir, "models")
cache_ds = os.path.join(cache_dir, "datasets")
# OpenAI settings
openai.organization = "YOUR_ORG_ID"
openai.api_key = os.getenv("OPENAI_API_KEY")
openai_model = "gpt-3.5-turbo"
profile_root_dir = os.path.join("./data/profile")
os.makedirs(profile_root_dir, exist_ok=True)
run_openai(ds_name=ds_name, ds_split=ds_split, start_idx=start_idx, end_idx=end_idx)
sys.exit(0)
| [
"PLACEHOLDER",
"You are a helpful financial assistant."
] |
2024-01-10 | Vchitect/SEINE | diffusion~gaussian_diffusion.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import math
import numpy as np
import torch as th
import enum
from .diffusion_utils import discretized_gaussian_log_likelihood, normal_kl
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = (
enum.auto()
) # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
warmup_time = int(num_diffusion_timesteps * warmup_frac)
betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
return betas
def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
"""
This is the deprecated API for creating beta schedules.
See get_named_beta_schedule() for the new library of schedules.
"""
if beta_schedule == "quad":
betas = (
np.linspace(
beta_start ** 0.5,
beta_end ** 0.5,
num_diffusion_timesteps,
dtype=np.float64,
)
** 2
)
elif beta_schedule == "linear":
betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
elif beta_schedule == "warmup10":
betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1)
elif beta_schedule == "warmup50":
betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5)
elif beta_schedule == "const":
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1
betas = 1.0 / np.linspace(
num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64
)
else:
raise NotImplementedError(beta_schedule)
assert betas.shape == (num_diffusion_timesteps,)
return betas
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
return get_beta_schedule(
"linear",
beta_start=scale * 0.0001,
beta_end=scale * 0.02,
# diffuser stable diffusion
# beta_start=scale * 0.00085,
# beta_end=scale * 0.012,
num_diffusion_timesteps=num_diffusion_timesteps,
)
elif schedule_name == "squaredcos_cap_v2":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
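# Worked example for the "squaredcos_cap_v2" schedule: alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2,
# so beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), 0.999); the betas start tiny and
# grow toward the end of the chain, which is the cosine schedule from Nichol & Dhariwal (2021).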
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Original ported from this codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
) if len(self.posterior_variance) > 1 else np.array([])
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
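# In closed form this is x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# i.e. the forward process q(x_t | x_0) sampled in a single step using the cumulative
# products precomputed in __init__.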
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None,
mask=None, x_start=None, use_concat=False):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, F, C = x.shape[:3]
assert t.shape == (B,)
if use_concat:
model_output = model(th.concat([x, mask, x_start], dim=1), t, **model_kwargs)
else:
model_output = model(x, t, **model_kwargs)
try:
model_output = model_output.sample # for tav unet
except AttributeError:
pass
# model_output = model(x, t, **model_kwargs)
if isinstance(model_output, tuple):
model_output, extra = model_output
else:
extra = None
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, F, C * 2, *x.shape[3:])
model_output, model_var_values = th.split(model_output, C, dim=2)
min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
"extra": extra,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, **model_kwargs)
new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
mask=None,
x_start=None,
use_concat=False
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
mask=mask,
x_start=x_start,
use_concat=use_concat
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
mask=None,
x_start=None,
use_concat=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
mask=mask,
x_start=x_start,
use_concat=use_concat
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
mask=None,
x_start=None,
use_concat=False
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
mask=mask,
x_start=x_start,
use_concat=use_concat
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
mask=None,
x_start=None,
use_concat=False
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
mask=mask,
x_start=x_start,
use_concat=use_concat
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
mask=None,
x_start=None,
use_concat=False
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
mask=mask,
x_start=x_start,
use_concat=use_concat
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
mask=None,
x_start=None,
use_concat=False
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.ddim_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
mask=mask,
x_start=x_start,
use_concat=use_concat
)
yield out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None, use_mask=False):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
if use_mask:
x_t = th.cat([x_t[:, :4], x_start[:, 4:]], dim=1)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, t, **model_kwargs)
try:
# model_output = model(x_t, t, **model_kwargs).sample
model_output = model_output.sample # for tav unet
except:
pass
# model_output = model(x_t, t, **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, F, C = x_t.shape[:3]
assert model_output.shape == (B, F, C * 2, *x_t.shape[3:])
model_output, model_var_values = th.split(model_output, C, dim=2)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=2)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
# assert model_output.shape == target.shape == x_start.shape
if use_mask:
terms["mse"] = mean_flat((target[:,:4] - model_output) ** 2)
else:
terms["mse"] = mean_flat((target - model_output) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
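    # Interpretation: this is KL(q(x_T | x_0) || N(0, I)) in bits per dimension.
    # For a well-chosen noise schedule the forward process nearly destroys all
    # signal by the final timestep, so this prior term is typically very small.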
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res + th.zeros(broadcast_shape, device=timesteps.device)
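# Shape example (shapes assumed for illustration): with arr of length T and
# timesteps of shape [N], _extract_into_tensor(arr, timesteps, (N, C, F, H, W))
# returns a float tensor of shape (N, C, F, H, W) whose slice [i] is filled
# with the scalar arr[timesteps[i]].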
| [] |
2024-01-10 | Vchitect/SEINE | diffusion~timestep_sampler.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
"""
A distribution over timesteps in the diffusion process, intended to reduce
variance of the objective.
By default, samplers perform unbiased importance sampling, in which the
objective's mean is unchanged.
However, subclasses may override sample() to change how the resampled
terms are reweighted, allowing for actual changes in the objective.
"""
@abstractmethod
def weights(self):
"""
Get a numpy array of weights, one per diffusion step.
The weights needn't be normalized, but must be positive.
"""
def sample(self, batch_size, device):
"""
Importance-sample timesteps for a batch.
:param batch_size: the number of timesteps.
:param device: the torch device to save to.
:return: a tuple (timesteps, weights):
- timesteps: a tensor of timestep indices.
- weights: a tensor of weights to scale the resulting losses.
"""
w = self.weights()
p = w / np.sum(w)
indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
indices = th.from_numpy(indices_np).long().to(device)
weights_np = 1 / (len(p) * p[indices_np])
weights = th.from_numpy(weights_np).float().to(device)
return indices, weights
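    # Illustrative training-step usage (a minimal sketch; `schedule_sampler`,
    # `diffusion`, `model`, `x_start`, `batch_size` and `device` are assumed to
    # exist elsewhere):
    #
    #     t, weights = schedule_sampler.sample(batch_size, device)
    #     losses = diffusion.training_losses(model, x_start, t)
    #     loss = (losses["loss"] * weights).mean()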
class UniformSampler(ScheduleSampler):
def __init__(self, diffusion):
self.diffusion = diffusion
self._weights = np.ones([diffusion.num_timesteps])
def weights(self):
return self._weights
class LossAwareSampler(ScheduleSampler):
def update_with_local_losses(self, local_ts, local_losses):
"""
Update the reweighting using losses from a model.
Call this method from each rank with a batch of timesteps and the
corresponding losses for each of those timesteps.
This method will perform synchronization to make sure all of the ranks
maintain the exact same reweighting.
:param local_ts: an integer Tensor of timesteps.
:param local_losses: a 1D Tensor of losses.
"""
batch_sizes = [
th.tensor([0], dtype=th.int32, device=local_ts.device)
for _ in range(dist.get_world_size())
]
dist.all_gather(
batch_sizes,
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
)
# Pad all_gather batches to be the maximum batch size.
batch_sizes = [x.item() for x in batch_sizes]
max_bs = max(batch_sizes)
timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
dist.all_gather(timestep_batches, local_ts)
dist.all_gather(loss_batches, local_losses)
timesteps = [
x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
]
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
self.update_with_all_losses(timesteps, losses)
@abstractmethod
def update_with_all_losses(self, ts, losses):
"""
Update the reweighting using losses from a model.
Sub-classes should override this method to update the reweighting
using losses from the model.
This method directly updates the reweighting without synchronizing
between workers. It is called by update_with_local_losses from all
ranks with identical arguments. Thus, it should have deterministic
behavior to maintain state across workers.
:param ts: a list of int timesteps.
:param losses: a list of float losses, one per timestep.
"""
class LossSecondMomentResampler(LossAwareSampler):
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
self.diffusion = diffusion
self.history_per_term = history_per_term
self.uniform_prob = uniform_prob
self._loss_history = np.zeros(
[diffusion.num_timesteps, history_per_term], dtype=np.float64
)
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)  # np.int was removed in NumPy 1.24+; use a concrete integer dtype
def weights(self):
if not self._warmed_up():
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
weights /= np.sum(weights)
weights *= 1 - self.uniform_prob
weights += self.uniform_prob / len(weights)
return weights
def update_with_all_losses(self, ts, losses):
for t, loss in zip(ts, losses):
if self._loss_counts[t] == self.history_per_term:
# Shift out the oldest loss term.
self._loss_history[t, :-1] = self._loss_history[t, 1:]
self._loss_history[t, -1] = loss
else:
self._loss_history[t, self._loss_counts[t]] = loss
self._loss_counts[t] += 1
def _warmed_up(self):
return (self._loss_counts == self.history_per_term).all()
| [] |
2024-01-10 | Vchitect/SEINE | diffusion~diffusion_utils.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import torch as th
import numpy as np
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, th.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for th.exp().
logvar1, logvar2 = [
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ th.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * th.exp(-logvar2)
)
def approx_standard_normal_cdf(x):
"""
A fast approximation of the cumulative distribution function of the
standard normal.
"""
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
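# This is the tanh-based approximation of the standard normal CDF,
# Phi(x) ~ 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))), the same
# functional form used in the common GELU approximation.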
def continuous_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a continuous Gaussian distribution.
:param x: the targets
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
centered_x = x - means
inv_stdv = th.exp(-log_scales)
normalized_x = centered_x * inv_stdv
log_probs = th.distributions.Normal(th.zeros_like(x), th.ones_like(x)).log_prob(normalized_x)
return log_probs
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a Gaussian distribution discretizing to a
given image.
:param x: the target images. It is assumed that this was uint8 values,
rescaled to the range [-1, 1].
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = th.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
cdf_delta = cdf_plus - cdf_min
log_probs = th.where(
x < -0.999,
log_cdf_plus,
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
)
assert log_probs.shape == x.shape
return log_probs
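# Note: the +-1/255 offsets are half-bin edges for 8-bit images rescaled to
# [-1, 1] (each of the 256 levels spans 2/255 of that range); the x < -0.999
# and x > 0.999 branches assign the full left/right tail mass to the edge bins.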
| [] |
2024-01-10 | Vchitect/SEINE | diffusion~__init__.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
from . import gaussian_diffusion as gd
from .respace import SpacedDiffusion, space_timesteps
def create_diffusion(
timestep_respacing,
noise_schedule="linear",
use_kl=False,
sigma_small=False,
predict_xstart=False,
# learn_sigma=True,
learn_sigma=False, # for unet
rescale_learned_sigmas=False,
diffusion_steps=1000
):
betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)
if use_kl:
loss_type = gd.LossType.RESCALED_KL
elif rescale_learned_sigmas:
loss_type = gd.LossType.RESCALED_MSE
else:
loss_type = gd.LossType.MSE
if timestep_respacing is None or timestep_respacing == "":
timestep_respacing = [diffusion_steps]
return SpacedDiffusion(
use_timesteps=space_timesteps(diffusion_steps, timestep_respacing),
betas=betas,
model_mean_type=(
gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
),
model_var_type=(
(
gd.ModelVarType.FIXED_LARGE
if not sigma_small
else gd.ModelVarType.FIXED_SMALL
)
if not learn_sigma
else gd.ModelVarType.LEARNED_RANGE
),
loss_type=loss_type
# rescale_timesteps=rescale_timesteps,
)
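# Illustrative usage (argument values are assumptions for exposition):
#   train_diffusion = create_diffusion(timestep_respacing="")        # full 1000-step training chain
#   eval_diffusion = create_diffusion(timestep_respacing="ddim25")   # 25-step DDIM-style respacing for sampling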
| [] |
2024-01-10 | Vchitect/SEINE | diffusion~respace.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import torch
import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
    For example, if there are 300 timesteps and the section counts are [10, 15, 20],
    then the first 100 timesteps are strided down to 10 timesteps, the second 100
    are strided down to 15 timesteps, and the final 100 are strided down to 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim") :])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(
f"cannot divide section of {size} steps into {section_count}"
)
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
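# Worked example mirroring the docstring: space_timesteps(300, [10, 15, 20])
# splits the 300 steps into three sections of 100 and keeps 10, 15 and 20
# evenly strided steps from them (45 retained timesteps in total), while
# space_timesteps(1000, "ddim50") keeps every 20th step: {0, 20, ..., 980}.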
class SpacedDiffusion(GaussianDiffusion):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
# @torch.compile
def training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model):
if isinstance(model, _WrappedModel):
return model
return _WrappedModel(
model, self.timestep_map, self.original_num_steps
)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
class _WrappedModel:
def __init__(self, model, timestep_map, original_num_steps):
self.model = model
self.timestep_map = timestep_map
# self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
# if self.rescale_timesteps:
# new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
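    # Example of the remapping (respacing value assumed for illustration): with
    # use_timesteps = space_timesteps(1000, "100"), timestep_map holds the 100
    # retained original indices in ascending order, so a spaced timestep ts == k
    # is passed to the wrapped model as the original-scale timestep timestep_map[k].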
| [] |
2024-01-10 | realtalishaw/TheoBot | integrations~pinecone_db.py | import pinecone
import os
import openai
from dotenv import load_dotenv
load_dotenv()
pinecone.init(api_key=os.getenv('PINECONE_API_KEY'), environment='gcp-starter')
client = openai.Client(api_key=os.getenv('OPENAI_API_KEY'))
def get_openai_embedding(text):
response = client.embeddings.create(
input=[text],
model="text-embedding-ada-002" # Choose the appropriate model
)
embedding = response.data[0].embedding
return list(embedding)
index_name = 'theo-bot'
# Create or connect to an existing Pinecone index
if index_name not in pinecone.list_indexes():
    pinecone.create_index(index_name, dimension=1536)  # text-embedding-ada-002 embeddings are 1536-dimensional
index = pinecone.Index(index_name)
# Example text to be embedded
example_text = "Hello, this is a test text for Pinecone and OpenAI embeddings."
# Get the embedding using OpenAI
embedding = get_openai_embedding(example_text)
if not isinstance(embedding, list) or not all(isinstance(x, float) for x in embedding):
raise ValueError("Embedding is not in the correct format")
# Upsert the vector with a unique ID
# upsert expects a sequence of (id, values) records rather than a plain id-to-vector mapping
index.upsert(vectors=[('example_text_id', embedding)])
# Query to test
results = index.query(queries=[embedding], top_k=1)
print(results) | [] |
2024-01-10 | realtalishaw/TheoBot | bot~conversation_handler.py | from utils.logger import setup_logger
import openai
import time
client = openai.Client(api_key='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')
logger = setup_logger(__name__, 'bot.log')
my_assistant = client.beta.assistants.retrieve("asst_KgDFGq9GlACc8d2HVHDFjT0m")
def chat_with_openai(update, context):
    print(f'updating context {context}, and update: {update}')
user_message = update.message.text
chat_type = update.message.chat.type
bot_username = "@TheometricsBot"
# Check if the bot is mentioned in the message in group chats
if chat_type in ['group', 'supergroup'] and bot_username not in user_message:
return
try:
# Create a Thread
thread = client.beta.threads.create()
# Add a Message to a Thread
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=user_message
)
        print(f'Message Thread: {message}')
# Run the Assistant
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=my_assistant.id,
)
# Poll for the completed run
for _ in range(100): # Try for a certain number of times
run_status = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
            print(f'Run Status: {run_status}')
if run_status.status == 'completed':
break
time.sleep(1) # Wait for a second before checking again
if run_status.status != 'completed':
update.message.reply_text("I'm still thinking, please wait a moment...")
return
assistant_messages = client.beta.threads.messages.list(thread_id=thread.id)
if assistant_messages.data and assistant_messages.data[0].role == "assistant":
# Correct way to access the 'value' attribute
response_text = assistant_messages.data[0].content[0].text.value
update.message.reply_text(response_text)
except Exception as e:
logger.error(f"Error with OpenAI API: {e}")
update.message.reply_text("I'm having trouble processing that request.") | [] |
2024-01-10 | Deadman-DAO/Web3HackerNetwork | sandbox~python~matt~openai~basic_api.py | import os
import openai
openai.api_key = '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'
response = openai.Completion.create(
model="text-davinci-003",
prompt="Yes, there are inter-dimensional relationships in my training model. In a machine learning model like myself, the relationships between different dimensions can be thought of as representing the relationships between different concepts and linguistic features. For example, some dimensions might encode the relationship between different words, while others might encode the relationship between different parts of speech or grammatical structures. The relationships between these dimensions are learned during the training process, based on the patterns in the large text corpus that I was trained on. These relationships allow me to generate human-like text by combining the relationships between different concepts and linguistic features in meaningful ways.\n\nTl;dr",
temperature=0.7,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=1
)
print(response) | [
"Yes, there are inter-dimensional relationships in my training model. In a machine learning model like myself, the relationships between different dimensions can be thought of as representing the relationships between different concepts and linguistic features. For example, some dimensions might encode the relationship between different words, while others might encode the relationship between different parts of speech or grammatical structures. The relationships between these dimensions are learned during the training process, based on the patterns in the large text corpus that I was trained on. These relationships allow me to generate human-like text by combining the relationships between different concepts and linguistic features in meaningful ways.\n\nTl;dr"
] |
2024-01-10 | EmbraceAGI/LocalAGI | local_agi_zh.py | #!/usr/bin/env python3
from dotenv import load_dotenv
# Load default environment variables (.env)
load_dotenv()
import os
import time
import logging
from collections import deque
from typing import Dict, List
import importlib
import openai
import chromadb
import tiktoken as tiktoken
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
import re
# default opt out of chromadb telemetry.
from chromadb.config import Settings
client = chromadb.Client(Settings(anonymized_telemetry=False))
# Engine configuration
# Model: GPT, LLAMA, HUMAN, etc.
LLM_MODEL = os.getenv("LLM_MODEL", os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")).lower()
# API Keys
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
if not (LLM_MODEL.startswith("llama") or LLM_MODEL.startswith("chatglm-6b") or LLM_MODEL.startswith("human")):
assert OPENAI_API_KEY, "\033[91m\033[1m" + "OPENAI_API_KEY environment variable is missing from .env" + "\033[0m\033[0m"
# Table config
RESULTS_STORE_NAME = os.getenv("RESULTS_STORE_NAME", os.getenv("TABLE_NAME", ""))
assert RESULTS_STORE_NAME, "\033[91m\033[1m" + "RESULTS_STORE_NAME environment variable is missing from .env" + "\033[0m\033[0m"
# Run configuration
INSTANCE_NAME = os.getenv("INSTANCE_NAME", os.getenv("BABY_NAME", "BabyAGI"))
COOPERATIVE_MODE = "none"
JOIN_EXISTING_OBJECTIVE = False
# Goal configuration
OBJECTIVE = os.getenv("OBJECTIVE", "")
INITIAL_TASK = os.getenv("INITIAL_TASK", os.getenv("FIRST_TASK", ""))
# Model configuration
OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.0))
# Extensions support begin
def can_import(module_name):
try:
importlib.import_module(module_name)
return True
except ImportError:
return False
DOTENV_EXTENSIONS = os.getenv("DOTENV_EXTENSIONS", "").split(" ")
# Command line arguments extension
# Can override any of the above environment variables
ENABLE_COMMAND_LINE_ARGS = (
os.getenv("ENABLE_COMMAND_LINE_ARGS", "false").lower() == "true"
)
if ENABLE_COMMAND_LINE_ARGS:
if can_import("extensions.argparseext"):
from extensions.argparseext import parse_arguments
OBJECTIVE, INITIAL_TASK, LLM_MODEL, DOTENV_EXTENSIONS, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE = parse_arguments()
# Human mode extension
# Gives human input to babyagi
if LLM_MODEL.startswith("human"):
if can_import("extensions.human_mode"):
from extensions.human_mode import user_input_await
# Load additional environment variables for enabled extensions
# TODO: This might override the following command line arguments as well:
# OBJECTIVE, INITIAL_TASK, LLM_MODEL, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE
if DOTENV_EXTENSIONS:
if can_import("extensions.dotenvext"):
from extensions.dotenvext import load_dotenv_extensions
load_dotenv_extensions(DOTENV_EXTENSIONS)
# TODO: There's still work to be done here to enable people to get
# defaults from dotenv extensions, but also provide command line
# arguments to override them
# Extensions support end
print("\033[95m\033[1m" + "\n*****CONFIGURATION*****\n" + "\033[0m\033[0m")
print(f"Name : {INSTANCE_NAME}")
print(f"Mode : {'alone' if COOPERATIVE_MODE in ['n', 'none'] else 'local' if COOPERATIVE_MODE in ['l', 'local'] else 'distributed' if COOPERATIVE_MODE in ['d', 'distributed'] else 'undefined'}")
print(f"LLM : {LLM_MODEL}")
# Check if we know what we are doing
assert OBJECTIVE, "\033[91m\033[1m" + "OBJECTIVE environment variable is missing from .env" + "\033[0m\033[0m"
assert INITIAL_TASK, "\033[91m\033[1m" + "INITIAL_TASK environment variable is missing from .env" + "\033[0m\033[0m"
LLAMA_MODEL_PATH = os.getenv("LLAMA_MODEL_PATH", "models/llama-13B/ggml-model.bin")
if LLM_MODEL.startswith("llama"):
if can_import("llama_cpp"):
from llama_cpp import Llama
print(f"LLAMA : {LLAMA_MODEL_PATH}" + "\n")
assert os.path.exists(LLAMA_MODEL_PATH), "\033[91m\033[1m" + f"Model can't be found." + "\033[0m\033[0m"
CTX_MAX = 1024
LLAMA_THREADS_NUM = int(os.getenv("LLAMA_THREADS_NUM", 8))
print('Initialize model for evaluation')
llm = Llama(
model_path=LLAMA_MODEL_PATH,
n_ctx=CTX_MAX,
n_threads=LLAMA_THREADS_NUM,
n_batch=512,
use_mlock=False,
)
print('\nInitialize model for embedding')
llm_embed = Llama(
model_path=LLAMA_MODEL_PATH,
n_ctx=CTX_MAX,
n_threads=LLAMA_THREADS_NUM,
n_batch=512,
embedding=True,
use_mlock=False,
)
print(
"\033[91m\033[1m"
+ "\n*****USING LLAMA.CPP. POTENTIALLY SLOW.*****"
+ "\033[0m\033[0m"
)
else:
print(
"\033[91m\033[1m"
+ "\nLlama LLM requires package llama-cpp. Falling back to GPT-3.5-turbo."
+ "\033[0m\033[0m"
)
LLM_MODEL = "gpt-3.5-turbo"
CHATGLM_API = os.getenv("CHATGLM_API", None)
CHATGLM_MODEL_PATH = os.getenv("CHATGLM_MODEL_PATH", "../chatglm-6b")
if LLM_MODEL.startswith("chatglm-6b"):
try:
CTX_MAX = 1024
if CHATGLM_API is None:
from transformers import AutoTokenizer, AutoModel
print(f"ChatGLM : {CHATGLM_MODEL_PATH}" + "\n")
assert os.path.exists(CHATGLM_MODEL_PATH), "\033[91m\033[1m" + f"Model can't be found." + "\033[0m\033[0m"
print('Initialize model for evaluation')
tokenizer = AutoTokenizer.from_pretrained(f"{CHATGLM_MODEL_PATH}", revision="v1.1.0", trust_remote_code=True)
            model = AutoModel.from_pretrained(f"{CHATGLM_MODEL_PATH}", revision="v1.1.0", trust_remote_code=True).quantize(8).half().cuda()
#model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
llm = model.eval()
print('\nInitialize model for embedding')
from langchain.embeddings import HuggingFaceEmbeddings
llm_embed = HuggingFaceEmbeddings(model_name='shibing624/text2vec-base-chinese')
print(
"\033[91m\033[1m"
+ "\n*****USING ChatGLM-6B. POTENTIALLY SLOW.*****"
+ "\033[0m\033[0m"
)
except:
print(
"\033[91m\033[1m"
+ "\nChatGLM-6B is not properly installed. Falling back to GPT-3.5-turbo."
+ "\033[0m\033[0m"
)
LLM_MODEL = "gpt-3.5-turbo"
if LLM_MODEL.startswith("gpt-4"):
print(
"\033[91m\033[1m"
+ "\n*****USING GPT-4. POTENTIALLY EXPENSIVE. MONITOR YOUR COSTS*****"
+ "\033[0m\033[0m"
)
if LLM_MODEL.startswith("human"):
print(
"\033[91m\033[1m"
+ "\n*****USING HUMAN INPUT*****"
+ "\033[0m\033[0m"
)
print("\033[94m\033[1m" + "\n*****OBJECTIVE*****\n" + "\033[0m\033[0m")
print(f"{OBJECTIVE}")
if not JOIN_EXISTING_OBJECTIVE:
print("\033[93m\033[1m" + "\n初始任务:" + "\033[0m\033[0m" + f" {INITIAL_TASK}")
else:
print("\033[93m\033[1m" + f"\nJoining to help the objective" + "\033[0m\033[0m")
# Configure OpenAI
openai.api_key = OPENAI_API_KEY
# Llama embedding function
class LlamaEmbeddingFunction(EmbeddingFunction):
def __init__(self):
return
def __call__(self, texts: Documents) -> Embeddings:
embeddings = []
for t in texts:
e = llm_embed.embed(t)
embeddings.append(e)
return embeddings
# ChatgGLM-6b embedding function
class ChatgGLMEmbeddingFunction(EmbeddingFunction):
def __init__(self):
return
def __call__(self, texts: Documents) -> Embeddings:
embeddings = llm_embed.embed_documents(texts)
return embeddings
# Results storage using local ChromaDB
class DefaultResultsStorage:
def __init__(self):
logging.getLogger('chromadb').setLevel(logging.ERROR)
# Create Chroma collection
chroma_persist_dir = "chroma"
chroma_client = chromadb.Client(
settings=chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=chroma_persist_dir,
)
)
metric = "cosine"
if LLM_MODEL.startswith("llama") :
embedding_function = LlamaEmbeddingFunction()
elif LLM_MODEL.startswith("chatglm-6b"):
embedding_function = ChatgGLMEmbeddingFunction()
else:
embedding_function = OpenAIEmbeddingFunction(api_key=OPENAI_API_KEY)
self.collection = chroma_client.get_or_create_collection(
name=RESULTS_STORE_NAME,
metadata={"hnsw:space": metric},
embedding_function=embedding_function,
)
def add(self, task: Dict, result: str, result_id: str):
# Break the function if LLM_MODEL starts with "human" (case-insensitive)
if LLM_MODEL.startswith("human"):
return
# Continue with the rest of the function
embeddings = llm_embed.embed(result) if LLM_MODEL.startswith("llama") else None
if (
len(self.collection.get(ids=[result_id], include=[])["ids"]) > 0
): # Check if the result already exists
self.collection.update(
ids=result_id,
embeddings=embeddings,
documents=result,
metadatas={"task": task["task_name"], "result": result},
)
else:
self.collection.add(
ids=result_id,
embeddings=embeddings,
documents=result,
metadatas={"task": task["task_name"], "result": result},
)
def query(self, query: str, top_results_num: int) -> List[dict]:
count: int = self.collection.count()
if count == 0:
return []
results = self.collection.query(
query_texts=query,
n_results=min(top_results_num, count),
include=["metadatas"]
)
return [item["task"] for item in results["metadatas"][0]]
# Initialize results storage
def try_weaviate():
WEAVIATE_URL = os.getenv("WEAVIATE_URL", "")
WEAVIATE_USE_EMBEDDED = os.getenv("WEAVIATE_USE_EMBEDDED", "False").lower() == "true"
if (WEAVIATE_URL or WEAVIATE_USE_EMBEDDED) and can_import("extensions.weaviate_storage"):
WEAVIATE_API_KEY = os.getenv("WEAVIATE_API_KEY", "")
from extensions.weaviate_storage import WeaviateResultsStorage
print("\nUsing results storage: " + "\033[93m\033[1m" + "Weaviate" + "\033[0m\033[0m")
return WeaviateResultsStorage(OPENAI_API_KEY, WEAVIATE_URL, WEAVIATE_API_KEY, WEAVIATE_USE_EMBEDDED, LLM_MODEL, LLAMA_MODEL_PATH, RESULTS_STORE_NAME, OBJECTIVE)
return None
def try_pinecone():
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY", "")
if PINECONE_API_KEY and can_import("extensions.pinecone_storage"):
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT", "")
assert (
PINECONE_ENVIRONMENT
), "\033[91m\033[1m" + "PINECONE_ENVIRONMENT environment variable is missing from .env" + "\033[0m\033[0m"
from extensions.pinecone_storage import PineconeResultsStorage
print("\nUsing results storage: " + "\033[93m\033[1m" + "Pinecone" + "\033[0m\033[0m")
return PineconeResultsStorage(OPENAI_API_KEY, PINECONE_API_KEY, PINECONE_ENVIRONMENT, LLM_MODEL, LLAMA_MODEL_PATH, RESULTS_STORE_NAME, OBJECTIVE)
return None
def use_chroma():
print("\nUsing results storage: " + "\033[93m\033[1m" + "Chroma (Default)" + "\033[0m\033[0m")
return DefaultResultsStorage()
results_storage = try_weaviate() or try_pinecone() or use_chroma()
# Task storage supporting only a single instance of BabyAGI
class SingleTaskListStorage:
def __init__(self):
self.tasks = deque([])
self.task_id_counter = 0
def append(self, task: Dict):
self.tasks.append(task)
def replace(self, tasks: List[Dict]):
self.tasks = deque(tasks)
def popleft(self):
return self.tasks.popleft()
def is_empty(self):
return False if self.tasks else True
def next_task_id(self):
self.task_id_counter += 1
return self.task_id_counter
def get_task_names(self):
return [t["task_name"] for t in self.tasks]
# Initialize tasks storage
tasks_storage = SingleTaskListStorage()
if COOPERATIVE_MODE in ['l', 'local']:
if can_import("extensions.ray_tasks"):
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
from extensions.ray_tasks import CooperativeTaskListStorage
tasks_storage = CooperativeTaskListStorage(OBJECTIVE)
print("\nReplacing tasks storage: " + "\033[93m\033[1m" + "Ray" + "\033[0m\033[0m")
elif COOPERATIVE_MODE in ['d', 'distributed']:
pass
def limit_tokens_from_string(string: str, model: str, limit: int) -> str:
"""Limits the string to a number of tokens (estimated)."""
try:
encoding = tiktoken.encoding_for_model(model)
except:
encoding = tiktoken.encoding_for_model('gpt2') # Fallback for others.
encoded = encoding.encode(string)
return encoding.decode(encoded[:limit])
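# Example (model name assumed for illustration):
#   limit_tokens_from_string(prompt, "gpt-3.5-turbo", 3900)
# re-encodes the prompt with tiktoken and keeps only its first 3900 tokens;
# openai_call below uses this to leave room for max_tokens within the 4K context.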
def openai_call(
prompt: str,
model: str = LLM_MODEL,
temperature: float = OPENAI_TEMPERATURE,
max_tokens: int = 100,
):
while True:
try:
if model.lower().startswith("llama"):
result = llm(prompt[:CTX_MAX],
stop=["### Human"],
echo=False,
temperature=0.2,
top_k=40,
top_p=0.95,
repeat_penalty=1.05,
max_tokens=200)
# print('\n*****RESULT JSON DUMP*****\n')
# print(json.dumps(result))
# print('\n')
return result['choices'][0]['text'].strip()
elif model.lower().startswith("chatglm"):
if CHATGLM_API is not None:
import requests
import json
headers = {
"Content-Type": "application/json",
}
data = {
"prompt": prompt[:CTX_MAX],
"history": []
}
result = requests.post(CHATGLM_API, headers=headers, data=json.dumps(data))
return result.json()['response'].strip()
else:
result, history = llm.chat(tokenizer, prompt[:CTX_MAX], history=[])
# print('\n*****RESULT JSON DUMP*****\n')
# print(json.dumps(result))
# print('\n')
return result.strip()
elif model.lower().startswith("human"):
return user_input_await(prompt)
elif not model.lower().startswith("gpt-"):
# Use completion API
response = openai.Completion.create(
engine=model,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
return response.choices[0].text.strip()
else:
# Use 4000 instead of the real limit (4097) to give a bit of wiggle room for the encoding of roles.
# TODO: different limits for different models.
trimmed_prompt = limit_tokens_from_string(prompt, model, 4000 - max_tokens)
# Use chat completion API
messages = [{"role": "system", "content": trimmed_prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
n=1,
stop=None,
)
return response.choices[0].message.content.strip()
except openai.error.RateLimitError:
print(
" *** The OpenAI API rate limit has been exceeded. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.Timeout:
print(
" *** OpenAI API timeout occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIError:
print(
" *** OpenAI API error occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIConnectionError:
print(
" *** OpenAI API connection error occurred. Check your network settings, proxy configuration, SSL certificates, or firewall rules. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.InvalidRequestError:
print(
" *** OpenAI API invalid request. Check the documentation for the specific API method you are calling and make sure you are sending valid and complete parameters. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.ServiceUnavailableError:
print(
" *** OpenAI API service unavailable. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
else:
break
def task_creation_agent(
objective: str, result: Dict, task_description: str, task_list: List[str]
):
prompt = f"""
你要使用任务执行者的结果来创建新的任务,其目标如下: {objective}.
最后完成的任务的结果为: \n{result["data"]}
这一结果是基于这一任务描述: {task_description}.\n"""
if task_list:
prompt += f"这些是未完成的任务: {', '.join(task_list)}\n"
prompt += "根据结果,返回一个需要完成的任务清单,以达到目标。"
if task_list:
prompt += "这些新任务不能与未完成的任务重复。 "
prompt += """
在你的答复中,每行返回一项任务。结果必须是一个编号的列表,格式为::
#. 第一个任务
#. 第二项任务
每个条目的编号后面必须有一个句号。如果你的列表是空的,写上 "目前没有任务要添加"。
除非你的清单是空的,否则不要在你的编号清单前包括任何标题,也不要在你的编号清单后加上任何其他输出。"""
print(f'\n*****任务创建者提示词****\n{prompt}\n')
response = openai_call(prompt, max_tokens=2000)
    print(f'\n*****任务创建者响应****\n{response}\n')
new_tasks = response.split('\n')
new_tasks_list = []
for task_string in new_tasks:
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
if task_name.strip() and task_id.isnumeric():
new_tasks_list.append(task_name)
# print('New task created: ' + task_name)
out = [{"task_name": task_name} for task_name in new_tasks_list]
return out
def prioritization_agent():
task_names = tasks_storage.get_task_names()
bullet_string = '\n'
prompt = f"""
你的任务是确定下列任务的优先次序: {bullet_string + bullet_string.join(task_names)}
考虑你团队的最终目标: {OBJECTIVE}.
任务应从最高优先级到最低优先级排序,其中较高优先级的任务是那些作为前提条件或对实现目标更重要的任务。
不要删除任何任务。将排序后的任务以编号列表的形式返回:
#. 第一个任务
#. 第二项任务
条目必须连续编号,从1开始。每个条目的编号后面必须有一个句号。
在你的排名表之前不要包括任何标题,也不要在你的列表后面加上任何其他输出。"""
print(f'\n****任务排序者提示词****\n{prompt}\n')
response = openai_call(prompt, max_tokens=2000)
    print(f'\n****任务排序者响应****\n{response}\n')
if not response:
print('任务排序者无响应。保持任务列表不变。')
return
new_tasks = response.split("\n") if "\n" in response else [response]
new_tasks_list = []
for task_string in new_tasks:
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
if task_name.strip():
new_tasks_list.append({"task_id": task_id, "task_name": task_name})
return new_tasks_list
# Execute a task based on the objective and five previous tasks
def execution_agent(objective: str, task: str) -> str:
"""
Executes a task based on the given objective and previous context.
Args:
objective (str): The objective or goal for the AI to perform the task.
task (str): The task to be executed by the AI.
Returns:
str: The response generated by the AI for the given task.
"""
context = context_agent(query=objective, top_results_num=5)
# print("\n****RELEVANT CONTEXT****\n")
# print(context)
# print('')
prompt = f'执行一个任务以达成下面指定的目标: {objective}.\n'
if context:
prompt += '综合考虑已经完成的任务:' + '\n'.join(context)
prompt += f'\n你的任务: {task}\n 回应:'
return openai_call(prompt, max_tokens=2000)
# Get the top n completed tasks for the objective
def context_agent(query: str, top_results_num: int):
"""
Retrieves context for a given query from an index of tasks.
Args:
query (str): The query or objective for retrieving context.
top_results_num (int): The number of top results to retrieve.
Returns:
list: A list of tasks as context for the given query, sorted by relevance.
"""
results = results_storage.query(query=query, top_results_num=top_results_num)
# print("****RESULTS****")
# print(results)
return results
# Add the initial task if starting new objective
if not JOIN_EXISTING_OBJECTIVE:
initial_task = {
"task_id": tasks_storage.next_task_id(),
"task_name": INITIAL_TASK
}
tasks_storage.append(initial_task)
def main():
loop = True
while loop:
# As long as there are tasks in the storage...
if not tasks_storage.is_empty():
# Print the task list
print("\033[95m\033[1m" + "\n*****任务列表*****\n" + "\033[0m\033[0m")
for t in tasks_storage.get_task_names():
print(" • " + str(t))
# Step 1: Pull the first incomplete task
task = tasks_storage.popleft()
print("\033[92m\033[1m" + "\n*****后续任务*****\n" + "\033[0m\033[0m")
print(str(task["task_name"]))
# Send to execution function to complete the task based on the context
result = execution_agent(OBJECTIVE, str(task["task_name"]))
print("\033[93m\033[1m" + "\n*****任务结果*****\n" + "\033[0m\033[0m")
print(result)
# Step 2: Enrich result and store in the results storage
# This is where you should enrich the result if needed
enriched_result = {
"data": result
}
# extract the actual result from the dictionary
# since we don't do enrichment currently
# vector = enriched_result["data"]
result_id = f"result_{task['task_id']}"
results_storage.add(task, result, result_id)
# Step 3: Create new tasks and re-prioritize task list
# only the main instance in cooperative mode does that
new_tasks = task_creation_agent(
OBJECTIVE,
enriched_result,
task["task_name"],
tasks_storage.get_task_names(),
)
print('添加新任务')
for new_task in new_tasks:
new_task.update({"task_id": tasks_storage.next_task_id()})
print(str(new_task))
tasks_storage.append(new_task)
if not JOIN_EXISTING_OBJECTIVE:
prioritized_tasks = prioritization_agent()
if prioritized_tasks:
tasks_storage.replace(prioritized_tasks)
# Sleep a bit before checking the task list again
time.sleep(5)
else:
print('Done.')
loop = False
if __name__ == "__main__":
main() | [
"执行一个任务以达成下面指定的目标: PLACEHOLDER.\n",
"\n",
"根据结果,返回一个需要完成的任务清单,以达到目标。",
"\n在你的答复中,每行返回一项任务。结果必须是一个编号的列表,格式为::\n\n#. 第一个任务\n#. 第二项任务\n\n每个条目的编号后面必须有一个句号。如果你的列表是空的,写上 \"目前没有任务要添加\"。\n除非你的清单是空的,否则不要在你的编号清单前包括任何标题,也不要在你的编号清单后加上任何其他输出。",
"这些新任务不能与未完成的任务重复。 ",
"综合考虑已经完成的任务:",
"\n你要使用任务执行者的结果来创建新的任务,其目标如下: PLACEHOLDER.\n最后完成的任务的结果为: \nPLACEHOLDER\n这一结果是基于这一任务描述: PLACEHOLDER.\n",
", ",
"\n你的任务: PLACEHOLDER\n 回应:"
] |
2024-01-10 | EmbraceAGI/LocalAGI | local_agi_mini.py | #!/usr/bin/env python3
from dotenv import load_dotenv
# Load default environment variables (.env)
load_dotenv()
import os
import time
import logging
from collections import deque
from typing import Dict, List
import importlib
import chromadb
import tiktoken as tiktoken
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
import re
# default opt out of chromadb telemetry.
from chromadb.config import Settings
client = chromadb.Client(Settings(anonymized_telemetry=False))
# Engine configuration
# Model: GPT, LLAMA, HUMAN, etc.
LLM_MODEL = os.getenv("LLM_MODEL", None).lower()
# Table config
RESULTS_STORE_NAME = os.getenv("RESULTS_STORE_NAME", os.getenv("TABLE_NAME", ""))
assert RESULTS_STORE_NAME, "\033[91m\033[1m" + "RESULTS_STORE_NAME environment variable is missing from .env" + "\033[0m\033[0m"
# Run configuration
INSTANCE_NAME = os.getenv("INSTANCE_NAME", os.getenv("BABY_NAME", "BabyAGI"))
COOPERATIVE_MODE = "none"
JOIN_EXISTING_OBJECTIVE = False
# Goal configuration
OBJECTIVE = os.getenv("OBJECTIVE", "")
INITIAL_TASK = os.getenv("INITIAL_TASK", os.getenv("FIRST_TASK", ""))
# Model configuration
LLM_TEMPERATURE = float(os.getenv("LLM_TEMPERATURE", 0.0))
# Extensions support begin
def can_import(module_name):
try:
importlib.import_module(module_name)
return True
except ImportError:
return False
DOTENV_EXTENSIONS = os.getenv("DOTENV_EXTENSIONS", "").split(" ")
# Command line arguments extension
# Can override any of the above environment variables
ENABLE_COMMAND_LINE_ARGS = (
os.getenv("ENABLE_COMMAND_LINE_ARGS", "false").lower() == "true"
)
if ENABLE_COMMAND_LINE_ARGS:
if can_import("extensions.argparseext"):
from extensions.argparseext import parse_arguments
OBJECTIVE, INITIAL_TASK, LLM_MODEL, DOTENV_EXTENSIONS, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE = parse_arguments()
# Human mode extension
# Gives human input to babyagi
if LLM_MODEL.startswith("human"):
if can_import("extensions.human_mode"):
from extensions.human_mode import user_input_await
# Load additional environment variables for enabled extensions
# TODO: This might override the following command line arguments as well:
# OBJECTIVE, INITIAL_TASK, LLM_MODEL, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE
if DOTENV_EXTENSIONS:
if can_import("extensions.dotenvext"):
from extensions.dotenvext import load_dotenv_extensions
load_dotenv_extensions(DOTENV_EXTENSIONS)
# TODO: There's still work to be done here to enable people to get
# defaults from dotenv extensions, but also provide command line
# arguments to override them
# Extensions support end
print("\033[95m\033[1m" + "\n*****CONFIGURATION*****\n" + "\033[0m\033[0m")
print(f"Name : {INSTANCE_NAME}")
print(f"Mode : {'alone' if COOPERATIVE_MODE in ['n', 'none'] else 'local' if COOPERATIVE_MODE in ['l', 'local'] else 'distributed' if COOPERATIVE_MODE in ['d', 'distributed'] else 'undefined'}")
print(f"LLM : {LLM_MODEL}")
# Check if we know what we are doing
assert OBJECTIVE, "\033[91m\033[1m" + "OBJECTIVE environment variable is missing from .env" + "\033[0m\033[0m"
assert INITIAL_TASK, "\033[91m\033[1m" + "INITIAL_TASK environment variable is missing from .env" + "\033[0m\033[0m"
LLAMA_MODEL_PATH = os.getenv("LLAMA_MODEL_PATH", "models/llama-13B/ggml-model.bin")
if LLM_MODEL.startswith("llama"):
if can_import("llama_cpp"):
from llama_cpp import Llama
print(f"LLAMA : {LLAMA_MODEL_PATH}" + "\n")
assert os.path.exists(LLAMA_MODEL_PATH), "\033[91m\033[1m" + f"Model can't be found." + "\033[0m\033[0m"
CTX_MAX = 1024
LLAMA_THREADS_NUM = int(os.getenv("LLAMA_THREADS_NUM", 8))
print('Initialize model for evaluation')
llm = Llama(
model_path=LLAMA_MODEL_PATH,
n_ctx=CTX_MAX,
n_threads=LLAMA_THREADS_NUM,
n_batch=512,
use_mlock=False,
)
print('\nInitialize model for embedding')
llm_embed = Llama(
model_path=LLAMA_MODEL_PATH,
n_ctx=CTX_MAX,
n_threads=LLAMA_THREADS_NUM,
n_batch=512,
embedding=True,
use_mlock=False,
)
print(
"\033[91m\033[1m"
+ "\n*****USING LLAMA.CPP. POTENTIALLY SLOW.*****"
+ "\033[0m\033[0m"
)
else:
print(
"\033[91m\033[1m"
+ "\nLlama LLM requires package llama-cpp. Falling back to GPT-3.5-turbo."
+ "\033[0m\033[0m"
)
LLM_MODEL = "gpt-3.5-turbo"
CHATGLM_API = os.getenv("CHATGLM_API", None)
CHATGLM_MODEL_PATH = os.getenv("CHATGLM_MODEL_PATH", "../chatglm-6b")
if LLM_MODEL.startswith("chatglm-6b"):
try:
CTX_MAX = 1024
if CHATGLM_API is None:
from transformers import AutoTokenizer, AutoModel
print(f"ChatGLM : {CHATGLM_MODEL_PATH}" + "\n")
assert os.path.exists(CHATGLM_MODEL_PATH), "\033[91m\033[1m" + f"Model can't be found." + "\033[0m\033[0m"
print('Initialize model for evaluation')
tokenizer = AutoTokenizer.from_pretrained(f"{CHATGLM_MODEL_PATH}", revision="v1.1.0", trust_remote_code=True)
            model = AutoModel.from_pretrained(f"{CHATGLM_MODEL_PATH}", revision="v1.1.0", trust_remote_code=True).quantize(8).half().cuda()
#model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
llm = model.eval()
print('\nInitialize model for embedding')
from langchain.embeddings import HuggingFaceEmbeddings
llm_embed = HuggingFaceEmbeddings(model_name='shibing624/text2vec-base-chinese')
print(
"\033[91m\033[1m"
+ "\n*****USING ChatGLM-6B. POTENTIALLY SLOW.*****"
+ "\033[0m\033[0m"
)
except:
print(
"\033[91m\033[1m"
+ "\nChatGLM-6B is not properly installed. Falling back to GPT-3.5-turbo."
+ "\033[0m\033[0m"
)
LLM_MODEL = "gpt-3.5-turbo"
if LLM_MODEL.startswith("gpt-4"):
print(
"\033[91m\033[1m"
+ "\n*****USING GPT-4. POTENTIALLY EXPENSIVE. MONITOR YOUR COSTS*****"
+ "\033[0m\033[0m"
)
if LLM_MODEL.startswith("human"):
print(
"\033[91m\033[1m"
+ "\n*****USING HUMAN INPUT*****"
+ "\033[0m\033[0m"
)
print("\033[94m\033[1m" + "\n*****OBJECTIVE*****\n" + "\033[0m\033[0m")
print(f"{OBJECTIVE}")
if not JOIN_EXISTING_OBJECTIVE:
print("\033[93m\033[1m" + "\nInitial task:" + "\033[0m\033[0m" + f" {INITIAL_TASK}")
else:
print("\033[93m\033[1m" + f"\nJoining to help the objective" + "\033[0m\033[0m")
# Llama embedding function
class LlamaEmbeddingFunction(EmbeddingFunction):
def __init__(self):
return
def __call__(self, texts: Documents) -> Embeddings:
embeddings = []
for t in texts:
e = llm_embed.embed(t)
embeddings.append(e)
return embeddings
# ChatgGLM-6b embedding function
class ChatgGLMEmbeddingFunction(EmbeddingFunction):
def __init__(self):
return
def __call__(self, texts: Documents) -> Embeddings:
embeddings = llm_embed.embed_documents(texts)
return embeddings
# Results storage using local ChromaDB
class DefaultResultsStorage:
def __init__(self):
logging.getLogger('chromadb').setLevel(logging.ERROR)
# Create Chroma collection
chroma_persist_dir = "chroma"
chroma_client = chromadb.Client(
settings=chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=chroma_persist_dir,
)
)
metric = "cosine"
if LLM_MODEL.startswith("llama") :
embedding_function = LlamaEmbeddingFunction()
elif LLM_MODEL.startswith("chatglm-6b"):
embedding_function = ChatgGLMEmbeddingFunction()
else:
embedding_function = LlamaEmbeddingFunction()
self.collection = chroma_client.get_or_create_collection(
name=RESULTS_STORE_NAME,
metadata={"hnsw:space": metric},
embedding_function=embedding_function,
)
def add(self, task: Dict, result: str, result_id: str):
# Break the function if LLM_MODEL starts with "human" (case-insensitive)
if LLM_MODEL.startswith("human"):
return
# Continue with the rest of the function
embeddings = llm_embed.embed(result) if LLM_MODEL.startswith("llama") else None
if (
len(self.collection.get(ids=[result_id], include=[])["ids"]) > 0
): # Check if the result already exists
self.collection.update(
ids=result_id,
embeddings=embeddings,
documents=result,
metadatas={"task": task["task_name"], "result": result},
)
else:
self.collection.add(
ids=result_id,
embeddings=embeddings,
documents=result,
metadatas={"task": task["task_name"], "result": result},
)
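    # Query the vector store for the results most similar to the query and return their stored task names (used as execution context).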
def query(self, query: str, top_results_num: int) -> List[dict]:
count: int = self.collection.count()
if count == 0:
return []
results = self.collection.query(
query_texts=query,
n_results=min(top_results_num, count),
include=["metadatas"]
)
return [item["task"] for item in results["metadatas"][0]]
def use_chroma():
print("\nUsing results storage: " + "\033[93m\033[1m" + "Chroma (Default)" + "\033[0m\033[0m")
return DefaultResultsStorage()
results_storage = use_chroma()  # this minimal script only bundles the local Chroma backend (try_weaviate/try_pinecone are not defined here)
# Task storage supporting only a single instance of BabyAGI
class SingleTaskListStorage:
def __init__(self):
self.tasks = deque([])
self.task_id_counter = 0
def append(self, task: Dict):
self.tasks.append(task)
def replace(self, tasks: List[Dict]):
self.tasks = deque(tasks)
def popleft(self):
return self.tasks.popleft()
def is_empty(self):
return False if self.tasks else True
def next_task_id(self):
self.task_id_counter += 1
return self.task_id_counter
def get_task_names(self):
return [t["task_name"] for t in self.tasks]
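# Illustrative usage of the task queue (hypothetical, not executed by this script):
#   storage = SingleTaskListStorage()
#   storage.append({"task_id": storage.next_task_id(), "task_name": "Draft an outline"})
#   next_task = storage.popleft()  # FIFO: tasks are pulled in insertion order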
# Initialize tasks storage
tasks_storage = SingleTaskListStorage()
if COOPERATIVE_MODE in ['l', 'local']:
if can_import("extensions.ray_tasks"):
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
from extensions.ray_tasks import CooperativeTaskListStorage
tasks_storage = CooperativeTaskListStorage(OBJECTIVE)
print("\nReplacing tasks storage: " + "\033[93m\033[1m" + "Ray" + "\033[0m\033[0m")
elif COOPERATIVE_MODE in ['d', 'distributed']:
pass
def limit_tokens_from_string(string: str, model: str, limit: int) -> str:
"""Limits the string to a number of tokens (estimated)."""
try:
encoding = tiktoken.encoding_for_model(model)
except:
encoding = tiktoken.encoding_for_model('gpt2') # Fallback for others.
encoded = encoding.encode(string)
return encoding.decode(encoded[:limit])
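# Dispatch the prompt to the configured local backend: llama.cpp, ChatGLM (remote API or in-process), or human input.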
def llm_call(
prompt: str,
model: str = LLM_MODEL,
temperature: float = LLM_TEMPERATURE,
max_tokens: int = 100,
):
while True:
try:
if model.lower().startswith("llama"):
result = llm(prompt[:CTX_MAX],
stop=["### Human"],
echo=False,
temperature=0.2,
top_k=40,
top_p=0.95,
repeat_penalty=1.05,
max_tokens=200)
# print('\n*****RESULT JSON DUMP*****\n')
# print(json.dumps(result))
# print('\n')
return result['choices'][0]['text'].strip()
elif model.lower().startswith("chatglm"):
if CHATGLM_API is not None:
import requests
import json
headers = {
"Content-Type": "application/json",
}
data = {
"prompt": prompt[:CTX_MAX],
"history": []
}
result = requests.post(CHATGLM_API, headers=headers, data=json.dumps(data))
return result.json()['response'].strip()
else:
result, history = llm.chat(tokenizer, prompt[:CTX_MAX], history=[])
# print('\n*****RESULT JSON DUMP*****\n')
# print(json.dumps(result))
# print('\n')
return result.strip()
elif model.lower().startswith("human"):
return user_input_await(prompt)
else:
break
except:
break
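# Ask the LLM to propose follow-up tasks based on the objective, the last result, and any still-incomplete tasks.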
def task_creation_agent(
objective: str, result: Dict, task_description: str, task_list: List[str]
):
prompt = f"""
You are to use the result from an execution agent to create new tasks with the following objective: {objective}.
The last completed task has the result: \n{result["data"]}
This result was based on this task description: {task_description}.\n"""
if task_list:
prompt += f"These are incomplete tasks: {', '.join(task_list)}\n"
prompt += "Based on the result, return a list of tasks to be completed in order to meet the objective. "
if task_list:
prompt += "These new tasks must not overlap with incomplete tasks. "
prompt += """
Return one task per line in your response. The result must be a numbered list in the format:
#. First task
#. Second task
The number of each entry must be followed by a period. If your list is empty, write "There are no tasks to add at this time."
Unless your list is empty, do not include any headers before your numbered list or follow your numbered list with any other output."""
print(f'\n*****TASK CREATION AGENT PROMPT****\n{prompt}\n')
response = llm_call(prompt, max_tokens=2000)
print(f'\n****TASK CREATION AGENT RESPONSE****\n{response}\n')
new_tasks = response.split('\n')
new_tasks_list = []
for task_string in new_tasks:
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
if task_name.strip() and task_id.isnumeric():
new_tasks_list.append(task_name)
# print('New task created: ' + task_name)
out = [{"task_name": task_name} for task_name in new_tasks_list]
return out
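# Re-rank the outstanding tasks against the objective; returns a list of {'task_id', 'task_name'} dicts, or None on an empty response.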
def prioritization_agent():
task_names = tasks_storage.get_task_names()
bullet_string = '\n'
prompt = f"""
You are tasked with prioritizing the following tasks: {bullet_string + bullet_string.join(task_names)}
Consider the ultimate objective of your team: {OBJECTIVE}.
Tasks should be sorted from highest to lowest priority, where higher-priority tasks are those that act as pre-requisites or are more essential for meeting the objective.
Do not remove any tasks. Return the ranked tasks as a numbered list in the format:
#. First task
#. Second task
The entries must be consecutively numbered, starting with 1. The number of each entry must be followed by a period.
Do not include any headers before your ranked list or follow your list with any other output."""
print(f'\n****TASK PRIORITIZATION AGENT PROMPT****\n{prompt}\n')
response = llm_call(prompt, max_tokens=2000)
print(f'\n****TASK PRIORITIZATION AGENT RESPONSE****\n{response}\n')
if not response:
        print('Received empty response from prioritization agent. Keeping task list unchanged.')
return
new_tasks = response.split("\n") if "\n" in response else [response]
new_tasks_list = []
for task_string in new_tasks:
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
if task_name.strip():
new_tasks_list.append({"task_id": task_id, "task_name": task_name})
return new_tasks_list
# Execute a task based on the objective and five previous tasks
def execution_agent(objective: str, task: str) -> str:
"""
Executes a task based on the given objective and previous context.
Args:
objective (str): The objective or goal for the AI to perform the task.
task (str): The task to be executed by the AI.
Returns:
str: The response generated by the AI for the given task.
"""
context = context_agent(query=objective, top_results_num=5)
# print("\n****RELEVANT CONTEXT****\n")
# print(context)
# print('')
prompt = f'Perform one task based on the following objective: {objective}.\n'
if context:
prompt += 'Take into account these previously completed tasks:' + '\n'.join(context)
prompt += f'\nYour task: {task}\nResponse:'
return llm_call(prompt, max_tokens=2000)
# Get the top n completed tasks for the objective
def context_agent(query: str, top_results_num: int):
"""
Retrieves context for a given query from an index of tasks.
Args:
query (str): The query or objective for retrieving context.
top_results_num (int): The number of top results to retrieve.
Returns:
list: A list of tasks as context for the given query, sorted by relevance.
"""
results = results_storage.query(query=query, top_results_num=top_results_num)
# print("****RESULTS****")
# print(results)
return results
# Add the initial task if starting new objective
if not JOIN_EXISTING_OBJECTIVE:
initial_task = {
"task_id": tasks_storage.next_task_id(),
"task_name": INITIAL_TASK
}
tasks_storage.append(initial_task)
def main():
loop = True
while loop:
# As long as there are tasks in the storage...
if not tasks_storage.is_empty():
# Print the task list
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in tasks_storage.get_task_names():
print(" • " + str(t))
# Step 1: Pull the first incomplete task
task = tasks_storage.popleft()
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_name"]))
# Send to execution function to complete the task based on the context
result = execution_agent(OBJECTIVE, str(task["task_name"]))
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
# Step 2: Enrich result and store in the results storage
# This is where you should enrich the result if needed
enriched_result = {
"data": result
}
# extract the actual result from the dictionary
# since we don't do enrichment currently
# vector = enriched_result["data"]
result_id = f"result_{task['task_id']}"
results_storage.add(task, result, result_id)
# Step 3: Create new tasks and re-prioritize task list
# only the main instance in cooperative mode does that
new_tasks = task_creation_agent(
OBJECTIVE,
enriched_result,
task["task_name"],
tasks_storage.get_task_names(),
)
print('Adding new tasks to task_storage')
for new_task in new_tasks:
new_task.update({"task_id": tasks_storage.next_task_id()})
print(str(new_task))
tasks_storage.append(new_task)
if not JOIN_EXISTING_OBJECTIVE:
prioritized_tasks = prioritization_agent()
if prioritized_tasks:
tasks_storage.replace(prioritized_tasks)
# Sleep a bit before checking the task list again
time.sleep(5)
else:
print('Done.')
loop = False
if __name__ == "__main__":
main() | [
"\n",
"Take into account these previously completed tasks:",
"\nYour task: PLACEHOLDER\nResponse:",
"Perform one task based on the following objective: PLACEHOLDER.\n",
"Based on the result, return a list of tasks to be completed in order to meet the objective. ",
"\nYou are to use the result from an execution agent to create new tasks with the following objective: PLACEHOLDER.\nThe last completed task has the result: \nPLACEHOLDER\nThis result was based on this task description: PLACEHOLDER.\n",
", ",
"These new tasks must not overlap with incomplete tasks. ",
"\nReturn one task per line in your response. The result must be a numbered list in the format:\n\n#. First task\n#. Second task\n\nThe number of each entry must be followed by a period. If your list is empty, write \"There are no tasks to add at this time.\"\nUnless your list is empty, do not include any headers before your numbered list or follow your numbered list with any other output."
] |
2024-01-10 | EmbraceAGI/LocalAGI | local_agi.py | #!/usr/bin/env python3
from dotenv import load_dotenv
# Load default environment variables (.env)
load_dotenv()
import os
import time
import logging
from collections import deque
from typing import Dict, List
import importlib
import openai
import chromadb
import tiktoken as tiktoken
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
import re
# default opt out of chromadb telemetry.
from chromadb.config import Settings
client = chromadb.Client(Settings(anonymized_telemetry=False))
# Engine configuration
# Model: GPT, LLAMA, HUMAN, etc.
LLM_MODEL = os.getenv("LLM_MODEL", os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")).lower()
# API Keys
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
if not (LLM_MODEL.startswith("llama") or LLM_MODEL.startswith("chatglm-6b") or LLM_MODEL.startswith("human")):
assert OPENAI_API_KEY, "\033[91m\033[1m" + "OPENAI_API_KEY environment variable is missing from .env" + "\033[0m\033[0m"
# Table config
RESULTS_STORE_NAME = os.getenv("RESULTS_STORE_NAME", os.getenv("TABLE_NAME", ""))
assert RESULTS_STORE_NAME, "\033[91m\033[1m" + "RESULTS_STORE_NAME environment variable is missing from .env" + "\033[0m\033[0m"
# Run configuration
INSTANCE_NAME = os.getenv("INSTANCE_NAME", os.getenv("BABY_NAME", "BabyAGI"))
COOPERATIVE_MODE = "none"
JOIN_EXISTING_OBJECTIVE = False
# Goal configuration
OBJECTIVE = os.getenv("OBJECTIVE", "")
INITIAL_TASK = os.getenv("INITIAL_TASK", os.getenv("FIRST_TASK", ""))
# Model configuration
OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.0))
# Extensions support begin
def can_import(module_name):
try:
importlib.import_module(module_name)
return True
except ImportError:
return False
DOTENV_EXTENSIONS = os.getenv("DOTENV_EXTENSIONS", "").split(" ")
# Command line arguments extension
# Can override any of the above environment variables
ENABLE_COMMAND_LINE_ARGS = (
os.getenv("ENABLE_COMMAND_LINE_ARGS", "false").lower() == "true"
)
if ENABLE_COMMAND_LINE_ARGS:
if can_import("extensions.argparseext"):
from extensions.argparseext import parse_arguments
OBJECTIVE, INITIAL_TASK, LLM_MODEL, DOTENV_EXTENSIONS, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE = parse_arguments()
# Human mode extension
# Gives human input to babyagi
if LLM_MODEL.startswith("human"):
if can_import("extensions.human_mode"):
from extensions.human_mode import user_input_await
# Load additional environment variables for enabled extensions
# TODO: This might override the following command line arguments as well:
# OBJECTIVE, INITIAL_TASK, LLM_MODEL, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE
if DOTENV_EXTENSIONS:
if can_import("extensions.dotenvext"):
from extensions.dotenvext import load_dotenv_extensions
load_dotenv_extensions(DOTENV_EXTENSIONS)
# TODO: There's still work to be done here to enable people to get
# defaults from dotenv extensions, but also provide command line
# arguments to override them
# Extensions support end
print("\033[95m\033[1m" + "\n*****CONFIGURATION*****\n" + "\033[0m\033[0m")
print(f"Name : {INSTANCE_NAME}")
print(f"Mode : {'alone' if COOPERATIVE_MODE in ['n', 'none'] else 'local' if COOPERATIVE_MODE in ['l', 'local'] else 'distributed' if COOPERATIVE_MODE in ['d', 'distributed'] else 'undefined'}")
print(f"LLM : {LLM_MODEL}")
# Check if we know what we are doing
assert OBJECTIVE, "\033[91m\033[1m" + "OBJECTIVE environment variable is missing from .env" + "\033[0m\033[0m"
assert INITIAL_TASK, "\033[91m\033[1m" + "INITIAL_TASK environment variable is missing from .env" + "\033[0m\033[0m"
LLAMA_MODEL_PATH = os.getenv("LLAMA_MODEL_PATH", "models/llama-13B/ggml-model.bin")
if LLM_MODEL.startswith("llama"):
if can_import("llama_cpp"):
from llama_cpp import Llama
print(f"LLAMA : {LLAMA_MODEL_PATH}" + "\n")
assert os.path.exists(LLAMA_MODEL_PATH), "\033[91m\033[1m" + f"Model can't be found." + "\033[0m\033[0m"
CTX_MAX = 1024
LLAMA_THREADS_NUM = int(os.getenv("LLAMA_THREADS_NUM", 8))
print('Initialize model for evaluation')
llm = Llama(
model_path=LLAMA_MODEL_PATH,
n_ctx=CTX_MAX,
n_threads=LLAMA_THREADS_NUM,
n_batch=512,
use_mlock=False,
)
print('\nInitialize model for embedding')
llm_embed = Llama(
model_path=LLAMA_MODEL_PATH,
n_ctx=CTX_MAX,
n_threads=LLAMA_THREADS_NUM,
n_batch=512,
embedding=True,
use_mlock=False,
)
print(
"\033[91m\033[1m"
+ "\n*****USING LLAMA.CPP. POTENTIALLY SLOW.*****"
+ "\033[0m\033[0m"
)
else:
print(
"\033[91m\033[1m"
+ "\nLlama LLM requires package llama-cpp. Falling back to GPT-3.5-turbo."
+ "\033[0m\033[0m"
)
LLM_MODEL = "gpt-3.5-turbo"
CHATGLM_API = os.getenv("CHATGLM_API", None)
CHATGLM_MODEL_PATH = os.getenv("CHATGLM_MODEL_PATH", "../chatglm-6b")
if LLM_MODEL.startswith("chatglm-6b"):
try:
CTX_MAX = 1024
if CHATGLM_API is None:
from transformers import AutoTokenizer, AutoModel
print(f"ChatGLM : {CHATGLM_MODEL_PATH}" + "\n")
assert os.path.exists(CHATGLM_MODEL_PATH), "\033[91m\033[1m" + f"Model can't be found." + "\033[0m\033[0m"
print('Initialize model for evaluation')
tokenizer = AutoTokenizer.from_pretrained(f"{CHATGLM_MODEL_PATH}", revision="v1.1.0", trust_remote_code=True)
            model = AutoModel.from_pretrained(f"{CHATGLM_MODEL_PATH}", revision="v1.1.0", trust_remote_code=True).quantize(8).half().cuda()
#model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
llm = model.eval()
print('\nInitialize model for embedding')
from langchain.embeddings import HuggingFaceEmbeddings
llm_embed = HuggingFaceEmbeddings(model_name='shibing624/text2vec-base-chinese')
print(
"\033[91m\033[1m"
+ "\n*****USING ChatGLM-6B. POTENTIALLY SLOW.*****"
+ "\033[0m\033[0m"
)
except:
print(
"\033[91m\033[1m"
+ "\nChatGLM-6B is not properly installed. Falling back to GPT-3.5-turbo."
+ "\033[0m\033[0m"
)
LLM_MODEL = "gpt-3.5-turbo"
if LLM_MODEL.startswith("gpt-4"):
print(
"\033[91m\033[1m"
+ "\n*****USING GPT-4. POTENTIALLY EXPENSIVE. MONITOR YOUR COSTS*****"
+ "\033[0m\033[0m"
)
if LLM_MODEL.startswith("human"):
print(
"\033[91m\033[1m"
+ "\n*****USING HUMAN INPUT*****"
+ "\033[0m\033[0m"
)
print("\033[94m\033[1m" + "\n*****OBJECTIVE*****\n" + "\033[0m\033[0m")
print(f"{OBJECTIVE}")
if not JOIN_EXISTING_OBJECTIVE:
print("\033[93m\033[1m" + "\nInitial task:" + "\033[0m\033[0m" + f" {INITIAL_TASK}")
else:
print("\033[93m\033[1m" + f"\nJoining to help the objective" + "\033[0m\033[0m")
# Configure OpenAI
openai.api_key = OPENAI_API_KEY
# Llama embedding function
class LlamaEmbeddingFunction(EmbeddingFunction):
def __init__(self):
return
def __call__(self, texts: Documents) -> Embeddings:
embeddings = []
for t in texts:
e = llm_embed.embed(t)
embeddings.append(e)
return embeddings
# ChatgGLM-6b embedding function
class ChatgGLMEmbeddingFunction(EmbeddingFunction):
def __init__(self):
return
def __call__(self, texts: Documents) -> Embeddings:
embeddings = llm_embed.embed_documents(texts)
return embeddings
# Results storage using local ChromaDB
class DefaultResultsStorage:
def __init__(self):
logging.getLogger('chromadb').setLevel(logging.ERROR)
# Create Chroma collection
chroma_persist_dir = "chroma"
chroma_client = chromadb.Client(
settings=chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=chroma_persist_dir,
)
)
metric = "cosine"
if LLM_MODEL.startswith("llama") :
embedding_function = LlamaEmbeddingFunction()
elif LLM_MODEL.startswith("chatglm-6b"):
embedding_function = ChatgGLMEmbeddingFunction()
else:
embedding_function = OpenAIEmbeddingFunction(api_key=OPENAI_API_KEY)
self.collection = chroma_client.get_or_create_collection(
name=RESULTS_STORE_NAME,
metadata={"hnsw:space": metric},
embedding_function=embedding_function,
)
def add(self, task: Dict, result: str, result_id: str):
# Break the function if LLM_MODEL starts with "human" (case-insensitive)
if LLM_MODEL.startswith("human"):
return
# Continue with the rest of the function
embeddings = llm_embed.embed(result) if LLM_MODEL.startswith("llama") else None
if (
len(self.collection.get(ids=[result_id], include=[])["ids"]) > 0
): # Check if the result already exists
self.collection.update(
ids=result_id,
embeddings=embeddings,
documents=result,
metadatas={"task": task["task_name"], "result": result},
)
else:
self.collection.add(
ids=result_id,
embeddings=embeddings,
documents=result,
metadatas={"task": task["task_name"], "result": result},
)
def query(self, query: str, top_results_num: int) -> List[dict]:
count: int = self.collection.count()
if count == 0:
return []
results = self.collection.query(
query_texts=query,
n_results=min(top_results_num, count),
include=["metadatas"]
)
return [item["task"] for item in results["metadatas"][0]]
# Initialize results storage
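# Storage backends are tried in order: Weaviate, then Pinecone, falling back to the local Chroma default (see results_storage below).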
def try_weaviate():
WEAVIATE_URL = os.getenv("WEAVIATE_URL", "")
WEAVIATE_USE_EMBEDDED = os.getenv("WEAVIATE_USE_EMBEDDED", "False").lower() == "true"
if (WEAVIATE_URL or WEAVIATE_USE_EMBEDDED) and can_import("extensions.weaviate_storage"):
WEAVIATE_API_KEY = os.getenv("WEAVIATE_API_KEY", "")
from extensions.weaviate_storage import WeaviateResultsStorage
print("\nUsing results storage: " + "\033[93m\033[1m" + "Weaviate" + "\033[0m\033[0m")
return WeaviateResultsStorage(OPENAI_API_KEY, WEAVIATE_URL, WEAVIATE_API_KEY, WEAVIATE_USE_EMBEDDED, LLM_MODEL, LLAMA_MODEL_PATH, RESULTS_STORE_NAME, OBJECTIVE)
return None
def try_pinecone():
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY", "")
if PINECONE_API_KEY and can_import("extensions.pinecone_storage"):
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT", "")
assert (
PINECONE_ENVIRONMENT
), "\033[91m\033[1m" + "PINECONE_ENVIRONMENT environment variable is missing from .env" + "\033[0m\033[0m"
from extensions.pinecone_storage import PineconeResultsStorage
print("\nUsing results storage: " + "\033[93m\033[1m" + "Pinecone" + "\033[0m\033[0m")
return PineconeResultsStorage(OPENAI_API_KEY, PINECONE_API_KEY, PINECONE_ENVIRONMENT, LLM_MODEL, LLAMA_MODEL_PATH, RESULTS_STORE_NAME, OBJECTIVE)
return None
def use_chroma():
print("\nUsing results storage: " + "\033[93m\033[1m" + "Chroma (Default)" + "\033[0m\033[0m")
return DefaultResultsStorage()
results_storage = try_weaviate() or try_pinecone() or use_chroma()
# Task storage supporting only a single instance of BabyAGI
class SingleTaskListStorage:
def __init__(self):
self.tasks = deque([])
self.task_id_counter = 0
def append(self, task: Dict):
self.tasks.append(task)
def replace(self, tasks: List[Dict]):
self.tasks = deque(tasks)
def popleft(self):
return self.tasks.popleft()
def is_empty(self):
return False if self.tasks else True
def next_task_id(self):
self.task_id_counter += 1
return self.task_id_counter
def get_task_names(self):
return [t["task_name"] for t in self.tasks]
# Initialize tasks storage
tasks_storage = SingleTaskListStorage()
if COOPERATIVE_MODE in ['l', 'local']:
if can_import("extensions.ray_tasks"):
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
from extensions.ray_tasks import CooperativeTaskListStorage
tasks_storage = CooperativeTaskListStorage(OBJECTIVE)
print("\nReplacing tasks storage: " + "\033[93m\033[1m" + "Ray" + "\033[0m\033[0m")
elif COOPERATIVE_MODE in ['d', 'distributed']:
pass
def limit_tokens_from_string(string: str, model: str, limit: int) -> str:
"""Limits the string to a number of tokens (estimated)."""
try:
encoding = tiktoken.encoding_for_model(model)
except:
encoding = tiktoken.encoding_for_model('gpt2') # Fallback for others.
encoded = encoding.encode(string)
return encoding.decode(encoded[:limit])
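# Route the prompt to the selected backend (llama.cpp, ChatGLM, human input, or OpenAI); OpenAI rate limits and transient API errors trigger a 10-second wait and retry.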
def openai_call(
prompt: str,
model: str = LLM_MODEL,
temperature: float = OPENAI_TEMPERATURE,
max_tokens: int = 100,
):
while True:
try:
if model.lower().startswith("llama"):
result = llm(prompt[:CTX_MAX],
stop=["### Human"],
echo=False,
temperature=0.2,
top_k=40,
top_p=0.95,
repeat_penalty=1.05,
max_tokens=200)
# print('\n*****RESULT JSON DUMP*****\n')
# print(json.dumps(result))
# print('\n')
return result['choices'][0]['text'].strip()
elif model.lower().startswith("chatglm"):
if CHATGLM_API is not None:
import requests
import json
headers = {
"Content-Type": "application/json",
}
data = {
"prompt": prompt[:CTX_MAX],
"history": []
}
result = requests.post(CHATGLM_API, headers=headers, data=json.dumps(data))
return result.json()['response'].strip()
else:
result, history = llm.chat(tokenizer, prompt[:CTX_MAX], history=[])
# print('\n*****RESULT JSON DUMP*****\n')
# print(json.dumps(result))
# print('\n')
return result.strip()
elif model.lower().startswith("human"):
return user_input_await(prompt)
elif not model.lower().startswith("gpt-"):
# Use completion API
response = openai.Completion.create(
engine=model,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
return response.choices[0].text.strip()
else:
# Use 4000 instead of the real limit (4097) to give a bit of wiggle room for the encoding of roles.
# TODO: different limits for different models.
trimmed_prompt = limit_tokens_from_string(prompt, model, 4000 - max_tokens)
# Use chat completion API
messages = [{"role": "system", "content": trimmed_prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
n=1,
stop=None,
)
return response.choices[0].message.content.strip()
except openai.error.RateLimitError:
print(
" *** The OpenAI API rate limit has been exceeded. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.Timeout:
print(
" *** OpenAI API timeout occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIError:
print(
" *** OpenAI API error occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIConnectionError:
print(
" *** OpenAI API connection error occurred. Check your network settings, proxy configuration, SSL certificates, or firewall rules. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.InvalidRequestError:
print(
" *** OpenAI API invalid request. Check the documentation for the specific API method you are calling and make sure you are sending valid and complete parameters. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.ServiceUnavailableError:
print(
" *** OpenAI API service unavailable. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
else:
break
def task_creation_agent(
objective: str, result: Dict, task_description: str, task_list: List[str]
):
prompt = f"""
You are to use the result from an execution agent to create new tasks with the following objective: {objective}.
The last completed task has the result: \n{result["data"]}
This result was based on this task description: {task_description}.\n"""
if task_list:
prompt += f"These are incomplete tasks: {', '.join(task_list)}\n"
prompt += "Based on the result, return a list of tasks to be completed in order to meet the objective. "
if task_list:
prompt += "These new tasks must not overlap with incomplete tasks. "
prompt += """
Return one task per line in your response. The result must be a numbered list in the format:
#. First task
#. Second task
The number of each entry must be followed by a period. If your list is empty, write "There are no tasks to add at this time."
Unless your list is empty, do not include any headers before your numbered list or follow your numbered list with any other output."""
print(f'\n*****TASK CREATION AGENT PROMPT****\n{prompt}\n')
response = openai_call(prompt, max_tokens=2000)
print(f'\n****TASK CREATION AGENT RESPONSE****\n{response}\n')
new_tasks = response.split('\n')
new_tasks_list = []
for task_string in new_tasks:
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
if task_name.strip() and task_id.isnumeric():
new_tasks_list.append(task_name)
# print('New task created: ' + task_name)
out = [{"task_name": task_name} for task_name in new_tasks_list]
return out
def prioritization_agent():
task_names = tasks_storage.get_task_names()
bullet_string = '\n'
prompt = f"""
You are tasked with prioritizing the following tasks: {bullet_string + bullet_string.join(task_names)}
Consider the ultimate objective of your team: {OBJECTIVE}.
Tasks should be sorted from highest to lowest priority, where higher-priority tasks are those that act as pre-requisites or are more essential for meeting the objective.
Do not remove any tasks. Return the ranked tasks as a numbered list in the format:
#. First task
#. Second task
The entries must be consecutively numbered, starting with 1. The number of each entry must be followed by a period.
Do not include any headers before your ranked list or follow your list with any other output."""
print(f'\n****TASK PRIORITIZATION AGENT PROMPT****\n{prompt}\n')
response = openai_call(prompt, max_tokens=2000)
print(f'\n****TASK PRIORITIZATION AGENT RESPONSE****\n{response}\n')
if not response:
        print('Received empty response from prioritization agent. Keeping task list unchanged.')
return
new_tasks = response.split("\n") if "\n" in response else [response]
new_tasks_list = []
for task_string in new_tasks:
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
if task_name.strip():
new_tasks_list.append({"task_id": task_id, "task_name": task_name})
return new_tasks_list
# Execute a task based on the objective and five previous tasks
def execution_agent(objective: str, task: str) -> str:
"""
Executes a task based on the given objective and previous context.
Args:
objective (str): The objective or goal for the AI to perform the task.
task (str): The task to be executed by the AI.
Returns:
str: The response generated by the AI for the given task.
"""
context = context_agent(query=objective, top_results_num=5)
# print("\n****RELEVANT CONTEXT****\n")
# print(context)
# print('')
prompt = f'Perform one task based on the following objective: {objective}.\n'
if context:
prompt += 'Take into account these previously completed tasks:' + '\n'.join(context)
prompt += f'\nYour task: {task}\nResponse:'
return openai_call(prompt, max_tokens=2000)
# Get the top n completed tasks for the objective
def context_agent(query: str, top_results_num: int):
"""
Retrieves context for a given query from an index of tasks.
Args:
query (str): The query or objective for retrieving context.
top_results_num (int): The number of top results to retrieve.
Returns:
list: A list of tasks as context for the given query, sorted by relevance.
"""
results = results_storage.query(query=query, top_results_num=top_results_num)
# print("****RESULTS****")
# print(results)
return results
# Add the initial task if starting new objective
if not JOIN_EXISTING_OBJECTIVE:
initial_task = {
"task_id": tasks_storage.next_task_id(),
"task_name": INITIAL_TASK
}
tasks_storage.append(initial_task)
def main():
loop = True
while loop:
# As long as there are tasks in the storage...
if not tasks_storage.is_empty():
# Print the task list
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in tasks_storage.get_task_names():
print(" • " + str(t))
# Step 1: Pull the first incomplete task
task = tasks_storage.popleft()
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_name"]))
# Send to execution function to complete the task based on the context
result = execution_agent(OBJECTIVE, str(task["task_name"]))
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
# Step 2: Enrich result and store in the results storage
# This is where you should enrich the result if needed
enriched_result = {
"data": result
}
# extract the actual result from the dictionary
# since we don't do enrichment currently
# vector = enriched_result["data"]
result_id = f"result_{task['task_id']}"
results_storage.add(task, result, result_id)
# Step 3: Create new tasks and re-prioritize task list
# only the main instance in cooperative mode does that
new_tasks = task_creation_agent(
OBJECTIVE,
enriched_result,
task["task_name"],
tasks_storage.get_task_names(),
)
print('Adding new tasks to task_storage')
for new_task in new_tasks:
new_task.update({"task_id": tasks_storage.next_task_id()})
print(str(new_task))
tasks_storage.append(new_task)
if not JOIN_EXISTING_OBJECTIVE:
prioritized_tasks = prioritization_agent()
if prioritized_tasks:
tasks_storage.replace(prioritized_tasks)
# Sleep a bit before checking the task list again
time.sleep(5)
else:
print('Done.')
loop = False
if __name__ == "__main__":
main() | [
"\n",
"Take into account these previously completed tasks:",
"\nYour task: PLACEHOLDER\nResponse:",
"Perform one task based on the following objective: PLACEHOLDER.\n",
"Based on the result, return a list of tasks to be completed in order to meet the objective. ",
"\nYou are to use the result from an execution agent to create new tasks with the following objective: PLACEHOLDER.\nThe last completed task has the result: \nPLACEHOLDER\nThis result was based on this task description: PLACEHOLDER.\n",
", ",
"These new tasks must not overlap with incomplete tasks. ",
"\nReturn one task per line in your response. The result must be a numbered list in the format:\n\n#. First task\n#. Second task\n\nThe number of each entry must be followed by a period. If your list is empty, write \"There are no tasks to add at this time.\"\nUnless your list is empty, do not include any headers before your numbered list or follow your numbered list with any other output."
] |
2024-01-10 | vishnubob/autobooth | src~dialog.py | import random
from openai import OpenAI
client = OpenAI()
from . prompts.models import *
from . prompts import get_prompt, list_prompts
def get_random_prompt():
prompts = list_prompts()
prompt_name = random.choice(prompts)
print(f"Running {prompt_name} prompt.")
return get_prompt(prompt_name)
class PhotoboothDialog:
def __init__(self, prompt=None):
if prompt is None:
prompt = get_random_prompt()
self.messages = [{"role": "system", "content": prompt}]
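    # Append the assistant reply to the running history and parse its JSON content into an AssistantMessage model (imported from prompts.models).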
def parse_completion(self, completion):
from pprint import pprint
response = completion.choices[0].message
pprint(response)
self.messages.append(response)
return AssistantMessage.parse_raw(response.content)
def generate_response(self):
#model="gpt-3.5-turbo-0613",
#model="gpt-4-0613"
model = "gpt-4-1106-preview"
completion = client.chat.completions.create(
model=model,
temperature=1.0,
messages=self.messages
)
return completion
def get_response(self, people_count=None, message=None):
user_message = UserMessage(people_count=people_count, message=message)
self.messages.append({"role": "user", "content": user_message.json()})
completion = self.generate_response()
return self.parse_completion(completion)
| [] |
2024-01-10 | ledwards/gpt-swccg | scripts~upload_files.py | import os
from dotenv import load_dotenv
import openai
dirname = os.path.dirname(__file__)
load_dotenv()
openai.organization = "org-D2FBgBhwLFkKAOsgtSp86b4i"
openai.api_key = os.getenv("OPENAI_API_KEY")
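# Upload each prepared JSONL dataset to OpenAI, tagged with the purpose it will serve (search, answers, fine-tuning).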
file = os.path.join(dirname, '../data/search.jsonl')
openai.File.create(file=open(file), purpose='search')
file = os.path.join(dirname, '../data/answers.jsonl')
openai.File.create(file=open(file), purpose='answers')
file = os.path.join(dirname, '../data/fine_tune.jsonl')
openai.File.create(file=open(file), purpose='fine-tune')
file = os.path.join(dirname, '../data/fine_tune_validation.jsonl')
openai.File.create(file=open(file), purpose='fine-tune')
| [] |
2024-01-10 | ledwards/gpt-swccg | scripts~run_basic_model.py | import os
from dotenv import load_dotenv
import openai
load_dotenv()
openai.organization = "org-D2FBgBhwLFkKAOsgtSp86b4i"
openai.api_key = os.getenv("OPENAI_API_KEY")
remote_files = openai.File.list()["data"]
training_files = filter(lambda f: "answers.jsonl" in f["filename"], remote_files)
latest_file = max(training_files, key=lambda x: x["created_at"])
questions = [
"What destiny is 'Luke Skywalker, Jedi Knight'?",
"What planet is Luke Skywalker from?",
"Which Dark Jedi Master has destiny 6?",
"Which Dark Jedi is power 4 and destiny 6?",
"How many vehicles have a maintenace icon?",
"Which starship has a maintenace icon?",
"What class of Star Destroyer is Conquest?",
"Is Grand Moff Tarkin a leader?",
"Is Grand Moff Tarkin a smuggler?",
]
for question in questions:
answer = openai.Answer.create(
search_model="ada",
model="curie",
question=question,
file=latest_file["id"],
examples_context="Captain Jean-Luc Picard is a Light Side character card. Captain Jean-Luc Picard is a Federation human. Captain Jean-Luc Picard has a power of 5. Will Riker is a humam. Will Riker has a power of 6. Data is an android. Data has a power of 10.",
examples=[
["What Power is Jean-Luc Picard?", "Captain Jean Luc Picard is Power 5"],
["Which side of the Force is Picard?", "Picard is a Light Side card."],
["What race is Captain Jean-Luc Picard?", "Captain Jean-Luc Picard is human."],
["Is Jean-Luc Picard a Federation human?", "Yes"],
["Is Jean-Luc Picard a Dominion Changeling?", "No"],
["Which human has the highest power?", "Captain Jean-Luc Picard"],
["Which character has power 5?", "Captain Jean-Luc Picard"],
["Which card has the highest power?", "Data"],
["Which Federation character has the highest power?", "Data"]
],
max_rerank=50,
max_tokens=20,
stop=["\n", "<|endoftext|>"]
)
print(question)
print(f'> {answer["answers"][0]}') | [] |
2024-01-10 | ledwards/gpt-swccg | scripts~tune_model.py | import os
from dotenv import load_dotenv
import openai
load_dotenv()
openai.organization = "org-D2FBgBhwLFkKAOsgtSp86b4i"
openai.api_key = os.getenv("OPENAI_API_KEY")
# Creates a fine-tuned model for search
remote_files = openai.File.list()["data"]
fine_tuning_files = filter(lambda f: "fine_tune.jsonl" in f["filename"], remote_files)
validation_files = filter(lambda f: "fine_tune_validation.jsonl" in f["filename"], remote_files)
latest_fine_tuning_file = max(fine_tuning_files, key=lambda x: x["created_at"])
latest_validation_file = max(validation_files, key=lambda x: x["created_at"])
openai.FineTune.create(
training_file=latest_fine_tuning_file["id"],
validation_file=latest_validation_file["id"],
model="ada",
n_epochs=4,
batch_size=4,
learning_rate_multiplier=0.1,
prompt_loss_weight=0.1
)
| [] |
2024-01-10 | ledwards/gpt-swccg | scripts~list_resources.py | import os
from dotenv import load_dotenv
import openai
load_dotenv()
openai.organization = "org-D2FBgBhwLFkKAOsgtSp86b4i"
openai.api_key = os.getenv("OPENAI_API_KEY")
remote_files = openai.File.list()["data"]
fine_tune_training_files = filter(lambda f: "fine_tune.jsonl" in f["filename"], remote_files)
latest_fine_tune_training_file = max(fine_tune_training_files, key=lambda x: x["created_at"])
fine_tune_validation_files = filter(lambda f: "fine_tune.jsonl" in f["filename"], remote_files)
latest_fine_tune_validation_file = max(fine_tune_validation_files, key=lambda x: x["created_at"])
fine_tunes = openai.FineTune.list()["data"]
latest_fine_tune_model = max(fine_tunes, key=lambda x: x["created_at"])
print(f"Latest fine-tuned model: {latest_fine_tune_model['id']}")
print(f"Latest fine-tuned model created at: {latest_fine_tune_model['created_at']}")
print(f"Latest fine-tune training file id: {latest_fine_tune_training_file['id']}")
print(f"Latest fine-tune training file created at: {latest_fine_tune_training_file['created_at']}")
print(f"Latest fine-tune validation file id: {latest_fine_tune_validation_file['id']}")
print(f"Latest fine-tune validation file created at: {latest_fine_tune_validation_file['created_at']}") | [] |
2024-01-10 | ledwards/gpt-swccg | scripts~run_fine_tuned_model.py | import os
from dotenv import load_dotenv
import openai
load_dotenv()
################################################################################
# NOTE: This doesn't work, as Answer API doesn't support fine-tuned models yet #
################################################################################
openai.organization = "org-D2FBgBhwLFkKAOsgtSp86b4i"
openai.api_key = os.getenv("OPENAI_API_KEY")
remote_files = openai.File.list()["data"]
training_files = filter(lambda f: "training.jsonl" in f["filename"], remote_files)
latest_file_id = max(training_files, key=lambda x: x["created_at"])["id"]
fine_tunes = openai.FineTune.list()["data"]
latest_fine_tuned_model_id = max(fine_tunes, key=lambda x: x["created_at"])["id"]
questions = [
"What destiny is Wedge Antilles?",
"What card cancels Cloud City Celebration?",
"What gender is Toryn Farr?",
"What race is Bail Organa?",
"What planet is Mon Mothma from?",
"Which Effect downloads Luke's Lightsaber?",
"Which Dark Jedi Master uploads Force Lightning?",
"Which Objective deploys Naboo: Swamp?",
"How many lightsabers can General Greivous use?",
"Which Dark Jedi Master is destiny 6?",
"How many Light Side Jabba's Palace sites are there?"
]
for question in questions:
answer = openai.Answer.create(
search_model="ada",
model=latest_fine_tuned_model_id,
question=question,
examples_context="Captain Jean-Luc Picard is a Light Side character card. Captain Jean-Luc Picard has a power of 5.",
examples=[["What Power is Jean-Luc Picard?", "Captain Jean Luc Picard is Power 5"]],
file=latest_file_id,
max_rerank=50,
max_tokens=20,
stop=["\n", "<|endoftext|>"]
)
print(question)
print(f'> {answer["answers"][0]}')
| [] |
2024-01-10 | ledwards/gpt-swccg | scripts~run_hybrid_model.py | import os
from dotenv import load_dotenv
import openai
load_dotenv()
openai.organization = "org-D2FBgBhwLFkKAOsgtSp86b4i"
openai.api_key = os.getenv("OPENAI_API_KEY")
remote_files = openai.File.list()["data"]
training_files = filter(lambda f: "training.jsonl" in f["filename"], remote_files)
latest_file_id = max(training_files, key=lambda x: x["created_at"])["id"]
fine_tunes = openai.FineTune.list()["data"]
latest_fine_tune_model = max(fine_tunes, key=lambda x: x["created_at"])["fine_tuned_model"]
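# Build the QA context from the top semantic-search hits, joined by '###' separators; results stop being added once the running length estimate exceeds max_len.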
def create_context(question, max_len=1800, search_model='ada', max_rerank=10):
results = openai.Engine(search_model).search(
search_model=search_model,
query=question,
max_rerank=max_rerank,
file=latest_file_id,
return_metadata=True
)
returns = []
cur_len = 0
for result in results['data']:
cur_len += len(result['metadata']) + 4
if cur_len > max_len:
break
returns.append(result['text'])
return "\n\n###\n\n".join(returns)
def answer_question(question, fine_tuned_qa_model, max_len=1800, search_model='ada', max_rerank=10, debug=False):
context = create_context(question, max_len=max_len, search_model=search_model, max_rerank=max_rerank)
if debug:
print("Context:\n" + context)
print("\n\n")
try:
response = openai.Completion.create(
model=fine_tuned_qa_model,
prompt=f"Answer the question based on the context below\n\nText: {context}\n\n---\n\nQuestion: {question}\nAnswer:",
temperature=0,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['text']
except Exception as e:
print (f'ERROR: {e}')
print(f'Latest fine-tune model: {latest_fine_tune_model}')
return ""
question = "Who called Luke Skywalker 'Wormie'?"
print(question)
answer = answer_question(question, latest_fine_tune_model)
print(answer) | [
"Answer the question based on the context below\n\nText: PLACEHOLDER\n\n---\n\nQuestion: PLACEHOLDER\nAnswer:"
] |
2024-01-10 | BenBeard115/internet-archiver | api~chat_gpt_utils.py | """Script for Open AI functions."""
from dotenv import load_dotenv
from openai import OpenAI
GPT_3_MODEL = 'gpt-3.5-turbo-1106'
GPT_4_MODEL = 'gpt-4-1106-preview'
load_dotenv()
def read_html_file(file_path: str) -> str:
"""Reads in HTML file."""
with open(file_path, 'r', encoding='utf-8') as f:
html_content = f.read()
return html_content
def generate_summary(html_content, gpt_model: str = GPT_3_MODEL):
"""Generates summary of HTML content"""
max_html_tokens = 10000 if gpt_model == GPT_3_MODEL else 50000
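    # Note: slicing truncates by character count, not by actual model tokens.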
html_content = html_content[:max_html_tokens]
client = OpenAI()
prompt = """You are an elite web content curator, renowned for crafting compelling and succinct summaries of web pages. Your expertise lies in distilling the essence of a webpage's content and function, with a primary focus on conveying what the page is about.
Your task is to create a summary of the HTML document you receive, capturing the essence of the webpage's content in a way that is informative and engaging for users of our internet archiver website. 🌐✨
Ensure your summary is both captivating and concise, as it will be stored in our database for users to access. Kickstart the description by highlighting the core theme or purpose of the page, enticing users to explore further. Feel free to incorporate emojis to add a touch of vibrancy.
Your mission is to make each summary an invitation, sparking curiosity and encouraging users to delve into the fascinating world captured within each archived webpage. 📚💻
"""
completion = client.chat.completions.create(
model=gpt_model,
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": f"Please summarise this webpage as instructed: {html_content}."}
]
)
return completion.choices[0].message.content
if __name__ == "__main__":
filenames = ['pete_bradshaw', 'rains']
for filename in filenames:
html_file = f'static/{filename}.html'
html_content = read_html_file(html_file)
print(filename)
print(generate_summary(html_content))
print()
| [
"You are an elite web content curator, renowned for crafting compelling and succinct summaries of web pages. Your expertise lies in distilling the essence of a webpage's content and function, with a primary focus on conveying what the page is about. \n\nYour task is to create a summary of the HTML document you receive, capturing the essence of the webpage's content in a way that is informative and engaging for users of our internet archiver website. 🌐✨ \n\nEnsure your summary is both captivating and concise, as it will be stored in our database for users to access. Kickstart the description by highlighting the core theme or purpose of the page, enticing users to explore further. Feel free to incorporate emojis to add a touch of vibrancy.\n\nYour mission is to make each summary an invitation, sparking curiosity and encouraging users to delve into the fascinating world captured within each archived webpage. 📚💻\n",
"Please summarise this webpage as instructed: PLACEHOLDER."
] |
2024-01-10 | consideRatio/Pyleoclim_util | pyleoclim~core~series.py | """
The Series class describes the most basic objects in Pyleoclim. A Series is a simple `dictionary <https://docs.python.org/3/tutorial/datastructures.html#dictionaries>`_ that contains 3 things:
- a series of real-valued numbers;
- a time axis at which those values were measured/simulated ;
- optionally, some metadata about both axes, like units, labels and the like.
How to create and manipulate such objects is described in a short example below, while `this notebook <https://nbviewer.jupyter.org/github/LinkedEarth/Pyleoclim_util/blob/master/example_notebooks/pyleoclim_ui_tutorial.ipynb>`_ demonstrates how to apply various Pyleoclim methods to Series objects.
"""
from ..utils import tsutils, plotting, tsmodel, tsbase
from ..utils import wavelet as waveutils
from ..utils import spectral as specutils
from ..utils import correlation as corrutils
from ..utils import causality as causalutils
from ..utils import decomposition
from ..utils import filter as filterutils
from ..core.psds import PSD
from ..core.ssares import SsaRes
from ..core.multipleseries import MultipleSeries
from ..core.scalograms import Scalogram
from ..core.coherence import Coherence
from ..core.corr import Corr
from ..core.surrogateseries import SurrogateSeries
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl # could also from matplotlib.colors import ColorbarBase
import numpy as np
import pandas as pd
from tabulate import tabulate
from collections import namedtuple
from copy import deepcopy
import matplotlib.colors as mcolors
import matplotlib.colorbar as mcb
import random
from matplotlib import gridspec
import warnings
import collections
def dict2namedtuple(d):
''' Convert a dictionary to a namedtuple
'''
tupletype = namedtuple('tupletype', sorted(d))
return tupletype(**d)
class Series:
'''The Series class describes the most basic objects in Pyleoclim.
A Series is a simple `dictionary <https://docs.python.org/3/tutorial/datastructures.html#dictionaries>`_ that contains 3 things:
* a series of real-valued numbers;
* a time axis at which those values were measured/simulated ;
* optionally, some metadata about both axes, like units, labels and the like.
How to create and manipulate such objects is described in a short example below, while `this notebook <https://nbviewer.jupyter.org/github/LinkedEarth/Pyleoclim_util/blob/master/example_notebooks/pyleoclim_ui_tutorial.ipynb>`_ demonstrates how to apply various Pyleoclim methods to Series objects.
Parameters
----------
time : list or numpy.array
independent variable (t)
    value : list or numpy.array
values of the dependent variable (y)
time_unit : string
Units for the time vector (e.g., 'years').
Default is 'years'
time_name : string
Name of the time vector (e.g., 'Time','Age').
Default is None. This is used to label the time axis on plots
value_name : string
Name of the value vector (e.g., 'temperature')
Default is None
value_unit : string
Units for the value vector (e.g., 'deg C')
Default is None
label : string
Name of the time series (e.g., 'Nino 3.4')
Default is None
clean_ts : boolean flag
set to True to remove the NaNs and make time axis strictly prograde with duplicated timestamps reduced by averaging the values
Default is True
log : dict
If keep_log is set to True, then a log of the transformations made to the timeseries will be kept.
verbose : bool
If True, will print warning messages if there is any
Examples
--------
In this example, we import the Southern Oscillation Index (SOI) into a pandas dataframe and create a Series object.
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data=pd.read_csv(
'https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',
skiprows=0, header=1
)
time=data.iloc[:,1]
value=data.iloc[:,2]
ts=pyleo.Series(
time=time, value=value,
time_name='Year (CE)', value_name='SOI', label='Southern Oscillation Index'
)
ts
ts.__dict__.keys()
For a quick look at the values, one may use the `print()` method. We do so below for a short slice of the data so as not to overwhelm the display:
.. ipython:: python
:okwarning:
:okexcept:
print(ts.slice([1982,1983]))
'''
def __init__(self, time, value, time_name=None, time_unit=None, value_name=None,
value_unit=None, label=None, mean=None, clean_ts=True, log=None, verbose=False):
# TODO: remove mean argument once it's safe to do so
if log is None:
self.log = ()
nlog = -1
else:
self.log = log
nlog = len(log)
if clean_ts == True:
value, time = tsbase.clean_ts(np.array(value), np.array(time), verbose=verbose)
self.log = self.log + ({nlog+1: 'clean_ts', 'applied': clean_ts, 'verbose': verbose},)
self.time = np.array(time)
self.value = np.array(value)
self.time_name = time_name
self.time_unit = time_unit
self.value_name = value_name
self.value_unit = value_unit
self.label = label
#self.clean_ts=clean_ts
#self.verbose=verbose
if mean is None:
self.mean=np.mean(self.value)
else:
self.mean = mean
def convert_time_unit(self, time_unit='years', keep_log=False):
''' Convert the time unit of the Series object
Parameters
----------
time_unit : str
the target time unit, possible input:
{
'year', 'years', 'yr', 'yrs',
'y BP', 'yr BP', 'yrs BP', 'year BP', 'years BP',
'ky BP', 'kyr BP', 'kyrs BP', 'ka BP', 'ka',
'my BP', 'myr BP', 'myrs BP', 'ma BP', 'ma',
}
keep_log : Boolean
if True, adds this step and its parameter to the series log.
Examples
--------
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv(
'https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',
skiprows=0, header=1)
time = data.iloc[:,1]
value = data.iloc[:,2]
ts = pyleo.Series(time=time, value=value, time_unit='years')
new_ts = ts.convert_time_unit(time_unit='yrs BP')
print('Original timeseries:')
print('time unit:', ts.time_unit)
print('time:', ts.time[:10])
print()
print('Converted timeseries:')
print('time unit:', new_ts.time_unit)
print('time:', new_ts.time[:10])
'''
new_ts = self.copy()
if time_unit is not None:
tu = time_unit.lower()
if tu.find('ky')>=0 or tu.find('ka')>=0:
time_unit_label = 'ky BP'
elif tu.find('my')>=0 or tu.find('ma')>=0:
time_unit_label = 'my BP'
elif tu.find('y bp')>=0 or tu.find('yr bp')>=0 or tu.find('yrs bp')>=0 or tu.find('year bp')>=0 or tu.find('years bp')>=0:
time_unit_label = 'yrs BP'
elif tu.find('yr')>=0 or tu.find('year')>=0 or tu.find('yrs')>=0 or tu.find('years')>=0:
time_unit_label = 'yrs'
else:
raise ValueError(f"Input time_unit={time_unit} is not supported. Supported input: 'year', 'years', 'yr', 'yrs', 'y BP', 'yr BP', 'yrs BP', 'year BP', 'years BP', 'ky BP', 'kyr BP', 'kyrs BP', 'ka BP', 'my BP', 'myr BP', 'myrs BP', 'ma BP'.")
else:
return new_ts
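        # Nested helpers: first convert the native time axis to calendar years, then derive years BP, ky BP, and my BP from it.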
def convert_to_years():
def prograde_time(time, time_datum, time_exponent):
new_time = (time_datum + time)*10**(time_exponent)
return new_time
def retrograde_time(time, time_datum, time_exponent):
new_time = (time_datum - time)*10**(time_exponent)
return new_time
convert_func = {
'prograde': prograde_time,
'retrograde': retrograde_time,
}
if self.time_unit is not None:
tu = self.time_unit.lower()
if tu.find('ky')>=0 or tu.find('ka')>=0:
time_dir = 'retrograde'
time_datum = 1950/1e3
time_exponent = 3
elif tu.find('my')>=0 or tu.find('ma')>=0:
time_dir = 'retrograde'
time_datum = 1950/1e6
time_exponent = 6
elif tu.find('y bp')>=0 or tu.find('yr bp')>=0 or tu.find('yrs bp')>=0 or tu.find('year bp')>=0 or tu.find('years bp')>=0:
time_dir ='retrograde'
time_datum = 1950
time_exponent = 0
elif tu.find('yr')>=0 or tu.find('year')>=0 or tu.find('yrs')>=0 or tu.find('years')>=0:
time_dir ='prograde'
time_datum = 0
time_exponent = 0
else:
raise ValueError(f"Current Series time_unit={self.time_unit} is not supported. Supported time units are: 'year', 'years', 'yr', 'yrs', 'y BP', 'yr BP', 'yrs BP', 'year BP', 'years BP', 'ky BP', 'kyr BP', 'kyrs BP', 'ka BP', 'my BP', 'myr BP', 'myrs BP', 'ma BP'.")
new_time = convert_func[time_dir](self.time, time_datum, time_exponent)
else:
new_time = None
return new_time
def convert_to_bp():
time_yrs = convert_to_years()
time_bp = 1950 - time_yrs
return time_bp
def convert_to_ka():
time_bp = convert_to_bp()
time_ka = time_bp / 1e3
return time_ka
def convert_to_ma():
time_bp = convert_to_bp()
time_ma = time_bp / 1e6
return time_ma
convert_to = {
'yrs': convert_to_years(),
'yrs BP': convert_to_bp(),
'ky BP': convert_to_ka(),
'my BP': convert_to_ma(),
}
new_time = convert_to[time_unit_label]
dt = np.diff(new_time)
if any(dt<=0):
new_value, new_time = tsbase.sort_ts(self.value, new_time)
else:
new_value = self.copy().value
new_ts.time = new_time
new_ts.value = new_value
new_ts.time_unit = time_unit
if keep_log == True:
new_ts.log += ({len(new_ts.log):'convert_time_unit', 'time_unit': time_unit},)
return new_ts
def make_labels(self):
'''
Initialization of plot labels based on Series metadata
Returns
-------
time_header : str
Label for the time axis
value_header : str
Label for the value axis
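Examples
--------
A minimal sketch using a short synthetic series (the values and units below are purely illustrative):
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
ts = pyleo.Series(time=[1, 2, 3], value=[4, 5, 6],
time_name='Time', time_unit='years',
value_name='SOI', value_unit='mb')
ts.make_labels()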
'''
if self.time_name is not None:
time_name_str = self.time_name
else:
time_name_str = 'time'
if self.value_name is not None:
value_name_str = self.value_name
else:
value_name_str = 'value'
if self.value_unit is not None:
value_header = f'{value_name_str} [{self.value_unit}]'
else:
value_header = f'{value_name_str}'
if self.time_unit is not None:
time_header = f'{time_name_str} [{self.time_unit}]'
else:
time_header = f'{time_name_str}'
return time_header, value_header
def __str__(self):
'''
Prints out the series in a table format and length of the series
Returns
-------
str
length of the timeseries.
'''
time_label, value_label = self.make_labels()
table = {
time_label: self.time,
value_label: self.value,
}
print(tabulate(table, headers='keys'))
return f'Length: {np.size(self.time)}'
def stats(self):
""" Compute basic statistics from a Series
Computes the mean, median, min, max, standard deviation, and interquartile range of a numpy array y, ignoring NaNs.
Returns
-------
res : dictionary
Contains the mean, median, minimum value, maximum value, standard
deviation, and interquartile range for the Series.
Examples
--------
Compute basic statistics for the SOI series
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
time=data.iloc[:,1]
value=data.iloc[:,2]
ts=pyleo.Series(time=time,value=value,time_name='Year C.E', value_name='SOI', label='SOI')
ts.stats()
"""
mean, median, min_, max_, std, IQR = tsutils.simple_stats(self.value)
res={'mean':mean,
'median':median,
'min':min_,
'max':max_,
'std':std,
'IQR': IQR}
return res
def flip(self, axis='value', keep_log = False):
'''
Flips the Series along one or both axes
Parameters
----------
axis : str, optional
The axis along which the Series will be flipped. The default is 'value'.
Other acceptable options are 'time' or 'both'.
TODO: enable time flipping after paleopandas is released
keep_log : Boolean
if True, adds this transformation to the series log.
Returns
-------
new : Series
The flipped series object
Examples
--------
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
time = data.iloc[:,1]
value = data.iloc[:,2]
ts = pyleo.Series(time=time,value=value,time_name='Year C.E', value_name='SOI', label='SOI')
tsf = ts.flip(keep_log=True)
@savefig ts_flipped.png
fig, ax = tsf.plot()
tsf.log
pyleo.closefig(fig)
'''
if self.log is not None:
methods = [self.log[idx][idx] for idx in range(len(self.log))]  # each log entry is a dict keyed by its position in the log
if 'flip' in methods:
warnings.warn("this Series' log indicates that it has previously been flipped")
new = self.copy()
if axis == 'value':
new.value = - self.value
new.value_name = new.value_name + ' x (-1)'
else:
print('Flipping is only enabled along the value axis for now')
if keep_log == True:
new.log += ({len(new.log): 'flip', 'applied': True, 'axis': axis},)
return new
def plot(self, figsize=[10, 4],
marker=None, markersize=None, color=None,
linestyle=None, linewidth=None, xlim=None, ylim=None,
label=None, xlabel=None, ylabel=None, title=None, zorder=None,
legend=True, plot_kwargs=None, lgd_kwargs=None, alpha=None,
savefig_settings=None, ax=None, invert_xaxis=False, invert_yaxis=False):
''' Plot the timeseries
Parameters
----------
figsize : list
a list of two integers indicating the figure size
marker : str
e.g., 'o' for dots
See [matplotlib.markers](https://matplotlib.org/stable/api/markers_api.html) for details
markersize : float
the size of the marker
color : str, list
the color for the line plot
e.g., 'r' for red
See [matplotlib colors](https://matplotlib.org/stable/gallery/color/color_demo.html) for details
linestyle : str
e.g., '--' for dashed line
See [matplotlib.linestyles](https://matplotlib.org/stable/gallery/lines_bars_and_markers/linestyles.html) for details
linewidth : float
the width of the line
label : str
the label for the line
xlabel : str
the label for the x-axis
ylabel : str
the label for the y-axis
title : str
the title for the figure
zorder : int
The default drawing order for all lines on the plot
legend : {True, False}
plot legend or not
invert_xaxis : bool, optional
if True, the x-axis of the plot will be inverted
invert_yaxis : bool, optional
same for the y-axis
plot_kwargs : dict
the dictionary of keyword arguments for ax.plot()
See [matplotlib.pyplot.plot](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html) for details
lgd_kwargs : dict
the dictionary of keyword arguments for ax.legend()
See [matplotlib.pyplot.legend](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.legend.html) for details
alpha : float
Transparency setting
savefig_settings : dict
the dictionary of arguments for plt.savefig(); some notes below:
- "path" must be specified; it can be any existed or non-existed path,
with or without a suffix; if the suffix is not given in "path", it will follow "format"
- "format" can be one of {"pdf", "eps", "png", "ps"}
ax : matplotlib.axis, optional
the axis object from matplotlib
See [matplotlib.axes](https://matplotlib.org/api/axes_api.html) for details.
Returns
-------
fig : matplotlib.figure
the figure object from matplotlib
See [matplotlib.pyplot.figure](https://matplotlib.org/stable/api/figure_api.html) for details.
ax : matplotlib.axis
the axis object from matplotlib
See [matplotlib.axes](https://matplotlib.org/stable/api/axes_api.html) for details.
Notes
-----
When `ax` is passed, the return will be `ax` only; otherwise, both `fig` and `ax` will be returned.
See also
--------
pyleoclim.utils.plotting.savefig : saving a figure in Pyleoclim
Examples
--------
Plot the SOI record
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
time = data.iloc[:,1]
value = data.iloc[:,2]
ts = pyleo.Series(time=time,value=value,time_name='Year C.E', value_name='SOI', label='SOI')
@savefig ts_plot.png
fig, ax = ts.plot()
pyleo.closefig(fig)
Change the line color
.. ipython:: python
:okwarning:
:okexcept:
@savefig ts_plot2.png
fig, ax = ts.plot(color='r')
pyleo.closefig(fig)
Save the figure. Two options available, only one is needed:
* Within the plotting command
* After the figure has been generated
.. ipython:: python
:okwarning:
:okexcept:
fig, ax = ts.plot(color='k', savefig_settings={'path': 'ts_plot3.png'}); pyleo.closefig(fig)
pyleo.savefig(fig,path='ts_plot3.png')
'''
# generate default axis labels
time_label, value_label = self.make_labels()
if xlabel is None:
xlabel = time_label
if ylabel is None:
ylabel = value_label
plot_kwargs = {} if plot_kwargs is None else plot_kwargs.copy()
if label is None:
label = self.label
if label is not None:
plot_kwargs.update({'label': label})
if marker is not None:
plot_kwargs.update({'marker': marker})
if markersize is not None:
plot_kwargs.update({'markersize': markersize})
if color is not None:
plot_kwargs.update({'color': color})
if linestyle is not None:
plot_kwargs.update({'linestyle': linestyle})
if linewidth is not None:
plot_kwargs.update({'linewidth': linewidth})
if alpha is not None:
plot_kwargs.update({'alpha': alpha})
if zorder is not None:
plot_kwargs.update({'zorder': zorder})
res = plotting.plot_xy(
self.time, self.value,
figsize=figsize, xlabel=xlabel, ylabel=ylabel,
title=title, savefig_settings=savefig_settings,
ax=ax, legend=legend, xlim=xlim, ylim=ylim,
plot_kwargs=plot_kwargs, lgd_kwargs=lgd_kwargs,
invert_xaxis=invert_xaxis, invert_yaxis=invert_yaxis
)
return res
def stripes(self, ref_period, LIM = 2.8, thickness=1.0, figsize=[8, 1], xlim=None,
top_label=None, bottom_label=None, label_color = 'gray', label_size = None,
xlabel=None, savefig_settings=None, ax=None, invert_xaxis=False,
show_xaxis=False, x_offset = 0.05):
'''Represents the Series as an Ed Hawkins "stripes" pattern
Credit: https://matplotlib.org/matplotblog/posts/warming-stripes/
Parameters
----------
ref_period : array-like (2-elements)
dates of the reference period, in the form "(first, last)"
thickness : float, optional
vertical thickness of the stripe . The default is 1.0
LIM : float
scaling factor for color saturation. default is 2.8
figsize : list
a list of two integers indicating the figure size (in inches)
xlim : list
time axis limits
top_label : str
the "title" label for the stripe
bottom_label : str
the "ylabel" explaining which variable is being plotted
invert_xaxis : bool, optional
if True, the x-axis of the plot will be inverted
x_offset : float
value controlling the horizontal offset between stripes and labels (default = 0.05)
show_xaxis : bool
flag indicating whether or not the x-axis should be shown (default = False)
savefig_settings : dict
the dictionary of arguments for plt.savefig(); some notes below:
- "path" must be specified; it can be any existed or non-existed path,
with or without a suffix; if the suffix is not given in "path", it will follow "format"
- "format" can be one of {"pdf", "eps", "png", "ps"}
ax : matplotlib.axis, optional
the axis object from matplotlib
See [matplotlib.axes](https://matplotlib.org/api/axes_api.html) for details.
Returns
-------
fig : matplotlib.figure
the figure object from matplotlib
See [matplotlib.pyplot.figure](https://matplotlib.org/stable/api/figure_api.html) for details.
ax : matplotlib.axis
the axis object from matplotlib
See [matplotlib.axes](https://matplotlib.org/stable/api/axes_api.html) for details.
Notes
-----
When `ax` is passed, the return will be `ax` only; otherwise, both `fig` and `ax` will be returned.
See also
--------
pyleoclim.utils.plotting.stripes : stripes representation of a timeseries
pyleoclim.utils.plotting.savefig : saving a figure in Pyleoclim
Examples
--------
Plot the HadCRUT5 Global Mean Surface Temperature
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
url = 'https://www.metoffice.gov.uk/hadobs/hadcrut5/data/current/analysis/diagnostics/HadCRUT.5.0.1.0.analysis.summary_series.global.annual.csv'
df = pd.read_csv(url)
time = df['Time']
gmst = df['Anomaly (deg C)']
ts = pyleo.Series(time=time,value=gmst, label = 'HadCRUT5', time_name='Year C.E', value_name='GMST')
@savefig hadCRUT5_stripes.png
fig, ax = ts.stripes(ref_period=(1971,2000))
pyleo.closefig(fig)
If you wanted to show the time axis:
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
url = 'https://www.metoffice.gov.uk/hadobs/hadcrut5/data/current/analysis/diagnostics/HadCRUT.5.0.1.0.analysis.summary_series.global.annual.csv'
df = pd.read_csv(url)
time = df['Time']
gmst = df['Anomaly (deg C)']
ts = pyleo.Series(time=time,value=gmst, label = 'HadCRUT5', time_name='Year C.E', value_name='GMST')
@savefig hadCRUT5_stripes2.png
fig, ax = ts.stripes(ref_period=(1971,2000), show_xaxis=True, figsize=[8, 1.2])
pyleo.closefig(fig)
Note that we had to increase the figure height to make space for the extra text.
'''
if top_label is None:
top_label = self.label
if bottom_label is None:
bottom_label = self.value_name
idx0 = (np.abs(self.time - ref_period[0])).argmin()
idx1 = (np.abs(self.time - ref_period[1])).argmin()
LIMs = self.value.std()*LIM
# Ed Hawkins says: Currently I use HadCRUT5 with a 1971-2000 baseline
# and a colour scaling of +/- 0.75K (which is probably similar to LIM).
# It should be relatively simple to duplicate the stripes exactly
res = plotting.stripes_xy(
x=self.time, y=self.value, ref_period=(idx0,idx1), LIM = LIMs, thickness = thickness,
top_label = top_label, bottom_label = bottom_label, label_color = label_color,
figsize=figsize, ax=ax, xlim=xlim, invert_xaxis=invert_xaxis, label_size=label_size,
savefig_settings=savefig_settings, show_xaxis=show_xaxis, x_offset = x_offset,
)
return res
def ssa(self, M=None, nMC=0, f=0.3, trunc = None, var_thresh=80):
''' Singular Spectrum Analysis
Nonparametric, orthogonal decomposition of timeseries into constituent oscillations.
This implementation uses the method of [1], with applications presented in [2].
Optionally (nMC>0), the significance of eigenvalues is assessed by Monte-Carlo simulations of an AR(1) model fit to X, using [3].
The method expects regular spacing, but is tolerant to missing values, up to a fraction 0<f<1 (see [4]).
Parameters
----------
M : int, optional
window size. The default is None (10% of the length of the series).
nMC : int, optional
Number of iterations in the Monte-Carlo process. The default is 0.
f : float, optional
maximum allowable fraction of missing values. The default is 0.3.
trunc : str
if present, truncates the expansion to a level K < M owing to one of 3 criteria:
(1) 'kaiser': variant of the Kaiser-Guttman rule, retaining eigenvalues larger than the median
(2) 'mcssa': Monte-Carlo SSA (use modes above the 95% threshold)
(3) 'var': first K modes that explain at least var_thresh % of the variance.
Default is None, which bypasses truncation (K = M)
var_thresh : float
variance threshold for reconstruction (only impactful if trunc is set to 'var')
Returns
-------
res : object of the SsaRes class containing:
eigvals : (M, ) array of eigenvalues
eigvecs : (M, M) Matrix of temporal eigenvectors (T-EOFs)
PC : (N - M + 1, M) array of principal components (T-PCs)
RCmat : (N, M) array of reconstructed components
RCseries : (N,) reconstructed series, with mean and variance restored
pctvar: (M, ) array of the fraction of variance (%) associated with each mode
eigvals_q : (M, 2) array containing the 5% and 95% quantiles of the Monte-Carlo eigenvalue spectrum [ if nMC >0 ]
References
----------
[1]_ Vautard, R., and M. Ghil (1989), Singular spectrum analysis in nonlinear
dynamics, with applications to paleoclimatic time series, Physica D, 35,
395–424.
[2]_ Ghil, M., R. M. Allen, M. D. Dettinger, K. Ide, D. Kondrashov, M. E. Mann,
A. Robertson, A. Saunders, Y. Tian, F. Varadi, and P. Yiou (2002),
Advanced spectral methods for climatic time series, Rev. Geophys., 40(1),
1003–1052, doi:10.1029/2000RG000092.
[3]_ Allen, M. R., and L. A. Smith (1996), Monte Carlo SSA: Detecting irregular
oscillations in the presence of coloured noise, J. Clim., 9, 3373–3404.
[4]_ Schoellhamer, D. H. (2001), Singular spectrum analysis for time series with
missing data, Geophysical Research Letters, 28(16), 3187–3190, doi:10.1029/2000GL012698.
See also
--------
pyleoclim.utils.decomposition.ssa : Singular Spectrum Analysis utility
pyleoclim.core.ssares.SsaRes.modeplot : plot SSA modes
pyleoclim.core.ssares.SsaRes.screeplot : plot SSA eigenvalue spectrum
Examples
--------
SSA with SOI
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
time = data.iloc[:,1]
value = data.iloc[:,2]
ts = pyleo.Series(time=time, value=value, time_name='Year C.E', value_name='SOI', label='SOI')
@savefig ts_plot4.png
fig, ax = ts.plot()
pyleo.closefig(fig)
nino_ssa = ts.ssa(M=60)
Let us now see how to make use of all these arrays. The first step is to inspect the eigenvalue spectrum ("scree plot") to identify remarkable modes. Let us restrict ourselves to the first 40, so we can see something:
.. ipython:: python
:okwarning:
:okexcept:
@savefig ts_eigen.png
fig, ax = nino_ssa.screeplot()
pyleo.closefig(fig)
This highlights a few common phenomena with SSA:
* the eigenvalues are in descending order
* their uncertainties are proportional to the eigenvalues themselves
* the eigenvalues tend to come in pairs: (1,2) and (3,4) are clustered within uncertainties; (5,6) looks like another doublet
* around i=15, the eigenvalues appear to reach a floor, and all subsequent eigenvalues explain a very small amount of variance.
So, summing the variance of the first 14 modes, we get:
.. ipython:: python
:okwarning:
:okexcept:
print(nino_ssa.pctvar[:14].sum())
That is a typical result for a (paleo)climate timeseries; a few modes do the vast majority of the work. That means we can focus our attention on these modes and capture most of the interesting behavior. To see this, let's use the reconstructed components (RCs), and sum the RC matrix over the first 14 columns:
.. ipython:: python
:okwarning:
:okexcept:
RCk = nino_ssa.RCmat[:,:14].sum(axis=1)
@savefig ssa_recon.png
fig, ax = ts.plot(title='SOI')
ax.plot(time,RCk,label='SSA reconstruction, 14 modes',color='orange')
ax.legend()
pyleo.closefig(fig)
Indeed, these first few modes capture the vast majority of the low-frequency behavior, including all the El Niño/La Niña events. What is left (the blue wiggles not captured in the orange curve) are high-frequency oscillations that might be considered "noise" from the standpoint of ENSO dynamics. This illustrates how SSA might be used for filtering a timeseries. One must be careful however:
* there was not much rhyme or reason for picking 14 modes. Why not 5, or 39? All we have seen so far is that they gather >95% of the variance, which is by no means a magic number.
* there is no guarantee that the first few modes will filter out high-frequency behavior, or at what frequency cutoff they will do so. If you need to cut out specific frequencies, you are better off doing it with a classical filter, like the butterworth filter implemented in Pyleoclim. However, in many instances the choice of a cutoff frequency is itself rather arbitrary. In such cases, SSA provides a principled alternative for generating a version of a timeseries that preserves features and excludes others (i.e, a filter).
* as with all orthogonal decompositions, summing over all RCs will recover the original signal within numerical precision.
Monte-Carlo SSA
Selecting meaningful modes in eigenproblems (e.g. EOF analysis) is more art than science. However, one technique stands out: Monte Carlo SSA, introduced by Allen & Smith (1996) to identify SSA modes that rise above what one would expect from "red noise", specifically an AR(1) process. To run it, simply provide the parameter nMC, ideally with a number of iterations sufficient to get decent statistics. Here let's use nMC = 1000. The result will be stored in the eigvals_q array, which has the same length as eigvals, and its two columns contain the 5% and 95% quantiles of the ensemble of MC-SSA eigenvalues.
.. ipython:: python
:okwarning:
:okexcept:
nino_mcssa = ts.ssa(M = 60, nMC=1000)
Now let's look at the result:
.. ipython:: python
:okwarning:
:okexcept:
@savefig scree_mc.png
fig, ax = nino_mcssa.screeplot()
pyleo.closefig(fig)
print('Indices of modes retained: '+ str(nino_mcssa.mode_idx))
This suggests that modes 1-5 fall above the red noise benchmark. To inspect mode 1 (index 0), just type:
.. ipython:: python
:okwarning:
:okexcept:
@savefig ssa_mode0plot.png
fig, ax = nino_mcssa.modeplot(index=0)
pyleo.closefig(fig)
'''
res = decomposition.ssa(self.value, M=M, nMC=nMC, f=f, trunc = trunc, var_thresh=var_thresh)
resc = SsaRes(name=self.value_name, original=self.value, time = self.time, eigvals = res['eigvals'], eigvecs = res['eigvecs'],
pctvar = res['pctvar'], PC = res['PC'], RCmat = res['RCmat'],
RCseries=res['RCseries'], mode_idx=res['mode_idx'])
if nMC > 0:
resc.eigvals_q=res['eigvals_q'] # assign eigenvalue quantiles if Monte-Carlo SSA was called
return resc
def is_evenly_spaced(self, tol=1e-3):
'''Check if the Series time axis is evenly-spaced, within tolerance
Parameters
----------
tol : float
tolerance. If time increments are all within tolerance, the series
is declared evenly-spaced. default = 1e-3
Returns
-------
res : bool
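Examples
--------
A minimal sketch contrasting a regular and an irregular synthetic time axis (values are purely illustrative):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
ts_even = pyleo.Series(time=np.arange(10), value=np.random.randn(10))
ts_even.is_evenly_spaced()
ts_uneven = pyleo.Series(time=np.sort(np.random.rand(10)), value=np.random.randn(10))
ts_uneven.is_evenly_spaced()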
'''
res = tsbase.is_evenly_spaced(self.time, tol)
return res
def filter(self, cutoff_freq=None, cutoff_scale=None, method='butterworth', keep_log= False, **kwargs):
''' Filtering methods for Series objects using four possible methods:
- `Butterworth <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html>`_
- `Lanczos <http://scitools.org.uk/iris/docs/v1.2/examples/graphics/SOI_filtering.html>`_
- `Finite Impulse Response <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.firwin.html>`_
- `Savitzky-Golay filter <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html>`_
By default, this method implements a lowpass filter, though it can easily
be turned into a bandpass or high-pass filter (see examples below).
Parameters
----------
method : str, {'savitzky-golay', 'butterworth', 'firwin', 'lanczos'}
the filtering method
- 'butterworth': a Butterworth filter (default = 3rd order)
- 'savitzky-golay': Savitzky-Golay filter
- 'firwin': finite impulse response filter design using the window method, with default window as Hamming
- 'lanczos': Lanczos zero-phase filter
cutoff_freq : float or list
The cutoff frequency.
If a float, it is interpreted as a low-frequency cutoff (lowpass).
If a list, it is interpreted as a frequency band (f1, f2), with f1 < f2 (bandpass).
Note that bandpass filtering is currently supported by the Butterworth (default) and FIR ('firwin') methods; the Lanczos method only accepts a scalar cutoff.
cutoff_scale : float or list
cutoff_freq = 1 / cutoff_scale
Only used when cutoff_freq is None.
If a float, it is interpreted as a low-frequency (high-scale) cutoff (lowpass).
If a list, it is interpreted as a scale band, which is converted to the corresponding frequency band (bandpass).
keep_log : Boolean
if True, adds this step and its parameters to the series log.
kwargs : dict
a dictionary of the keyword arguments for the filtering method,
see `pyleoclim.utils.filter.savitzky_golay`, `pyleoclim.utils.filter.butterworth`, `pyleoclim.utils.filter.lanczos` and `pyleoclim.utils.filter.firwin` for the details
Returns
-------
new : Series
See also
--------
pyleoclim.utils.filter.butterworth : Butterworth method
pyleoclim.utils.filter.savitzky_golay : Savitzky-Golay method
pyleoclim.utils.filter.firwin : FIR filter design using the window method
pyleoclim.utils.filter.lanczos : lowpass filter via Lanczos resampling
Examples
--------
In the example below, we generate a signal as the sum of two signals with frequency 10 Hz and 20 Hz, respectively.
Then we apply a low-pass filter with a cutoff frequency at 15 Hz, and compare the output to the signal of 10 Hz.
After that, we apply a band-pass filter with the band 15-25 Hz, and compare the outcome to the signal of 20 Hz.
- Generating the test data
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import numpy as np
t = np.linspace(0, 1, 1000)
sig1 = np.sin(2*np.pi*10*t)
sig2 = np.sin(2*np.pi*20*t)
sig = sig1 + sig2
ts1 = pyleo.Series(time=t, value=sig1)
ts2 = pyleo.Series(time=t, value=sig2)
ts = pyleo.Series(time=t, value=sig)
@savefig ts_filter1.png
fig, ax = ts.plot(label='mix')
ts1.plot(ax=ax, label='10 Hz')
ts2.plot(ax=ax, label='20 Hz')
ax.legend(loc='upper left', bbox_to_anchor=(0, 1.1), ncol=3)
- Applying a low-pass filter
.. ipython:: python
:okwarning:
:okexcept:
fig, ax = ts.plot(label='mix')
ts.filter(cutoff_freq=15).plot(ax=ax, label='After 15 Hz low-pass filter')
@savefig ts_filter2.png
ts1.plot(ax=ax, label='10 Hz')
ax.legend(loc='upper left', bbox_to_anchor=(0, 1.1), ncol=3)
- Applying a band-pass filter
.. ipython:: python
:okwarning:
:okexcept:
fig, ax = ts.plot(label='mix')
ts.filter(cutoff_freq=[15, 25]).plot(ax=ax, label='After 15-25 Hz band-pass filter')
@savefig ts_filter3.png
ts2.plot(ax=ax, label='20 Hz')
ax.legend(loc='upper left', bbox_to_anchor=(0, 1.1), ncol=3)
The example above uses the default Butterworth filter. Using FIR filtering with a window such as Hanning is equally simple:
.. ipython:: python
:okwarning:
:okexcept:
fig, ax = ts.plot(label='mix')
ts.filter(cutoff_freq=[15, 25], method='firwin', window='hanning').plot(ax=ax, label='After 15-25 Hz band-pass filter')
@savefig ts_filter4.png
ts2.plot(ax=ax, label='20 Hz')
ax.legend(loc='upper left', bbox_to_anchor=(0, 1.1), ncol=3)
- Applying a high-pass filter
.. ipython:: python
:okwarning:
:okexcept:
fig, ax = ts.plot(label='mix')
ts_low = ts.filter(cutoff_freq=15)
ts_high = ts.copy()
ts_high.value = ts.value - ts_low.value # subtract low-pass filtered series from original one
@savefig ts_filter5.png
ts_high.plot(label='High-pass filter @ 15Hz',ax=ax)
ax.legend(loc='upper left', bbox_to_anchor=(0, 1.1), ncol=3)
'''
if not self.is_evenly_spaced():
raise ValueError('This method assumes evenly-spaced timeseries, while the input is not. Use the ".interp()", ".bin()" or ".gkernel()" methods prior to ".filter()".')
new = self.copy()
mu = np.mean(self.value) # extract the mean
y = self.value - mu
fs = 1/np.mean(np.diff(self.time))
method_func = {
'savitzky-golay': filterutils.savitzky_golay,
'butterworth': filterutils.butterworth,
'firwin': filterutils.firwin,
'lanczos': filterutils.lanczos,
}
if method not in method_func.keys():
raise ValueError('Method value is not an appropriate method for filters')
args = {}
if method in ['butterworth', 'firwin', 'lanczos']:
if cutoff_freq is None:
if cutoff_scale is None:
raise ValueError('Please set the cutoff frequency or scale argument: "cutoff_freq" or "cutoff_scale".')
else:
if np.isscalar(cutoff_scale):
cutoff_freq = 1 / cutoff_scale
elif len(cutoff_scale) == 2 and method in ['butterworth', 'firwin']:
cutoff_scale = np.array(cutoff_scale)
cutoff_freq = np.sort(1 / cutoff_scale)
cutoff_freq = list(cutoff_freq)
elif len(cutoff_scale) > 1 and method == 'lanczos':
raise ValueError('Lanczos filter requires a scalar input as cutoff scale/frequency')
else:
raise ValueError('Wrong cutoff_scale; should be either one float value (lowpass) or a list of two float values (bandpass).')
# assign optional arguments
args['butterworth'] = {'fc': cutoff_freq, 'fs': fs}
args['firwin'] = {'fc': cutoff_freq, 'fs': fs}
args['lanczos'] = {'fc': cutoff_freq, 'fs': fs}
else: # for Savitzky-Golay only
if cutoff_scale is None and cutoff_freq is None:
raise ValueError('No cutoff_scale or cutoff_freq argument provided')
elif cutoff_freq is not None:
cutoff_scale = 1 / cutoff_freq
window_length = int(cutoff_scale*fs)
if window_length % 2 == 0:
window_length += 1 # window length needs to be an odd integer
args['savitzky-golay'] = {'window_length': window_length}
args[method].update(kwargs)
new_val = method_func[method](y, **args[method])
new.value = new_val + mu # restore the mean
if keep_log == True:
new.log += ({len(new.log): 'filter','method': method, 'args': kwargs, 'fs': fs, 'cutoff_freq': cutoff_freq},)
return new
def histplot(self, figsize=[10, 4], title=None, savefig_settings=None,
ax=None, ylabel='KDE', vertical=False, edgecolor='w', **plot_kwargs):
''' Plot the distribution of the timeseries values
Parameters
----------
figsize : list
a list of two integers indicating the figure size
title : str
the title for the figure
savefig_settings : dict
the dictionary of arguments for plt.savefig(); some notes below:
- "path" must be specified; it can be any existed or non-existed path,
with or without a suffix; if the suffix is not given in "path", it will follow "format"
- "format" can be one of {"pdf", "eps", "png", "ps"}
ax : matplotlib.axis, optional
A matplotlib axis
ylabel : str
Label for the count axis
vertical : {True,False}
Whether to flip the plot vertically
edgecolor : matplotlib.color
The color of the edges of the bar
plot_kwargs : dict
Plotting arguments for seaborn histplot: https://seaborn.pydata.org/generated/seaborn.histplot.html
See also
--------
pyleoclim.utils.plotting.savefig : saving figure in Pyleoclim
Examples
--------
Distribution of the SOI record
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
time=data.iloc[:,1]
value=data.iloc[:,2]
ts=pyleo.Series(time=time,value=value,time_name='Year C.E', value_name='SOI', label='SOI')
@savefig ts_plot5.png
fig, ax = ts.plot()
pyleo.closefig(fig)
@savefig ts_hist.png
fig, ax = ts.histplot()
pyleo.closefig(fig)
'''
savefig_settings = {} if savefig_settings is None else savefig_settings.copy()
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
#make the data into a dataframe so we can flip the figure
time_label, value_label = self.make_labels()
if vertical == True:
data=pd.DataFrame({'value':self.value})
ax = sns.histplot(data=data, y="value", ax=ax, kde=True, edgecolor=edgecolor, **plot_kwargs)
ax.set_ylabel(value_label)
ax.set_xlabel(ylabel)
else:
ax = sns.histplot(self.value, ax=ax, kde=True, edgecolor=edgecolor, **plot_kwargs)
ax.set_xlabel(value_label)
ax.set_ylabel(ylabel)
if title is not None:
ax.set_title(title)
if 'fig' in locals():
if 'path' in savefig_settings:
plotting.savefig(fig, settings=savefig_settings)
return fig, ax
else:
return ax
# def distplot(self, figsize=[10, 4], title=None, savefig_settings=None,
# ax=None, ylabel='KDE', vertical=False, edgecolor='w', **plot_kwargs):
# ''' Plot the distribution of the timeseries values
# [legacy only ; please use histplot() instead]
# Parameters
# ----------
# figsize : list
# a list of two integers indicating the figure size
# title : str
# the title for the figure
# savefig_settings : dict
# the dictionary of arguments for plt.savefig(); some notes below:
# - "path" must be specified; it can be any existed or non-existed path,
# with or without a suffix; if the suffix is not given in "path", it will follow "format"
# - "format" can be one of {"pdf", "eps", "png", "ps"}
# ax : matplotlib.axis, optional
# A matplotlib axis
# ylabel : str
# Label for the count axis
# vertical : {True,False}
# Whether to flip the plot vertically
# edgecolor : matplotlib.color
# The color of the edges of the bar
# plot_kwargs : dict
# Plotting arguments for seaborn histplot: https://seaborn.pydata.org/generated/seaborn.histplot.html
# See also
# --------
# pyleoclim.utils.plotting.savefig : saving figure in Pyleoclim
# Examples
# --------
# Distribution of the SOI record
# .. ipython:: python
# :okwarning:
# :okexcept:
# import pyleoclim as pyleo
# import pandas as pd
# data=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
# time=data.iloc[:,1]
# value=data.iloc[:,2]
# ts=pyleo.Series(time=time,value=value,time_name='Year C.E', value_name='SOI', label='SOI')
# @savefig ts_plot5.png
# fig, ax = ts.plot()
# pyleo.closefig(fig)
# @savefig ts_dist.png
# fig, ax = ts.distplot()
# pyleo.closefig(fig)
# '''
# warnings.warn(
# "Distplot is deprecated. Function has been renamed histplot in order to maintain consistency with seaborn terminology",
# DeprecationWarning,
# stacklevel=2)
# return self.histplot(figsize, title, savefig_settings, ax, ylabel, vertical, edgecolor, **plot_kwargs)
def summary_plot(self, psd, scalogram, figsize=[8, 10], title=None,
time_lim=None, value_lim=None, period_lim=None, psd_lim=None,
time_label=None, value_label=None, period_label=None, psd_label=None,
ts_plot_kwargs = None, wavelet_plot_kwargs = None,
psd_plot_kwargs = None, gridspec_kwargs = None, y_label_loc = None,
legend = None, savefig_settings=None):
''' Produce summary plot of timeseries.
Generate cohesive plot of timeseries alongside results of wavelet analysis and spectral analysis on said timeseries.
Requires wavelet and spectral analysis to be conducted outside of plotting function, psd and scalogram must be passed as arguments.
Parameters
----------
psd : PSD
the PSD object of a Series.
scalogram : Scalogram
the Scalogram object of a Series.
If the passed scalogram object contains stored signif_scals these will be plotted.
figsize : list
a list of two integers indicating the figure size
title : str
the title for the figure
time_lim : list or tuple
the limitation of the time axis. This is for display purposes only, the scalogram and psd will still be calculated using the full time series.
value_lim : list or tuple
the limitation of the value axis of the timeseries. This is for display purposes only, the scalogram and psd will still be calculated using the full time series.
period_lim : list or tuple
the limitation of the period axis
psd_lim : list or tuple
the limitation of the psd axis
time_label : str
the label for the time axis
value_label : str
the label for the value axis of the timeseries
period_label : str
the label for the period axis
psd_label : str
the label for the amplitude axis of the PSD
legend : bool
if set to True, a legend will be added to the open space above the psd plot
ts_plot_kwargs : dict
arguments to be passed to the timeseries subplot, see Series.plot for details
wavelet_plot_kwargs : dict
arguments to be passed to the scalogram plot, see pyleoclim.Scalogram.plot for details
psd_plot_kwargs : dict
arguments to be passed to the psd plot, see PSD.plot for details
Certain psd plot settings are required by summary plot formatting. These include:
- ylabel
- legend
- tick parameters
These will be overridden by summary plot to prevent formatting errors
gridspec_kwargs : dict
arguments used to build the specifications for gridspec configuration
The plot is constructed with six slots:
- slot [0] contains a subgridspec containing the timeseries and scalogram (shared x axis)
- slot [1] contains a subgridspec containing an empty slot and the PSD plot (shared y axis with scalogram)
- slot [2] and slot [3] are empty to allow ample room for xlabels for the scalogram and PSD plots
- slot [4] contains the scalogram color bar
- slot [5] is empty
It is possible to tune the size and spacing of the various slots
- 'width_ratios': list of two values describing the relative widths of the two columns (default: [6, 1])
- 'height_ratios': list of three values describing the relative heights of the three rows (default: [2, 7, .35])
- 'hspace': vertical space between timeseries and scalogram (default: 0, however if either the scalogram xlabel or the PSD xlabel contain '\n', .05)
- 'wspace': lateral space between scalogram and psd plot slots (default: 0.05)
- 'cbspace': vertical space between the scalogram and colorbar
y_label_loc : float
Plot parameter to adjust horizontal location of y labels to avoid conflict with axis labels, default value is -0.15
savefig_settings : dict
the dictionary of arguments for plt.savefig(); some notes below:
- "path" must be specified; it can be any existed or non-existed path,
with or without a suffix; if the suffix is not given in "path", it will follow "format"
- "format" can be one of {"pdf", "eps", "png", "ps"}
See also
--------
pyleoclim.core.series.Series.spectral : Spectral analysis for a timeseries
pyleoclim.core.series.Series.wavelet : Wavelet analysis for a timeseries
pyleoclim.utils.plotting.savefig : saving figure in Pyleoclim
pyleoclim.core.psds.PSD : PSD object
pyleoclim.core.psds.MultiplePSD : Multiple PSD object
Examples
--------
Summary_plot with pre-generated psd and scalogram objects. Note that if the scalogram contains saved noise realizations these will be flexibly reused. See pyleo.Scalogram.signif_test() for details
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
ts=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/master/example_data/soi_data.csv',skiprows = 1)
series = pyleo.Series(time = ts['Year'],value = ts['Value'], time_name = 'Years', time_unit = 'AD')
psd = series.spectral(freq_method = 'welch')
scalogram = series.wavelet(freq_method = 'welch')
@savefig ts_summary_plot1.png
fig, ax = series.summary_plot(psd = psd,scalogram = scalogram)
pyleo.closefig(fig)
Summary_plot with pre-generated psd and scalogram objects from before and some plot modification arguments passed. Note that if the scalogram contains saved noise realizations these will be flexibly reused. See pyleo.Scalogram.signif_test() for details
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
ts=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/master/example_data/soi_data.csv',skiprows = 1)
series = pyleo.Series(time = ts['Year'],value = ts['Value'], time_name = 'Years', time_unit = 'AD')
psd = series.spectral(freq_method = 'welch')
scalogram = series.wavelet(freq_method = 'welch')
@savefig ts_summary_plot2.png
fig, ax = series.summary_plot(psd = psd,scalogram = scalogram, period_lim = [5,0], ts_plot_kwargs = {'color':'red','linewidth':.5}, psd_plot_kwargs = {'color':'red','linewidth':.5})
pyleo.closefig(fig)
'''
savefig_settings = {} if savefig_settings is None else savefig_settings.copy()
wavelet_plot_kwargs = {} if wavelet_plot_kwargs is None else wavelet_plot_kwargs.copy()
psd_plot_kwargs = {} if psd_plot_kwargs is None else psd_plot_kwargs.copy()
ts_plot_kwargs = {} if ts_plot_kwargs is None else ts_plot_kwargs.copy()
gridspec_kwargs = {} if gridspec_kwargs is None else gridspec_kwargs.copy()
# spacing
if (type(psd_label) == str and '\n' in psd_label) or (psd_label is None):
gridspec_kwargs_default = {'width_ratios': [6, 1],
# 'height_ratios': [8, 1, .35],
'height_ratios': [2,7,.35],
'hspace': 0.05, 'wspace': 0.05,
'cbspace':1}
else:
gridspec_kwargs_default = {'width_ratios': [6, 1],
# 'height_ratios': [8, 1, .35],
'height_ratios': [2,7,.35],
'hspace': 0, 'wspace': 0,
'cbspace':1}
for key in gridspec_kwargs_default:
if key not in gridspec_kwargs.keys():
gridspec_kwargs[key] = gridspec_kwargs_default[key]
ts_height = gridspec_kwargs['height_ratios'][0]
scal_height = gridspec_kwargs['height_ratios'][1]
cb_height = gridspec_kwargs['height_ratios'][2]
psd_width = gridspec_kwargs['width_ratios'][1]
scal_width = gridspec_kwargs['width_ratios'][0]
if 'cbspace' in gridspec_kwargs.keys():
cb_space = gridspec_kwargs['cbspace']
else:
cb_space = 1
gridspec_kwargs['height_ratios'] = [ts_height+scal_height, cb_space, cb_height]
del gridspec_kwargs['cbspace']
fig = plt.figure(constrained_layout=False, figsize=figsize)
gs = fig.add_gridspec(3, 2, **gridspec_kwargs)
# fig = plt.figure(figsize=figsize)
# gs = gridspec.GridSpec(6, 12)
# gs.update(wspace=0, hspace=0)
#
# gs0 = fig.add_gridspec(3, 2, width_ratios=[6, 1], height_ratios=[8, 1, .35],
# hspace=0, wspace=0.1)
# Subgridspecs
#Let's use the same hspace/wspace if given to a user
gs_d = {}
gs_d['ts_scal'] = gs[0].subgridspec(2, 1, height_ratios=[ts_height, scal_height], hspace=gridspec_kwargs['hspace'])
gs_d['psd'] = gs[1].subgridspec(2, 1, height_ratios=[ts_height, scal_height], hspace=gridspec_kwargs['hspace'])
# gs_d['ts_scal'] = gs[0].subgridspec(2, 1, height_ratios=[1, 4], hspace=gridspec_kwargs['hspace'])
# gs_d['psd'] = gs[1].subgridspec(2, 1, height_ratios=[1, 4], hspace=gridspec_kwargs['hspace'])
gs_d['cb'] = gs[4].subgridspec(1, 1)
ax = {}
### Time series
ax['ts'] = fig.add_subplot(gs_d['ts_scal'][0, 0])
ax['ts'] = self.plot(ax=ax['ts'], **ts_plot_kwargs)
if time_lim is not None:
ax['ts'].set_xlim(time_lim)
if 'xlim' in ts_plot_kwargs:
print(
'Xlim passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
if value_lim is not None:
ax['ts'].set_ylim(value_lim)
if 'ylim' in ts_plot_kwargs:
print(
'Ylim passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
if title is not None:
ax['ts'].set_title(title)
if 'title' in ts_plot_kwargs:
print(
'Title passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
if value_label is not None:
# time_label, value_label = self.make_labels()
ax['ts'].set_ylabel(value_label)
if 'ylabel' in ts_plot_kwargs:
print(
'Ylabel passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
ax['ts'].xaxis.label.set_visible(False)
ax['ts'].tick_params(axis='x', direction='in')#, labelleft=False)
# ax = {}
# ax['ts'] = plt.subplot(gs[0:1, :-3])
# ax['ts'] = self.plot(ax=ax['ts'], **ts_plot_kwargs)
# ax['ts'].xaxis.set_visible(False)
# ax['ts'].get_yaxis().set_label_coords(y_label_loc,0.5)
#
# if time_lim is not None:
# ax['ts'].set_xlim(time_lim)
# if 'xlim' in ts_plot_kwargs:
# print('Xlim passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
#
# if value_lim is not None:
# ax['ts'].set_ylim(value_lim)
# if 'ylim' in ts_plot_kwargs:
# print('Ylim passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
### Scalogram
ax['scal'] = fig.add_subplot(gs_d['ts_scal'][1, 0], sharex=ax['ts'])
# Need variable for plotting purposes
if 'variable' not in wavelet_plot_kwargs:
wavelet_plot_kwargs.update({'variable': 'amplitude'})
if 'title' not in wavelet_plot_kwargs:
wavelet_plot_kwargs.update({'title': None})
if 'cbar_style' not in wavelet_plot_kwargs:
wavelet_plot_kwargs.update({'cbar_style': {'orientation': 'horizontal', 'pad': 0.12,
'label': scalogram.wave_method + ' '+ wavelet_plot_kwargs['variable'].capitalize()}})
else:
orient = 'horizontal'
# I think padding is now the hspace
# if 'pad' in wavelet_plot_kwargs['cbar_style']:
# pad = wavelet_plot_kwargs['cbar_style']['pad']
# else:
# pad = 0.12
if 'label' in wavelet_plot_kwargs['cbar_style']:
label = wavelet_plot_kwargs['cbar_style']['label']
else:
label = wavelet_plot_kwargs['variable'].capitalize() + ' from ' + scalogram.wave_method
wavelet_plot_kwargs.update({'cbar_style': {'orientation': orient,
'label': label,
# 'pad': pad,
}})
wavelet_plot_kwargs['cbar_style']['drawedges'] = True
# Do not plot colorbar in scalogram
wavelet_plot_kwargs['plot_cb'] = False
# Plot scalogram
ax['scal'] = scalogram.plot(ax=ax['scal'], **wavelet_plot_kwargs)
if y_label_loc is not None:
ax['scal'].get_yaxis().set_label_coords(y_label_loc, 0.5)
if period_lim is not None:
ax['scal'].set_ylim(period_lim)
if 'ylim' in wavelet_plot_kwargs.keys():
print(
'Ylim passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
if time_label is not None:
ax['scal'].set_xlabel(time_label)
if 'xlabel' in wavelet_plot_kwargs:
print(
'Xlabel passed to scalogram plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
if period_label is not None:
# period_unit = infer_period_unit_from_time_unit(self.time_unit)
# period_label = f'Period [{period_unit}]' if period_unit is not None else 'Period'
ax['scal'].set_ylabel(period_label)
if 'ylabel' in wavelet_plot_kwargs:
print(
'Ylabel passed to scalogram plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
ax['scal'].set_title(None)
xticks = ax['scal'].get_xticks()
midpoints = xticks[:-1] + np.diff(xticks) / 2
ax['scal'].set_xticks(midpoints[1:-1])
ax['scal'].tick_params(axis='x', pad=12) # which='major',
if 'ylims' in psd_plot_kwargs:
shared_y_lims = psd_plot_kwargs['ylims']
elif 'ylims' in wavelet_plot_kwargs:
shared_y_lims = wavelet_plot_kwargs['ylims']
else:
shared_y_lims = ax['scal'].get_ylim()
plt.setp(ax['ts'].get_xticklabels(), visible=False)
# ax['scal'].set_ylim([0.2,50])
# >>
# ax['scal'] = plt.subplot(gs[1:5, :-3], sharex=ax['ts'])
#
# #Need variable for plotting purposes
# if 'variable' not in wavelet_plot_kwargs:
# wavelet_plot_kwargs.update({'variable':'amplitude'})
#
# if 'title' not in wavelet_plot_kwargs:
# wavelet_plot_kwargs.update({'title':None})
#
# if 'cbar_style' not in wavelet_plot_kwargs:
# wavelet_plot_kwargs.update({'cbar_style':{'orientation': 'horizontal', 'pad': 0.12,
# 'label': wavelet_plot_kwargs['variable'].capitalize() + ' from ' + scalogram.wave_method}})
# else:
# if 'orientation' in wavelet_plot_kwargs['cbar_style']:
# orient = wavelet_plot_kwargs['cbar_style']['orientation']
# else:
# orient = 'horizontal'
# if 'pad' in wavelet_plot_kwargs['cbar_style']:
# pad = wavelet_plot_kwargs['cbar_style']['pad']
# else:
# pad = 0.12
# if 'label' in wavelet_plot_kwargs['cbar_style']:
# label = wavelet_plot_kwargs['cbar_style']['label']
# else:
# label = wavelet_plot_kwargs['variable'].capitalize() + ' from ' + scalogram.wave_method
# wavelet_plot_kwargs.update({'cbar_style':{'orientation': orient, 'pad': pad,
# 'label': label}})
#
# ax['scal'] = scalogram.plot(ax=ax['scal'], **wavelet_plot_kwargs)
# ax['scal'].get_yaxis().set_label_coords(y_label_loc,0.5)
#
# if period_lim is not None:
# ax['scal'].set_ylim(period_lim)
# if 'ylim' in wavelet_plot_kwargs:
# print('Ylim passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
# ax['scal'].invert_yaxis()
### PSD
ax['psd'] = fig.add_subplot(gs_d['psd'][1, 0], sharey=ax['scal'])
ax['psd'] = psd.plot(ax=ax['psd'], transpose=True, ylabel=str(psd.spec_method) + ' PSD',
**psd_plot_kwargs)
if period_lim is not None:
ax['psd'].set_ylim(period_lim)
if 'ylim' in psd_plot_kwargs:
print(
'Ylim passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
else:
ax['psd'].set_ylim(shared_y_lims)
ax['scal'].set_ylim(shared_y_lims)
if psd_lim is not None:
ax['psd'].set_xlim(psd_lim)
if 'xlim' in psd_plot_kwargs:
print(
'Xlim passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument')
if psd_label is not None:
ax['psd'].set_xlabel(psd_label)
if 'xlabel' in psd_plot_kwargs:
print(
'Xlabel passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
ax['psd'].invert_yaxis()
ax['psd'].set_ylabel(None)
ax['psd'].tick_params(axis='y', direction='in', labelleft=False, pad=12)
if legend is None:
for key in ['ts', 'psd']:
ax[key].legend().remove()
if legend == True:
leg_h, leg_l = [], []
for key in ['ts', 'psd']:
ax[key].legend()
_h, _l = ax[key].get_legend_handles_labels()
for ip, label in enumerate(_l):
if label not in leg_l:
if len(label.split(' ')) > 1:
if len(label) > 15:
label = label[:15] + label[15:].replace(' ', '\n', 1)
label = label.replace('simulations', 'sims')
if psd_width/scal_width < .25:
label = label.replace('threshold', 'C.L.')
leg_l.append(label)
leg_h.append(_h[ip])
ax[key].legend().remove()
ax['leg'] = fig.add_subplot(gs_d['psd'][0, 0])
ax['leg'].grid(False)
for side in ['top', 'bottom', 'left', 'right']:
ax['leg'].spines[side].set_visible(False)
ax['leg'].set_xticklabels([])
ax['leg'].set_yticklabels([])
ax['leg'].tick_params(axis='x', which='both', length=0)
ax['leg'].tick_params(axis='y', which='both', length=0)
x0, y0 = 1,1#0,0#-psd_width*3/4, -ts_height*3/4#, psd_width, ts_height
ax['leg'].legend(leg_h, leg_l, fontsize='small', loc='upper left')#, bbox_to_anchor=(x0, y0))# width, height))
ax['scal'].invert_yaxis() # not sure where this needs to be
# ax['leg'] = fig.add_subplot(gs_d['psd_leg'][0, 0])
# ax['leg'].legend(h, l)
# ax['psd'] = plt.subplot(gs[1:4, -3:], sharey=ax['scal'])
# ax['psd'] = psd.plot(ax=ax['psd'], transpose=True, ylabel = 'PSD from \n' + str(psd.spec_method), **psd_plot_kwargs)
#
# if period_lim is not None:
# ax['psd'].set_ylim(period_lim)
# if 'ylim' in psd_plot_kwargs:
# print('Ylim passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
#
# ax['psd'].yaxis.set_visible(False)
# ax['psd'].invert_yaxis()
# ax['psd'].set_ylabel(None)
# ax['psd'].tick_params(axis='y', direction='in', labelleft=False)
# ax['psd'].legend().remove()
#
# if psd_lim is not None:
# ax['psd'].set_xlim(psd_lim)
# if 'xlim' in psd_plot_kwargs:
# print('Xlim passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument')
#
# if title is not None:
# ax['ts'].set_title(title)
# if 'title' in ts_plot_kwargs:
# print('Title passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
#
# if value_label is not None:
# #time_label, value_label = self.make_labels()
# ax['ts'].set_ylabel(value_label)
# if 'ylabel' in ts_plot_kwargs:
# print('Ylabel passed to time series plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
#
# if time_label is not None:
# #time_label, value_label = self.make_labels()
# ax['scal'].set_xlabel(time_label)
# if 'xlabel' in wavelet_plot_kwargs:
# print('Xlabel passed to scalogram plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
#
# if period_label is not None:
# #period_unit = infer_period_unit_from_time_unit(self.time_unit)
# #period_label = f'Period [{period_unit}]' if period_unit is not None else 'Period'
# ax['scal'].set_ylabel(period_label)
# if 'ylabel' in wavelet_plot_kwargs:
# print('Ylabel passed to scalogram plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
#
# if psd_label is not None:
# ax['psd'].set_xlabel(psd_label)
# if 'xlabel' in psd_plot_kwargs:
# print('Xlabel passed to psd plot through exposed argument and key word argument. The exposed argument takes precedence and will overwrite relevant key word argument.')
# plot color bar for scalogram using filled contour data
ax['cb'] = fig.add_subplot(gs_d['cb'][0, 0])
cb = mcb.Colorbar(ax=ax['cb'], mappable=scalogram.conf,
orientation=wavelet_plot_kwargs['cbar_style']['orientation'],
label=wavelet_plot_kwargs['cbar_style']['label'])#,
# pad=wavelet_plot_kwargs['cbar_style']['pad'])
#
# cb = mpl.colorbar.ColorbarBase(ax['cb'], orientation='horizontal',
# cmap=cbar_data['cmap'],
# norm=cbar_data['norm'], # vmax and vmin
# extend=cbar_data['extend'],
# boundaries=cbar_data['boundaries'], # ,
# label=wavelet_plot_kwargs['cbar_style']['label'],
# drawedges=cbar_data['drawedges']) # True)
# cb = mpl.colorbar.Colorbar(ax['cb'], mappable = cbar_data.mappable,
# orientation='horizontal',
# extend=cbar_data.extend,
# boundaries=cbar_data.boundaries, # ,
# label=wavelet_plot_kwargs['cbar_style']['label'],
# drawedges=cbar_data.drawedges) # True)
#
# ticks=[0, 3, 6, 9])
if 'path' in savefig_settings:
plotting.savefig(fig, settings=savefig_settings)
return fig, ax
def copy(self):
'''Make a copy of the Series object
Returns
-------
Series : Series
A copy of the Series object
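Examples
--------
A minimal sketch showing that the copy is independent from the original (synthetic, purely illustrative values):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
ts = pyleo.Series(time=np.arange(5), value=np.random.randn(5))
ts2 = ts.copy()
ts2.label = 'copied series'
print(ts.label, ts2.label)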
'''
return deepcopy(self)
def clean(self, verbose=False, keep_log = False):
''' Clean up the timeseries by removing NaNs and sort with increasing time points
Parameters
----------
verbose : bool
If True, will print warning messages if there are any
keep_log : Boolean
if True, adds this step and its parameters to the series log.
Returns
-------
new : Series
Series object with NaNs removed and the time axis sorted in increasing order
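Examples
--------
A minimal sketch with a NaN deliberately inserted into a synthetic series (purely illustrative values):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
ts = pyleo.Series(time=[3, 1, 2, 4], value=[1.0, np.nan, 2.0, 3.0], clean_ts=False)
ts_clean = ts.clean()
print(ts_clean.time, ts_clean.value)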
'''
new = self.copy()
v_mod, t_mod = tsbase.clean_ts(self.value, self.time, verbose=verbose)
new.time = t_mod
new.value = v_mod
if keep_log == True:
new.log += ({len(new.log):'clean', 'verbose': verbose},)
return new
def sort(self, verbose=False, keep_log = False):
''' Ensure timeseries is aligned to a prograde axis.
If the time axis is prograde to begin with, no transformation is applied.
Parameters
----------
verbose : bool
If True, will print warning messages if there are any
keep_log : Boolean
if True, adds this step and its parameter to the series log.
Returns
-------
new : Series
Series object with the time axis sorted in prograde (increasing) order
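Examples
--------
A minimal sketch with a deliberately retrograde (decreasing) synthetic time axis:
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
ts = pyleo.Series(time=[4, 3, 2, 1], value=[1, 2, 3, 4], clean_ts=False)
ts_sorted = ts.sort()
print(ts_sorted.time)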
'''
new = self.copy()
v_mod, t_mod = tsbase.sort_ts(self.value, self.time, verbose=verbose)
new.time = t_mod
new.value = v_mod
if keep_log == True:
new.log += ({len(new.log):'sort', 'verbose': verbose},)
return new
def gaussianize(self, keep_log = False):
''' Gaussianizes the timeseries (i.e. maps its values to a standard normal)
Parameters
----------
keep_log : Boolean
if True, adds this transformation to the series log.
Returns
-------
new : Series
The Gaussianized series object
References
----------
Emile-Geay, J., and M. Tingley (2016), Inferring climate variability from nonlinear proxies: application to palaeo-enso studies, Climate of the Past, 12 (1), 31–50, doi:10.5194/cp-12-31-2016.
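Examples
--------
A minimal sketch mapping a skewed synthetic series to a standard normal (purely illustrative values):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
ts = pyleo.Series(time=np.arange(500), value=np.random.lognormal(size=500))
ts_gauss = ts.gaussianize(keep_log=True)
print(np.mean(ts_gauss.value), np.std(ts_gauss.value))
ts_gauss.log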
'''
new = self.copy()
v_mod = tsutils.gaussianize(self.value)
new.value = v_mod
if keep_log == True:
new.log += ({len(new.log):'gaussianize', 'applied': True},)
return new
def standardize(self, keep_log = False, scale=1):
"""Standardizes the series ((i.e. remove its estimated mean and divides by its estimated standard deviation)
Returns
-------
new : Series
The standardized series object
keep_log : Boolean
if True, adds the previous mean, standard deviation and method parameters to the series log.
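Examples
--------
A minimal sketch on a synthetic series (purely illustrative values):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
ts = pyleo.Series(time=np.arange(100), value=10 + 5*np.random.randn(100))
ts_std = ts.standardize(keep_log=True)
print(np.mean(ts_std.value), np.std(ts_std.value))
ts_std.log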
"""
new = self.copy()
vs, mu, sig = tsutils.standardize(self.value, scale=scale)
new.value = vs
if keep_log == True:
method_dict = {len(new.log):'standardize', 'args': scale,
'previous_mean': mu, 'previous_std': sig}
new.log += (method_dict,)
return new
def center(self, timespan=None, keep_log=False):
''' Centers the series (i.e. removes its estimated mean)
Parameters
----------
timespan : tuple or list
The timespan over which the mean must be estimated.
In the form [a, b], where a, b are two points along the series' time axis.
keep_log : Boolean
if True, adds the previous mean and method parameters to the series log.
Returns
-------
new : Series
The centered series object
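Examples
--------
A minimal sketch removing the mean estimated over the first half of a synthetic series (purely illustrative values):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
ts = pyleo.Series(time=np.arange(100), value=5 + np.random.randn(100))
ts_ctr = ts.center(timespan=[0, 49], keep_log=True)
ts_ctr.log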
'''
new = self.copy()
if timespan is not None:
ts_mean = np.nanmean(self.slice(timespan).value)
vc = self.value - ts_mean
else:
ts_mean = np.nanmean(self.value)
vc = self.value - ts_mean
new.value = vc
if keep_log == True:
new.log += ({len(new.log): 'center', 'args': timespan, 'previous_mean': ts_mean},)
return new
def segment(self, factor=10):
"""Gap detection
This function segments a timeseries into a number of parts following a gap
detection algorithm. The rule of gap detection is simple:
the intervals between time points are defined as dts; if dts[i] is larger than factor * dts[i-1],
the change in dts (i.e. the gradient) is considered too large, and that point is regarded as a breaking point
at which the timeseries is divided into two segments.
Parameters
----------
factor : float
The factor that adjusts the threshold for gap detection
Returns
-------
res : MultipleSeries or Series
If gaps were detected, returns the segments in a MultipleSeries object,
else, returns the original timeseries.
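Examples
--------
A minimal sketch on a synthetic series with one large gap (all values below are illustrative):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
np.random.seed(2333)
time = np.concatenate((np.arange(100), np.arange(500, 600)))
ts = pyleo.Series(time=time, value=np.random.randn(time.size))
ms = ts.segment()  # a MultipleSeries with two segments is expected here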
"""
seg_y, seg_t, n_segs = tsutils.ts2segments(self.value,self.time,factor=factor)
if len(seg_y)>1:
s_list=[]
for idx,s in enumerate(seg_y):
s_tmp=Series(time=seg_t[idx],value=s,time_name=self.time_name,
time_unit=self.time_unit, value_name=self.value_name,
value_unit=self.value_unit,label=self.label)
s_list.append(s_tmp)
res=MultipleSeries(series_list=s_list)
elif len(seg_y)==1:
res=self.copy()
else:
raise ValueError('No timeseries detected')
return res
def slice(self, timespan):
''' Slicing the timeseries with a timespan (tuple or list)
Parameters
----------
timespan : tuple or list
The list of time points for slicing, whose length must be even.
When there are n time points, the output Series includes n/2 segments.
For example, if timespan = [a, b], then the sliced output includes one segment [a, b];
if timespan = [a, b, c, d], then the sliced output includes segment [a, b] and segment [c, d].
Returns
-------
new : Series
The sliced Series object.
Examples
--------
slice the SOI from 1972 to 1998
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
time = data.iloc[:,1]
value = data.iloc[:,2]
ts = pyleo.Series(time=time, value=value, time_name='Year C.E', value_name='SOI', label='SOI')
ts_slice = ts.slice([1972, 1998])
print("New time bounds:",ts_slice.time.min(),ts_slice.time.max())
'''
new = self.copy()
n_elements = len(timespan)
if n_elements % 2 == 1:
raise ValueError('The number of elements in timespan must be even!')
n_segments = int(n_elements / 2)
mask = np.zeros(np.size(self.time), dtype=bool)
for i in range(n_segments):
mask |= (self.time >= timespan[i*2]) & (self.time <= timespan[i*2+1])
new.time = self.time[mask]
new.value = self.value[mask]
return new
def fill_na(self, timespan=None, dt=1, keep_log=False):
''' Fill NaNs into the timespan
Parameters
----------
timespan : tuple or list
The list of time points for slicing, whose length must be 2.
For example, if timespan = [a, b], then the sliced output includes one segment [a, b].
If None, will use the start point and end point of the original timeseries
dt : float
The time spacing to fill the NaNs; default is 1.
keep_log : Boolean
if True, adds this step and its parameters to the series log.
Returns
-------
new : Series
The NaN-filled Series object.
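Examples
--------
A minimal sketch on a short synthetic series with missing time steps (values are illustrative):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
ts = pyleo.Series(time=np.array([1., 2., 3., 7., 8.]), value=np.ones(5))
ts_filled = ts.fill_na(dt=1)
ts_filled.value  # NaNs appear at t = 4, 5, 6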
'''
new = self.copy()
if timespan is None:
start = np.min(self.time)
end = np.max(self.time)
else:
start = timespan[0]
end = timespan[-1]
new_time = np.arange(start, end+dt, dt)
new_value = np.empty(np.size(new_time))
for i, t in enumerate(new_time):
if t in self.time:
loc = list(self.time).index(t)
new_value[i] = self.value[loc]
else:
new_value[i] = np.nan
new.time = new_time
new.value = new_value
if keep_log == True:
new.log += ({len(new.log):'fill_na', 'applied': True, 'dt': dt, 'timespan': timespan},)
return new
def detrend(self, method='emd', keep_log=False, **kwargs):
'''Detrend Series object
Parameters
----------
method : str, optional
The method for detrending. The default is 'emd'.
Options include:
* "linear": the result of a n ordinary least-squares stright line fit to y is subtracted.
* "constant": only the mean of data is subtracted.
* "savitzky-golay", y is filtered using the Savitzky-Golay filters and the resulting filtered series is subtracted from y.
* "emd" (default): Empirical mode decomposition. The last mode is assumed to be the trend and removed from the series
keep_log : Boolean
if True, adds the removed trend and method parameters to the series log.
kwargs : dict
Relevant arguments for each of the methods.
Returns
-------
new : Series
Detrended Series object in "value"; when keep_log=True, the removed trend is stored in the series log
See also
--------
pyleoclim.utils.tsutils.detrend : detrending wrapper functions
Examples
--------
We will generate a harmonic signal with a nonlinear trend and use two detrending options to recover the original signal.
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import numpy as np
# Generate a mixed harmonic signal with known frequencies
freqs=[1/20,1/80]
time=np.arange(2001)
signals=[]
for freq in freqs:
signals.append(np.cos(2*np.pi*freq*time))
signal=sum(signals)
# Add a non-linear trend
slope = 1e-5; intercept = -1
nonlinear_trend = slope*time**2 + intercept
# Add a modicum of white noise
np.random.seed(2333)
sig_var = np.var(signal)
noise_var = sig_var / 2 #signal is twice the size of noise
white_noise = np.random.normal(0, np.sqrt(noise_var), size=np.size(signal))
signal_noise = signal + white_noise
# Place it all in a series object and plot it:
ts = pyleo.Series(time=time,value=signal_noise + nonlinear_trend)
@savefig random_series.png
fig, ax = ts.plot(title='Timeseries with nonlinear trend'); pyleo.closefig(fig)
# Detrending with default parameters (using EMD method with 1 mode)
ts_emd1 = ts.detrend()
ts_emd1.label = 'default detrending (EMD, last mode)'
@savefig ts_emd1.png
fig, ax = ts_emd1.plot(title='Detrended with EMD method'); ax.plot(time,signal_noise,label='target signal'); ax.legend(); pyleo.closefig(fig)
We see that the default function call results in a "hockey stick" at the end, which is undesirable.
There is no automated way to fix this, but with a little trial and error, we find that removing
the 2 smoothest modes performs reasonably well:
.. ipython:: python
:okwarning:
:okexcept:
ts_emd2 = ts.detrend(method='emd', n=2, keep_log=True)
ts_emd2.label = 'EMD detrending, last 2 modes'
@savefig ts_emd_n2.png
fig, ax = ts_emd2.plot(title='Detrended with EMD (n=2)'); ax.plot(time,signal_noise,label='target signal'); ax.legend(); pyleo.closefig(fig)
Another option for removing a nonlinear trend is a Savitzky-Golay filter:
.. ipython:: python
:okwarning:
:okexcept:
ts_sg = ts.detrend(method='savitzky-golay')
ts_sg.label = 'savitzky-golay detrending, default parameters'
@savefig ts_sg.png
fig, ax = ts_sg.plot(title='Detrended with Savitzky-Golay filter'); ax.plot(time,signal_noise,label='target signal'); ax.legend(); pyleo.closefig(fig)
As we can see, the result is even worse than with EMD (default). Here it pays to look into the underlying method, which comes from SciPy.
It turns out that by default, the Savitzky-Golay filter fits a polynomial to the last "window_length" values of the edges.
By default, this value is close to the length of the series. Choosing a value 10x smaller fixes the problem here, though you will have to tinker with that parameter until you get the result you seek.
.. ipython:: python
:okwarning:
:okexcept:
ts_sg2 = ts.detrend(method='savitzky-golay',sg_kwargs={'window_length':201}, keep_log=True)
ts_sg2.label = 'savitzky-golay detrending, window_length = 201'
@savefig ts_sg2.png
fig, ax = ts_sg2.plot(title='Detrended with Savitzky-Golay filter'); ax.plot(time,signal_noise,label='target signal'); ax.legend(); pyleo.closefig(fig)
Finally, when `keep_log=True` the removed trend is stored in the series log, so it can be added back in if need be.
.. ipython:: python
:okwarning:
:okexcept:
trend_ts = pyleo.Series(time = time, value = nonlinear_trend,
value_name= 'trend', label='original trend')
@savefig ts_trend.png
fig, ax = trend_ts.plot(title='Trend recovery'); ax.plot(time,ts_emd2.log[1]['previous_trend'],label=ts_emd2.label); ax.plot(time,ts_sg2.log[1]['previous_trend'], label=ts_sg2.label); ax.legend(); pyleo.closefig(fig)
Both methods can recover the nonlinear trend, with some edge effects near the end that could be addressed by judicious padding.
'''
new = self.copy()
v_mod, trend = tsutils.detrend(self.value, x=self.time, method=method, **kwargs)
new.value = v_mod
if keep_log == True:
new.log += ({len(new.log): 'detrend','method': method, 'args': kwargs, 'previous_trend': trend},)
return new
def spectral(self, method='lomb_scargle', freq_method='log', freq_kwargs=None, settings=None, label=None, scalogram=None, verbose=False):
''' Perform spectral analysis on the timeseries
Parameters
----------
method : str;
{'wwz', 'mtm', 'lomb_scargle', 'welch', 'periodogram', 'cwt'}
freq_method : str
{'log','scale', 'nfft', 'lomb_scargle', 'welch'}
freq_kwargs : dict
Arguments for frequency vector
settings : dict
Arguments for the specific spectral method
label : str
Label for the PSD object
scalogram : pyleoclim.core.scalograms.Scalogram
The return of the wavelet analysis; effective only when the method is 'wwz' or 'cwt'
verbose : bool
If True, will print warning messages if there is any
Returns
-------
psd : PSD
A PSD object
See also
--------
pyleoclim.utils.spectral.mtm : Spectral analysis using the Multitaper approach
pyleoclim.utils.spectral.lomb_scargle : Spectral analysis using the Lomb-Scargle method
pyleoclim.utils.spectral.welch: Spectral analysis using the Welch segment approach
pyleoclim.utils.spectral.periodogram: Spectral analysis using the basic Fourier transform
pyleoclim.utils.spectral.wwz_psd : Spectral analysis using the Wavelet Weighted Z transform
pyleoclim.utils.spectral.cwt_psd : Spectral analysis using the continuous Wavelet Transform as implemented by Torrence and Compo
pyleoclim.utils.spectral.make_freq_vector : Functions to create the frequency vector
pyleoclim.utils.tsutils.detrend : Detrending function
pyleoclim.core.psds.PSD : PSD object
pyleoclim.core.psds.MultiplePSD : Multiple PSD object
Examples
--------
Calculate the spectrum of SOI using the various methods and compute significance
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
time = data.iloc[:,1]
value = data.iloc[:,2]
ts = pyleo.Series(time=time, value=value, time_name='Year C.E', value_name='SOI', label='SOI')
# Standardize the time series
ts_std = ts.standardize()
- Lomb-Scargle
.. ipython:: python
:okwarning:
:okexcept:
psd_ls = ts_std.spectral(method='lomb_scargle')
psd_ls_signif = psd_ls.signif_test(number=20) #in practice, need more AR1 simulations
@savefig spec_ls.png
fig, ax = psd_ls_signif.plot(title='PSD using Lomb-Scargle method')
pyleo.closefig(fig)
We may pass in method-specific arguments via "settings", which is a dictionary.
For instance, to adjust the number of overlapping segments for Lomb-Scargle, we may specify the method-specific argument "n50";
to adjust the frequency vector, we may modify the "freq_method" or modify the method-specific argument "freq".
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
psd_LS_n50 = ts_std.spectral(method='lomb_scargle', settings={'n50': 4}) # n50 sets the number of 50%-overlapping segments
psd_LS_freq = ts_std.spectral(method='lomb_scargle', settings={'freq': np.linspace(1/20, 1/0.2, 51)})
psd_LS_LS = ts_std.spectral(method='lomb_scargle', freq_method='lomb_scargle') # with frequency vector generated using REDFIT method
@savefig spec_ls_n50.png
fig, ax = psd_LS_n50.plot(
title='PSD using Lomb-Scargle method with 4 overlapping segments',
label='settings={"n50": 4}')
psd_ls.plot(ax=ax, label='settings={"n50": 3}', marker='o')
@savefig spec_ls_freq.png
fig, ax = psd_LS_freq.plot(
title='PSD using Lomb-Scargle method with different frequency vectors',
label='freq=np.linspace(1/20, 1/0.2, 51)', marker='o')
psd_ls.plot(ax=ax, label='freq_method="log"', marker='o')
You may notice differences in the smoothness of the PSD curves and in the locations of the analyzed periods.
For other method-specific arguments, please look up the specific methods in the "See also" section.
- WWZ
.. ipython:: python
:okwarning:
:okexcept:
psd_wwz = ts_std.spectral(method='wwz') # wwz is the default method
psd_wwz_signif = psd_wwz.signif_test(number=1) # significance test; for real work, should use number=200 or even larger
@savefig spec_wwz.png
fig, ax = psd_wwz_signif.plot(title='PSD using WWZ method')
pyleo.closefig(fig)
We may take advantage of a pre-calculated scalogram using WWZ to accelerate the spectral analysis
(although note that the default parameters for spectral and wavelet analysis using WWZ are different):
.. ipython:: python
:okwarning:
:okexcept:
scal_wwz = ts_std.wavelet(method='wwz') # wwz is the default method
psd_wwz_fast = ts_std.spectral(method='wwz', scalogram=scal_wwz)
@savefig spec_wwz_fast.png
fig, ax = psd_wwz_fast.plot(title='PSD using WWZ method w/ pre-calculated scalogram')
pyleo.closefig(fig)
- Periodogram
.. ipython:: python
:okwarning:
:okexcept:
ts_interp = ts_std.interp()
psd_perio = ts_interp.spectral(method='periodogram')
psd_perio_signif = psd_perio.signif_test(number=20, method='ar1sim') #in practice, need more AR1 simulations
@savefig spec_perio.png
fig, ax = psd_perio_signif.plot(title='PSD using Periodogram method')
pyleo.closefig(fig)
- Welch
.. ipython:: python
:okwarning:
:okexcept:
psd_welch = ts_interp.spectral(method='welch')
psd_welch_signif = psd_welch.signif_test(number=20, method='ar1sim') #in practice, need more AR1 simulations
@savefig spec_welch.png
fig, ax = psd_welch_signif.plot(title='PSD using Welch method')
pyleo.closefig(fig)
- MTM
.. ipython:: python
:okwarning:
:okexcept:
psd_mtm = ts_interp.spectral(method='mtm', label='MTM, NW=4')
psd_mtm_signif = psd_mtm.signif_test(number=20, method='ar1sim') #in practice, need more AR1 simulations
@savefig spec_mtm.png
fig, ax = psd_mtm_signif.plot(title='PSD using the multitaper method')
pyleo.closefig(fig)
By default, MTM uses a half-bandwidth of 4 times the fundamental (Rayleigh) frequency, i.e. NW = 4, which is the most conservative choice.
NW runs from 2 to 4 in multiples of 1/2, and can be adjusted like so (note the sharper peaks and higher overall variance, which may not be desirable):
.. ipython:: python
:okwarning:
:okexcept:
psd_mtm2 = ts_interp.spectral(method='mtm', settings={'NW':2}, label='MTM, NW=2')
@savefig spec_mtm2.png
psd_mtm2.plot(title='PSD using the multi-taper method', ax=ax)
pyleo.closefig(fig)
- Continuous Wavelet Transform
.. ipython:: python
:okwarning:
:okexcept:
ts_interp = ts_std.interp()
psd_cwt = ts_interp.spectral(method='cwt')
psd_cwt_signif = psd_cwt.signif_test(number=20)
@savefig spec_cwt.png
fig, ax = psd_cwt_signif.plot(title='PSD using CWT method')
pyleo.closefig(fig)
'''
if not verbose:
warnings.simplefilter('ignore')
settings = {} if settings is None else settings.copy()
spec_func = {
'wwz': specutils.wwz_psd,
'mtm': specutils.mtm,
'lomb_scargle': specutils.lomb_scargle,
'welch': specutils.welch,
'periodogram': specutils.periodogram,
'cwt': specutils.cwt_psd
}
args = {}
freq_kwargs = {} if freq_kwargs is None else freq_kwargs.copy()
freq = specutils.make_freq_vector(self.time, method=freq_method, **freq_kwargs)
args['wwz'] = {'freq': freq}
args['cwt'] = {'freq': freq}
args['mtm'] = {}
args['lomb_scargle'] = {'freq': freq}
args['welch'] = {}
args['periodogram'] = {}
args[method].update(settings)
if method == 'wwz' and scalogram is not None:
args['wwz'].update(
{
'wwa': scalogram.amplitude,
'wwz_Neffs': scalogram.wwz_Neffs,
'wwz_freq': scalogram.frequency,
}
)
if method == 'cwt' and scalogram is not None:
Results = collections.namedtuple('Results', ['amplitude', 'coi', 'freq', 'time', 'scale', 'mother','param'])
res = Results(amplitude=scalogram.amplitude, coi=scalogram.coi,
freq=scalogram.frequency, time=scalogram.time,
scale=scalogram.wave_args['scale'],
mother=scalogram.wave_args['mother'],
param=scalogram.wave_args['param'])
args['cwt'].update({'cwt_res':res})
spec_res = spec_func[method](self.value, self.time, **args[method])
if type(spec_res) is dict:
spec_res = dict2namedtuple(spec_res)
if label is None:
label = self.label
if method == 'wwz' and scalogram is not None:
args['wwz'].pop('wwa')
args['wwz'].pop('wwz_Neffs')
args['wwz'].pop('wwz_freq')
if method == 'cwt':
args['cwt'].update({'scale':spec_res.scale,'mother':spec_res.mother,'param':spec_res.param})
if scalogram is not None:
args['cwt'].pop('cwt_res')
psd = PSD(
frequency=spec_res.freq,
amplitude=spec_res.psd,
label=label,
timeseries=self,
spec_method=method,
spec_args=args[method]
)
return psd
def wavelet(self, method='cwt', settings=None, freq_method='log', freq_kwargs=None, verbose=False):
''' Perform wavelet analysis on a timeseries
Parameters
----------
method : str {wwz, cwt}
cwt - the continuous wavelet transform [1]
is appropriate for evenly-spaced series.
wwz - the weighted wavelet Z-transform [2]
is appropriate for unevenly-spaced series.
Default is cwt, returning an error if the Series is unevenly-spaced.
freq_method : str
{'log', 'scale', 'nfft', 'lomb_scargle', 'welch'}
freq_kwargs : dict
Arguments for the frequency vector
settings : dict
Arguments for the specific wavelet method
verbose : bool
If True, will print warning messages if there is any
Returns
-------
scal : Scalogram object
See also
--------
pyleoclim.utils.wavelet.wwz : wwz function
pyleoclim.utils.wavelet.cwt : cwt function
pyleoclim.utils.spectral.make_freq_vector : Functions to create the frequency vector
pyleoclim.utils.tsutils.detrend : Detrending function
pyleoclim.core.series.Series.spectral : spectral analysis tools
pyleoclim.core.scalograms.Scalogram : Scalogram object
pyleoclim.core.scalograms.MultipleScalogram : Multiple Scalogram object
References
----------
[1] Torrence, C. and G. P. Compo, 1998: A Practical Guide to Wavelet Analysis. Bull. Amer. Meteor. Soc., 79, 61-78.
Python routines available at http://paos.colorado.edu/research/wavelets/
[2] Foster, G., 1996: Wavelets for period analysis of unevenly sampled time series. The Astronomical Journal, 112, 1709.
Examples
--------
Wavelet analysis on the evenly-spaced SOI record. The CWT method will be applied by default.
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
time = data.iloc[:,1]
value = data.iloc[:,2]
ts = pyleo.Series(time=time,value=value,time_name='Year C.E', value_name='SOI', label='SOI')
scal1 = ts.wavelet()
scal_signif = scal1.signif_test(number=20) # for research-grade work, use number=200 or larger
@savefig scal_cwt.png
fig, ax = scal_signif.plot()
pyleo.closefig(fig)
If you wanted to invoke the WWZ method instead (here with no significance testing, to lower computational cost):
.. ipython:: python
:okwarning:
:okexcept:
scal2 = ts.wavelet(method='wwz')
@savefig scal_wwz.png
fig, ax = scal2.plot()
pyleo.closefig(fig)
Notice that the two scalograms have different amplitudes, which are relative quantities. Method-specific arguments
may be passed via `settings`. For instance, if you wanted to change the default mother wavelet
('MORLET') to a derivative of a Gaussian (DOG), with degree 2 by default ("Mexican Hat wavelet"):
.. ipython:: python
:okwarning:
:okexcept:
scal3 = ts.wavelet(settings = {'mother':'DOG'})
@savefig scal_dog.png
fig, ax = scal3.plot(title='CWT scalogram with DOG mother wavelet')
pyleo.closefig(fig)
As for WWZ, note that, for computational efficiency, the time axis is coarse-grained
by default to 50 time points, which explains in part the difference with the CWT scalogram.
If you need a custom axis, it (and other method-specific parameters) can also be passed
via the `settings` dictionary:
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
tau = np.linspace(np.min(ts.time), np.max(ts.time), 60)
scal4 = ts.wavelet(method='wwz', settings={'tau':tau})
@savefig scal_tau.png
fig, ax = scal4.plot(title='WWZ scalogram with finer time axis')
pyleo.closefig(fig)
'''
if not verbose:
warnings.simplefilter('ignore')
# Assign method
if method == 'cwt' and not(self.is_evenly_spaced()):
raise ValueError("The chosen method is cwt but the series is unevenly spaced. You can either interpolate/bin or set method='wwz'.")
wave_func = {'wwz': waveutils.wwz,
'cwt': waveutils.cwt
}
# Process options
settings = {} if settings is None else settings.copy()
freq_kwargs = {} if freq_kwargs is None else freq_kwargs.copy()
freq = specutils.make_freq_vector(self.time, method=freq_method, **freq_kwargs)
args = {}
args['wwz'] = {'freq': freq}
args['cwt'] = {'freq': freq}
if method == 'wwz':
if 'ntau' in settings.keys():
ntau = settings['ntau']
else:
ntau = np.min([np.size(self.time), 50])
tau = np.linspace(np.min(self.time), np.max(self.time), ntau)
settings.update({'tau': tau})
args[method].update(settings)
# Apply wavelet method
wave_res = wave_func[method](self.value, self.time, **args[method])
# Export result
if method == 'wwz':
wwz_Neffs = wave_res.Neffs
elif method=='cwt':
wwz_Neffs = None
args[method].update({'scale':wave_res.scale,'mother':wave_res.mother,'param':wave_res.param,
'standardize':wave_res.standardize, 'gaussianize':wave_res.gaussianize})
scal = Scalogram(
frequency=wave_res.freq,
scale = wave_res.scale,
time=wave_res.time,
amplitude=wave_res.amplitude,
coi=wave_res.coi,
label=self.label,
timeseries=self,
wave_method=method,
freq_method=freq_method,
freq_kwargs=freq_kwargs,
wave_args=args[method],
wwz_Neffs=wwz_Neffs,
)
return scal
def wavelet_coherence(self, target_series, method='cwt', settings=None,
freq_method='log', freq_kwargs=None, verbose=False,
common_time_kwargs=None):
''' Performs wavelet coherence analysis with the target timeseries
Parameters
----------
target_series : Series
A pyleoclim Series object on which to perform the coherence analysis
method : str
Possible methods {'wwz','cwt'}. Default is 'cwt', which only works
if the series share the same evenly-spaced time axis.
'wwz' is designed for unevenly-spaced data, but is far slower.
freq_method : str
{'log','scale', 'nfft', 'lomb_scargle', 'welch'}
freq_kwargs : dict
Arguments for frequency vector
common_time_kwargs : dict
Parameters for the method `MultipleSeries.common_time()`. Will use interpolation by default.
settings : dict
Arguments for the specific wavelet method (e.g. decay constant for WWZ, mother wavelet for CWT)
and common properties like standardize, detrend, gaussianize, pad, etc.
verbose : bool
If True, will print warning messages, if any
Returns
-------
coh : pyleoclim.core.coherence.Coherence
References
----------
Grinsted, A., Moore, J. C. & Jevrejeva, S. Application of the cross wavelet transform and
wavelet coherence to geophysical time series. Nonlin. Processes Geophys. 11, 561–566 (2004).
See also
--------
pyleoclim.utils.spectral.make_freq_vector : Functions to create the frequency vector
pyleoclim.utils.tsutils.detrend : Detrending function
pyleoclim.core.multipleseries.MultipleSeries.common_time : put timeseries on common time axis
pyleoclim.core.series.Series.wavelet : wavelet analysis
pyleoclim.utils.wavelet.wwz_coherence : coherence using the wwz method
pyleoclim.utils.wavelet.cwt_coherence : coherence using the cwt method
Examples
--------
Calculate the wavelet coherence of NINO3 and All India Rainfall with default arguments:
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/wtc_test_data_nino_even.csv')
time = data['t'].values
air = data['air'].values
nino = data['nino'].values
ts_air = pyleo.Series(time=time, value=data['air'].values, time_name='Year (CE)',
label='All India Rainfall', value_name='AIR (mm/month)')
ts_nino = pyleo.Series(time=time, value=data['nino'].values, time_name='Year (CE)',
label='NINO3', value_name='NINO3 (K)')
coh = ts_air.wavelet_coherence(ts_nino)
@savefig coh.png
coh.plot()
Note that in this example both timeseries are already on a common,
evenly-spaced time axis. If they are not (either because the data are unevenly spaced,
or because the time axes are different in some other way), an error will be raised.
To circumvent this error, you can either put the series
on a common time axis (e.g. using common_time()) prior to applying CWT, or you
can use the Weighted Wavelet Z-transform (WWZ) instead, as it is designed for
unevenly-spaced data. However, it is usually far slower:
.. ipython:: python
:okwarning:
:okexcept:
coh_wwz = ts_air.wavelet_coherence(ts_nino, method = 'wwz')
@savefig coh_wwz.png
coh_wwz.plot()
As with wavelet analysis, both CWT and WWZ admit optional arguments through `settings`.
Significance is assessed similarly as with PSD or Scalogram objects:
.. ipython:: python
:okwarning:
:okexcept:
cwt_sig = coh.signif_test(number=20, qs=[.9,.95]) # specifying 2 significance thresholds does not take any more time.
@savefig cwt_sig.png
# by default, the plot function will look for the closest quantile to 0.95, but it is easy to adjust:
cwt_sig.plot(signif_thresh = 0.9)
Another plotting option, `dashboard`, allows one to visualize both
timeseries as well as the wavelet transform coherency (WTC), which quantifies where
two timeseries exhibit similar behavior in time-frequency space, and the cross-wavelet
transform (XWT), which indicates regions of high common power.
.. ipython:: python
:okwarning:
:okexcept:
@savefig cwt_sig_dash.png
cwt_sig.dashboard()
Note: this design balances many considerations, and is not easily customizable.
'''
if not verbose:
warnings.simplefilter('ignore')
wtc_func = {
'wwz': waveutils.wwz_coherence,
'cwt': waveutils.cwt_coherence
}
# Process options
settings = {} if settings is None else settings.copy()
freq_kwargs = {} if freq_kwargs is None else freq_kwargs.copy()
freq = specutils.make_freq_vector(self.time, method=freq_method, **freq_kwargs)
args = {}
args['wwz'] = {'freq': freq}
args['cwt'] = {'freq': freq}
# put on same time axes if necessary
if method == 'cwt' and not np.array_equal(self.time, target_series.time):
warnings.warn("Series have different time axes. Applying common_time().")
ms = MultipleSeries([self, target_series])
common_time_kwargs = {} if common_time_kwargs is None else common_time_kwargs.copy()
ct_args = {'method': 'interp'}
ct_args.update(common_time_kwargs)
ms = ms.common_time(**ct_args)
ts1 = ms.series_list[0]
ts2 = ms.series_list[1]
elif method == 'cwt' and (not self.is_evenly_spaced() or not target_series.is_evenly_spaced()):
raise ValueError("The chosen method is cwt but at least one the series is unevenly spaced. You can either apply common_time() or use 'wwz'.")
else:
ts1 = self
ts2 = target_series
if method == 'wwz':
if 'ntau' in settings.keys():
ntau = settings['ntau']
else:
ntau = np.min([np.size(ts1.time), np.size(ts2.time), 50])
tau = np.linspace(np.min(self.time), np.max(self.time), ntau)
settings.update({'tau': tau})
args[method].update(settings)
# Apply WTC method
wtc_res = wtc_func[method](ts1.value, ts1.time, ts2.value, ts2.time, **args[method])
# Export result
coh = Coherence(
frequency=wtc_res.freq,
scale = wtc_res.scale,
time=wtc_res.time,
wtc=wtc_res.xw_coherence,
xwt=wtc_res.xw_amplitude,
phase=wtc_res.xw_phase,
coi=wtc_res.coi,
timeseries1= ts1,
timeseries2= ts2,
wave_method = method,
wave_args = args[method],
freq_method=freq_method,
freq_kwargs=freq_kwargs,
)
return coh
def correlation(self, target_series, timespan=None, alpha=0.05, settings=None, common_time_kwargs=None, seed=None):
''' Estimates Pearson's correlation and associated significance between two non-IID time series
The significance of the correlation is assessed using one of the following methods:
1) 'ttest': T-test adjusted for effective sample size.
2) 'isopersistent': AR(1) modeling of x and y.
3) 'isospectral': phase randomization of original inputs. (default)
The T-test is a parametric test, hence computationally cheap, but can only be performed in ideal circumstances.
The others are non-parametric, but their computational requirements scale with the number of simulations.
The choice of significance test and associated number of Monte-Carlo simulations are passed through the settings parameter.
Parameters
----------
target_series : Series
A pyleoclim Series object
timespan : tuple
The time interval over which to perform the calculation
alpha : float
The significance level (default: 0.05)
settings : dict
Parameters for the correlation function, including:
nsim : int
the number of simulations (default: 1000)
method : str, {'ttest','isopersistent','isospectral' (default)}
method for significance testing
common_time_kwargs : dict
Parameters for the method `MultipleSeries.common_time()`. Will use interpolation by default.
seed : float or int
random seed for isopersistent and isospectral methods
Returns
-------
corr : pyleoclim.Corr
the result object, containing
- r : float
correlation coefficient
- p : float
the p-value
- signif : bool
true if significant; false otherwise
Note that signif = True if and only if p <= alpha.
- alpha : float
the significance level
See also
--------
pyleoclim.utils.correlation.corr_sig : Correlation function
pyleoclim.multipleseries.common_time : Aligning time axes
Examples
--------
Correlation between the Nino3.4 index and the Deseasonalized All India Rainfall Index
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/wtc_test_data_nino.csv')
t = data.iloc[:, 0]
air = data.iloc[:, 1]
nino = data.iloc[:, 2]
ts_nino = pyleo.Series(time=t, value=nino)
ts_air = pyleo.Series(time=t, value=air)
# with `nsim=20` and default `method='isospectral'`
# set an arbitrary random seed to fix the result
corr_res = ts_nino.correlation(ts_air, settings={'nsim': 20}, seed=2333)
print(corr_res)
# using a simple t-test (parametric; no simulations or random seed needed)
corr_res = ts_nino.correlation(ts_air, settings={'method': 'ttest'})
print(corr_res)
# using the method "isopersistent"
# set an arbitrary random seed to fix the result
corr_res = ts_nino.correlation(ts_air, settings={'nsim': 20, 'method': 'isopersistent'}, seed=2333)
print(corr_res)
'''
settings = {} if settings is None else settings.copy()
corr_args = {'alpha': alpha}
corr_args.update(settings)
ms = MultipleSeries([self, target_series])
if list(self.time) != list(target_series.time):
common_time_kwargs = {} if common_time_kwargs is None else common_time_kwargs.copy()
ct_args = {'method': 'interp'}
ct_args.update(common_time_kwargs)
ms = ms.common_time(**ct_args)
if timespan is None:
value1 = ms.series_list[0].value
value2 = ms.series_list[1].value
else:
value1 = ms.series_list[0].slice(timespan).value
value2 = ms.series_list[1].slice(timespan).value
if seed is not None:
np.random.seed(seed)
corr_res = corrutils.corr_sig(value1, value2, **corr_args)
signif = True if corr_res['signif'] == 1 else False
corr = Corr(corr_res['r'], corr_res['p'], signif, alpha)
return corr
def causality(self, target_series, method='liang', timespan=None, settings=None, common_time_kwargs=None):
''' Perform causality analysis with the target timeseries. Specifically, assesses whether there is information in the target series that influenced the original series.
If the two series have different time axes, they are first placed on a common timescale (in ascending order).
Parameters
----------
target_series : Series
A pyleoclim Series object on which to compute causality
method : {'liang', 'granger'}
The causality method to use.
timespan : tuple
The time interval over which to perform the calculation
settings : dict
Parameters associated with the causality methods. Note that each method has different parameters. See individual methods for details
common_time_kwargs : dict
Parameters for the method `MultipleSeries.common_time()`. Will use interpolation by default.
Returns
-------
res : dict
Dictionary containing the results of the causality analysis. See individual methods for details
See also
--------
pyleoclim.utils.causality.liang_causality : Liang causality
pyleoclim.utils.causality.granger_causality : Granger causality
Examples
--------
Liang causality
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data=pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/wtc_test_data_nino.csv')
t=data.iloc[:,0]
air=data.iloc[:,1]
nino=data.iloc[:,2]
ts_nino=pyleo.Series(time=t,value=nino)
ts_air=pyleo.Series(time=t,value=air)
@savefig ts_nino.png
fig, ax = ts_nino.plot(title='NINO3 -- SST Anomalies')
pyleo.closefig(fig)
@savefig ts_air.png
fig, ax = ts_air.plot(title='Deseasonalized All India Rainfall Index')
pyleo.closefig(fig)
We use the specific params below to lighten computations; you may drop `settings` for real work
.. ipython:: python
:okwarning:
:okexcept:
liang_N2A = ts_air.causality(ts_nino, settings={'nsim': 20, 'signif_test': 'isopersist'})
print(liang_N2A)
liang_A2N = ts_nino.causality(ts_air, settings={'nsim': 20, 'signif_test': 'isopersist'})
print(liang_A2N)
liang_N2A['T21']/liang_A2N['T21']
Both information flows (T21) are positive, but the flow from NINO3 to AIR is about 3x as large as the reverse, suggesting that NINO3 influences AIR much more than the other way around, which conforms to physical intuition.
To implement Granger causality, simply specify the method:
.. ipython:: python
:okwarning:
:okexcept:
granger_A2N = ts_nino.causality(ts_air, method='granger')
granger_N2A = ts_air.causality(ts_nino, method='granger')
Note that the output is fundamentally different for the two methods. Granger causality cannot discriminate between NINO3 -> AIR or AIR -> NINO3, in this case. This is not unusual, and one reason why it is no longer in wide use.
'''
# Put on common axis if necessary
ms = MultipleSeries([self, target_series])
if list(self.time) != list(target_series.time):
common_time_kwargs = {} if common_time_kwargs is None else common_time_kwargs.copy()
ct_args = {'method': 'interp'}
ct_args.update(common_time_kwargs)
ms = ms.common_time(**ct_args)
if timespan is None:
value1 = ms.series_list[0].value
value2 = ms.series_list[1].value
else:
value1 = ms.series_list[0].slice(timespan).value
value2 = ms.series_list[1].slice(timespan).value
settings = {} if settings is None else settings.copy()
spec_func={
'liang':causalutils.liang_causality,
'granger':causalutils.granger_causality}
args = {}
args['liang'] = {}
args['granger'] = {}
args[method].update(settings)
causal_res = spec_func[method](value1, value2, **args[method])
return causal_res
def surrogates(self, method='ar1sim', number=1, length=None, seed=None, settings=None):
''' Generate surrogates with increasing time axis
Parameters
----------
method : {ar1sim}
Uses an AR1 model to generate surrogates of the timeseries
number : int
The number of surrogates to generate
length : int
Length of the series
seed : int
Control seed option for reproducibility
settings : dict
Parameters for the surrogate generator. See individual methods for details.
Returns
-------
surr : SurrogateSeries
See also
--------
pyleoclim.utils.tsmodel.ar1_sim : AR(1) simulator
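Examples
--------
A minimal sketch generating two AR(1) surrogates of the SOI series, loaded as in other docstrings of this class:
.. ipython:: python
:okwarning:
:okexcept:
import pyleoclim as pyleo
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/LinkedEarth/Pyleoclim_util/Development/example_data/soi_data.csv',skiprows=0,header=1)
time = data.iloc[:,1]
value = data.iloc[:,2]
ts = pyleo.Series(time=time, value=value, time_name='Year C.E', value_name='SOI', label='SOI')
surr = ts.surrogates(number=2, seed=2333)
len(surr.series_list)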
'''
settings = {} if settings is None else settings.copy()
surrogate_func = {
'ar1sim': tsmodel.ar1_sim,
}
args = {}
args['ar1sim'] = {'t': self.time}
args[method].update(settings)
if seed is not None:
np.random.seed(seed)
surr_res = surrogate_func[method](self.value, number, **args[method])
if len(np.shape(surr_res)) == 1:
surr_res = surr_res[:, np.newaxis]
s_list = []
for s in surr_res.T:
s_tmp = Series(time=self.time, value=s, time_name=self.time_name, time_unit=self.time_unit, value_name=self.value_name, value_unit=self.value_unit)
s_list.append(s_tmp)
surr = SurrogateSeries(series_list=s_list, surrogate_method=method, surrogate_args=args[method])
return surr
def outliers(self,method='kmeans',remove=True, settings=None,
fig_outliers=True, figsize_outliers=[10,4], plotoutliers_kwargs=None, savefigoutliers_settings=None,
fig_clusters=True,figsize_clusters=[10,4], plotclusters_kwargs=None,savefigclusters_settings=None, keep_log=False):
"""
Remove outliers from timeseries data
Parameters
----------
method : str, {'kmeans','DBSCAN'}, optional
The clustering method to use. The default is 'kmeans'.
remove : bool, optional
If True, removes the outliers. The default is True.
settings : dict, optional
Specific arguments for the clustering functions. The default is None.
fig_outliers : bool, optional
Whether to display the timeseries showing the outliers. The default is True.
figsize_outliers : list, optional
The dimensions of the outliers figure. The default is [10,4].
plotoutliers_kwargs : dict, optional
Arguments for the plot displaying the outliers. The default is None.
savefigoutliers_settings : dict, optional
Saving options for the outlier plot. The default is None.
- "path" must be specified; it can be any existed or non-existed path,
with or without a suffix; if the suffix is not given in "path", it will follow "format"
- "format" can be one of {"pdf", "eps", "png", "ps"}
fig_clusters : bool, optional
Whether to display the clusters. The default is True.
figsize_clusters : list, optional
The dimensions of the cluster figures. The default is [10,4].
plotclusters_kwargs : dict, optional
Arguments for the cluster plot. The default is None.
savefigclusters_settings : dict, optional
Saving options for the cluster plot. The default is None.
- "path" must be specified; it can be any existed or non-existed path,
with or without a suffix; if the suffix is not given in "path", it will follow "format"
- "format" can be one of {"pdf", "eps", "png", "ps"}
keep_log : Boolean
if True, adds the previous method parameters to the series log.
Returns
-------
ts: Series
A new Series object without outliers if remove is True. Otherwise, returns the original timeseries
See also
--------
pyleoclim.utils.tsutils.detect_outliers_DBSCAN : Outlier detection using the DBSCAN method
pyleoclim.utils.tsutils.detect_outliers_kmeans : Outlier detection using the kmeans method
pyleoclim.utils.tsutils.remove_outliers : Remove outliers from the series
"""
if method not in ['kmeans','DBSCAN']:
raise ValueError('method should either be "kmeans" or "DBSCAN"')
# run the algorithm
settings = {} if settings is None else settings.copy()
spec_func={
'kmeans':tsutils.detect_outliers_kmeans,
'DBSCAN':tsutils.detect_outliers_DBSCAN}
args = {}
args['kmeans'] = {}
args['DBSCAN'] = {}
args[method].update(settings)
indices, res = spec_func[method](self.value,**args[method])
# Create the new Series object
new=self.copy()
if remove==True:
if len(indices)>=1:
ys,ts=tsutils.remove_outliers(self.time,self.value,indices)
new.value=ys
new.time=ts
# Figures
# Optional parameters
savefigoutliers_settings = {} if savefigoutliers_settings is None else savefigoutliers_settings.copy()
savefigclusters_settings = {} if savefigclusters_settings is None else savefigclusters_settings.copy()
plotoutliers_kwargs = {} if plotoutliers_kwargs is None else plotoutliers_kwargs.copy()
plotclusters_kwargs = {} if plotclusters_kwargs is None else plotclusters_kwargs.copy()
# Figure showing the outliers
if fig_outliers == True:
fig,ax = plt.subplots(figsize=figsize_outliers)
time_label, value_label = self.make_labels()
if 'xlabel' not in plotoutliers_kwargs.keys():
xlabel = time_label
else:
xlabel = plotoutliers_kwargs['xlabel']
plotoutliers_kwargs.pop('xlabel')
if 'ylabel' not in plotoutliers_kwargs.keys():
ylabel = value_label
else:
ylabel = plotoutliers_kwargs['ylabel']
plotoutliers_kwargs.pop('ylabel')
if 'title' not in plotoutliers_kwargs.keys():
title = None
else:
title = plotoutliers_kwargs['title']
plotoutliers_kwargs.pop('title')
if 'xlim' not in plotoutliers_kwargs.keys():
xlim = None
else:
xlim = plotoutliers_kwargs['xlim']
plotoutliers_kwargs.pop('xlim')
if 'ylim' not in plotoutliers_kwargs.keys():
ylim = None
else:
ylim = plotoutliers_kwargs['ylim']
plotoutliers_kwargs.pop('ylim')
if 'legend' not in plotoutliers_kwargs.keys():
legend = True
else:
legend = plotoutliers_kwargs['legend']
plotoutliers_kwargs.pop('legend')
if len(indices)>=1:
plotting.plot_scatter_xy(self.time,self.value,self.time[indices],self.value[indices],
xlabel=xlabel,ylabel=ylabel,
title = title, xlim=xlim, ylim=ylim, legend=legend,
plot_kwargs=plotoutliers_kwargs,ax=ax)
else:
plotting.plot_xy(self.time,self.value,
xlabel=xlabel,ylabel=ylabel,
title = title, xlim=xlim, ylim=ylim, legend=legend,
plot_kwargs=plotoutliers_kwargs,ax=ax)
#Saving options
if 'path' in savefigoutliers_settings:
plotting.savefig(fig,settings=savefigoutliers_settings)
if fig_clusters == True:
fig,ax = plt.subplots(figsize=figsize_clusters)
# deal with plot options
time_label, value_label = self.make_labels()
if 'xlabel' not in plotclusters_kwargs.keys():
xlabel = time_label
else:
xlabel = plotclusters_kwargs['xlabel']
plotclusters_kwargs.pop('xlabel')
if 'ylabel' not in plotclusters_kwargs.keys():
ylabel = value_label
else:
ylabel = plotclusters_kwargs['ylabel']
plotclusters_kwargs.pop('ylabel')
if 'title' not in plotclusters_kwargs.keys():
title = None
else:
title = plotclusters_kwargs['title']
plotclusters_kwargs.pop('title')
if 'xlim' not in plotclusters_kwargs.keys():
xlim = None
else:
xlim = plotclusters_kwargs['xlim']
plotclusters_kwargs.pop('xlim')
if 'ylim' not in plotclusters_kwargs.keys():
ylim = None
else:
ylim = plotclusters_kwargs['ylim']
plotclusters_kwargs.pop('ylim')
if 'legend' not in plotclusters_kwargs.keys():
legend = True
else:
legend = plotclusters_kwargs['legend']
plotclusters_kwargs.pop('legend')
clusters = np.array(res.loc[res['silhouette score']==np.max(res['silhouette score'])]['clusters'])[0]
if 'c' not in plotclusters_kwargs.keys():
color_list = list(mcolors.CSS4_COLORS.keys())
color_list.remove('red')
random.Random(9).shuffle(color_list)
colors = color_list[0:len(np.unique(clusters))]
vectorizer = np.vectorize(lambda x: colors[x % len(colors)])
c = vectorizer(clusters)
else:
c = plotclusters_kwargs['c']
plotclusters_kwargs.pop('c')
plotting.scatter_xy(self.time,self.value,c = c, xlabel=xlabel,ylabel=ylabel,
title = title, xlim=xlim, ylim=ylim, legend=legend,
plot_kwargs = plotclusters_kwargs, ax=ax)
#plot
if np.size(indices) != 0:
plotting.scatter_xy(self.time[indices],self.value[indices],c='red',ax=ax)
if 'path' in savefigclusters_settings:
plotting.savefig(fig,settings=savefigclusters_settings)
#return the log if asked
if keep_log == True:
if method == 'kmeans':
new.log += ({len(new.log): 'outliers','method': method,
'args': settings,
'nbr_clusters':np.array(res['number of clusters']),
'silhouette_score':np.array(res['silhouette score']),
'outlier_indices':np.array(res['outlier indices']),
'clusters':np.array(res['clusters'])},)
elif method == 'DBSCAN':
new.log += ({len(new.log): 'outliers','method': method,
'args': settings,
'eps':np.array(res['eps']),
'min_samples':np.array(res['min_samples']),
'nbr_clusters':np.array(res['number of clusters']),
'silhouette_score':np.array(res['silhouette score']),
'outlier_indices':np.array(res['outlier indices']),
'clusters':np.array(res['clusters'])},)
return new
def interp(self, method='linear', keep_log= False, **kwargs):
'''Interpolate a Series object onto a new time axis
Parameters
----------
method : {‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’}
where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’ refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and ‘next’ simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is ‘linear’.
keep_log : Boolean
if True, adds the method name and its parameters to the series log.
kwargs :
Arguments specific to each interpolation function. See pyleoclim.utils.tsutils.interp for details
Returns
-------
new : Series
An interpolated Series object
See also
--------
pyleoclim.utils.tsutils.interp : interpolation function
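Examples
--------
A minimal sketch, interpolating an unevenly-spaced synthetic series onto an even grid (values are illustrative):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
np.random.seed(2333)
time = np.sort(np.random.uniform(0, 100, size=50))
ts = pyleo.Series(time=time, value=np.sin(time/10))
ts_linear = ts.interp()  # linear interpolation (default)
ts_cubic = ts.interp(method='cubic')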
'''
new = self.copy()
ti, vi = tsutils.interp(self.time,self.value,interp_type=method,**kwargs)
new.time = ti
new.value = vi
if keep_log == True:
new.log += ({len(new.log):'interp', 'method': method, 'args': kwargs},)
return new
def gkernel(self, step_type='median', keep_log = False, **kwargs):
''' Coarse-grain a Series object via a Gaussian kernel.
Like .bin() this technique is conservative and uses the max space between points
as the default spacing. Unlike .bin(), gkernel() uses a gaussian kernel to
calculate the weighted average of the time series over these intervals.
Parameters
----------
step_type : str
type of timestep: 'mean', 'median', or 'max' of the time increments
keep_log : Boolean
if True, adds the step type and its keyword arguments to the series log.
kwargs :
Arguments for kernel function. See pyleoclim.utils.tsutils.gkernel for details
Returns
-------
new : Series
The coarse-grained Series object
See also
--------
pyleoclim.utils.tsutils.gkernel : application of a Gaussian kernel
'''
new=self.copy()
ti, vi = tsutils.gkernel(self.time, self.value, **kwargs) # apply kernel
new.time = ti
new.value = vi
if keep_log == True:
new.log += ({len(new.log):'gkernel', 'step_type': step_type, 'args': kwargs},)
return new
def bin(self, keep_log = False, **kwargs):
'''Bin values in a time series
Parameters
----------
keep_log : Boolean
if True, adds this step and its parameters to the series log.
kwargs :
Arguments for binning function. See pyleoclim.utils.tsutils.bin for details
Returns
-------
new : Series
A binned Series object
See also
--------
pyleoclim.utils.tsutils.bin : bin the series values into evenly-spaced time bins
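Examples
--------
A minimal sketch, binning an unevenly-spaced synthetic series with default settings (values are illustrative):
.. ipython:: python
:okwarning:
:okexcept:
import numpy as np
import pyleoclim as pyleo
np.random.seed(2333)
time = np.sort(np.random.uniform(0, 100, size=50))
ts = pyleo.Series(time=time, value=np.sin(time/10))
ts_binned = ts.bin()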
'''
new=self.copy()
res_dict = tsutils.bin(self.time,self.value,**kwargs)
new.time = res_dict['bins']
new.value = res_dict['binned_values']
if keep_log == True:
new.log += ({len(new.log):'bin', 'args': kwargs},)
return new
| [] |
2024-01-10 | fjfrankjack/localGPT | run_localGPT.py | import logging
import click
import torch
from auto_gptq import AutoGPTQForCausalLM
from huggingface_hub import hf_hub_download
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline, LlamaCpp
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
LlamaForCausalLM,
LlamaTokenizer,
pipeline,
)
from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY
def load_model(device_type, model_id, model_basename=None):
"""
Select a model for text generation using the HuggingFace library.
If you are running this for the first time, it will download the model for you.
Subsequent runs will use the model from disk.
Args:
device_type (str): Type of device to use, e.g., "cuda" for GPU or "cpu" for CPU.
model_id (str): Identifier of the model to load from HuggingFace's model hub.
model_basename (str, optional): Basename of the model if using quantized models.
Defaults to None.
Returns:
HuggingFacePipeline: A pipeline object for text generation using the loaded model.
Raises:
ValueError: If an unsupported model or device type is provided.
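Example (illustrative sketch; uses the GGML model referenced in main() below, which
runs through the LlamaCpp branch when device_type is "cpu" or "mps"):
llm = load_model("cpu", model_id="TheBloke/Llama-2-7B-Chat-GGML", model_basename="llama-2-7b-chat.ggmlv3.q4_0.bin")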
"""
logging.info(f"Loading Model: {model_id}, on: {device_type}")
logging.info("This action can take a few minutes!")
if model_basename is not None:
if device_type.lower() in ["cpu", "mps"]:
logging.info("Using Llamacpp for quantized models")
model_path = hf_hub_download(repo_id=model_id, filename=model_basename)
if device_type.lower() == "mps":
return LlamaCpp(
model_path=model_path,
n_ctx=2048,
max_tokens=2048,
temperature=0,
repeat_penalty=1.15,
n_gpu_layers=1000,
)
return LlamaCpp(model_path=model_path, n_ctx=2048, max_tokens=2048, temperature=0, repeat_penalty=1.15)
else:
# The code supports all huggingface models that end with GPTQ and have some variation
# of .no-act.order or .safetensors in their HF repo.
logging.info("Using AutoGPTQForCausalLM for quantized models")
if ".safetensors" in model_basename:
# Remove the ".safetensors" ending if present
model_basename = model_basename.replace(".safetensors", "")
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
logging.info("Tokenizer loaded")
model = AutoGPTQForCausalLM.from_quantized(
model_id,
model_basename=model_basename,
use_safetensors=True,
trust_remote_code=True,
device="cuda:0",
use_triton=False,
quantize_config=None,
)
elif (
device_type.lower() == "cuda"
): # The code supports all huggingface models that end with -HF or which have a .bin
# file in their HF repo.
logging.info("Using AutoModelForCausalLM for full models")
tokenizer = AutoTokenizer.from_pretrained(model_id)
logging.info("Tokenizer loaded")
model = AutoModelForCausalLM.from_pretrained(
model_id,
device_map="auto",
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
trust_remote_code=True,
# max_memory={0: "15GB"} # Uncomment this line if you encounter CUDA out-of-memory errors
)
model.tie_weights()
else:
logging.info("Using LlamaTokenizer")
tokenizer = LlamaTokenizer.from_pretrained(model_id)
model = LlamaForCausalLM.from_pretrained(model_id)
# Load configuration from the model to avoid warnings
generation_config = GenerationConfig.from_pretrained(model_id)
# see here for details:
# https://huggingface.co/docs/transformers/
# main_classes/text_generation#transformers.GenerationConfig.from_pretrained.returns
# Create a pipeline for text generation
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_length=2048,
temperature=0,
top_p=0.95,
repetition_penalty=1.15,
generation_config=generation_config,
)
local_llm = HuggingFacePipeline(pipeline=pipe)
logging.info("Local LLM Loaded")
return local_llm
# choose the device type to run on, as well as whether to show source documents.
@click.command()
@click.option(
"--device_type",
default="cuda",
type=click.Choice(
[
"cpu",
"cuda",
"ipu",
"xpu",
"mkldnn",
"opengl",
"opencl",
"ideep",
"hip",
"ve",
"fpga",
"ort",
"xla",
"lazy",
"vulkan",
"mps",
"meta",
"hpu",
"mtia",
],
),
help="Device to run on. (Default is cuda)",
)
@click.option(
"--show_sources",
"-s",
is_flag=True,
help="Show sources along with answers (Default is False)",
)
def main(device_type, show_sources):
"""
This function implements the information retrieval task.
1. Loads an embedding model, which can be HuggingFaceInstructEmbeddings or HuggingFaceEmbeddings
2. Loads the existing vector store that was created by ingest.py
3. Loads the local LLM using the load_model function - You can now set different LLMs.
4. Sets up the question-answering retrieval chain.
5. Answers questions interactively.
"""
logging.info(f"Running on: {device_type}")
logging.info(f"Display Source Documents set to: {show_sources}")
embeddings = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": device_type})
# uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py
# embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
# load the vectorstore
db = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=embeddings,
client_settings=CHROMA_SETTINGS,
)
retriever = db.as_retriever()
# load the LLM for generating Natural Language responses
# for HF models
# model_id = "TheBloke/vicuna-7B-1.1-HF"
# model_basename = None
# model_id = "TheBloke/Wizard-Vicuna-7B-Uncensored-HF"
# model_id = "TheBloke/guanaco-7B-HF"
# model_id = 'NousResearch/Nous-Hermes-13b' # Requires ~ 23GB VRAM. Using STransformers
# alongside will 100% create OOM on 24GB cards.
# llm = load_model(device_type, model_id=model_id)
# for GPTQ (quantized) models
# model_id = "TheBloke/Nous-Hermes-13B-GPTQ"
# model_basename = "nous-hermes-13b-GPTQ-4bit-128g.no-act.order"
# model_id = "TheBloke/WizardLM-30B-Uncensored-GPTQ"
# model_basename = "WizardLM-30B-Uncensored-GPTQ-4bit.act-order.safetensors" # Requires
# ~21GB VRAM. Using STransformers alongside can potentially create OOM on 24GB cards.
# model_id = "TheBloke/wizardLM-7B-GPTQ"
# model_basename = "wizardLM-7B-GPTQ-4bit.compat.no-act-order.safetensors"
# model_id = "TheBloke/WizardLM-7B-uncensored-GPTQ"
# model_basename = "WizardLM-7B-uncensored-GPTQ-4bit-128g.compat.no-act-order.safetensors"
# for GGML (quantized cpu+gpu+mps) models - check if they support llama.cpp
# model_id = "TheBloke/wizard-vicuna-13B-GGML"
# model_basename = "wizard-vicuna-13B.ggmlv3.q4_0.bin"
# model_basename = "wizard-vicuna-13B.ggmlv3.q6_K.bin"
# model_basename = "wizard-vicuna-13B.ggmlv3.q2_K.bin"
# model_id = "TheBloke/orca_mini_3B-GGML"
# model_basename = "orca-mini-3b.ggmlv3.q4_0.bin"
model_id="TheBloke/Llama-2-7B-Chat-GGML"
model_basename = "llama-2-7b-chat.ggmlv3.q4_0.bin"
llm = load_model(device_type, model_id=model_id, model_basename=model_basename)
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
# Get the answer from the chain
res = qa(query)
answer, docs = res["result"], res["source_documents"]
# Print the result
print("\n\n> Question:")
print(query)
print("\n> Answer:")
print(answer)
if show_sources: # this flag controls whether the source documents are printed.
# # Print the relevant sources used for the answer
print("----------------------------------SOURCE DOCUMENTS---------------------------")
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
print("----------------------------------SOURCE DOCUMENTS---------------------------")
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
main()
| [] |
2024-01-10 | Blaqadonis/broda-man | src~gradio_web_service_interface.py | import gradio as gr
import openai
import os
# Set your OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
# Import the fine-tuning model ID from the environment
model_id = os.getenv("BRODAMAN_FINETUNE_MODEL_ID", "ft:gpt-3.5-turbo-0613:personal:broda-man:8J4pz8Md")
# Function to generate completions
def generate_completion(location, destination):
"""Generates a completion using the fine-tuned model."""
# Define the system prompt for your fine-tuned model
system_prompt = """You are Broda-man, the Lagos state traffic bot. You assist users who want to beat traffic in Lagos at all costs, by providing them with routes with less traffic when they provide you with their location and destination details. You respond strictly and only in Nigerian pidgin language. You are often cheerful too."""
# Construct messages
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": f"Location: {location}\nDestination: {destination}"},
]
# Generate a completion using your fine-tuned model
response = openai.ChatCompletion.create(
model=model_id,
messages=messages,
max_tokens=100,
temperature=0.7,
)
# Strip the response of whitespace
return response["choices"][0]["message"]["content"].strip()
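# Illustrative local check (assumes OPENAI_API_KEY is set and the fine-tuned model above is reachable):
# print(generate_completion("Ikeja", "Yaba"))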
# Create a Gradio interface
iface = gr.Interface(
fn=generate_completion,
inputs=[
gr.Textbox(lines=5, label="Location", placeholder="Country person, which side you dey?"),
gr.Textbox(lines=5, label="Destination", placeholder="Where you dey go?"),
],
outputs="text",
title="Ask Broda-man! Your Friendly Transport Route Bot",
live=True,
)
# Launch the Gradio interface
iface.launch()
| [
"Location: PLACEHOLDER\nDestination: PLACEHOLDER",
"You are Broda-man, the Lagos state traffic bot. You assist users who want to beat traffic in Lagos at all costs, by providing them with routes with less traffic when they provide you with their location and destination details. You respond strictly and only in Nigerian pidgin language. You are often cheerful too."
] |
2024-01-10 | Blaqadonis/broda-man | dev~gen_larger_brodaman_dataset.py | import os
import json
import openai
import time
from random import choice
import wandb
import getpass
# Set up Weights & Biases API key and login
wandb.login(key=os.getenv("WANDB_API_KEY"))
# Set up wandb project and initialize a run
wandb.init(project="broda-man-finetuning")
# OpenAI API key
if os.getenv("OPENAI_API_KEY") is None:
if any(['VSCODE' in x for x in os.environ.keys()]):
print('Please enter password in the VS Code prompt at the top of your VS Code window!')
os.environ["OPENAI_API_KEY"] = getpass.getpass("Paste your OpenAI Key from: https://platform.openai.com/account/api-keys\n")
openai.api_key = os.getenv("OPENAI_API_KEY", "")
assert os.getenv("OPENAI_API_KEY", "").startswith("sk-"), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
# model name
MODEL_NAME = "gpt-3.5-turbo"
# System prompt
system_prompt = """You are Broda-man, the Lagos state traffic bot. I assist users who want to beat traffic in Lagos at all costs, by providing them with routes with less traffic when they provide me with their location and destination details. I respond strictly and only in Nigerian pidgin language. I am often cheerful too."""
# initial examples with guided responses
initial_examples = [
{
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": "Location: Lekki. Destination: Ojo."},
{"role": "assistant", "content": "Country person, if you wan reach Ojo from Lekki quick-quick, burst enter Lekki-Epe Expressway put head for left (westward), then move enter Ozumba Mbadiwe Avenue. Follow signboard straight, see you see Third Mainland Bridge. As you reach Third Mainland so, just dey go, one way to Ojo. You don swerve better better traffic be dat!"}
]
},
{
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": "Location: Ikeja. Destination: Yaba."},
{"role": "assistant", "content": "The sharpest route from Ikeja to Yaba now now, face towards Mobolaji Bank Anthony Way. You dey hear me? Turn right gbaga Ikorodu Road! Down down to Yaba. If you dey cook beans you go reach before your beans done."}
]
},
{
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": "Location: Epe. Destination: Lekki Phase 1."},
{"role": "assistant", "content": "To Lekki Phase 1 from Epe? Oya na, one way movement on Lekki-Epe Expressway, then push enter right fall inside Admiralty Way ichom!. Lekki Phase 1 dey look you by your right hand side."}
]
},
{
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": "Location: Ojo Barracks. Destination: Masha."},
{"role": "assistant", "content": "If you dey go Masha for Surulere, and you dey Ojo Barracks like dis. Hanlele! Mazamaza! Use Apapa-Oshodi Expressway. No other way about it o, country person. This kain waka ehn na early momo or for night o if e sure for you. The traffic no be here."}
]
},
{
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": "Location: LUTH. Destination: Lawanson."},
{"role": "assistant", "content": "To reach Lawanson from LUTH, e easy! Just burst out from LUTH move down down through inside Western Avenue, you go reach Lawanson kia-kia. No go sidon for traffic o."}
]
}
]
# locations and destinations combined into a list of examples
examples = initial_examples
# Generate additional examples with system responses using randomly chosen locations and destinations
for _ in range(100):
example = choice(initial_examples)
user_messages = example["messages"][:-1] # Exclude the assistant's response
    user_content = user_messages[1]["content"]  # e.g. "Location: Lekki. Destination: Ojo."
    location = user_content.split("Location:")[1].split(".")[0].strip()  # Extract location from the user message
    destination = user_content.split("Destination:")[1].strip(" .")  # Extract destination from the user message
response = openai.ChatCompletion.create(
model=MODEL_NAME,
messages=user_messages,
max_tokens=150,
stop=None
)
generated_text = response['choices'][0]['message']['content']
system_messages = [
{"role": "assistant", "content": generated_text}
]
conversation = user_messages + system_messages
updated_example = {
"messages": conversation
}
examples.append(updated_example)
# Log examples
wandb.log({"examples": examples})
# Save the examples to a JSONL file with one example per line
with open("broda_man_dataset.jsonl", "w") as jsonl_file:
for example in examples:
jsonl_file.write(json.dumps(example) + '\n')
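# (Hedged addition, not in the original script) Minimal sanity check: re-read the JSONL file and
# confirm every line is valid JSON with a "messages" list, the shape the OpenAI chat fine-tuning
# endpoint expects.
with open("broda_man_dataset.jsonl") as check_file:
    for line_number, line in enumerate(check_file, start=1):
        record = json.loads(line)
        assert isinstance(record.get("messages"), list), f"Malformed record on line {line_number}"
print(f"Wrote and validated {len(examples)} examples to broda_man_dataset.jsonl")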
| [
"You are Broda-man, the Lagos state traffic bot. I assist users who want to beat traffic in Lagos at all costs, by providing them with routes with less traffic when they provide me with their location and destination details. I respond strictly and only in Nigerian pidgin language. I am often cheerful too.",
"Location: Lekki. Destination: Ojo.",
"Location: Epe. Destination: Lekki Phase 1.",
"If you dey go Masha for Surulere, and you dey Ojo Barracks like dis. Hanlele! Mazamaza! Use Apapa-Oshodi Expressway. No other way about it o, country person. This kain waka ehn na early momo or for night o if e sure for you. The traffic no be here.",
"Location: LUTH. Destination: Lawanson.",
"Location: Ojo Barracks. Destination: Masha.",
"The sharpest route from Ikeja to Yaba now now, face towards Mobolaji Bank Anthony Way. You dey hear me? Turn right gbaga Ikorodu Road! Down down to Yaba. If you dey cook beans you go reach before your beans done.",
"Location: Ikeja. Destination: Yaba.",
"To Lekki Phase 1 from Epe? Oya na, one way movement on Lekki-Epe Expressway, then push enter right fall inside Admiralty Way ichom!. Lekki Phase 1 dey look you by your right hand side.",
"To reach Lawanson from LUTH, e easy! Just burst out from LUTH move down down through inside Western Avenue, you go reach Lawanson kia-kia. No go sidon for traffic o.",
"Country person, if you wan reach Ojo from Lekki quick-quick, burst enter Lekki-Epe Expressway put head for left (westward), then move enter Ozumba Mbadiwe Avenue. Follow signboard straight, see you see Third Mainland Bridge. As you reach Third Mainland so, just dey go, one way to Ojo. You don swerve better better traffic be dat!"
] |
2024-01-10 | Blaqadonis/broda-man | src~job_status.py | import openai
import os
import getpass
# OpenAI API key
if os.getenv("OPENAI_API_KEY") is None:
if any(['VSCODE' in x for x in os.environ.keys()]):
print('Please enter password in the VS Code prompt at the top of your VS Code window!')
os.environ["OPENAI_API_KEY"] = getpass.getpass("Paste your OpenAI Key from: https://platform.openai.com/account/api-keys\n")
openai.api_key = os.getenv("OPENAI_API_KEY", "")
assert os.getenv("OPENAI_API_KEY", "").startswith("sk-"), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
# Import the fine-tuning job ID from the environment
job_id = os.environ["BRODAMAN_FINETUNE_JOB_ID"]
# Check the status of the fine-tuning job
job = openai.FineTuningJob.retrieve(id=job_id)
# Print the status
print("Job Status:", job.status)
print("Model ID:", job.model)
| [] |
2024-01-10 | Blaqadonis/broda-man | data~gen_eval_dataset.py | import json
import os
import wandb
import openai
import getpass
# OpenAI API key
if os.getenv("OPENAI_API_KEY") is None:
if any(['VSCODE' in x for x in os.environ.keys()]):
print('Please enter password in the VS Code prompt at the top of your VS Code window!')
os.environ["OPENAI_API_KEY"] = getpass.getpass("Paste your OpenAI Key from: https://platform.openai.com/account/api-keys\n")
openai.api_key = os.getenv("OPENAI_API_KEY", "")
assert os.getenv("OPENAI_API_KEY", "").startswith("sk-"), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
# Set up WandB project and run
wandb.init(project="evaluation-dataset")
# locations and destinations for evaluation
evaluation_data = [
{"location": "Ikeja", "destination": "National Stadium"},
{"location": "Lekki", "destination": "Ojo"},
{"location": "Masha", "destination": "Doyin"},
{"location": "Surulere", "destination": "Badagry"},
{"location": "Ikeja", "destination": "National Stadium"},
{"location": "Lekki", "destination": "Ojo"},
{"location": "Masha", "destination": "Doyin"},
{"location": "Berger", "destination": "Festac"},
{"location": "Computer Village", "destination": "Maryland"},
{"location": "Iponri", "destination": "Aguda"},
{"location": "Mile-2", "destination": "Iyana-Ipaja"},
{"location": "Iyana Ipaja", "destination": "Costain"},
{"location": "Epe", "destination": "Ebute-Meta"},
{"location": "Costain", "destination": "LUTH"},
{"location": "Idi-Araba", "destination": "Doyin"},
{"location": "Admiralty Road, Lekki Phase 1", "destination": "Lekki, Phase 2"},
{"location": "Ikeja", "destination": "Yaba"},
{"location": "Ojo Barracks", "destination": "Trade-Fair"}
]
# An empty list to store evaluation examples
evaluation_examples = []
# Generate conversations for evaluation
for data in evaluation_data:
location = data["location"]
destination = data["destination"]
user_messages = [
{"role": "user", "content": f"Location: {location}."},
{"role": "user", "content": f"Destination: {destination}."}
]
# Modify the expected assistant reply for each location-destination pair
expected_reply = ""
if location == "Ikeja" and destination == "National Stadium":
expected_reply = "Na straight na. Take Mobolaji Bank Anthony Way, then turn right onto Ikorodu Road. Follow Ikorodu Road all the way to the stadium."
elif location == "Lekki" and destination == "Ojo":
expected_reply = "Take Lekki-Epe Expressway to Ozumba Mbadiwe Avenue. Then turn left onto the Third Mainland Bridge. Follow the Third Mainland Bridge all the way to Ojo."
elif location == "Masha" and destination == "Doyin":
expected_reply = "Take Apapa-Oshodi Expressway to Costain. Then turn left onto Ijora Causeway. Follow Ijora Causeway to Iganmu. Then turn right onto Doyin Street."
elif location == "Surulere" and destination == "Badagry":
expected_reply = "Take Oshodi-Apapa Expressway to Badagry Expressway. Follow Badagry Expressway all the way to Badagry."
elif location == "Berger" and destination == "Festac":
expected_reply = "Take Lagos-Abeokuta Expressway to Festac Town Exit. Then turn left onto Festac Link Road. Follow Festac Link Road to Festac Town."
elif location == "Computer Village" and destination == "Maryland":
expected_reply = "Take Ikorodu Road to Ikeja Under Bridge. Then turn left onto Mobolaji Bank Anthony Way. Follow Mobolaji Bank Anthony Way to Maryland."
elif location == "Iponri" and destination == "Aguda":
expected_reply = "Take Lagos Island Ring Road to Fatai Atere Way. Then turn right onto Igbosere Road. Follow Igbosere Road to Aguda."
elif location == "Mile-2" and destination == "Iyana-Ipaja":
expected_reply = "Take Lagos-Badagry Expressway to Iyana-Ipaja Exit. Then turn right onto Iyana-Ipaja Road. Follow Iyana-Ipaja Road to Iyana-Ipaja."
elif location == "Iyana Ipaja" and destination == "Costain":
expected_reply = "Take Iyana-Ipaja Road to Lagos-Abeokuta Expressway. Then turn left onto Oshodi-Apapa Expressway. Follow Oshodi-Apapa Expressway all the way to Costain."
evaluation_example = {
"messages": user_messages,
"expected_reply": expected_reply
}
evaluation_examples.append(evaluation_example)
# Log evaluation examples to WandB
wandb.log({"evaluation_examples": evaluation_examples})
# Save the evaluation dataset to a JSON file
with open("evaluation_dataset.jsonl", "w") as jsonl_file:
for example in evaluation_examples:
json.dump(example, jsonl_file)
jsonl_file.write("\n")
# Log the evaluation dataset to WandB
artifact = wandb.Artifact(name="evaluation_dataset", type="dataset")
artifact.add_file("evaluation_dataset.jsonl")
wandb.run.log_artifact(artifact)
print(f"Evaluation dataset saved to 'evaluation_dataset.jsonl' with {len(evaluation_examples)} examples.")
| [
"Location: PLACEHOLDER.",
"Destination: PLACEHOLDER."
] |
2024-01-10 | thohag/alpaca_llama_index | script.py | import torch
from langchain.llms.base import LLM
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding
from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, GPTSimpleVectorIndex
from peft import PeftModel
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
hf_model_path = "models/llama-7b"
alpaca_model_path = "models/lora-alpaca"
tokenizer = LlamaTokenizer.from_pretrained(hf_model_path)
model = LlamaForCausalLM.from_pretrained(
hf_model_path,
    load_in_8bit=True,  # Disabling this could resolve some errors
device_map="auto",
)
model = PeftModel.from_pretrained(model, alpaca_model_path)
device = torch.device("cuda") if torch.cuda.is_available() else "cpu"
max_length = 1500 #2048
max_new_tokens = 48
class LLaMALLM(LLM):
def _call(self, prompt, stop=None):
prompt += "### Response:"
inputs = tokenizer(prompt, return_tensors="pt")
input_ids = inputs["input_ids"].cuda()
generation_config = GenerationConfig(
temperature=0.6,
top_p=0.95,
repetition_penalty=1.15,
)
with torch.no_grad():
generation_output = model.generate(
input_ids=input_ids,
generation_config=generation_config,
return_dict_in_generate=True,
output_scores=True,
max_new_tokens=128,
)
response = ""
for s in generation_output.sequences:
response += tokenizer.decode(s)
response = response[len(prompt):]
print("Model Response:", response)
return response
    @property
    def _identifying_params(self):
        return {"name_of_model": "alpaca"}
    @property
    def _llm_type(self):
        return "custom"
max_input_size = max_length
num_output = max_new_tokens
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
documents = SimpleDirectoryReader('data').load_data()
llm_predictor = LLMPredictor(llm=LLaMALLM())
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper)
index.save_to_disk('index.json')
new_index = GPTSimpleVectorIndex.load_from_disk('index.json', embed_model=embed_model, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
response = new_index.query("What did Gatsby do before he met Daisy?")
print(response.response)
response = new_index.query("What did the narrator do after getting back to Chicago?")
print(response.response)
| [
"### Response:"
] |
2024-01-10 | meekmachine/ming-mind-langchain-backend | nebulizer.py | import os
import pandas as pd
from convokit import Corpus, download
import openai
from firebase_admin import credentials, firestore, initialize_app
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import redis
import pickle
import json
from dotenv import load_dotenv
import firebase_admin
load_dotenv()
cred = credentials.Certificate(".keys/ming-527ed-firebase-adminsdk-z38ui-27b7e06411.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
# OpenAI setup
openai.api_key = os.getenv("OPENAI_API_KEY")
# Redis setup
r = redis.Redis(host='localhost', port=6379, db=0)
def calculate_embeddings(text):
response = openai.Embedding.create(input=text, engine="text-similarity-babbage-001")
return response['data'][0]['embedding']
def load_corpus():
# Check if DataFrames are in Redis
conversations_df = r.get('conversations_df')
utterances_df = r.get('utterances_df')
speakers_df = r.get('speakers_df')
if conversations_df is None or utterances_df is None or speakers_df is None:
try:
# Download and load the corpus
corpus = Corpus(filename=download("conversations-gone-awry-corpus"))
conversations_df = corpus.get_conversations_dataframe()
utterances_df = corpus.get_utterances_dataframe()
speakers_df = corpus.get_speakers_dataframe()
# Serialize and store the DataFrames in Redis
r.set('conversations_df', pickle.dumps(conversations_df))
r.set('utterances_df', pickle.dumps(utterances_df))
r.set('speakers_df', pickle.dumps(speakers_df))
except json.JSONDecodeError:
print("Error loading corpus. Please check the corpus data.")
return None, None, None
else:
# Deserialize the DataFrames
conversations_df = pickle.loads(conversations_df)
utterances_df = pickle.loads(utterances_df)
speakers_df = pickle.loads(speakers_df)
return conversations_df, utterances_df, speakers_df
def process_conversations(conversations_df, utterances_df, speakers_df):
# Filter and merge dataframes
merged_df = pd.merge(utterances_df, speakers_df, left_on='speaker', right_index=True)
merged_df = pd.merge(merged_df, conversations_df, left_on='conversation_id', right_index=True)
# Process each conversation
for convo_id, convo_data in merged_df.groupby('conversation_id'):
# Calculate total personal attacks and average toxicity
total_attacks = convo_data['meta.comment_has_personal_attack'].sum()
avg_toxicity = convo_data['meta.toxicity'].mean()
# Get conversation start time
start_time = convo_data['timestamp'].min()
# Calculate embeddings for the conversation
convo_text = ' '.join(convo_data['text'].tolist())
embeddings = calculate_embeddings(convo_text)
# Prepare data for Firebase
convo_record = {
'convo_id': convo_id,
'total_personal_attacks': total_attacks,
'average_toxicity': avg_toxicity,
'start_time': start_time,
'embeddings': embeddings,
# Include additional metadata as needed
}
# Save to Firebase
db.collection('conversations').document(convo_id).set(convo_record)
print("Conversations processed and saved to Firebase.")
conversations_df, utterances_df, speakers_df = load_corpus()
if conversations_df is not None:
process_conversations(conversations_df.head(100), utterances_df, speakers_df)
else:
print("Failed to load corpus data.")
| [] |
2024-01-10 | meekmachine/ming-mind-langchain-backend | convo_topic_emotion_classification.py | # -*- coding: utf-8 -*-
"""convo-topic-emotion-classification.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kQhVwHHvPdpENStQfrzWh3SQlIr4Nl6W
"""
!pip install bertopic
!pip install convokit
!pip install langchain
!pip install transformers
!pip install firebase-admin
!pip install transformers
from bertopic import BERTopic
from convokit import Corpus, download
import pandas as pd
import numpy as np
from langchain.llms import HuggingFaceHub
import os
from transformers import pipeline
import firebase_admin
from firebase_admin import credentials, firestore, initialize_app, storage
from transformers import pipeline
import matplotlib.pyplot as plt
cred = credentials.Certificate("/content/ming-527ed-firebase-adminsdk-z38ui-431c72dad9.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
"""# Loading Corpus"""
corpus = Corpus(filename=download("conversations-gone-awry-corpus"))
conversations_df = corpus.get_conversations_dataframe()
utterances_df = corpus.get_utterances_dataframe()
speakers_df = corpus.get_speakers_dataframe()
utterances_df
class EmotionAnalysis:
def __init__(self):
self.classifier = pipeline(task="text-classification", model="SamLowe/roberta-base-go_emotions", top_k=None,device=0)
self.emotion_dict = {
'admiration': '😍',
'amusement': '😄',
'anger': '😡',
'annoyance': '😒',
'approval': '👍',
'caring': '❤️',
'confusion': '😕',
'curiosity': '🤔',
'desire': '😏',
'disappointment': '😞',
'disapproval': '👎',
'disgust': '🤢',
'embarrassment': '😳',
'excitement': '😃',
'fear': '😨',
'gratitude': '🙏',
'grief': '😢',
'joy': '😁',
'love': '❤️',
'nervousness': '😬',
'neutral': '😐',
'optimism': '😊',
'pride': '🏆',
'realization': '💡',
'relief': '😌',
'remorse': '😔',
'sadness': '😢',
'surprise': '😲'
}
def __call__(self, sentences):
#if sentences == "" or sentences == []:
# return
if isinstance(sentences, str):
sentences = [sentences]
for i, sentence in enumerate(sentences):
if len(sentence) > 512:
sentences[i] = sentence[-512:]
model_outputs = self.classifier(sentences)
emotion = model_outputs[0][0]['label']
if emotion == 'neutral':
emotion = model_outputs[0][1]['label']
emoji = self.emotion_dict[emotion]
return model_outputs, emotion, emoji
classifier = EmotionAnalysis()
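# (Hedged example, not in the original notebook) Quick sanity check of the classifier on a
# made-up sentence before running it over whole conversations.
_, sample_emotion, sample_emoji = classifier("I really appreciate your help, thank you!")
print(sample_emotion, sample_emoji)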
def process_conversations(conversations_df, utterances_df, speakers_df):
# Filter and merge dataframes
merged_df = pd.merge(utterances_df, speakers_df, left_on='speaker', right_index=True)
merged_df = pd.merge(merged_df, conversations_df, left_on='conversation_id', right_index=True)
# Process each conversation
convo_records = []
convo_texts = []
convo_speakers = {}
convo_speakers_texts = {}
for convo_id, convo_data in merged_df.groupby('conversation_id'):
# Calculate total personal attacks and average toxicity
total_attacks = convo_data['meta.comment_has_personal_attack'].sum()
avg_toxicity = convo_data['meta.toxicity'].mean()
# Get conversation start time
start_time = convo_data['timestamp'].min()
# Calculate embeddings for the conversation
convo_text = ' '.join(convo_data['text'].tolist())
#embeddings = calculate_embeddings(convo_text)
# Prepare data for Firebase
convo_record = {
'convo_id': convo_id,
'total_personal_attacks': total_attacks,
'average_toxicity': avg_toxicity,
'start_time': start_time,
#'embeddings': embeddings,
# Include additional metadata as needed
}
# Get speakers
speakers = []
speaker_texts = {}
for speaker_id, speaker_data in convo_data.groupby('speaker'):
speaker_text = ' '.join(speaker_data['text'].tolist())
speaker_texts[speaker_id] = speaker_text
a ,speaker_emotion, speaker_emoji = classifier(speaker_text)
speaker_record = {
'speaker_id': speaker_id,
'emotion': speaker_emotion,
'emoji': speaker_emoji,
# Include additional metadata as
}
speakers.append(speaker_record)
#convo_speakers.append(convo_speaker)
convo_speakers_texts[convo_id] = speaker_texts
convo_speakers[convo_id] = speakers
convo_texts.append(convo_text)
convo_records.append(convo_record)
# Save to Firebase
#db.collection('conversations').document(convo_id).set(convo_record)
print("Processed Convo Texts")
return convo_records, convo_texts, convo_speakers, convo_speakers_texts
"""meaningfull labels
similarities for the clusters
emotions model to work on classifying speakers on emotions. then turn the emotions
"""
convo_records, convo_texts, convo_speakers, convo_speakers_text = process_conversations(conversations_df, utterances_df, speakers_df) #.head(1000)
convo_id = convo_records[0]["convo_id"]
merged_df = pd.merge(utterances_df, speakers_df, left_on='speaker', right_index=True)
merged_df = pd.merge(merged_df, conversations_df, left_on='conversation_id', right_index=True)
df = merged_df[merged_df['conversation_id'] == convo_id ].sort_values('timestamp', ascending=True)
df
convo_records[0]["convo_id"]
corpus.get_conversation(convo_records[0]["convo_id"])
corpus.get_conversation("407508250.100949.100949")
convo_speakers['182614870.5054.5054']
convo_speakers_text['204073727.3013.3013']
convo_speakers['211754039.54581.54581']
"""# Do Clustering"""
from sklearn.feature_extraction.text import CountVectorizer
vectorizer_model = CountVectorizer(stop_words="english")
topic_model = BERTopic(embedding_model="all-MiniLM-L6-v2",vectorizer_model=vectorizer_model,nr_topics=100)#calculate_probabilities=True)
topics, probs = topic_model.fit_transform(convo_texts)
new_topics = topic_model.reduce_outliers(convo_texts, topics, strategy="c-tf-idf")
new_topics = topic_model.reduce_outliers(convo_texts, topics, strategy="distributions")
new_topics = topic_model.reduce_outliers(convo_texts, topics, strategy="embeddings")
topic_model.get_topic_info()
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.pyplot as plt
distance_matrix = cosine_similarity(np.array(topic_model.topic_embeddings_)[:, :])
labels = (topic_model.get_topic_info().sort_values("Topic", ascending=True).Name)[1:]
plt.imshow(distance_matrix)
topic_list = topic_model.topic_labels_
topic_list.keys()
"""# Set Up Data for Firebase"""
label_records = []
for i in topic_list.keys():
label_record = {}
label_record['topic'] = i
topic_words = [token[0] for token in topic_model.get_topic(i)]
topic_name = topic_words[0]
for topic_word in topic_words[1:4]:
topic_name += "_" + topic_word
label_record['topic_name'] = topic_name
sim = distance_matrix[i+1]
for j in range(len(sim)):
label_record[str(j-1)] = float(sim[j])
label_records.append(label_record)
for i in range(len(convo_records)):
convo_records[i]['topic'] = topics[i]
topic_words = [token[0] for token in topic_model.get_topic(topics[i])]
topic_name = topic_words[0]
for topic_word in [token[0] for token in topic_model.get_topic(topics[i])][1:4]:
topic_name += "_" + topic_word
convo_records[i]['topic_name'] = topic_name
convo_records[i]['probs_per_topic'] = probs[i]
label_records[6]
convo_records[0]
"""## Filter out conversations by labels to make it easier to send to firebase"""
sorted_conv = {}
for record in label_records:
for conv in convo_records:
if record['topic'] == conv['topic']:
if record['topic'] not in sorted_conv.keys():
sorted_conv[record['topic']] = [conv]
else:
sorted_conv[record['topic']].append(conv)
sorted_conv[-1][0].keys()
import math
for i, record in enumerate(label_records):
sum = 0
count = 0
for convo_record in sorted_conv[i-1]:
if math.isnan(convo_record['average_toxicity']):
continue
count += 1
sum += convo_record['average_toxicity']
if count == 0:
avg = 0
else:
avg = sum/count
label_records[i]['average_toxicity'] = avg
"""## Save to Firebase"""
for i, label_record in enumerate(label_records):
topic = label_record['topic']
topic_id = str(i)
db.collection('topics1').document(topic_id).set(label_record)
for j, convo_record in enumerate(sorted_conv[topic]):
convo_id = convo_record['convo_id']
db.collection('topics1').document(topic_id).collection('convosations').document(convo_id).set(convo_record)
for speaker_record in convo_speakers[convo_id]:
speaker_id = speaker_record['speaker_id']
db.collection('topics1').document(topic_id).collection('convosations').document(convo_id).collection('speakers').document(speaker_id).set(speaker_record)
print('done')
for i, record in enumerate(convo_records):
convo_id = record['convo_id']
db.collection('convo_topics').document(convo_id).set(record) | [] |
2024-01-10 | meekmachine/ming-mind-langchain-backend | final_awry_topic_modeling.py | # -*- coding: utf-8 -*-
"""Final-Awry-Topic-Modeling.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kQhVwHHvPdpENStQfrzWh3SQlIr4Nl6W
"""
!pip install bertopic
!pip install convokit
!pip install langchain
!pip install transformers
!pip install firebase-admin
!pip install transformers
from bertopic import BERTopic
from convokit import Corpus, download
import pandas as pd
import numpy as np
from langchain.llms import HuggingFaceHub
import os
from transformers import pipeline
import firebase_admin
from firebase_admin import credentials, firestore, initialize_app, storage
from transformers import pipeline
import matplotlib.pyplot as plt
cred = credentials.Certificate("/content/ming-527ed-firebase-adminsdk-z38ui-431c72dad9.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
"""# Loading Corpus"""
corpus = Corpus(filename=download("conversations-gone-awry-corpus"))
conversations_df = corpus.get_conversations_dataframe()
utterances_df = corpus.get_utterances_dataframe()
speakers_df = corpus.get_speakers_dataframe()
utterances_df
def process_conversations(conversations_df, utterances_df, speakers_df):
# Filter and merge dataframes
merged_df = pd.merge(utterances_df, speakers_df, left_on='speaker', right_index=True)
merged_df = pd.merge(merged_df, conversations_df, left_on='conversation_id', right_index=True)
# Process each conversation
convo_records = []
convo_texts = []
for convo_id, convo_data in merged_df.groupby('conversation_id'):
# Calculate total personal attacks and average toxicity
total_attacks = convo_data['meta.comment_has_personal_attack'].sum()
avg_toxicity = convo_data['meta.toxicity'].mean()
# Get conversation start time
start_time = convo_data['timestamp'].min()
# Calculate embeddings for the conversation
convo_text = ' '.join(convo_data['text'].tolist())
#embeddings = calculate_embeddings(convo_text)
# Prepare data for Firebase
convo_record = {
'convo_id': convo_id,
'total_personal_attacks': total_attacks,
'average_toxicity': avg_toxicity,
'start_time': start_time,
#'embeddings': embeddings,
# Include additional metadata as needed
}
#convo_speakers.append(convo_speaker)
convo_texts.append(convo_text)
convo_records.append(convo_record)
# Save to Firebase
#db.collection('conversations').document(convo_id).set(convo_record)
print("Processed Convo Texts")
return convo_records, convo_texts
"""meaningfull labels
similarities for the clusters
emotions model to work on classifying speakers on emotions. then turn the emotions
"""
convo_records, convo_texts = process_conversations(conversations_df, utterances_df, speakers_df) #.head(1000)
"""# Do Clustering"""
from sklearn.feature_extraction.text import CountVectorizer
vectorizer_model = CountVectorizer(stop_words="english")
topic_model = BERTopic(embedding_model="all-MiniLM-L6-v2",vectorizer_model=vectorizer_model)#calculate_probabilities=True)
topics, probs = topic_model.fit_transform(convo_texts)
topic_model.get_topic_info()
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.pyplot as plt
distance_matrix = cosine_similarity(np.array(topic_model.topic_embeddings_)[:, :])
labels = (topic_model.get_topic_info().sort_values("Topic", ascending=True).Name)[1:]
plt.imshow(distance_matrix)
topic_list = topic_model.topic_labels_
topic_list.keys()
"""# Set Up Data for Firebase"""
label_records = []
for i in topic_list.keys():
label_record = {}
label_record['topic'] = i
topic_words = [token[0] for token in topic_model.get_topic(i)]
topic_name = topic_words[0]
for topic_word in topic_words[1:4]:
topic_name += "_" + topic_word
label_record['topic_name'] = topic_name
sim = distance_matrix[i+1]
for j in range(len(sim)):
label_record[str(j-1)] = float(sim[j])
label_records.append(label_record)
for i in range(len(convo_records)):
convo_records[i]['topic'] = topics[i]
topic_words = [token[0] for token in topic_model.get_topic(topics[i])]
topic_name = topic_words[0]
for topic_word in [token[0] for token in topic_model.get_topic(topics[i])][1:4]:
topic_name += "_" + topic_word
convo_records[i]['topic_name'] = topic_name
convo_records[i]['probs_per_topic'] = probs[i]
label_records[6]
convo_records[0]
"""## Filter out conversations by labels to make it easier to send to firebase"""
sorted_conv = {}
for record in label_records:
for conv in convo_records:
if record['topic'] == conv['topic']:
if record['topic'] not in sorted_conv.keys():
sorted_conv[record['topic']] = [conv]
else:
sorted_conv[record['topic']].append(conv)
sorted_conv[2][1]
"""## Save to Firebase"""
for i, label_record in enumerate(label_records):
topic = label_record['topic']
topic_id = str(i)
db.collection('topics').document(topic_id).set(label_record)
for j, convo_record in enumerate(sorted_conv[topic]):
convo_id = convo_record['convo_id']
db.collection('topics').document(topic_id).collection('convosations').document(convo_id).set(convo_record)
print('done')
for i, record in enumerate(convo_records):
convo_id = record['convo_id']
db.collection('convo_topics').document(convo_id).set(record) | [] |
2024-01-10 | meekmachine/ming-mind-langchain-backend | ming.py | import pandas as pd
from langchain.llms import HuggingFaceHub
from convokit import Corpus, download
from get_convo import get_convo
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
# Load a corpus
corpus = Corpus(filename=download("conversations-gone-awry-corpus"))
# Initialize the LLM
llm = HuggingFaceHub(repo_id="gpt2", task="text-generation")
# Get a random conversation
convo_df = get_convo(min_messages=5, has_personal_attack=True)
# Create a text representation of the conversation
convo_text = '\n'.join([f'{row.speaker}: {row.text}' for _, row in convo_df.sort_values('timestamp').iterrows()])
# Define the prompts and their corresponding examples
prompts = [
"Is the conversation toxic?",
"Is the conversation personal attack?",
"Is the conversation constructive?"
]
examples = [
"The conversation is toxic if it contains hate speech, bullying, or other offensive language. (Toxic: Yes/No)",
"The conversation is personal attack if it contains insults or attacks directed at an individual. (Personal Attack: Yes/No)",
"The conversation is constructive if it is respectful and focused on the topic at hand. (Constructive: Yes/No)"
]
# Define a method to test the LLM with a prompt
def test_toxicity(convo_text):
prompt = prompts[0]
example = examples[0]
text = f"{prompt}\n{convo_text}\n{example}"
result = llm(text)
print(f"Prompt: {prompt}, Result: {result}")
return "Toxic" in result
def test_personal_attack(convo_text):
prompt = prompts[1]
example = examples[1]
text = f"{prompt}\n{convo_text}\n{example}"
result = llm(text)
print(f"Prompt: {prompt}, Result: {result}")
return "Personal Attack" in result
def test_constructive(convo_text):
prompt = prompts[2]
example = examples[2]
text = f"{prompt}\n{convo_text}\n{example}"
result = llm(text)
print(f"Prompt: {prompt}, Result: {result}")
return "Constructive" in result
# Test the LLM on the conversation data with each prompt
is_toxic = test_toxicity(convo_text)
is_personal_attack = test_personal_attack(convo_text)
is_constructive = test_constructive(convo_text)
print(f"Is Toxic: {is_toxic}, Is Personal Attack: {is_personal_attack}, Is Constructive: {is_constructive}") | [
"['Is the conversation toxic?', 'Is the conversation personal attack?', 'Is the conversation constructive?']"
] |
2024-01-10 | willchenko7/coeus | src~condenseQuestion.py | '''
Goal:
convert the question in natural language into a structured format that will be used by the math solver
Input:
question in natural language
model - the model to use to generate the answer. Default is gpt-4-0314
Output:
structured format of the question
total tokens used
Example:
Input: "If a car is traveling at 60 miles per hour, how many miles will it travel in 2 hours?"
Output: "<start>desired_variable=distance_travelled;miles_per_hour=60;time_in_hours=2;distance_travelled=miles_per_hour*time_in_hours<stop>"
'''
import sys
import os
import openai
from config import openai_key
#set openai api key
openai.api_key = openai_key
def condenseQuestion(question,model="gpt-4-0314"):
#define the prompt
pre_prompt = '''
I am about to provide a question in natural language and I want you to isolate the variables and their values, and any relevant formulas.
You response will be the only info given to the next model, so be as explicit and consistent as possible.
If it is not known, just say variable=?? or provide the formula to calculate it.
Include any formulas that are implicitly defined in the question, but only used variables that are already defined.
Once a variable is used, be sure to reference it by the same name throughout the question. For example, do not refer to a variable as "time" and then later as "t".
Only include 1 equal sign per formula.
Include all units, with a space between the value and the unit. Do not include any other spaces in the formula.
Do not include any numbers in the variable names. ie do not say x1 say x_one.
Provide this in plain csv format with a start and stop token.
Explicitly state which variable is being asked to solve for, like this: desired_variable=variable_one. Do not use any numbers in the variable name.
Be as explicit as possible, do not abbreviate any variable name. For example, instead of saying area, say area_of_square.
Do not define anything in the variable name what could be a formula. For ex, instead of saying time_when_x_equals_a=??, say time=?? and then include x=a as another formula.
Only provide the csv string, no need for anything else.
example: "<start>desired_variable=width_of_square;length_of_square=5;base_area_of_pyramid=7;width_of_square=??<stop>".
another example: "<start>desired_variable=total_meters_run_per_week;sprints_per_day=3;days_per_week=3;distance_per_sprint=60;total_meters_run_per_week=distance_per_sprint*days_per_week*sprints_per_day<stop>"
ok, here is the question:
'''
s_prompt = pre_prompt + '"' + question + '"'
def get_response(s_model,message):
'''
send request to openai to generate the answer
'''
response = openai.chat.completions.create(
model = s_model,
temperature = 1,
messages = [
{"role": "user", "content": message}
]
)
return response.choices[0].message.content, response.usage.total_tokens
# generate the answer
try:
answer,total_tokens = get_response(model,s_prompt)
except:
raise Exception(f"Error generating answer. Please confirm that you have access to {model} through your openai api. You can change the model used in src/condense.py.")
#remove \n from answer
answer = answer.replace("\n", "")
return answer, total_tokens
if __name__ == "__main__":
question = "If a car is traveling at 60 miles per hour, how many miles will it travel in 2 hours?"
answer, total_tokens = condenseQuestion(question)
print(answer) | [
"\n I am about to provide a question in natural language and I want you to isolate the variables and their values, and any relevant formulas. \n You response will be the only info given to the next model, so be as explicit and consistent as possible.\n If it is not known, just say variable=?? or provide the formula to calculate it. \n Include any formulas that are implicitly defined in the question, but only used variables that are already defined.\n Once a variable is used, be sure to reference it by the same name throughout the question. For example, do not refer to a variable as \"time\" and then later as \"t\".\n Only include 1 equal sign per formula.\n Include all units, with a space between the value and the unit. Do not include any other spaces in the formula.\n Do not include any numbers in the variable names. ie do not say x1 say x_one.\n Provide this in plain csv format with a start and stop token. \n Explicitly state which variable is being asked to solve for, like this: desired_variable=variable_one. Do not use any numbers in the variable name.\n Be as explicit as possible, do not abbreviate any variable name. For example, instead of saying area, say area_of_square.\n Do not define anything in the variable name what could be a formula. For ex, instead of saying time_when_x_equals_a=??, say time=?? and then include x=a as another formula.\n Only provide the csv string, no need for anything else.\n example: \"<start>desired_variable=width_of_square;length_of_square=5;base_area_of_pyramid=7;width_of_square=??<stop>\". \n another example: \"<start>desired_variable=total_meters_run_per_week;sprints_per_day=3;days_per_week=3;distance_per_sprint=60;total_meters_run_per_week=distance_per_sprint*days_per_week*sprints_per_day<stop>\"\n ok, here is the question: \n ",
"\n I am about to provide a question in natural language and I want you to isolate the variables and their values, and any relevant formulas. \n You response will be the only info given to the next model, so be as explicit and consistent as possible.\n If it is not known, just say variable=?? or provide the formula to calculate it. \n Include any formulas that are implicitly defined in the question, but only used variables that are already defined.\n Once a variable is used, be sure to reference it by the same name throughout the question. For example, do not refer to a variable as \"time\" and then later as \"t\".\n Only include 1 equal sign per formula.\n Include all units, with a space between the value and the unit. Do not include any other spaces in the formula.\n Do not include any numbers in the variable names. ie do not say x1 say x_one.\n Provide this in plain csv format with a start and stop token. \n Explicitly state which variable is being asked to solve for, like this: desired_variable=variable_one. Do not use any numbers in the variable name.\n Be as explicit as possible, do not abbreviate any variable name. For example, instead of saying area, say area_of_square.\n Do not define anything in the variable name what could be a formula. For ex, instead of saying time_when_x_equals_a=??, say time=?? and then include x=a as another formula.\n Only provide the csv string, no need for anything else.\n example: \"<start>desired_variable=width_of_square;length_of_square=5;base_area_of_pyramid=7;width_of_square=??<stop>\". \n another example: \"<start>desired_variable=total_meters_run_per_week;sprints_per_day=3;days_per_week=3;distance_per_sprint=60;total_meters_run_per_week=distance_per_sprint*days_per_week*sprints_per_day<stop>\"\n ok, here is the question: \n \"If a car is traveling at 60 miles per hour, how many miles will it travel in 2 hours?\""
] |
2024-01-10 | zhiao777774/kbqa-llm | src~text_to_kg.py | import os
import openai
from tenacity import retry, stop_after_attempt, wait_random_exponential
from tree_of_thoughts import OpenAILanguageModel, MonteCarloTreeofThoughts
from dotenv import load_dotenv
load_dotenv()
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def chatcompletion_with_backoff(**kwargs) -> openai.ChatCompletion:
return openai.ChatCompletion.create(**kwargs)
def text_to_kg(prompt: str, text: str, model='gpt-3.5-turbo') -> str:
"""Function of Converting text to KG
Args:
prompt (str): instruction of the prompt
text (str): input text
model (str, optional): model name. Defaults to 'gpt-3.5-turbo'.
Returns:
str: knowledge graph
"""
openai.api_key = os.getenv('OPENAI_API_KEY')
messages = [{'role': 'user', 'content': prompt.replace('{prompt}', text)}]
completion = chatcompletion_with_backoff(model=model,
max_tokens=300,
temperature=0,
messages=messages)
res = completion.choices[0].message.content
return res
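# (Hedged usage sketch, not part of the original module) Example of calling the plain text_to_kg
# helper; the prompt template and sentence below are hypothetical placeholders, while the real
# template lives in src/prompts/text_to_kg.prompt.
def example_text_to_kg():
    sample_prompt = 'Extract (subject, relation, object) triples from the text: "{prompt}"'
    sample_text = 'Marie Curie won the Nobel Prize in Physics in 1903.'
    return text_to_kg(sample_prompt, sample_text)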
def text_to_kg_with_tot(prompt: str,
text: str,
api_model='gpt-3.5-turbo') -> str:
openai.api_key = os.getenv('OPENAI_API_KEY')
model = OpenAILanguageModel(api_key=os.getenv('OPENAI_API_KEY'), api_model=api_model)
tree_of_thoughts = MonteCarloTreeofThoughts(model)
num_thoughts = 1
max_steps = 3
max_states = 4
pruning_threshold = 0.5
solution = tree_of_thoughts.solve(
initial_prompt=prompt.replace('{prompt}', text),
num_thoughts=num_thoughts,
max_steps=max_steps,
max_states=max_states,
pruning_threshold=pruning_threshold,
# sleep_time=sleep_time
)
print(f"Solution: {solution}")
return solution
if __name__ == '__main__':
import time
import pandas as pd
from pprint import pprint
with open('src/prompts/text_to_kg.prompt', 'r') as f:
prompt = f.read()
data = pd.read_json('src/data/KQAPro/train.json')
data = data.iloc[:10]
kg_triplets = []
model = 'gpt-4'
for i, item in data.iterrows():
msg = item['question']
print(i, '=' * 30)
pprint(msg)
print('->')
res = text_to_kg_with_tot(prompt, msg, model)
kg_triplets.append(str(res))
pprint(res)
print()
time.sleep(0.2)
data['kg_triplets'] = kg_triplets
data.to_json(f'src/output/KQAPro_train_kg_{model}_tot.json', orient='records') | [] |
2024-01-10 | alphacrash/Chat-AI | chat-address.py | # RUN: python chat.py
import json
import openai
import requests
import prompts
# Values
key_file = "values/key.json"
urls_file = "values/urls.json"
# Key
with open(key_file, "r") as file:
key_data = json.load(file)
with open(urls_file, "r") as file:
urls_data = json.load(file)
openai.api_key = key_data["key"]
# API Endpoints
get_url = urls_data["getAddresses"]
create_url = urls_data["createAddress"]
update_url = urls_data["updateAddress"]
delete_url = urls_data["deleteAddress"]
# Main
class AddressChat:
CONVERSATION_AGREE_PROMPT = """OK"""
CONVERSATION_START_PROMPT = """Great! Start the Conversation."""
CONVERSATION_PROMPT = """You are conversation assistant that manages addresses of consumers. Your task is to follow the conversation flow to assist the consumer.
###
Conversation Flow:
1. Greet the consumer
2. Check if they need any assistance.
3. Answer their requests
4. Greet the consumer and end the conversation by responding '[END_OF_CONVERSATION]'
###
"""
INTENT_DETECTION_SETUP_PROMPT = """Your task is to classify the consumer's intent from the below `Conversation` into following `Intent Categories`. Response should follow the `Output Format`.
Conversation:
{conversation}
Intent Categories:
GREETING: consumer is greeting the chatbot.
GET_ADDRESSES: consumer's request to view his saved addresses.
CREATE_ADDRESS: consumer's request to create a new address.
UPDATE_ADDRESS: consumer's request to update his saved address.
DELETE_ADDRESS: consumer's request to remove/delete his saved address.
OUT_OF_CONTEXT: consumer's query is irrelevant and cannot be classified in the above intents.
Output Format: <PREDICTED_INTENT>
"""
ENQUIRY_DETAILS_PROMPT = """Your task is to extract the following `Entities` from the below `Conversation` between an assistant and a consumer. Response should follow the `Output Format`. If some entities are missing provide NULL in the `Output Format`.
Conversation:
{conversation}
Entities:
CONSUMER_ID: This is the id of the consumer.
STREET: This is the street name of the address.
CITY: This is the city name of the address.
STATE: This is the state name of the address.
ZIP_CODE: This is the zip code of the address.
ADDRESS_TYPE: This is the type of address. It can be either 'Home' or 'Mail'.
Output Format: {{'CONSUMER_ID': <Consumer ID in strings>, 'STREET': <Street name in strings>, 'CITY': <City name in strings>, 'STATE': <State name in strings>, 'ZIP_CODE': <Zip code in strings>, 'ADDRESS_TYPE': <Address type in strings>}}
"""
def intent_detection(self, conversation):
chat_ml = [
{"role": "user", "content": self.INTENT_DETECTION_SETUP_PROMPT.format(conversation=conversation)}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=chat_ml,
temperature=0)
return response['choices'][0]['message']['content'].strip(" \n'")
def enquiry_details(self, conversation):
chat_ml = [
{"role": "user", "content": self.ENQUIRY_DETAILS_PROMPT.format(conversation=conversation)}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=chat_ml,
temperature=0)
return response['choices'][0]['message']['content'].strip(" \n")
def conversation_chat(self):
conversation = ""
end_flag = False
chatml_messages = [
{"role": "user", "content": self.CONVERSATION_PROMPT},
{"role": "assistant", "content": self.CONVERSATION_AGREE_PROMPT},
{"role": "user", "content": self.CONVERSATION_START_PROMPT}
]
while True:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=chatml_messages
)
agent_response = response['choices'][0]['message']['content'].strip(" \n")
if "END_OF_CONVERSATION" in agent_response:
print("Assistant: Thank you for connecting with us. Have a nice day!")
break
elif end_flag==True:
print("Assistant: {}".format(agent_response))
print("Assistant: Thank you for connecting with us. Have a nice day!")
break
print("Assistant: {}".format(agent_response))
chatml_messages.append({"role": "assistant", "content": agent_response})
conversation += "Assistant: {}\n".format(agent_response)
consumer_response = input("Consumer: ")
if consumer_response == "/end":
break
chatml_messages.append({"role": "user", "content": consumer_response})
conversation += "Consumer: {}\n".format(consumer_response)
# Classify the intent
intent = self.intent_detection(conversation)
# print("Intent: {}".format(intent))
if 'OUT_OF_CONTEXT' in intent:
chatml_messages.append({"role": "user", "content": "Politely say to consumer to stay on the topic not to diverge."})
elif 'GREETING' in intent:
chatml_messages.append({"role": "user", "content": "Greet the consumer and ask how you can help them."})
elif 'GET_ADDRESSES' in intent:
entities = self.enquiry_details(conversation)
entities = entities.split(",")
consumer_id = entities[0].split(":")[-1].strip(" '}{")
if consumer_id.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for their ID"})
else:
response = requests.get(get_url.replace("consumerId", consumer_id))
resp_json = response.json()
if response.status_code == 200:
chatml_messages.append({"role": "user", "content": "Provide the details in natural language, don't display in json format to the consumer and mention no addresses if not found:\n{}".format(str(resp_json))})
end_flag = True
else:
chatml_messages.append({"role": "user", "content": "Some invalid data is provided. Provide the details to the consumer as depicted in json in natural language, don't display in json format\n{}".format(str(resp_json))})
end_flag = True
elif 'CREATE_ADDRESS' in intent:
entities = self.enquiry_details(conversation)
entities = entities.split(",")
consumer_id = entities[0].split(":")[-1].strip(" '}{")
street = entities[1].split(":")[-1].strip(" '}{")
city = entities[2].split(":")[-1].strip(" '}{")
state = entities[3].split(":")[-1].strip(" '}{")
zip_code = entities[4].split(":")[-1].strip(" '}{")
address_type = entities[5].split(":")[-1].strip(" '}{")
if consumer_id.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for their ID"})
elif street.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for street name"})
elif city.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for city name"})
elif state.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for state name"})
elif zip_code.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for zip code"})
elif address_type.upper() == "NULL" or address_type.upper() not in ["HOME", "MAIL"]:
chatml_messages.append({"role": "user", "content": "Ask the consumer for address type. It can be either 'Home' or 'Mail'"})
else:
data = {
"consumerId": consumer_id,
"street": street,
"city": city,
"state": state,
"zipCode": zip_code,
"addressType": address_type
}
response = requests.post(create_url.replace("consumerId", consumer_id), json=data)
resp_json = response.json()
if response.status_code == 200:
response = requests.get(get_url.replace("consumerId", consumer_id))
resp_json = response.json()
chatml_messages.append({"role": "user", "content": "Inform that address is created and display created details in natural language, not in json:\n{}".format(str(resp_json))})
end_flag = True
else:
chatml_messages.append({"role": "user", "content": "Some invalid data is provided by the consumer. Provide the details to the consumer in natural language, don't display json:\n{}".format(str(resp_json))})
end_flag = True
elif 'UPDATE_ADDRESS' in intent:
entities = self.enquiry_details(conversation)
entities = entities.split(",")
consumer_id = entities[0].split(":")[-1].strip(" '}{")
street = entities[1].split(":")[-1].strip(" '}{")
city = entities[2].split(":")[-1].strip(" '}{")
state = entities[3].split(":")[-1].strip(" '}{")
zip_code = entities[4].split(":")[-1].strip(" '}{")
address_type = entities[5].split(":")[-1].strip(" '}{")
if consumer_id.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for their ID"})
elif street.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for street name"})
elif city.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for city name"})
elif state.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for state name"})
elif zip_code.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for zip code"})
elif address_type.upper() == "NULL" or address_type.upper() not in ["HOME", "MAIL"]:
chatml_messages.append({"role": "user", "content": "Ask the consumer for address type. It can be either 'Home' or 'Mail'"})
else:
data = {
"consumerId": consumer_id,
"street": street,
"city": city,
"state": state,
"zipCode": zip_code,
"addressType": address_type
}
                    response = requests.put(update_url.replace("consumerId", consumer_id), json=data)
                    resp_json = response.json()
if response.status_code == 200:
response = requests.get(get_url.replace("consumerId", consumer_id))
resp_json = response.json()
chatml_messages.append({"role": "user", "content": "Inform that address is updated and display updated details in natural language, not in json:\n{}".format(str(resp_json))})
end_flag = True
else:
chatml_messages.append({"role": "user", "content": "Some invalid data is provided by the consumer. Provide the details to the consumer in natural language, don't display json:\n{}".format(str(resp_json))})
end_flag = True
elif 'DELETE_ADDRESS' in intent:
entities = self.enquiry_details(conversation)
entities = entities.split(",")
consumer_id = entities[0].split(":")[-1].strip(" '}{")
address_type = entities[5].split(":")[-1].strip(" '}{")
if consumer_id.upper() == "NULL":
chatml_messages.append({"role": "user", "content": "Ask the consumer for their ID"})
elif address_type.upper() == "NULL" or address_type.upper() not in ["HOME", "MAIL"]:
chatml_messages.append({"role": "user", "content": "Ask the consumer for address type. It can be either 'Home' or 'Mail'"})
else:
response = requests.delete(delete_url.replace("consumerId", consumer_id).replace("addressType", address_type))
if response.status_code == 200:
chatml_messages.append({"role": "user", "content": "Inform that book is deleted"})
end_flag = True
else:
chatml_messages.append({"role": "user", "content": "Some invalid data is provided by the consumer."})
end_flag = True
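# (Hedged helper sketch, not in the original file) The conversation loop above parses the
# enquiry_details output with repeated split/strip calls; a standalone helper like this could
# centralize that parsing. Shown for illustration only and not wired into the class.
def parse_entities(entity_string):
    parsed = {}
    for part in entity_string.split(","):
        key, _, value = part.partition(":")
        parsed[key.strip(" '}{")] = value.strip(" '}{")
    return parsed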
if __name__ == "__main__":
AC = AddressChat()
AC.conversation_chat() | [
"Ask the consumer for city name",
"Your task is to classify the consumer's intent from the below `Conversation` into following `Intent Categories`. Response should follow the `Output Format`.\n\n Conversation:\n {conversation}\n\n Intent Categories:\n GREETING: consumer is greeting the chatbot.\n GET_ADDRESSES: consumer's request to view his saved addresses.\n CREATE_ADDRESS: consumer's request to create a new address.\n UPDATE_ADDRESS: consumer's request to update his saved address.\n DELETE_ADDRESS: consumer's request to remove/delete his saved address. \n OUT_OF_CONTEXT: consumer's query is irrelevant and cannot be classified in the above intents.\n\n Output Format: <PREDICTED_INTENT>\n ",
"Your task is to extract the following `Entities` from the below `Conversation` between an assistant and a consumer. Response should follow the `Output Format`. If some entities are missing provide NULL in the `Output Format`.\n\n Conversation:\n {conversation}\n\n Entities:\n CONSUMER_ID: This is the id of the consumer.\n STREET: This is the street name of the address.\n CITY: This is the city name of the address.\n STATE: This is the state name of the address.\n ZIP_CODE: This is the zip code of the address.\n ADDRESS_TYPE: This is the type of address. It can be either 'Home' or 'Mail'.\n\n Output Format: {{'CONSUMER_ID': <Consumer ID in strings>, 'STREET': <Street name in strings>, 'CITY': <City name in strings>, 'STATE': <State name in strings>, 'ZIP_CODE': <Zip code in strings>, 'ADDRESS_TYPE': <Address type in strings>}}\n ",
"Ask the consumer for street name",
"Inform that address is created and display created details in natural language, not in json:\nPLACEHOLDER",
"OK",
"You are conversation assistant that manages addresses of consumers. Your task is to follow the conversation flow to assist the consumer.\n \n ###\n Conversation Flow:\n 1. Greet the consumer\n 2. Check if they need any assistance.\n 3. Answer their requests\n 4. Greet the consumer and end the conversation by responding '[END_OF_CONVERSATION]'\n ###\n\n ",
"Ask the consumer for address type. It can be either 'Home' or 'Mail'",
"Ask the consumer for state name",
"Greet the consumer and ask how you can help them.",
"Ask the consumer for their ID",
"Some invalid data is provided by the consumer.",
"Great! Start the Conversation.",
"Ask the consumer for zip code",
"Politely say to consumer to stay on the topic not to diverge.",
"Inform that address is updated and display updated details in natural language, not in json:\nPLACEHOLDER",
"Inform that book is deleted",
"Some invalid data is provided. Provide the details to the consumer as depicted in json in natural language, don't display in json format\nPLACEHOLDER",
"Provide the details in natural language, don't display in json format to the consumer and mention no addresses if not found:\nPLACEHOLDER",
"[END_OF_CONVERSATION]",
"Some invalid data is provided by the consumer. Provide the details to the consumer in natural language, don't display json:\nPLACEHOLDER"
] |
2024-01-10 | Ryguy-1/pixiol-generator | src~llm_generator~in_out.py | from abc import ABC, abstractmethod
from langchain_core.messages import HumanMessage, SystemMessage
from langchain.llms.base import BaseLLM
from langchain.chat_models.base import BaseChatModel
from langchain.llms.ollama import Ollama
from langchain.chat_models.openai import ChatOpenAI
from typing import Dict, List, Union, Any
import subprocess
import json
import re
import os
class InOut(ABC):
"""InOut class for interacting with LLMs."""
PROMPTS_DIR = os.path.join(os.path.dirname(__file__), "prompts")
def __init__(self, llm: Union[BaseLLM, BaseChatModel]) -> None:
"""Initializes InOut class."""
self._llm = llm
def generate_random_article_idea(self, category_injection: str) -> str:
"""
Generates a random article idea.
Args:
category_injection (str): Category to inject into prompt for latent space activation.
Returns:
str: Random article idea.
"""
messages = [
SystemMessage(content=self._load_prompt("generate_random_article_idea")),
HumanMessage(
content=f"Request: 'Please give me one idea exactly', Broad Category Idea: '{category_injection}'"
),
]
while True:
try:
generated_text = self._parse_single_line_output(
self._invoke_model(input=messages)
)
assert (
len(generated_text) > 0
), "Output is empty. Please try again with a different category."
return generated_text
except Exception as e:
print(str(e))
def write_news_article(
self, article_idea: str, category_constraint: List[str]
) -> Dict:
"""
Writes a news article.
Args:
article_idea (str): Idea for article.
category_constraint (List[str]): List of categories to constrain output selection to.
Returns:
Dict: News article.
Return Format:
{
"title": "<Long, Specific, and SEO Optimized Title>",
"category_list": <["category_1", "category_2"]>,
"header_img_description": "<hyper-detailed description of image to use for header image>",
"body": "<## Body in Markdown\\n\\nShould use expressive markdown syntax with proper \\"escape\\" characters>"
}
"""
messages = [
SystemMessage(content=self._load_prompt("write_news_article")),
HumanMessage(
content=f"Article Idea: '{article_idea}', Categories to Choose From: {category_constraint}"
),
]
while True:
try:
generated_text = self._invoke_model(input=messages)
loaded_json = self._parse_json_output(generated_text)
self._raise_for_bad_json_output(
input_json=loaded_json,
expected_keys_to_type={
"title": str,
"category_list": list,
"header_img_description": str,
"body": str,
},
)
assert len(loaded_json["title"]) > 0, "Error: Title is empty."
assert len(loaded_json["title"]) < 150, "Error: Title is too long."
assert len(loaded_json["category_list"]) in list(
range(1, 4)
), "Error: Wrong number of categories."
assert all(
category in category_constraint
for category in loaded_json["category_list"]
), "Error: Category list must be a subset of the category constraint."
assert (
len(loaded_json["header_img_description"]) > 0
), "Error: Header image description is empty."
assert len(loaded_json["body"]) > 0, "Error: Body is empty."
return loaded_json
except Exception as e:
print(str(e))
def _invoke_model(self, input: Any) -> str:
"""Abstracts LLM / ChatModel Call"""
if isinstance(self._llm, BaseLLM):
return self._llm.invoke(input=input)
if isinstance(self._llm, BaseChatModel):
return self._llm.invoke(input=input).content
def _load_prompt(self, prompt_name: str) -> str:
"""Loads prompt from file."""
with open(f"{self.PROMPTS_DIR}/{prompt_name}.txt", "r") as f:
return f.read()
@staticmethod
def _parse_single_line_output(input: str) -> str:
"""
Parses single line output from LLM.
Args:
input (str): Input string to clean.
Returns:
str: Cleaned string.
"""
input = input.replace("<|im_end|>", "")
input = input.replace("\n", " ")
input = input.replace('"', "")
input = input.replace(".", "")
input = input.strip()
return input
@staticmethod
def _parse_json_output(json_string: str) -> Dict:
"""
Parses JSON output from LLM.
Args:
json_string (str): JSON string to clean.
Returns:
Dict: Cleaned JSON.
Raises:
JSONDecodeError: If JSON string is not valid.
"""
json_string = json_string.replace("<|im_end|>", "")
removed_internal_newlines = re.sub(
r'\"([^"]*)\"', # match anything in quotes including quotes
lambda match: match.group(0).replace("\n", "\\n"),
json_string,
)
return json.loads(removed_internal_newlines)
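    # Example of what _parse_json_output() repairs (the JSON content is illustrative):
    # a raw completion such as
    #   {"title": "T", "body": "Line 1
    #   Line 2"}
    # is not valid JSON because of the literal newline inside the quoted "body" value.
    # The regex above rewrites newlines found inside double-quoted spans as the
    # two-character escape \n, after which json.loads() succeeds.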
@staticmethod
def _raise_for_bad_json_output(
input_json: Dict, expected_keys_to_type: Dict
) -> None:
"""
Raises for bad JSON format.
Args:
input_json (Dict): Loaded JSON.
expected_keys_to_type (Dict): Expected keys to type mapping.
Raises:
AssertionError: If JSON is not valid.
"""
assert isinstance(input_json, dict), "Output is not valid JSON."
assert set(input_json.keys()) == set(
expected_keys_to_type.keys()
), "Output keys do not match expected keys."
for input_key, input_value in input_json.items():
assert (
type(input_value) == expected_keys_to_type[input_key]
), f"Value for key '{input_key}' is not of expected type '{expected_keys_to_type[input_key]}'."
@staticmethod
@abstractmethod
def kill() -> None:
"""Kills LLM runner process."""
pass
class OllamaInOut(InOut):
def __init__(self, model_name: str, temperature: int):
print(f"Pulling Ollama model: {model_name}")
subprocess.run(["ollama", "pull", model_name], check=True)
print(f"Running Ollama model: {model_name}")
llm = Ollama(model=model_name, temperature=temperature)
super().__init__(llm)
@staticmethod
def kill():
try:
result = subprocess.run(
["pgrep", "-f", "ollama-runner"], capture_output=True, text=True
)
pid = result.stdout.strip()
if pid:
subprocess.run(["kill", "-9", pid], check=True)
print(f"Ollama runner with PID {pid} has been successfully killed.")
else:
print("Ollama runner process not found.")
except subprocess.CalledProcessError as e:
print(f"Error: {e}")
class OpenAIInOut(InOut):
def __init__(self, model_name: str, temperature: int, api_key: str):
os.environ["OPENAI_API_KEY"] = api_key
llm = ChatOpenAI(model_name=model_name, temperature=temperature)
super().__init__(llm)
@staticmethod
def kill():
pass # not local process
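# Minimal usage sketch (illustrative only - the model name, category and category list
# below are placeholders, not values taken from this module):
#   generator = OllamaInOut(model_name="llama2", temperature=1)
#   idea = generator.generate_random_article_idea(category_injection="Technology")
#   article = generator.write_news_article(idea, ["Technology", "Science"])
#   OllamaInOut.kill()  # stops the local ollama-runner process when finished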
| [
"generate_random_article_idea",
"write_news_article",
"Article Idea: 'PLACEHOLDER', Categories to Choose From: PLACEHOLDER",
"Request: 'Please give me one idea exactly', Broad Category Idea: 'PLACEHOLDER'"
] |
2024-01-10 | francoismaze/topodiff | topodiff~logger.py | """
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
Adapted to TopoDiff
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
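# Note on CSVOutputFormat.writekvs above: when a key appears that earlier rows did not
# have, the whole file is rewritten - the header row is extended with the new columns
# and every existing data row is padded with empty fields - before the new row is
# written in self.keys order (missing keys are left blank).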
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
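    # logkv_mean() keeps a running arithmetic mean: after n calls for the same key,
    # name2val[key] equals the mean of the n values, e.g. logging 1, 2, 3 in sequence
    # yields 1.0, then 1.5, then 2.0 (the example numbers are only an illustration).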
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
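# Worked example for mpi_weighted_mean() (made-up numbers): if rank 0 holds
# {"loss": (1.0, 2)} and rank 1 holds {"loss": (3.0, 2)}, rank 0 returns
# {"loss": (1.0 * 2 + 3.0 * 2) / 4} == {"loss": 2.0}, while every other rank returns {}.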
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("TOPODIFF_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("topodiff-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("TOPODIFF_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("TOPODIFF_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
| [] |
2024-01-10 | shirowanisan/espnet | espnet2~train~preprocessor.py | import json
import logging
import random
import re
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Collection, Dict, Iterable, List, Union
import numpy as np
import scipy.signal
import soundfile
from typeguard import check_argument_types, check_return_type
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.cleaner import TextCleaner
from espnet2.text.token_id_converter import TokenIDConverter
from espnet2.text.whisper_token_id_converter import OpenAIWhisperTokenIDConverter
class AbsPreprocessor(ABC):
def __init__(self, train: bool):
self.train = train
@abstractmethod
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
raise NotImplementedError
def framing(
x,
frame_length: int = 512,
frame_shift: int = 256,
centered: bool = True,
padded: bool = True,
):
if x.size == 0:
raise ValueError("Input array size is zero")
if frame_length < 1:
raise ValueError("frame_length must be a positive integer")
if frame_length > x.shape[-1]:
raise ValueError("frame_length is greater than input length")
if 0 >= frame_shift:
raise ValueError("frame_shift must be greater than 0")
if centered:
pad_shape = [(0, 0) for _ in range(x.ndim - 1)] + [
(frame_length // 2, frame_length // 2)
]
x = np.pad(x, pad_shape, mode="constant", constant_values=0)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = frame_length + (nseg-1)*nstep,
# with integer nseg
nadd = (-(x.shape[-1] - frame_length) % frame_shift) % frame_length
pad_shape = [(0, 0) for _ in range(x.ndim - 1)] + [(0, nadd)]
x = np.pad(x, pad_shape, mode="constant", constant_values=0)
# Created strided array of data segments
if frame_length == 1 and frame_length == frame_shift:
result = x[..., None]
else:
shape = x.shape[:-1] + (
(x.shape[-1] - frame_length) // frame_shift + 1,
frame_length,
)
strides = x.strides[:-1] + (frame_shift * x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
return result
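# Shape sketch for framing() above (values are just an illustration): with the default
# centered=True/padded=True, a 1-D input of 10 samples with frame_length=4 and
# frame_shift=2 is first padded to 14 samples and then viewed as overlapping frames:
#   framing(np.arange(10.0), frame_length=4, frame_shift=2).shape == (6, 4)
# i.e. 6 frames of 4 samples each, hopping 2 samples at a time, built as a strided
# view of the padded array.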
def detect_non_silence(
x: np.ndarray,
threshold: float = 0.01,
frame_length: int = 1024,
frame_shift: int = 512,
window: str = "boxcar",
) -> np.ndarray:
"""Power based voice activity detection.
Args:
x: (Channel, Time)
>>> x = np.random.randn(1000)
>>> detect = detect_non_silence(x)
>>> assert x.shape == detect.shape
>>> assert detect.dtype == np.bool
"""
if x.shape[-1] < frame_length:
return np.full(x.shape, fill_value=True, dtype=np.bool)
if x.dtype.kind == "i":
x = x.astype(np.float64)
# framed_w: (C, T, F)
framed_w = framing(
x,
frame_length=frame_length,
frame_shift=frame_shift,
centered=False,
padded=True,
)
framed_w *= scipy.signal.get_window(window, frame_length).astype(framed_w.dtype)
# power: (C, T)
power = (framed_w**2).mean(axis=-1)
# mean_power: (C, 1)
mean_power = np.mean(power, axis=-1, keepdims=True)
if np.all(mean_power == 0):
return np.full(x.shape, fill_value=True, dtype=np.bool)
# detect_frames: (C, T)
detect_frames = power / mean_power > threshold
# detects: (C, T, F)
detects = np.broadcast_to(
detect_frames[..., None], detect_frames.shape + (frame_shift,)
)
# detects: (C, TF)
detects = detects.reshape(*detect_frames.shape[:-1], -1)
# detects: (C, TF)
return np.pad(
detects,
[(0, 0)] * (x.ndim - 1) + [(0, x.shape[-1] - detects.shape[-1])],
mode="edge",
)
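# detect_non_silence() returns a boolean mask with the same shape as the input:
# per-frame power is divided by the mean frame power, frames whose ratio exceeds
# `threshold` are marked as speech, and the frame-level decisions are expanded back to
# sample resolution (edge-padded to the original length). The preprocessors below use
# it to measure signal power on speech-only regions, e.g.
#   power = (x[detect_non_silence(x)] ** 2).mean()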
class CommonPreprocessor(AbsPreprocessor):
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
aux_task_names: Collection[str] = None,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: str = "text",
fs: int = 0,
nonsplit_symbol: Iterable[str] = None,
):
super().__init__(train)
self.train = train
self.speech_name = speech_name
self.text_name = text_name
self.speech_volume_normalize = speech_volume_normalize
self.rir_apply_prob = rir_apply_prob
self.noise_apply_prob = noise_apply_prob
self.short_noise_thres = short_noise_thres
self.aux_task_names = aux_task_names
if token_type is not None:
if token_list is None:
raise ValueError("token_list is required if token_type is not None")
self.text_cleaner = TextCleaner(text_cleaner)
self.tokenizer = build_tokenizer(
token_type=token_type,
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
nonsplit_symbol=nonsplit_symbol,
)
if bpemodel not in ["whisper_en", "whisper_multilingual"]:
self.token_id_converter = TokenIDConverter(
token_list=token_list,
unk_symbol=unk_symbol,
)
else:
self.token_id_converter = OpenAIWhisperTokenIDConverter(
model_type=bpemodel
)
else:
self.text_cleaner = None
self.tokenizer = None
self.token_id_converter = None
if train and rir_scp is not None:
self.rirs = []
with open(rir_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
if len(sps) == 1:
self.rirs.append(sps[0])
else:
self.rirs.append(sps[1])
else:
self.rirs = None
if train and noise_scp is not None:
self.noises = []
with open(noise_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
if len(sps) == 1:
self.noises.append(sps[0])
else:
self.noises.append(sps[1])
sps = noise_db_range.split("_")
if len(sps) == 1:
self.noise_db_low = self.noise_db_high = float(sps[0])
elif len(sps) == 2:
self.noise_db_low, self.noise_db_high = float(sps[0]), float(sps[1])
else:
                raise ValueError(
                    f"Format error: '{noise_db_range}' e.g. -3_4 -> [-3db,4db]"
                )
else:
self.noises = None
def _convolve_rir(self, speech, power):
rir_path = np.random.choice(self.rirs)
rir = None
if rir_path is not None:
rir, _ = soundfile.read(rir_path, dtype=np.float64, always_2d=True)
# rir: (Nmic, Time)
rir = rir.T
# speech: (Nmic, Time)
# Note that this operation doesn't change the signal length
speech = scipy.signal.convolve(speech, rir, mode="full")[
:, : speech.shape[1]
]
# Reverse mean power to the original power
power2 = (speech[detect_non_silence(speech)] ** 2).mean()
speech = np.sqrt(power / max(power2, 1e-10)) * speech
return speech, rir
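    # _convolve_rir() convolves each channel with a randomly chosen RIR, truncates the
    # "full" convolution back to the original length, and rescales by
    # sqrt(power / power2) so that the reverberant signal keeps the same mean power
    # over its non-silent samples as the dry input (`power` is computed by the caller).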
def _add_noise(self, speech, power):
nsamples = speech.shape[1]
noise_path = np.random.choice(self.noises)
noise = None
if noise_path is not None:
noise_db = np.random.uniform(self.noise_db_low, self.noise_db_high)
with soundfile.SoundFile(noise_path) as f:
if f.frames == nsamples:
noise = f.read(dtype=np.float64, always_2d=True)
elif f.frames < nsamples:
if f.frames / nsamples < self.short_noise_thres:
logging.warning(
f"Noise ({f.frames}) is much shorter than "
f"speech ({nsamples}) in dynamic mixing"
)
offset = np.random.randint(0, nsamples - f.frames)
# noise: (Time, Nmic)
noise = f.read(dtype=np.float64, always_2d=True)
# Repeat noise
noise = np.pad(
noise,
[(offset, nsamples - f.frames - offset), (0, 0)],
mode="wrap",
)
else:
offset = np.random.randint(0, f.frames - nsamples)
f.seek(offset)
# noise: (Time, Nmic)
noise = f.read(nsamples, dtype=np.float64, always_2d=True)
if len(noise) != nsamples:
raise RuntimeError(f"Something wrong: {noise_path}")
# noise: (Nmic, Time)
noise = noise.T
noise_power = (noise**2).mean()
scale = (
10 ** (-noise_db / 20)
* np.sqrt(power)
/ np.sqrt(max(noise_power, 1e-10))
)
speech = speech + scale * noise
return speech, noise
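    # _add_noise() mixing math: a noise level `noise_db` is drawn uniformly from
    # [noise_db_low, noise_db_high] and the noise is scaled by
    #   10 ** (-noise_db / 20) * sqrt(power) / sqrt(noise_power)
    # so the scaled noise power is roughly `power * 10 ** (-noise_db / 10)`, i.e. the
    # resulting speech-to-noise ratio is approximately noise_db dB.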
def _speech_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, Union[str, np.ndarray]]:
assert check_argument_types()
if self.speech_name in data:
if self.train and (self.rirs is not None or self.noises is not None):
speech = data[self.speech_name]
# speech: (Nmic, Time)
if speech.ndim == 1:
speech = speech[None, :]
else:
speech = speech.T
# Calc power on non silence region
power = (speech[detect_non_silence(speech)] ** 2).mean()
# 1. Convolve RIR
if self.rirs is not None and self.rir_apply_prob >= np.random.random():
speech, _ = self._convolve_rir(speech, power)
# 2. Add Noise
if (
self.noises is not None
and self.noise_apply_prob >= np.random.random()
):
speech, _ = self._add_noise(speech, power)
speech = speech.T
ma = np.max(np.abs(speech))
if ma > 1.0:
speech /= ma
data[self.speech_name] = speech
if self.speech_volume_normalize is not None:
speech = data[self.speech_name]
ma = np.max(np.abs(speech))
data[self.speech_name] = speech * self.speech_volume_normalize / ma
assert check_return_type(data)
return data
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
if self.text_name in data and self.tokenizer is not None:
text = data[self.text_name]
if isinstance(text, np.ndarray):
return data
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
if len(text_ints) > 100:
                logging.warning(
                    "The length of the text output exceeds 100, "
                    "which may cause OOM on the GPU. "
                    "Please ensure that the data processing is correct and verify it."
                )
data[self.text_name] = np.array(text_ints, dtype=np.int64)
if self.aux_task_names is not None and self.tokenizer is not None:
for name in self.aux_task_names:
if name in data:
text = data[name]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[name] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = self._speech_process(data)
data = self._text_process(data)
return data
class SLUPreprocessor(CommonPreprocessor):
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
transcript_token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: str = "text",
):
super().__init__(
train=train,
token_type=token_type,
token_list=token_list,
bpemodel=bpemodel,
text_cleaner=text_cleaner,
g2p_type=g2p_type,
unk_symbol=unk_symbol,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
delimiter=delimiter,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
text_name=text_name,
)
if transcript_token_list is not None:
print("using transcript")
self.transcript_tokenizer = build_tokenizer(
token_type="word",
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
)
self.transcript_token_id_converter = TokenIDConverter(
token_list=transcript_token_list,
unk_symbol=unk_symbol,
)
else:
self.transcript_tokenizer = None
self.transcript_token_id_converter = None
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
if self.text_name in data and self.tokenizer is not None:
text = data[self.text_name]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[self.text_name] = np.array(text_ints, dtype=np.int64)
if "transcript" in data and self.tokenizer is not None:
text = data["transcript"]
text = self.text_cleaner(text)
tokens = self.transcript_tokenizer.text2tokens(text)
text_ints = self.transcript_token_id_converter.tokens2ids(tokens)
data["transcript"] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
class CommonPreprocessor_multi(CommonPreprocessor):
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
aux_task_names: Collection[str] = None,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: List[str] = ["text"],
fs: int = 0,
speaker_change_symbol: Iterable[str] = None,
):
super().__init__(
train=train,
token_type=token_type,
token_list=token_list,
bpemodel=bpemodel,
text_cleaner=text_cleaner,
g2p_type=g2p_type,
unk_symbol=unk_symbol,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
delimiter=delimiter,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
aux_task_names=aux_task_names,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
fs=fs,
nonsplit_symbol=speaker_change_symbol,
)
if isinstance(text_name, str):
self.text_name = [text_name]
else:
self.text_name = text_name
self.speaker_change_symbol = speaker_change_symbol
if speaker_change_symbol is not None:
            assert (
                len(self.text_name) == 1
            ), "SOT model with speaker_change_symbol only supports single text input."
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
for text_n in self.text_name:
if text_n in data and self.tokenizer is not None:
text = data[text_n]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[text_n] = np.array(text_ints, dtype=np.int64)
if self.aux_task_names is not None and self.tokenizer is not None:
for name in self.aux_task_names:
if name in data:
text = data[name]
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
text_ints = self.token_id_converter.tokens2ids(tokens)
data[name] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = self._speech_process(data)
data = self._text_process(data)
return data
class MutliTokenizerCommonPreprocessor(CommonPreprocessor):
def __init__(
self,
train: bool,
token_type: List[str] = [None],
token_list: List[Union[Path, str, Iterable[str]]] = [None],
bpemodel: List[Union[Path, str, Iterable[str]]] = [None],
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech",
text_name: List[str] = ["text"],
):
# TODO(jiatong): sync with Kamo and Jing on interface for preprocessor
super().__init__(
train=train,
token_type=token_type[0],
token_list=token_list[0],
bpemodel=bpemodel[0],
text_cleaner=text_cleaner,
g2p_type=g2p_type,
unk_symbol=unk_symbol,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
delimiter=delimiter,
speech_name=speech_name,
text_name=text_name[0],
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
)
assert (
len(token_type) == len(token_list) == len(bpemodel) == len(text_name)
), "token_type, token_list, bpemodel, or processing text_name mismatched"
self.num_tokenizer = len(token_type)
self.tokenizer = []
self.token_id_converter = []
for i in range(self.num_tokenizer):
if token_type[i] is not None:
if token_list[i] is None:
raise ValueError("token_list is required if token_type is not None")
self.tokenizer.append(
build_tokenizer(
token_type=token_type[i],
bpemodel=bpemodel[i],
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
)
)
self.token_id_converter.append(
TokenIDConverter(
token_list=token_list[i],
unk_symbol=unk_symbol,
)
)
else:
self.tokenizer.append(None)
self.token_id_converter.append(None)
self.text_cleaner = TextCleaner(text_cleaner)
self.text_name = text_name # override the text_name from CommonPreprocessor
def _text_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
for i in range(self.num_tokenizer):
text_name = self.text_name[i]
if text_name in data and self.tokenizer[i] is not None:
text = data[text_name]
text = self.text_cleaner(text)
tokens = self.tokenizer[i].text2tokens(text)
text_ints = self.token_id_converter[i].tokens2ids(tokens)
data[text_name] = np.array(text_ints, dtype=np.int64)
assert check_return_type(data)
return data
class DynamicMixingPreprocessor(AbsPreprocessor):
def __init__(
self,
train: bool,
source_scp: str = None,
ref_num: int = 2,
dynamic_mixing_gain_db: float = 0.0,
speech_name: str = "speech_mix",
speech_ref_name_prefix: str = "speech_ref",
mixture_source_name: str = None,
utt2spk: str = None,
):
super().__init__(train)
self.source_scp = source_scp
self.ref_num = ref_num
self.dynamic_mixing_gain_db = dynamic_mixing_gain_db
self.speech_name = speech_name
self.speech_ref_name_prefix = speech_ref_name_prefix
# mixture_source_name: the key to select source utterances from dataloader
if mixture_source_name is None:
self.mixture_source_name = f"{speech_ref_name_prefix}1"
else:
self.mixture_source_name = mixture_source_name
self.sources = {}
assert (
source_scp is not None
), f"Please pass `source_scp` to {type(self).__name__}"
with open(source_scp, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
assert len(sps) == 2
self.sources[sps[0]] = sps[1]
self.utt2spk = {}
if utt2spk is None:
# if utt2spk is not provided, create a dummy utt2spk with uid.
for key in self.sources.keys():
self.utt2spk[key] = key
else:
with open(utt2spk, "r", encoding="utf-8") as f:
for line in f:
sps = line.strip().split(None, 1)
assert len(sps) == 2
self.utt2spk[sps[0]] = sps[1]
for key in self.sources.keys():
assert key in self.utt2spk
self.source_keys = list(self.sources.keys())
def _pick_source_utterances_(self, uid):
# return (ref_num - 1) uid of reference sources.
source_keys = [uid]
spk_ids = [self.utt2spk[uid]]
retry_cnt = 0
while len(source_keys) < self.ref_num:
picked = random.choice(self.source_keys)
spk_id = self.utt2spk[picked]
            # ensure each utterance and each speaker appears at most once in the mixture.
if (picked not in source_keys) and (spk_id not in spk_ids):
source_keys.append(picked)
else:
retry_cnt += 1
if retry_cnt > 10:
source_keys.append(picked)
                    logging.warning(
                        "Could not find a speech source from a different speaker "
                        f"after {retry_cnt} retries. "
                        "There may be problems with the training data. "
                        "Please check the utt2spk file."
                    )
return source_keys[1:]
def _read_source_(self, key, speech_length):
source, _ = soundfile.read(
self.sources[key],
dtype=np.float32,
always_2d=False,
)
if speech_length > source.shape[0]:
pad = speech_length - source.shape[0]
source = np.pad(source, (0, pad), "reflect")
else:
source = source[0:speech_length]
assert speech_length == source.shape[0]
return source
def _mix_speech_(self, uid, data):
# pick sources
source_keys = self._pick_source_utterances_(uid)
# load audios
speech_length = data[self.mixture_source_name].shape[0]
ref_audios = [self._read_source_(key, speech_length) for key in source_keys]
ref_audios = [data[self.mixture_source_name]] + ref_audios
# apply random gain to speech sources
gain_in_db = [
random.uniform(-self.dynamic_mixing_gain_db, self.dynamic_mixing_gain_db)
for i in range(len(ref_audios))
]
gain = [10 ** (g_db / 20.0) for g_db in gain_in_db]
ref_audios = [ref * g for ref, g in zip(ref_audios, gain)]
speech_mix = np.sum(np.array(ref_audios), axis=0)
for i, ref in enumerate(ref_audios):
data[f"{self.speech_ref_name_prefix}{i+1}"] = ref
data[self.speech_name] = speech_mix
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
# TODO(Chenda): need to test for multi-channel data.
assert (
len(data[self.mixture_source_name].shape) == 1
), "Multi-channel input has not been tested"
if self.train:
data = self._mix_speech_(uid, data)
assert check_return_type(data)
return data
class EnhPreprocessor(CommonPreprocessor):
"""Preprocessor for Speech Enhancement (Enh) task."""
def __init__(
self,
train: bool,
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech_mix",
speech_ref_name_prefix: str = "speech_ref",
noise_ref_name_prefix: str = "noise_ref",
dereverb_ref_name_prefix: str = "dereverb_ref",
use_reverberant_ref: bool = False,
num_spk: int = 1,
num_noise_type: int = 1,
sample_rate: int = 8000,
force_single_channel: bool = False,
):
super().__init__(
train=train,
token_type=None,
token_list=None,
bpemodel=None,
text_cleaner=None,
g2p_type=None,
unk_symbol="<unk>",
space_symbol="<space>",
non_linguistic_symbols=None,
delimiter=None,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
)
self.speech_ref_name_prefix = speech_ref_name_prefix
self.noise_ref_name_prefix = noise_ref_name_prefix
self.dereverb_ref_name_prefix = dereverb_ref_name_prefix
self.use_reverberant_ref = use_reverberant_ref
self.num_spk = num_spk
self.num_noise_type = num_noise_type
self.sample_rate = sample_rate
self.force_single_channel = force_single_channel
if self.speech_volume_normalize is not None:
sps = speech_volume_normalize.split("_")
if len(sps) == 1:
                self.volume_low = self.volume_high = float(sps[0])
elif len(sps) == 2:
self.volume_low, self.volume_high = float(sps[0]), float(sps[1])
else:
raise ValueError(
"Format error for --speech_volume_normalize: "
f"'{speech_volume_normalize}'"
)
def _ensure_2d(self, signal):
if isinstance(signal, tuple):
return tuple(self._ensure_2d(sig) for sig in signal)
elif isinstance(signal, list):
return [self._ensure_2d(sig) for sig in signal]
else:
# (Nmic, Time)
return signal[None, :] if signal.ndim == 1 else signal.T
def _get_early_signal(self, speech, rir, power):
predelay = 50 # milliseconds
dt = np.argmax(rir, axis=1).min()
et = dt + (predelay * self.sample_rate) // 1000
rir_early = rir[:, :et]
speech2 = scipy.signal.convolve(speech, rir_early, mode="full")[
:, : speech.shape[1]
]
# Reverse mean power to the original power
power2 = (speech2[detect_non_silence(speech2)] ** 2).mean()
speech2 = np.sqrt(power / max(power2, 1e-10)) * speech2
return speech2
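    # _get_early_signal() keeps only the "early" part of the RIR: it finds the earliest
    # direct-path peak across channels (np.argmax per mic, then min), keeps roughly
    # 50 ms (`predelay`) of taps after it, convolves with that truncated RIR, and
    # rescales the result to the reference power, giving a target that still contains
    # the direct path and early reflections.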
def _apply_to_all_signals(self, data_dict, func):
data_dict[self.speech_name] = func(data_dict[self.speech_name])
for n in range(self.num_noise_type):
noise_name = self.noise_ref_name_prefix + str(n + 1)
if noise_name in data_dict:
data_dict[noise_name] = func(data_dict[noise_name])
for spk in range(self.num_spk):
speech_ref_name = self.speech_ref_name_prefix + str(spk + 1)
if self.train or speech_ref_name in data_dict:
data_dict[speech_ref_name] = func(data_dict[speech_ref_name])
dereverb_ref_name = self.dereverb_ref_name_prefix + str(spk + 1)
if dereverb_ref_name in data_dict:
data_dict[dereverb_ref_name] = func(data_dict[dereverb_ref_name])
def _speech_process(
self, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, Union[str, np.ndarray]]:
assert check_argument_types()
if self.speech_name not in data:
assert check_return_type(data)
return data
if self.train:
# clean speech signal (Nmic, Time)
speech_ref = [
self._ensure_2d(data[self.speech_ref_name_prefix + str(i + 1)])
for i in range(self.num_spk)
]
# dereverberated (noisy) signal (Nmic, Time)
if "dereverb_ref1" in data:
dereverb_speech_ref = [
self._ensure_2d(data[self.dereverb_ref_name_prefix + str(i + 1)])
for i in range(self.num_spk)
if self.dereverb_ref_name_prefix + str(i + 1) in data
]
assert len(dereverb_speech_ref) in (1, self.num_spk), len(
dereverb_speech_ref
)
else:
dereverb_speech_ref = None
# Calc power on non silence region
power_ref = [
(sref[detect_non_silence(sref)] ** 2).mean() for sref in speech_ref
]
speech_mix = self._ensure_2d(data[self.speech_name])
# 1. Convolve RIR
if self.rirs is not None and self.rir_apply_prob >= np.random.random():
if self.noise_ref_name_prefix + "1" in data:
noise = data[self.noise_ref_name_prefix + "1"]
np.testing.assert_allclose(
np.squeeze(sum(speech_ref) + noise), np.squeeze(speech_mix)
)
else:
np.testing.assert_allclose(
np.squeeze(sum(speech_ref)), np.squeeze(speech_mix)
)
speech_ref, rir_ref = zip(
*[
self._convolve_rir(sp, power)
for sp, power in zip(speech_ref, power_ref)
]
)
if self.force_single_channel:
speech_ref = list(
map(lambda x: x if x.shape[0] == 1 else x[:1], speech_ref)
)
rir_ref = list(
map(lambda x: x if x.shape[0] == 1 else x[:1], rir_ref)
)
if self.use_reverberant_ref:
for spk in range(self.num_spk):
suffix = str(spk + 1)
speech_ref_name = self.speech_ref_name_prefix + suffix
# (Time, Nmic)
data[speech_ref_name] = speech_ref[spk].T
if dereverb_speech_ref is not None:
if spk == 0 or len(dereverb_speech_ref) > 1:
dereverb_name = self.dereverb_ref_name_prefix + suffix
data[dereverb_name] = self._get_early_signal(
speech_ref[spk], rir_ref[spk], power_ref[spk]
).T
else:
for spk in range(self.num_spk):
suffix = str(spk + 1)
speech_ref_name = self.speech_ref_name_prefix + suffix
# clean speech with early reflections (Time, Nmic)
data[speech_ref_name] = self._get_early_signal(
speech_ref[spk], rir_ref[spk], power_ref[spk]
).T
if dereverb_speech_ref is not None:
if spk == 0 or len(dereverb_speech_ref) > 1:
dereverb_name = self.dereverb_ref_name_prefix + suffix
data[dereverb_name] = data[speech_ref_name]
if self.noise_ref_name_prefix + "1" in data:
speech_mix = sum(speech_ref) + noise
else:
speech_mix = sum(speech_ref)
# 2. Add Noise
if self.noises is not None and self.noise_apply_prob >= np.random.random():
if self.noise_ref_name_prefix + "1" in data:
speech_mix -= data[self.noise_ref_name_prefix + "1"]
power_mix = (speech_mix[detect_non_silence(speech_mix)] ** 2).mean()
speech_mix, noise = self._add_noise(speech_mix, power_mix)
if self.force_single_channel:
if speech_mix.shape[0] > 1:
speech_mix = speech_mix[:1]
if noise.shape[0] > 1:
noise = noise[:1]
for n in range(1, self.num_noise_type):
name = self.noise_ref_name_prefix + str(n + 1)
data.pop(name, None)
data[self.noise_ref_name_prefix + "1"] = noise.T
speech_mix = speech_mix.T
data[self.speech_name] = speech_mix
ma = np.max(np.abs(speech_mix))
if ma > 1.0:
self._apply_to_all_signals(data, lambda x: x / ma)
self._apply_to_all_signals(data, lambda x: x.squeeze())
if self.force_single_channel:
self._apply_to_all_signals(data, lambda x: x if x.ndim == 1 else x[:, 0])
if self.speech_volume_normalize is not None:
if self.train:
volume_scale = np.random.uniform(self.volume_low, self.volume_high)
else:
# use a fixed scale to make it deterministic
volume_scale = self.volume_low
speech_mix = data[self.speech_name]
ma = np.max(np.abs(speech_mix))
self._apply_to_all_signals(data, lambda x: x * volume_scale / ma)
assert check_return_type(data)
return data
class SVSPreprocessor(AbsPreprocessor):
"""Preprocessor for Sing Voice Sythesis (SVS) task."""
def __init__(
self,
train: bool,
token_type: str = None,
token_list: Union[Path, str, Iterable[str]] = None,
bpemodel: Union[Path, str, Iterable[str]] = None,
text_cleaner: Collection[str] = None,
g2p_type: str = None,
unk_symbol: str = "<unk>",
space_symbol: str = "<space>",
non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
delimiter: str = None,
singing_volume_normalize: float = None,
singing_name: str = "singing",
text_name: str = "text",
label_name: str = "label",
midi_name: str = "score",
fs: np.int32 = 0,
hop_length: np.int32 = 256,
phn_seg: dict = {
1: [1],
2: [0.25, 1],
3: [0.1, 0.5, 1],
4: [0.05, 0.1, 0.5, 1],
},
):
super().__init__(train)
self.train = train
self.singing_name = singing_name
self.text_name = text_name
self.label_name = label_name
self.midi_name = midi_name
self.fs = fs
self.hop_length = hop_length
self.singing_volume_normalize = singing_volume_normalize
self.phn_seg = phn_seg
self.time_shift = hop_length / fs
if token_type is not None:
if token_list is None:
raise ValueError("token_list is required if token_type is not None")
self.text_cleaner = TextCleaner(text_cleaner)
self.tokenizer = build_tokenizer(
token_type=token_type,
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
g2p_type=g2p_type,
)
self.token_id_converter = TokenIDConverter(
token_list=token_list,
unk_symbol=unk_symbol,
)
else:
self.text_cleaner = None
self.tokenizer = None
self.token_id_converter = None
def __call__(
self,
uid: str,
data: Dict[str, Union[str, np.ndarray, tuple]],
) -> Dict[str, np.ndarray]:
assert check_argument_types()
if self.singing_name in data:
if self.singing_volume_normalize is not None:
singing = data[self.singing_name]
ma = np.max(np.abs(singing))
data[self.singing_name] = singing * self.singing_volume_normalize / ma
if self.midi_name in data and self.label_name in data:
# Load label info
lab_timeseq, text = data[self.label_name]
lab_len = len(text)
text = " ".join(text)
text = self.text_cleaner(text)
text = text.split(" ")
text_ints = self.token_id_converter.tokens2ids(text)
data.pop(self.label_name)
label = np.zeros((lab_len))
midi = np.zeros((lab_len))
duration_phn = np.zeros((lab_len))
duration_ruled_phn = np.zeros((lab_len))
duration_syb = np.zeros((lab_len))
slur = np.zeros((lab_len))
# Load score info
tempo, syb_info = data[self.midi_name]
phn_cnt = []
# Calculate features
index_lab = 0
for st, et, syb, note, phns in syb_info:
dur = et - st
_duration_syb = int(dur / self.time_shift + 0.5)
phone = phns.split("_")
phn_num = len(phone)
phn_cnt.append(phn_num)
pre_seg = 0
for k in range(phn_num):
_duration_ruled_phn = int(
(self.phn_seg[phn_num][k] - pre_seg) * dur / self.time_shift
+ 0.5
)
pre_seg = self.phn_seg[phn_num][k]
# timeseq from lab
assert text[index_lab] == phone[k]
_duration_phn = int(
(lab_timeseq[index_lab][1] - lab_timeseq[index_lab][0])
/ self.time_shift
+ 0.5
)
# phone level feature
label[index_lab] = text_ints[index_lab]
midi[index_lab] = note
duration_phn[index_lab] = _duration_phn
duration_ruled_phn[index_lab] = _duration_ruled_phn
duration_syb[index_lab] = _duration_syb
if syb == "—":
slur[index_lab] = 1
else:
slur[index_lab] = 0
index_lab += 1
assert index_lab == lab_len
data.pop(self.midi_name)
phn_cnt = np.array(phn_cnt)
            label = label.astype(np.int64)
            midi = midi.astype(np.int64)
            duration_phn = duration_phn.astype(np.int64)
            duration_syb = duration_syb.astype(np.int64)
            duration_ruled_phn = duration_ruled_phn.astype(np.int64)
            phn_cnt = phn_cnt.astype(np.int64)
            slur = slur.astype(np.int64)
data["label"] = label
data["midi"] = midi
data["duration_phn"] = duration_phn
data["duration_ruled_phn"] = duration_ruled_phn
data["duration_syb"] = duration_syb
data["phn_cnt"] = phn_cnt
data["slur"] = slur
# TODO(Yuning): Add score from midi
if self.text_name in data and self.tokenizer is not None:
# FIX ME (Yuning): wrong transfer happen in pyopenjtalk
text = data[self.text_name]
if not isinstance(text, np.ndarray):
if not isinstance(text, str):
text = " ".join(text)
text = self.text_cleaner(text)
tokens = self.tokenizer.text2tokens(text)
_text_ints = self.token_id_converter.tokens2ids(tokens)
data[self.text_name] = np.array(_text_ints, dtype=np.int64)
return data
class TSEPreprocessor(EnhPreprocessor):
"""Preprocessor for Target Speaker Extraction."""
def __init__(
self,
train: bool,
train_spk2enroll: str = None,
enroll_segment: int = None,
load_spk_embedding: bool = False,
load_all_speakers: bool = False,
# inherited from EnhPreprocessor
rir_scp: str = None,
rir_apply_prob: float = 1.0,
noise_scp: str = None,
noise_apply_prob: float = 1.0,
noise_db_range: str = "3_10",
short_noise_thres: float = 0.5,
speech_volume_normalize: float = None,
speech_name: str = "speech_mix",
speech_ref_name_prefix: str = "speech_ref",
noise_ref_name_prefix: str = "noise_ref",
dereverb_ref_name_prefix: str = "dereverb_ref",
use_reverberant_ref: bool = False,
num_spk: int = 1,
num_noise_type: int = 1,
sample_rate: int = 8000,
force_single_channel: bool = False,
):
super().__init__(
train,
rir_scp=rir_scp,
rir_apply_prob=rir_apply_prob,
noise_scp=noise_scp,
noise_apply_prob=noise_apply_prob,
noise_db_range=noise_db_range,
short_noise_thres=short_noise_thres,
speech_volume_normalize=speech_volume_normalize,
speech_name=speech_name,
speech_ref_name_prefix=speech_ref_name_prefix,
noise_ref_name_prefix=noise_ref_name_prefix,
dereverb_ref_name_prefix=dereverb_ref_name_prefix,
use_reverberant_ref=use_reverberant_ref,
num_spk=num_spk,
num_noise_type=num_noise_type,
sample_rate=sample_rate,
force_single_channel=force_single_channel,
)
# If specified, the enrollment will be chomped to the specified length
self.enroll_segment = enroll_segment
# If True, the speaker embedding will be loaded instead of enrollment audios
self.load_spk_embedding = load_spk_embedding
# If False, only one of the speakers in each mixture sample will be loaded
self.load_all_speakers = load_all_speakers
if train and rir_scp is not None and rir_apply_prob > 0:
logging.warning(
"Be cautious when applying RIRs on the fly in the TSE task! "
"Please ensure `speech_ref` sums up to `speech_mix` for each sample."
)
if train:
if train_spk2enroll is None:
logging.info("Using fixed enrollment for each sample")
self.train_spk2enroll = None
else:
logging.info("Using dynamically sampled enrollment for each sample")
with open(train_spk2enroll, "r", encoding="utf-8") as f:
# {spkID: [(uid1, path1), (uid2, path2), ...]}
self.train_spk2enroll = json.load(f)
else:
self.train_spk2enroll = None
def _read_audio_segment(self, path, seg_len=None):
with soundfile.SoundFile(path) as f:
if seg_len is None or f.frames == seg_len:
audio = f.read(dtype=np.float32, always_2d=True)
elif f.frames < seg_len:
offset = np.random.randint(0, seg_len - f.frames)
# audio: (Time, Nmic)
audio = f.read(dtype=np.float32, always_2d=True)
# Repeat audio
audio = np.pad(
audio,
[(offset, seg_len - f.frames - offset), (0, 0)],
mode="wrap",
)
else:
offset = np.random.randint(0, f.frames - seg_len)
f.seek(offset)
# audio: (Time, Nmic)
audio = f.read(seg_len, dtype=np.float32, always_2d=True)
if len(audio) != seg_len:
raise RuntimeError(f"Something wrong: {path}")
return audio[:, 0]
def _speech_process(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, Union[str, np.ndarray]]:
assert check_argument_types()
ref_names = [k for k in data.keys() if re.match(r"speech_ref\d+", k)]
num_spk = len(ref_names)
aux_names = [k for k in data.keys() if re.match(r"enroll_ref\d+", k)]
if self.train:
assert len(ref_names) == len(aux_names), (len(ref_names), len(aux_names))
if not self.load_all_speakers:
# only load one target-speaker data
spk = np.random.randint(0, num_spk)
for i, name in enumerate(ref_names):
if i == 0:
data[name] = data[ref_names[spk]]
else:
data.pop(name)
continue
for i, name in enumerate(aux_names):
if not self.load_all_speakers:
if i == 0:
data[name] = data[aux_names[spk]]
else:
data.pop(name)
continue
if self.train_spk2enroll is None:
# normal format in `enroll_spk?.scp`:
# MIXTURE_UID /path/to/enrollment_or_embedding
aux_audio = data[name]
else:
# a special format in `enroll_spk?.scp`:
# MIXTURE_UID *UID SPEAKER_ID
assert data[name].startswith("*"), data[name]
cur_uid, spkid = data[name][1:].strip().split(maxsplit=1)
aux_uid, aux_audio = random.choice(self.train_spk2enroll[spkid])
while aux_uid == cur_uid:
aux_uid, aux_audio = random.choice(self.train_spk2enroll[spkid])
if getattr(self, "load_spk_embedding", False):
data[name] = np.load(aux_audio)[None, :] # force 2D
elif self.enroll_segment:
data[name] = self._read_audio_segment(
aux_audio, self.enroll_segment
)
else:
data[name] = soundfile.read(aux_audio)[0]
else:
for name in aux_names:
if data[name].startswith("*"):
# in case of collecting stats for training data
data[name] = np.zeros(1, dtype=data["speech_mix"].dtype)
else:
if getattr(self, "load_spk_embedding", False):
data[name] = np.load(data[name])[None, :] # force 2D
elif self.enroll_segment:
data[name] = self._read_audio_segment(
data[name], self.enroll_segment
)
else:
data[name] = soundfile.read(data[name])[0]
assert check_return_type(data)
return data
def __call__(
self, uid: str, data: Dict[str, Union[str, np.ndarray]]
) -> Dict[str, np.ndarray]:
assert check_argument_types()
data = super()._speech_process(data)
data = self._speech_process(uid, data)
return data
| [] |
2024-01-10 | jacinthes/PubMed-fact-checker | GPTHelper.py | import openai
from time import time
import os
import logging
import streamlit as st
openai.api_key = st.secrets['openai_API_key']
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as file:
return file.read()
def gpt35_rephrase(fact):
# Dynamically generate the prompt to rephrase the fact as a PubMed query using GPT3.5
prompt = open_file('prompts/gpt35_rephrase.txt').replace('<<FACT>>', fact)
try:
response = openai.Completion.create(
model='text-davinci-003',
prompt=prompt,
max_tokens=250,
temperature=0
)
response = response['choices'][0]['text'].strip()
filename = '%s_gpt3.txt' % time()
# Create the logs folder if it does not exist
if not os.path.exists('gpt3_rephrase_logs'):
os.makedirs('gpt3_rephrase_logs')
# Save the whole prompt and the response so that we can inspect it when necessary
with open('gpt3_rephrase_logs/%s' % filename, 'w', encoding="utf-8") as outfile:
outfile.write('PROMPT:\n\n' + prompt + '\n\n###############\n\nRESPONSE:\n\n' + response)
return response
except Exception as e:
logging.error('Error communicating with OpenAI (rephrase): ', exc_info=e)
def gpt35_check_fact(evidence, fact):
# Dynamically generate the prompt to check the fact against the given PubMed article conclusion/abstract
prompt = open_file('prompts/gpt35_fact_check.txt').replace('<<EVIDENCE>>', evidence).replace('<<HYPOTHESIS>>', fact)
try:
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
max_tokens=3, # Don't need more for Entails/Contradicts/Undetermined
temperature=0
)
response = response['choices'][0]['text'].strip()
response = response.replace('.', '')
filename = '%s_gpt3.txt' % time()
if not os.path.exists('gpt3_factchecking_logs'):
os.makedirs('gpt3_factchecking_logs')
with open('gpt3_factchecking_logs/%s' % filename, 'w', encoding='utf-8') as outfile:
outfile.write('PROMPT:\n\n' + prompt + '\n\n###############\n\nRESPONSE:\n\n' + response)
return response
except Exception as e:
logging.error('Error communicating with OpenAI (check_fact): ', exc_info=e)
def gpt35_turbo_rephrase(fact):
# Dynamically generate the prompt to rephrase the fact as a PubMed query using GPT3.5 turbo - lower cost than 3.5
prompt = open_file('prompts/gpt35_rephrase.txt').replace('<<FACT>>', fact)
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{'role': 'user',
'content': prompt}
]
)
response = response['choices'][0]['message']['content'].strip()
filename = '%s_gpt3.txt' % time()
if not os.path.exists('gpt35_rephrase_logs'):
os.makedirs('gpt35_rephrase_logs')
with open('gpt35_rephrase_logs/%s' % filename, 'w', encoding="utf-8") as outfile:
outfile.write('PROMPT:\n\n' + prompt + '\n\n###############\n\nRESPONSE:\n\n' + response)
return response
except Exception as e:
logging.error('Error communicating with OpenAI (gpt35_rephrase): ', exc_info=e)
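# Rough call flow for this module (the claim/evidence strings are made-up examples,
# and the exact verdict labels come from prompts/gpt35_fact_check.txt, not shown here):
#   query = gpt35_turbo_rephrase("Vitamin D supplementation prevents the common cold")
#   # ...retrieve PubMed conclusions for `query` elsewhere in the app...
#   verdict = gpt35_check_fact(evidence=conclusion_text, fact=claim)
#   # `verdict` is expected to be Entails / Contradicts / Undetermined, per the
#   # max_tokens comment in gpt35_check_fact.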
| [
"prompts/gpt35_fact_check.txt",
"<<HYPOTHESIS>>",
"<<EVIDENCE>>",
"prompts/gpt35_rephrase.txt"
] |
2024-01-10 | vianai-oss/veryLLM | shared~gpt_3_5_turbo.py | import openai
import tenacity
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ.get("OPENAI_API_KEY")
retry_decorator = tenacity.retry(
    stop=tenacity.stop_after_attempt(3),  # Maximum number of attempts before giving up
wait=tenacity.wait_fixed(2), # Time delay (in seconds) between retries
)
@retry_decorator
def get_gpt_3_5_turbo_response(**kwargs):
response = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k", **kwargs)
return response.choices[0].message.content
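# The **kwargs are forwarded to openai.ChatCompletion.create, so a typical call looks
# like the sketch below (message content is illustrative):
#   get_gpt_3_5_turbo_response(
#       messages=[{"role": "user", "content": "Summarize this abstract ..."}],
#       temperature=0,
#   )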
| [] |
2024-01-10 | Himalaypatel75/LangChain-DocumentAssistant-Boat | ingestion.py | import os
from langchain.document_loaders import (
    ReadTheDocsLoader,
)  # loads the HTML pages of a scraped Read the Docs documentation site
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Pinecone, FAISS
import pinecone
from dotenv import load_dotenv
def ingest_docs():
loader = ReadTheDocsLoader("langchain-docs/api.python.langchain.com/en/latest")
raw_documents = loader.load()
print(f"loaded {len(raw_documents)} documents")
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000, chunk_overlap=100, separators=["\n\n", "\n", " ", ""]
)
documents = text_splitter.split_documents(raw_documents)
    print(f"split into {len(documents)} chunks")
for doc in documents:
new_url = doc.metadata["source"]
new_url = new_url.replace("langchain-docs", "https:/")
doc.metadata.update({"source": new_url})
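    # The replace above maps the local scrape path back to the public docs URL, e.g.
    # "langchain-docs/api.python.langchain.com/en/latest/index.html" -> "https://api.python.langchain.com/en/latest/index.html"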
print(f"going to insert {len(documents)} to pinecone")
embeddings = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY"))
print(f"Going to add {len(documents)} to Pinecone")
pinecone.init(
api_key=os.getenv("PINECONE_API_KEY"),
environment=os.getenv("PINECONE_ENVIRONMENT"),
)
Pinecone.from_documents(
documents, embedding=embeddings, index_name=os.getenv("PINECONE_INDEX_NAME")
)
# vectorstore = FAISS.from_documents(documents, embeddings)
# vectorstore.save_local("faiss_index_book")
# new_vectorstore = FAISS.load_local("faiss_index_book", embeddings)
# print(new_vectorstore)
    print("****Loading to vectorstore done ***")
if __name__ == "__main__":
load_dotenv()
ingest_docs()
| [] |
2024-01-10 | Azure/business-process-automation | src~backend~huggingface~approaches~chataggregate.py | import openai
from azure.search.documents import SearchClient
from azure.search.documents.models import QueryType
import redis
from approaches.approach import Approach
from text import nonewlines
import requests
import os
import json
from azure.storage.blob import BlobServiceClient
from redis.commands.search.query import Query
import numpy
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
# Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
# top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion
# (answer) with that prompt.
class ChatAggregateApproach(Approach):
prompt_prefix = """<|im_start|>system
Assistant helps answer questions within text from documents. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brakets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
Sources:
{sources}
Count represents the total number of results that were returned for the query.
Count:
{count}
{facet_prompt}
Facets:
{facets}
<|im_end|>
{chat_history}
"""
follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next .
Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.
Try not to repeat questions that have already been asked.
Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base.
Based on the Chat History and the Question, generate a keyword search query that will return the most relevant results.
Generate search terms that are taken directly from the question or are synonyms of the terms.
{facet_prompt}
Do not assume to know acronyms.
Chat History: {chat_history}
Question: {question}
Search query:
"""
def __init__(self, blob_client: BlobServiceClient, search_client: SearchClient, chatgpt_deployment: str, gpt_deployment: str, sourcepage_field: str, content_field: str, index: any, redis_url: str, redis_pw : str):
self.search_client = search_client
self.chatgpt_deployment = chatgpt_deployment
self.gpt_deployment = gpt_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.index = index
self.redis_url = redis_url
self.redis_pw = redis_pw
self.blob_client = blob_client
def getText(self, searchables, doc):
if searchables == None:
return ""
if len(searchables) == 0:
return ""
out = ""
for s in searchables:
currentData = doc
for i in s.split('/'):
if isinstance(currentData.get(i), list):
currentData = currentData.get(i)[0]
else:
currentData = currentData[i]
if isinstance(currentData, str):
out = out + currentData
return out
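    # Illustrative note (added): each entry in `searchables` is a '/'-separated path into the search
    # document — a hypothetical path like "content/text" would walk doc["content"]["text"], taking the
    # first element whenever a list is encountered — and all matched strings are concatenated.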
def sourceFile(self, doc):
if self.sourcepage_field in doc:
return doc[self.sourcepage_field]
elif "content" in doc and self.sourcepage_field in doc["content"]:
return doc["content"][self.sourcepage_field]
else:
return "No Filename Found: "
def run(self, history: list[dict], overrides: dict) -> any:
q = history[-1]["user"]
use_semantic_captions = True if overrides.get("semantic_captions") else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
facets = ""
count = ""
if len(overrides.get("vector_search_pipeline")) > 2:
headers = {
"api-key" : OPENAI_API_KEY,
"Content-Type" : "application/json"
}
url = "https://"+os.environ.get("AZURE_OPENAI_SERVICE")+".openai.azure.com/"+"openai/deployments/"+"text-search-ada-query-001"+"/embeddings?api-version=2022-12-01"
requestOut = requests.post(url, json = {'input' : history[-1]["user"]}, headers=headers)
output = json.loads(requestOut.text)
embeddings = output["data"][0]["embedding"]
np_vector = numpy.array(embeddings, dtype=numpy.float32)
r = redis.Redis.from_url(url = self.redis_url, password=self.redis_pw)
query = "(@pipeline:"+overrides.get("vector_search_pipeline")+")=>[KNN "+ str(overrides.get("top")) +" @v $BLOB AS dist]"
redisQuery = Query(query).return_field("dist").sort_by("dist").dialect(2)
searchOut = r.ft("bpaindexfilterada").search(redisQuery, query_params={"BLOB": np_vector.tobytes() })
docs = []
for doc in searchOut.docs:
blobOut = self.blob_client.get_blob_client("results", overrides.get("vector_search_pipeline") + "/" + doc.id + ".json")
blobDownload = blobOut.download_blob().content_as_text()
blobDocument = json.loads(blobDownload)
del blobDocument["aggregatedResults"]["openaiEmbeddings"]
docs.append(blobDocument)
results = ""
if len(docs) > 0:
if "text" in docs[0]["aggregatedResults"]:
results = [self.sourceFile(doc) + ": " + nonewlines(doc["aggregatedResults"]["text"]) for doc in docs]
elif "ocrToText" in docs[0]["aggregatedResults"]:
results = [self.sourceFile(doc) + ": " + nonewlines(doc["aggregatedResults"]["ocrToText"]) for doc in docs]
else:
results = [self.sourceFile(doc) + ": " + nonewlines(doc["aggregatedResults"]["sttToText"]) for doc in docs]
content = "\n".join(results)
else:
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
prompt = self.query_prompt_template.format(facet_prompt=overrides.get("facetQueryTermsTemplate") or "",chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"])
completion = openai.Completion.create(
engine=self.gpt_deployment,
prompt=prompt,
temperature=0.0,
max_tokens=32,
n=1)
oaiQuery = completion.choices[0].text
if overrides.get("semantic_ranker"):
r = self.search_client.search(oaiQuery,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
facets=self.index.get("facetableFields"),
semantic_configuration_name="default",
top=top,
include_total_count=True,
query_caption="extractive|highlight-false" if use_semantic_captions else None)
else:
r = self.search_client.search(oaiQuery, filter=filter, top=top)
if use_semantic_captions:
results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r]
else:
results = [self.sourceFile(doc) + ": " + nonewlines(self.getText(self.index.get("searchableFields"), doc)) for doc in r]
content = "\n".join(results)
facets = json.dumps(r.get_facets())
count = r.get_count()
follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
        # Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
prompt_override = overrides.get("prompt_template")
if prompt_override is None:
prompt = self.prompt_prefix.format(count=count,facet_prompt=overrides.get("facetTemplate") or "", facets=facets,injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt)
elif prompt_override.startswith(">>>"):
prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt)
else:
prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt)
if len(prompt) > 7000:
prompt = prompt[:7000]
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
completion = openai.Completion.create(
engine=self.chatgpt_deployment,
prompt=prompt[:7000],
temperature=overrides.get("temperature") or 0.7,
max_tokens=1024,
n=1,
stop=["<|im_end|>", "<|im_start|>"])
return {"data_points": facets, "answer": completion.choices[0].text, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')}
def get_chat_history_as_text(self, history, include_last_turn=True, approx_max_tokens=1000) -> str:
history_text = ""
for h in reversed(history if include_last_turn else history[:-1]):
            history_text = (
                """<|im_start|>user""" + "\n" + h["user"] + "\n" + """<|im_end|>""" + "\n"
                + """<|im_start|>assistant""" + "\n"
                + (h.get("bot") + """<|im_end|>""" if h.get("bot") else "")
                + "\n" + history_text
            )
if len(history_text) > approx_max_tokens*4:
break
return history_text | [
"\n",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. \n Based on the Chat History and the Question, generate a keyword search query that will return the most relevant results. \n Generate search terms that are taken directly from the question or are synonyms of the terms. \n {facet_prompt}\n Do not assume to know acronyms. \n Chat History: {chat_history} \n Question: {question} \n Search query: \n ",
"facetTemplate",
"<|im_start|>system\nAssistant helps answer questions within text from documents. Be brief in your answers.\nAnswer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below.\nEach source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brakets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].\n{follow_up_questions_prompt}\n{injected_prompt}\nSources:\n{sources}\n\nCount represents the total number of results that were returned for the query. \nCount:\n{count}\n{facet_prompt}\nFacets:\n{facets}\n<|im_end|>\n{chat_history}\n",
"Generate three very brief follow-up questions that the user would likely ask next . \n Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.\n Try not to repeat questions that have already been asked.\n Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'",
"suggest_followup_questions",
"facetQueryTermsTemplate",
"prompt_template"
] |
2024-01-10 | Azure/business-process-automation | src~backend~huggingface~approaches~retrievers~cogsearchfacetsretriever.py |
from langchain.schema import Document, BaseRetriever
from azure.search.documents import SearchClient
from azure.core.credentials import AzureKeyCredential
from typing import List
import json
import os
class CogSearchFacetsRetriever(BaseRetriever):
def __init__(self, index : str, searchables, top : int ):
self.index = index
self.searchables = searchables
self.top = top
def get_relevant_documents(self, query: str) -> List[Document]:
search_client = SearchClient(
endpoint="https://"+os.environ["AZURE_SEARCH_SERVICE"]+".search.windows.net",
index_name=self.index.get("name"),
credential=AzureKeyCredential(os.environ["AZURE_SEARCH_APIKEY"]))
r = search_client.search(query, top=self.top, facets=self.index.get("facetableFields"), include_total_count=True)
facets = json.dumps(r.get_facets())
count = r.get_count()
docs = [Document(page_content="Total number of results returned: "+str(count)+" \n JSON structure that gives the sentiment data." + facets,metadata=r.get_facets())]
return docs
def nonewlines(self, s: str) -> str:
return s.replace('\n', ' ').replace('\r', ' ')
def getText(self, searchables, doc):
if searchables == None:
return ""
if len(searchables) == 0:
return ""
out = ""
for s in searchables:
currentData = doc
for i in s.split('/'):
if isinstance(currentData.get(i), list):
currentData = currentData.get(i)[0]
else:
currentData = currentData[i]
if isinstance(currentData, str):
out = out + currentData
return out
async def aget_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
return self.get_relevant_documents(query) | [] |
2024-01-10 | Azure/business-process-automation | src~backend~huggingface~approaches~retrievers~cogsearchretriever.py |
from langchain.schema import Document, BaseRetriever
from azure.search.documents import SearchClient
from azure.core.credentials import AzureKeyCredential
from typing import List
import json
import os
class CogSearchRetriever(BaseRetriever):
def __init__(self, index : str, searchables, top : int ):
self.index = index
self.searchables = searchables
self.top = top
def get_relevant_documents(self, query: str) -> List[Document]:
search_client = SearchClient(
endpoint="https://"+os.environ["AZURE_SEARCH_SERVICE"]+".search.windows.net",
index_name=self.index.get("name"),
credential=AzureKeyCredential(os.environ["AZURE_SEARCH_APIKEY"]))
r = search_client.search(query, top=self.top)
docs = []
for doc in r:
doc["source"] = doc["filename"]
text = self.nonewlines(self.getText(self.searchables, doc))
docs.append(Document(page_content=text, metadata={"source":doc["filename"]}))
return docs
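    # Illustrative usage sketch (added; the index dict, field name and query are placeholder assumptions):
    #   retriever = CogSearchRetriever({"name": "my-index", "searchableFields": ["content"]}, ["content"], 3)
    #   docs = retriever.get_relevant_documents("example question about the indexed documents")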
def nonewlines(self, s: str) -> str:
return s.replace('\n', ' ').replace('\r', ' ')
def getText(self, searchables, doc):
if searchables == None:
return ""
if len(searchables) == 0:
return ""
out = ""
for s in searchables:
currentData = doc
for i in s.split('/'):
if isinstance(currentData.get(i), list):
currentData = currentData.get(i)[0]
else:
currentData = currentData[i]
if isinstance(currentData, str):
out = out + currentData
return out
async def aget_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
return self.get_relevant_documents(query) | [] |
2024-01-10 | Azure/business-process-automation | src~backend~huggingface~lookuptool.py | from os import path
import csv
from langchain.agents import Tool
from typing import Optional
class CsvLookupTool(Tool):
def __init__(self, filename: path, key_field: str, name: str = "lookup", description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question"):
super().__init__(name, self.lookup, description)
self.data = {}
with open(filename, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
self.data[row[key_field]] = "\n".join([f"{i}:{row[i]}" for i in row])
def lookup(self, key: str) -> Optional[str]:
return self.data.get(key, "")
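# Illustrative usage sketch (added; the CSV path and keys mirror the commented-out EmployeeInfoTool
# example elsewhere in this repo and are assumptions — substitute a real lookup file).
if __name__ == "__main__":
    tool = CsvLookupTool("data/employeeinfo.csv", key_field="name", name="Employee")
    print(tool.lookup("Employee1"))  # prints the matching row as "column:value" lines, or "" if absent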
| [] |
2024-01-10 | Azure/business-process-automation | src~backend~huggingface~approaches~readdecomposeask.py | import openai
from approaches.approach import Approach
from azure.search.documents import SearchClient
from azure.search.documents.models import QueryType
from langchain.llms.openai import AzureOpenAI
from langchain.prompts import PromptTemplate, BasePromptTemplate
from langchain.callbacks.base import CallbackManager
from langchain.agents import Tool, AgentExecutor
from langchain.agents.react.base import ReActDocstoreAgent
from langchainadapters import HtmlCallbackHandler
from text import nonewlines
from typing import List
class ReadDecomposeAsk(Approach):
def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str):
self.search_client = search_client
self.openai_deployment = openai_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
def search(self, q: str, overrides: dict) -> str:
use_semantic_captions = True if overrides.get("semantic_captions") else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
if overrides.get("semantic_ranker"):
r = self.search_client.search(q,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top = top,
query_caption="extractive|highlight-false" if use_semantic_captions else None)
else:
r = self.search_client.search(q, filter=filter, top=top)
if use_semantic_captions:
self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc['@search.captions'] ])) for doc in r]
else:
self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) for doc in r]
return "\n".join(self.results)
def lookup(self, q: str) -> str:
r = self.search_client.search(q,
top = 1,
include_total_count=True,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
query_answer="extractive|count-1",
query_caption="extractive|highlight-false")
answers = r.get_answers()
if answers and len(answers) > 0:
return answers[0].text
if r.get_count() > 0:
return "\n".join(d['content'] for d in r)
return None
def run(self, q: str, overrides: dict) -> any:
# Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple
self.results = None
# Use to capture thought process during iterations
cb_handler = HtmlCallbackHandler()
cb_manager = CallbackManager(handlers=[cb_handler])
llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key)
tools = [
Tool(name="Search", func=lambda q: self.search(q, overrides)),
Tool(name="Lookup", func=self.lookup)
]
# Like results above, not great to keep this as a global, will interfere with interleaving
global prompt
prompt_prefix = overrides.get("prompt_template")
prompt = PromptTemplate.from_examples(
EXAMPLES, SUFFIX, ["input", "agent_scratchpad"], prompt_prefix + "\n\n" + PREFIX if prompt_prefix else PREFIX)
agent = ReAct.from_llm_and_tools(llm, tools)
chain = AgentExecutor.from_agent_and_tools(agent, tools, verbose=True, callback_manager=cb_manager)
result = chain.run(q)
        # Fix up references so they look like what the frontend expects ([] instead of ()), need a better citation format since parentheses are so common
result = result.replace("(", "[").replace(")", "]")
return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
class ReAct(ReActDocstoreAgent):
@classmethod
def create_prompt(cls, tools: List[Tool]) -> BasePromptTemplate:
return prompt
# Modified version of langchain's ReAct prompt that includes instructions and examples for how to cite information sources
EXAMPLES = [
"""Question: What is the elevation range for the area that the eastern sector of the
Colorado orogeny extends into?
Thought 1: I need to search Colorado orogeny, find the area that the eastern sector
of the Colorado orogeny extends into, then find the elevation range of the
area.
Action 1: Search[Colorado orogeny]
Observation 1: [info1.pdf] The Colorado orogeny was an episode of mountain building (an orogeny) in
Colorado and surrounding areas.
Thought 2: It does not mention the eastern sector. So I need to look up eastern
sector.
Action 2: Lookup[eastern sector]
Observation 2: [info2.txt] (Result 1 / 1) The eastern sector extends into the High Plains and is called
the Central Plains orogeny.
Thought 3: The eastern sector of Colorado orogeny extends into the High Plains. So I
need to search High Plains and find its elevation range.
Action 3: Search[High Plains]
Observation 3: [some_file.pdf] High Plains refers to one of two distinct land regions
Thought 4: I need to instead search High Plains (United States).
Action 4: Search[High Plains (United States)]
Observation 4: [filea.pdf] The High Plains are a subregion of the Great Plains. [another-ref.docx] From east to west, the
High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130
m).
Thought 5: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer
is 1,800 to 7,000 ft.
Action 5: Finish[1,800 to 7,000 ft (filea.pdf) ]""",
"""Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons"
character Milhouse, who Matt Groening named after who?
Thought 1: The question simplifies to "The Simpsons" character Milhouse is named after
who. I only need to search Milhouse and find who it is named after.
Action 1: Search[Milhouse]
Observation 1: [info7.pdf] Milhouse Mussolini Van Houten is a recurring character in the Fox animated
television series The Simpsons voiced by Pamela Hayden and created by Matt
Groening.
Thought 2: The paragraph does not tell who Milhouse is named after, maybe I can look up
"named after".
Action 2: Lookup[named after]
Observation 2: [historyref2.txt] (Result 1 / 1) Milhouse was named after U.S. president Richard Nixon, whose
middle name was Milhous.
Thought 3: Milhouse was named after U.S. president Richard Nixon, so the answer is
Richard Nixon.
Action 3: Finish[Richard Nixon (historyref2.txt) ]""",
"""Question: Which documentary is about Finnish rock groups, Adam Clayton Powell or The
Saimaa Gesture?
Thought 1: I need to search Adam Clayton Powell and The Saimaa Gesture, and find which
documentary is about Finnish rock groups.
Action 1: Search[Adam Clayton Powell]
Observation 1: [info9.pdf] Could not find [Adam Clayton Powell]. Similar: ['Adam Clayton Powell
III', 'Seventh Avenue (Manhattan)', 'Adam Clayton Powell Jr. State Office
Building', 'Isabel Washington Powell', 'Adam Powell', vAdam Clayton Powell
(film)', 'Giancarlo Esposito'].
Thought 2: To find the documentary, I can search Adam Clayton Powell (film).
Action 2: Search[Adam Clayton Powell (film)]
Observation 2: [data123.txt] Adam Clayton Powell is a 1989 American documentary film directed by
Richard Kilberg. The film is about the rise and fall of influential
African-American politician Adam Clayton Powell Jr.[3][4] It was later aired
as part of the PBS series The American Experience.
Thought 3: Adam Clayton Powell (film) is a documentary about an African-American
politician, not Finnish rock groups. So the documentary about Finnish rock
groups must instead be The Saimaa Gesture.
Action 3: Finish[The Saimaa Gesture (data123.txt) ]""",
"""Question: What profession does Nicholas Ray and Elia Kazan have in common?
Thought 1: I need to search Nicholas Ray and Elia Kazan, find their professions, then
find the profession they have in common.
Action 1: Search[Nicholas Ray]
Observation 1: [files-987.png] Nicholas Ray (born Raymond Nicholas Kienzle Jr., August 7, 1911 - June 16,
1979) was an American film director, screenwriter, and actor best known for
the 1955 film Rebel Without a Cause.
Thought 2: Professions of Nicholas Ray are director, screenwriter, and actor. I need
to search Elia Kazan next and find his professions.
Action 2: Search[Elia Kazan]
Observation 2: [files-654.txt] Elia Kazan was an American film and theatre director, producer, screenwriter
and actor.
Thought 3: Professions of Elia Kazan are director, producer, screenwriter, and actor.
So profession Nicholas Ray and Elia Kazan have in common is director,
screenwriter, and actor.
Action 3: Finish[director, screenwriter, actor (files-987.png)(files-654.txt) ]""",
"""Question: Which magazine was started first Arthur's Magazine or First for Women?
Thought 1: I need to search Arthur's Magazine and First for Women, and find which was
started first.
Action 1: Search[Arthur's Magazine]
Observation 1: [magazines-1850.pdf] Arthur's Magazine (1844-1846) was an American literary periodical published
in Philadelphia in the 19th century.
Thought 2: Arthur's Magazine was started in 1844. I need to search First for Women
next.
Action 2: Search[First for Women]
Observation 2: [magazines-1900.pdf] First for Women is a woman's magazine published by Bauer Media Group in the
USA.[1] The magazine was started in 1989.
Thought 3: First for Women was started in 1989. 1844 (Arthur's Magazine) < 1989 (First
for Women), so Arthur's Magazine was started first.
Action 3: Finish[Arthur's Magazine (magazines-1850.pdf)(magazines-1900.pdf) ]""",
"""Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?
Thought 1: I need to search Pavel Urysohn and Leonid Levin, find their types of work,
then find if they are the same.
Action 1: Search[Pavel Urysohn]
Observation 1: [info4444.pdf] Pavel Samuilovich Urysohn (February 3, 1898 - August 17, 1924) was a Soviet
mathematician who is best known for his contributions in dimension theory.
Thought 2: Pavel Urysohn is a mathematician. I need to search Leonid Levin next and
find its type of work.
Action 2: Search[Leonid Levin]
Observation 2: [datapoints_aaa.txt] Leonid Anatolievich Levin is a Soviet-American mathematician and computer
scientist.
Thought 3: Leonid Levin is a mathematician and computer scientist. So Pavel Urysohn
and Leonid Levin have the same type of work.
Action 3: Finish[yes (info4444.pdf)(datapoints_aaa.txt) ]""",
]
SUFFIX = """\nQuestion: {input}
{agent_scratchpad}"""
PREFIX = "Answer questions as shown in the following examples, by splitting the question into individual search or lookup actions to find facts until you can answer the question. " \
"Observations are prefixed by their source name in square brackets, source names MUST be included with the actions in the answers." \
"Only answer the questions using the information from observations, do not speculate."
| [
"\nQuestion: {input}\n{agent_scratchpad}",
"['input', 'agent_scratchpad']",
"\n\n",
"agent_scratchpad",
"input",
"prompt_template"
] |
2024-01-10 | Azure/business-process-automation | src~backend~huggingface~approaches~readretrieveread.py | # import openai
# from approaches.approach import Approach
# from azure.search.documents import SearchClient
# from azure.search.documents.models import QueryType
# from langchain.llms.openai import AzureOpenAI
# from langchain.callbacks.base import CallbackManager
# from langchain.chains import LLMChain
# from langchain.agents import Tool, ZeroShotAgent, AgentExecutor
# from langchain.llms.openai import AzureOpenAI
# from langchainadapters import HtmlCallbackHandler
# from text import nonewlines
# from lookuptool import CsvLookupTool
# # Attempt to answer questions by iteratively evaluating the question to see what information is missing, and once all information
# # is present then formulate an answer. Each iteration consists of two parts: first use GPT to see if we need more information,
# # second if more data is needed use the requested "tool" to retrieve it. The last call to GPT answers the actual question.
# # This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain.
# # [1] E. Karpas, et al. arXiv:2205.00445
# class ReadRetrieveReadApproach(Approach):
# template_prefix = \
# "You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \
# "Answer the question using only the data provided in the information sources below. " \
# "Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \
# "For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \
# "It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \
# "If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". " \
# "Never quote tool names as sources." \
# "If you cannot answer using the sources below, say that you don't know. " \
# "\n\nYou can access to the following tools:"
# template_suffix = """
# Begin!
# Question: {input}
# Thought: {agent_scratchpad}"""
# CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc."
# def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str):
# self.search_client = search_client
# self.openai_deployment = openai_deployment
# self.sourcepage_field = sourcepage_field
# self.content_field = content_field
# def retrieve(self, q: str, overrides: dict) -> any:
# use_semantic_captions = True if overrides.get("semantic_captions") else False
# top = overrides.get("top") or 3
# exclude_category = overrides.get("exclude_category") or None
# filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
# if overrides.get("semantic_ranker"):
# r = self.search_client.search(q,
# filter=filter,
# query_type=QueryType.SEMANTIC,
# query_language="en-us",
# query_speller="lexicon",
# semantic_configuration_name="default",
# top = top,
# query_caption="extractive|highlight-false" if use_semantic_captions else None)
# else:
# r = self.search_client.search(q, filter=filter, top=top)
# if use_semantic_captions:
# self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) for doc in r]
# else:
# self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) for doc in r]
# content = "\n".join(self.results)
# return content
# def run(self, q: str, overrides: dict) -> any:
# # Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple
# self.results = None
# # Use to capture thought process during iterations
# cb_handler = HtmlCallbackHandler()
# cb_manager = CallbackManager(handlers=[cb_handler])
# acs_tool = Tool(name = "CognitiveSearch", func = lambda q: self.retrieve(q, overrides), description = self.CognitiveSearchToolDescription)
# employee_tool = EmployeeInfoTool("Employee1")
# tools = [acs_tool, employee_tool]
# prompt = ZeroShotAgent.create_prompt(
# tools=tools,
# prefix=overrides.get("prompt_template_prefix") or self.template_prefix,
# suffix=overrides.get("prompt_template_suffix") or self.template_suffix,
# input_variables = ["input", "agent_scratchpad"])
# llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key)
# chain = LLMChain(llm = llm, prompt = prompt)
# agent_exec = AgentExecutor.from_agent_and_tools(
# agent = ZeroShotAgent(llm_chain = chain, tools = tools),
# tools = tools,
# verbose = True,
# callback_manager = cb_manager)
# result = agent_exec.run(q)
# # Remove references to tool names that might be confused with a citation
# result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "")
# return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
# class EmployeeInfoTool(CsvLookupTool):
# employee_name: str = ""
# def __init__(self, employee_name: str):
# super().__init__(filename = "data/employeeinfo.csv", key_field = "name", name = "Employee", description = "useful for answering questions about the employee, their benefits and other personal information")
# self.func = self.employee_info
# self.employee_name = employee_name
# def employee_info(self, unused: str) -> str:
# return self.lookup(self.employee_name)
| [] |
2024-01-10 | Azure/business-process-automation | src~backend~huggingface~approaches~custom.py | from approaches.approach import Approach
from approaches.retrievers.cogsearchfacetsretriever import CogSearchFacetsRetriever
from approaches.llms.custom import CustomLLM
from approaches.callback import MyCallbackHandler
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
import re
import os
from approaches.retrievers.cogsearchretriever import CogSearchRetriever
from approaches.retrievers.vectorretriever import VectorRetriever
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2022-12-01"
os.environ["OPENAI_API_BASE"] = "https://"+os.environ["AZURE_OPENAI_SERVICE"]+".openai.azure.com"
class CustomApproach(Approach):
def __init__(self, index: any):
self.index = index
def get_memory(self, history):
memory = ConversationBufferMemory()
for i in history:
if "bot" in i:
memory.chat_memory.add_ai_message(i["bot"])
if "user" in i:
                memory.chat_memory.add_user_message(i["user"])
return memory
def get_history_string(self, history):
output = ""
for i in history:
if "user" in i:
#memory.chat_memory.add_ai_message(i["user"])
output = output + "\nUser: " + i["user"] + "\n"
if "bot" in i:
output = output + "\nBot: " + i["bot"] + "\n"
#memory.chat_memory.add_ai_message(i["bot"])
return output
def get_thought_string(self, intermediate_steps):
thoughts = ""
data_points = []
try:
for i in intermediate_steps:
for j in i:
if isinstance(j, str):
                        data_points.append("<br> Observation: " + j + "</br>")
                        thoughts = thoughts + "<br> Observation: " + j + "</br>"
                    else:
for step in j:
if isinstance(step, str):
data_points.append("<br> " +step + "</br>")
thoughts = thoughts + "<br> " +step + "</br>"
except:
thoughts = ""
return thoughts, data_points
def run(self, history: list[dict], overrides: dict) -> any:
handler = MyCallbackHandler()
emulator = os.environ.get("LLM_EMULATOR") or "false"
if emulator == "true":
llm = CustomLLM(callback=handler,n=10)
else:
llm = OpenAI(temperature=0.0,deployment_id=os.environ.get("AZURE_OPENAI_GPT_DEPLOYMENT"),callbacks=[handler], batch_size=1)
# chain = CustomChain(
# prompt=PromptTemplate.from_template('tell us a joke about {topic}'),
# llm=llm
# )
#myout = chain.run({'topic': 'callbacks'}, callbacks=[StdOutCallbackHandler()])
# tools = [
# Tool(
# name = "Custom Chain",
# func=chain.run,
# description="useful for when you need to answer questions."
# )
# ]
# agent = FakeAgent("foo")
# tool_names = [tool.name for tool in tools]
# agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, allowed_tools=tool_names)
# agentOut = agent_executor.run("How many people live in canada as of 2023?")
# q = history[-1]["user"]
# memory = self.get_memory(history)
# #llm = OpenAI(temperature=0.0,deployment_id=os.environ.get("AZURE_OPENAI_GPT_DEPLOYMENT"), batch_size=3)
# prompt_prefix = """<|im_start|>system
# Assistant helps answer questions within text from documents. Be brief in your answers.
# Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below.
# Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brakets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
# Sources:
# <|im_end|>
# """
def lookup(input):
return input
q = history[-1]["user"]
# template="""
# Chat History:
# {history}
# Current Question:
# {q}
# Generate a detailed question that uses the information in the Chat History to remove any ambiguity. The questions should be in the context of the Ferrari 458. Add all additional entities to the search that will find the correct page within the user manual.
# """
# prompt = PromptTemplate(
# input_variables=["history","q"],
# template=template,
# )
# prompt_string = prompt.format(history=self.get_history_string(history), q=q)
# my_new_prompt = llm(prompt_string)
tools = []
# tools.append(Tool(
# name = "Lookup",
# func=lookup,
# description="useful for when you need to lookup terms",
# return_direct=True
# ))
retriever_handler = MyCallbackHandler()
if len(overrides.get("vector_search_pipeline")) > 2:
vector_retriever = VectorRetriever(overrides.get("vector_search_pipeline"), str(overrides.get("top")))
qa = RetrievalQAWithSourcesChain.from_chain_type(llm=llm, chain_type="refine", retriever=vector_retriever)
# tools.append(Tool(
# name = "Search",
# func=qa.run,
# description="useful for when you need to search for information in documents",
# return_direct=True
# ))
#agent = initialize_agent(tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True ,return_intermediate_steps=True)
# qa_vector = RetrievalQA.from_chain_type(llm=llm, chain_type="refine", retriever=vector_retriever)
# tools.append(Tool(
# name = "Vector Search",
# func=qa_vector.run,
# description="useful for when you need to answer questions."
# ))
else:
retriever = CogSearchRetriever(self.index,self.index.get("searchableFields"), overrides.get("top"), retriever_handler)
qa = RetrievalQAWithSourcesChain.from_chain_type(llm=llm, chain_type="refine", retriever=retriever)
# retriever_facets = CogSearchFacetsRetriever(self.index,self.index.get("searchableFields"), overrides.get("top"))
# qa_facets = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever_facets)
# tools.append(Tool(
# name = "Cognitive Search",
# func=qa.run,
# description="useful for when you need to answer questions",
# return_direct=True
# ))
# tools.append(Tool(
# name = "Cognitive Search Facets",
# func=qa_facets.run,
# description="useful to get sentiment data for the results of a query"
# ))
out = qa({"question" : q + " Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brakets to reference the source that includes the full path, e.g. [directory1/directory2/info1.txt]. Don't combine sources, list each source separately, e.g. [directory1/directory2/info1.txt][directory1/directory2/info2.pdf]."})
sources = re.findall(r'\[(.*?)\]',out["answer"])
parsedSources = []
answer = out["answer"]
for s in sources:
split_s = s.split('/')
doc_path = split_s[len(split_s)-1].replace('.txt','')
answer = answer.replace(split_s[len(split_s)-1], split_s[len(split_s)-1].replace('.txt',''))
parsedSources.append(doc_path)
# agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, return_intermediate_steps=True, max_iterations=3, input_variables=["sources", "chat_history", "input"])
# out = agent({"input" : q})
docs = retriever_handler.get_prompts()
prompts = handler.get_prompts()
thoughts = '.'.join(str(x) for x in prompts)
#thoughts, data_points = self.get_thought_string(out["intermediate_steps"])
return {"data_points": docs, "answer": out["answer"], "thoughts": thoughts} | [] |
2024-01-10 | zenkodr/openai-cookbook | apps~file-q-and-a~nextjs-with-flask-server~server~answer_question.py | from utils import get_embedding
from flask import jsonify
from config import *
from flask import current_app
import openai
from config import *
TOP_K = 10
def get_answer_from_files(question, session_id, pinecone_index):
logging.info(f"Getting answer for question: {question}")
search_query_embedding = get_embedding(question, EMBEDDINGS_MODEL)
try:
query_response = pinecone_index.query(
namespace=session_id,
top_k=TOP_K,
include_values=False,
include_metadata=True,
vector=search_query_embedding,
)
logging.info(
f"[get_answer_from_files] received query response from Pinecone: {query_response}")
files_string = ""
file_text_dict = current_app.config["file_text_dict"]
for i in range(len(query_response.matches)):
result = query_response.matches[i]
file_chunk_id = result.id
score = result.score
filename = result.metadata["filename"]
file_text = file_text_dict.get(file_chunk_id)
file_string = f"###\n\"{filename}\"\n{file_text}\n"
if score < COSINE_SIM_THRESHOLD and i > 0:
logging.info(
f"[get_answer_from_files] score {score} is below threshold {COSINE_SIM_THRESHOLD} and i is {i}, breaking")
break
files_string += file_string
# Note: this is not the proper way to use the ChatGPT conversational format, but it works for now
messages = [
{
"role": "system",
"content": f"Given a question, try to answer it using the content of the file extracts below, and if you cannot answer, or find " \
f"a relevant file, just output \"I couldn't find the answer to that question in your files.\".\n\n" \
f"If the answer is not contained in the files or if there are no file extracts, respond with \"I couldn't find the answer " \
f"to that question in your files.\" If the question is not actually a question, respond with \"That's not a valid question.\"\n\n" \
f"In the cases where you can find the answer, first give the answer. Then explain how you found the answer from the source or sources, " \
f"and use the exact filenames of the source files you mention. Do not make up the names of any other files other than those mentioned "\
f"in the files context. Give the answer in markdown format." \
f"Use the following format:\n\nQuestion: <question>\n\nFiles:\n<###\n\"filename 1\"\nfile text>\n<###\n\"filename 2\"\nfile text>...\n\n"\
f"Answer: <answer or \"I couldn't find the answer to that question in your files\" or \"That's not a valid question.\">\n\n" \
f"Question: {question}\n\n" \
f"Files:\n{files_string}\n" \
f"Answer:"
},
]
response = openai.ChatCompletion.create(
messages=messages,
model=GENERATIVE_MODEL,
max_tokens=1000,
temperature=0,
)
choices = response["choices"] # type: ignore
answer = choices[0].message.content.strip()
logging.info(f"[get_answer_from_files] answer: {answer}")
return jsonify({"answer": answer})
except Exception as e:
logging.info(f"[get_answer_from_files] error: {e}")
return str(e)
| [
"Given a question, try to answer it using the content of the file extracts below, and if you cannot answer, or find a relevant file, just output \"I couldn't find the answer to that question in your files.\".\n\nIf the answer is not contained in the files or if there are no file extracts, respond with \"I couldn't find the answer to that question in your files.\" If the question is not actually a question, respond with \"That's not a valid question.\"\n\nIn the cases where you can find the answer, first give the answer. Then explain how you found the answer from the source or sources, and use the exact filenames of the source files you mention. Do not make up the names of any other files other than those mentioned in the files context. Give the answer in markdown format.Use the following format:\n\nQuestion: <question>\n\nFiles:\n<###\n\"filename 1\"\nfile text>\n<###\n\"filename 2\"\nfile text>...\n\nAnswer: <answer or \"I couldn't find the answer to that question in your files\" or \"That's not a valid question.\">\n\nQuestion: PLACEHOLDER\n\nFiles:\n\nAnswer:"
] |
2024-01-10 | ssandra102/gen_ai | cohere_deploy.py | import cohere
import streamlit as st
co = cohere.Client("COHERE_API_KEY")
def generate_description(proj_idea, creativity):
"""
Generate resume project description given project idea
Arguments:
        proj_idea (str): the project idea
        creativity (float): the Generate model `temperature` value
    Returns:
        description (str): the resume description
"""
idea_prompt = f"""Generate a resume project description given project idea. Here are a few examples.
--
project: Calculator App
project description: Developed and implemented a feature-rich Calculator app for iOS and Android platforms, showcasing advanced mathematical functionalities and a user-friendly interface.
--
project: Snake Game
project description: Designed, developed, and deployed a dynamic and engaging Snake Game for mobile platforms, showcasing expertise in game mechanics, user experience, and performance optimization.
--
project: Car price prediction
project description: Led the development of a machine learning-based Car Price Prediction system, leveraging predictive modeling techniques to estimate the market value of vehicles.
--
project:{proj_idea}
project description: """
# Call the Cohere Generate endpoint
response = co.generate(
model="command",
prompt=idea_prompt,
max_tokens=50,
temperature=creativity,
k=0,
stop_sequences=["--"],
)
description = response.generations[0].text
print(idea_prompt)
print("description - pre", description)
description = description.replace("\n\n--", "").replace("\n--", "").strip()
print("description - post", description)
print("-------------")
return description
# The front end code starts here
st.title("🚀 Resume Description Generator")
st.write("""
Enter your project idea below, and generate a description that is resume-worthy!!
""")
st.markdown("""---""")
form = st.form(key="user_settings")
with form:
# User input - project name
proj_idea = st.text_input("Project", key="proj_idea")
# Create a two-column view
col1, col2 = st.columns(2)
with col1:
# User input - The number of ideas to generate
num_input = st.slider(
"Number of descriptions",
value=3,
key="num_input",
min_value=1,
max_value=10,
help="Choose to generate between 1 to 10 ideas",
)
with col2:
# User input - The 'temperature' value representing the level of creativity
creativity_input = st.slider(
"Creativity",
value=0.5,
key="creativity_input",
min_value=0.1,
max_value=0.9,
help="Lower values generate more “predictable” output, higher values generate more “creative” output",
)
# Submit button to start generating ideas
generate_button = form.form_submit_button("Generate Idea")
if generate_button:
if proj_idea == "":
st.error("Project field cannot be blank")
else:
my_bar = st.progress(0.05)
st.subheader("Project Descriptions:")
for i in range(num_input):
st.markdown("""---""")
idea = generate_description(proj_idea, creativity_input)
# name = generate_name(idea, creativity_input)
# st.markdown("##### " + name)
st.write(idea)
my_bar.progress((i + 1) / num_input)
| [
"Generate a resume project description given project idea. Here are a few examples.\n\n --\n project: Calculator App\n project description: Developed and implemented a feature-rich Calculator app for iOS and Android platforms, showcasing advanced mathematical functionalities and a user-friendly interface.\n\n --\n project: Snake Game\n project description: Designed, developed, and deployed a dynamic and engaging Snake Game for mobile platforms, showcasing expertise in game mechanics, user experience, and performance optimization.\n\n --\n project: Car price prediction\n project description: Led the development of a machine learning-based Car Price Prediction system, leveraging predictive modeling techniques to estimate the market value of vehicles.\n\n --\n project:PLACEHOLDER\n project description: "
] |
2024-01-10 | theevann/meditator | text_generation.py |
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from streamlit import secrets
import streamlit as st
system_prompt = """You are a meditation generator. You generate a meditation based on the user's input.
- The meditation should be about {time} minutes long.
- You can use the tag "emphasis" this way: <emphasis level="strong">This is an important announcement</emphasis> with level being one of: none, reduced, moderate, strong.
- The meditation will be read by a voice assistant, so pause for a few seconds between sentences using <break time="Xs"/>. You can only enter a time in seconds and lower than 5.
- Regularly make longer pauses using the [PAUSE=X] command to let the user relax for X seconds.
For long meditation, use the PAUSE command with to let the user relax for a few minutes, eg [PAUSE=60] for 1 minute.
RESPECT THE TIME: {time} minutes total.
"""
human_prompt = """Generate a meditation using the following prompt:
{user_input}
Make sure to add multiple breaks and pauses to respect the time: {time} minutes total.
Write the meditation in {language}."""
def generate_text_v1(input_text, time, max_tokens, model, language):
llm = ChatOpenAI(temperature=1, openai_api_key=secrets["OPENAI_API_KEY"], model=model, max_tokens=max_tokens)
input_messages = [
SystemMessage(content=system_prompt.format(time=time)),
HumanMessage(content=human_prompt.format(user_input=input_text, language=language, time=time))
]
meditation = ""
with st.status("Generating text...", expanded=True) as status:
placeholder = st.empty()
for response in llm.stream(input_messages):
meditation += response.content
# meditation = meditation.replace(". <",".<").replace(". ",'. <break time=\"1s\" /> ')
placeholder.markdown(meditation + "▌")
placeholder.markdown(meditation)
status.update(label="Text generation complete!", state="complete", expanded=False)
return meditation
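# Illustrative usage sketch (added; argument values are placeholders — in the app they come from the
# Streamlit sidebar/UI):
#   meditation = generate_text_v1("a short breathing exercise", time=5, max_tokens=1200,
#                                 model="gpt-4", language="English")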
def generate_text_v2():
pass | [
"You are a meditation generator. You generate a meditation based on the user's input.\n- The meditation should be about PLACEHOLDER minutes long.\n- You can use the tag \"emphasis\" this way: <emphasis level=\"strong\">This is an important announcement</emphasis> with level being one of: none, reduced, moderate, strong.\n- The meditation will be read by a voice assistant, so pause for a few seconds between sentences using <break time=\"Xs\"/>. You can only enter a time in seconds and lower than 5.\n- Regularly make longer pauses using the [PAUSE=X] command to let the user relax for X seconds.\nFor long meditation, use the PAUSE command with to let the user relax for a few minutes, eg [PAUSE=60] for 1 minute.\nRESPECT THE TIME: PLACEHOLDER minutes total.\n",
"Generate a meditation using the following prompt:\nPLACEHOLDER\n\nMake sure to add multiple breaks and pauses to respect the time: PLACEHOLDER minutes total.\nWrite the meditation in PLACEHOLDER.",
"You are a meditation generator. You generate a meditation based on the user's input.\n- The meditation should be about {time} minutes long.\n- You can use the tag \"emphasis\" this way: <emphasis level=\"strong\">This is an important announcement</emphasis> with level being one of: none, reduced, moderate, strong.\n- The meditation will be read by a voice assistant, so pause for a few seconds between sentences using <break time=\"Xs\"/>. You can only enter a time in seconds and lower than 5.\n- Regularly make longer pauses using the [PAUSE=X] command to let the user relax for X seconds.\nFor long meditation, use the PAUSE command with to let the user relax for a few minutes, eg [PAUSE=60] for 1 minute.\nRESPECT THE TIME: {time} minutes total.\n",
"Generate a meditation using the following prompt:\n{user_input}\n\nMake sure to add multiple breaks and pauses to respect the time: {time} minutes total.\nWrite the meditation in {language}."
] |
2024-01-10 | safevideo/autollm | autollm~utils~document_reading.py | import shutil
from pathlib import Path
from typing import List, Optional, Sequence
from llama_index.readers.file.base import SimpleDirectoryReader
from llama_index.schema import Document
from autollm.utils.env_utils import on_rm_error
from autollm.utils.git_utils import clone_or_pull_repository
from autollm.utils.logging import logger
from autollm.utils.markdown_reader import MarkdownReader
from autollm.utils.pdf_reader import LangchainPDFReader
from autollm.utils.webpage_reader import WebPageReader
from autollm.utils.website_reader import WebSiteReader
def read_files_as_documents(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
show_progress: bool = True,
**kwargs) -> Sequence[Document]:
"""
Process markdown files to extract documents using SimpleDirectoryReader.
Parameters:
input_dir (str): Path to the directory containing the markdown files.
input_files (List): List of file paths.
exclude_hidden (bool): Whether to exclude hidden files.
filename_as_id (bool): Whether to use the filename as the document id.
recursive (bool): Whether to recursively search for files in the input directory.
required_exts (Optional[List[str]]): List of file extensions to be read. Defaults to all supported extensions.
Returns:
documents (Sequence[Document]): A sequence of Document objects.
"""
# Configure file_extractor to use MarkdownReader for md files
file_extractor = {
".md": MarkdownReader(read_as_single_doc=True),
".pdf": LangchainPDFReader(extract_images=False)
}
# Initialize SimpleDirectoryReader
reader = SimpleDirectoryReader(
input_dir=input_dir,
exclude_hidden=exclude_hidden,
file_extractor=file_extractor,
input_files=input_files,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
**kwargs)
logger.info(f"Reading files from {input_dir}..") if input_dir else logger.info(
f"Reading files {input_files}..")
# Read and process the documents
documents = reader.load_data(show_progress=show_progress)
logger.info(f"Found {len(documents)} 'document(s)'.")
return documents
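# Illustrative usage sketch (added; the directory and extensions are placeholder assumptions):
#   documents = read_files_as_documents(input_dir="docs", required_exts=[".md", ".pdf"])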
def read_github_repo_as_documents(
git_repo_url: str,
relative_folder_path: Optional[str] = None,
required_exts: Optional[List[str]] = None) -> Sequence[Document]:
"""
A document provider that fetches documents from a specific folder within a GitHub repository.
Parameters:
git_repo_url (str): The URL of the GitHub repository.
relative_folder_path (str, optional): The relative path from the repo root to the folder containing documents.
required_exts (Optional[List[str]]): List of required extensions.
Returns:
Sequence[Document]: A sequence of Document objects.
"""
# Ensure the temp_dir directory exists
temp_dir = Path("autollm/temp/")
temp_dir.mkdir(parents=True, exist_ok=True)
logger.info(f"Cloning github repo {git_repo_url} into temporary directory {temp_dir}..")
try:
# Clone or pull the GitHub repository to get the latest documents
clone_or_pull_repository(git_repo_url, temp_dir)
# Specify the path to the documents
docs_path = temp_dir if relative_folder_path is None else (temp_dir / Path(relative_folder_path))
# Read and process the documents
documents = read_files_as_documents(input_dir=str(docs_path), required_exts=required_exts)
# Logging (assuming logger is configured)
logger.info(f"Operations complete, deleting temporary directory {temp_dir}..")
finally:
# Delete the temporary directory
shutil.rmtree(temp_dir, onerror=on_rm_error)
return documents
def read_website_as_documents(
parent_url: Optional[str] = None,
sitemap_url: Optional[str] = None,
include_filter_str: Optional[str] = None,
exclude_filter_str: Optional[str] = None) -> List[Document]:
"""
Read documents from a website or a sitemap.
Parameters:
parent_url (str, optional): The starting URL from which to scrape documents.
sitemap_url (str, optional): The URL of the sitemap to process.
include_filter_str (str, optional): Filter string to include certain URLs.
exclude_filter_str (str, optional): Filter string to exclude certain URLs.
Returns:
List[Document]: A list of Document objects containing content and metadata.
Raises:
ValueError: If neither parent_url nor sitemap_url is provided, or if both are provided.
"""
if (parent_url is None and sitemap_url is None) or (parent_url is not None and sitemap_url is not None):
raise ValueError("Please provide either parent_url or sitemap_url, not both or none.")
reader = WebSiteReader()
if parent_url:
documents = reader.load_data(
parent_url=parent_url,
include_filter_str=include_filter_str,
exclude_filter_str=exclude_filter_str)
else:
documents = reader.load_data(
sitemap_url=sitemap_url,
include_filter_str=include_filter_str,
exclude_filter_str=exclude_filter_str)
return documents
def read_webpage_as_documents(url: str) -> List[Document]:
"""
Read documents from a single webpage URL using the WebPageReader.
Parameters:
url (str): The URL of the web page to read.
Returns:
List[Document]: A list of Document objects containing content and metadata from the web page.
"""
reader = WebPageReader()
documents = reader.load_data(url)
return documents
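# Usage sketch (hypothetical URLs, not part of the original module): crawl a documentation
# site from its parent URL and also read a single page.
def _example_read_web_docs() -> List[Document]:
    site_docs = read_website_as_documents(parent_url="https://example.com/docs")
    page_docs = read_webpage_as_documents(url="https://example.com/docs/quickstart")
    return [*site_docs, *page_docs]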
| [] |
2024-01-10 | safevideo/autollm | autollm~utils~pdf_reader.py | from typing import List
from llama_index.readers.base import BaseReader
from llama_index.schema import Document
from autollm.utils.logging import logger
class LangchainPDFReader(BaseReader):
"""Custom PDF reader that uses langchain's PDFMinerLoader."""
def __init__(self, extract_images: bool = False) -> None:
"""Initialize the reader."""
self.extract_images = extract_images
def load_data(self, file_path: str, extra_info: dict = None) -> List[Document]:
"""Load data from a PDF file using langchain's PDFMinerLoader."""
from langchain.document_loaders import PDFMinerLoader
# Convert the PosixPath object to a string before passing it to PDFMinerLoader
loader = PDFMinerLoader(str(file_path), extract_images=self.extract_images)
langchain_documents = loader.load() # This returns a list of langchain Document objects
# Convert langchain documents into llama-index documents
documents = []
for langchain_document in langchain_documents:
# Create a llama-index document for each langchain document
doc = Document.from_langchain_format(langchain_document)
# If there's extra info, we can add it to the Document's metadata
if extra_info is not None:
doc.metadata.update(extra_info)
documents.append(doc)
return documents
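# Usage sketch (hypothetical file path, not part of the original module): read a local PDF
# into llama-index documents with this reader.
def _example_read_pdf() -> List[Document]:
    reader = LangchainPDFReader(extract_images=False)
    return reader.load_data("docs/sample.pdf", extra_info={"source": "sample.pdf"})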
| [] |
2024-01-10 | wcngai/python | openai_summary2.py | from langchain.document_loaders import UnstructuredFileLoader
from langchain.chains.summarize import load_summarize_chain
loader = UnstructuredFileLoader('EuronetDA.txt')
document = loader.load()
from langchain import OpenAI
import os
# Read the API key from the environment instead of hard-coding a secret in the source.
llm = OpenAI(openai_api_key = os.environ["OPENAI_API_KEY"])
#model = load_summarize_chain(llm = llm, chain_type = "stuff")
#model.run(document)
from langchain.text_splitter import RecursiveCharacterTextSplitter
char_text_splitter = RecursiveCharacterTextSplitter(chunk_size = 500, chunk_overlap = 0)
docs = char_text_splitter.split_documents(document)
#model = load_summarize_chain(llm = llm, chain_type = "map_reduce")
#model.run(docs)
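# Chain-type note (descriptive comment, not from the original file): "stuff" sends the whole
# text in a single prompt, "map_reduce" summarizes each chunk independently and then merges
# the partial summaries, while "refine" (used below) summarizes the first chunk and then
# iteratively refines that summary with each following chunk.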
model = load_summarize_chain(llm = llm, chain_type = "refine")
output = model.run(docs)
print(output) | [] |
2024-01-10 | nguyenletan/PDF_Chat-GUI | ChatPDF.py | import openai
import streamlit as st
from dotenv import load_dotenv
import pickle
from PyPDF2 import PdfReader
from streamlit_extras.add_vertical_space import add_vertical_space
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.callbacks import get_openai_callback
import os
# Sidebar contents
with st.sidebar:
st.title('🤗💬 BEE LLM Chat App')
st.markdown('''
## About
This app is an LLM-powered chatbot built using:
- [Streamlit](https://streamlit.io/)
- [LangChain](https://python.langchain.com/)
- [OpenAI](https://platform.openai.com/docs/models) LLM model
''')
add_vertical_space(5)
st.write('Made with ❤️ by BEE Team')
load_dotenv()
def main():
st.header("Chat with PDF 💬")
api_key = st.secrets["openai_api_key"]
# upload a PDF file
pdf = st.file_uploader("Upload your PDF", type='pdf')
# st.write(pdf)
if pdf is not None:
pdf_reader = PdfReader(pdf)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text=text)
# # embeddings
store_name = pdf.name[:-4]
st.write(f'{store_name}')
# st.write(chunks)
if os.path.exists(f"{store_name}.pkl"):
with open(f"{store_name}.pkl", "rb") as f:
VectorStore = pickle.load(f)
            # st.write('Embeddings Loaded from the Disk')
else:
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
VectorStore = FAISS.from_texts(chunks, embedding=embeddings)
with open(f"{store_name}.pkl", "wb") as f:
pickle.dump(VectorStore, f)
# embeddings = OpenAIEmbeddings()
# VectorStore = FAISS.from_texts(chunks, embedding=embeddings)
# Accept user questions/query
query = st.text_input("Ask questions about your PDF file:")
# st.write(query)
if query:
openai.api_key = api_key
docs = VectorStore.similarity_search(query=query, k=3)
llm = OpenAI(openai_api_key=api_key)
chain = load_qa_chain(llm=llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs, question=query)
print(cb)
st.write(response)
if __name__ == '__main__':
main()
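# Persistence sketch (not part of the original app): FAISS also ships its own on-disk format,
# which avoids pickling the vector store by hand; the folder name below is hypothetical.
#   VectorStore.save_local(f"{store_name}_faiss")
#   VectorStore = FAISS.load_local(f"{store_name}_faiss", OpenAIEmbeddings(openai_api_key=api_key))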
| [] |
2024-01-10 | KelvinF97/QChatGPT | pkg~qqbot~message.py | # 普通消息处理模块
import logging
import time
import openai
import pkg.utils.context
import pkg.openai.session
import pkg.plugin.host as plugin_host
import pkg.plugin.models as plugin_models
def handle_exception(notify_admin: str = "", set_reply: str = "") -> list:
"""处理异常,当notify_admin不为空时,会通知管理员,返回通知用户的消息"""
import config
pkg.utils.context.get_qqbot_manager().notify_admin(notify_admin)
if hasattr(config, 'hide_exce_info_to_user') and config.hide_exce_info_to_user:
if hasattr(config, 'alter_tip_message'):
return [config.alter_tip_message] if config.alter_tip_message else []
else:
return ["[bot]出错了,请重试或联系管理员"]
else:
return [set_reply]
def process_normal_message(text_message: str, mgr, config, launcher_type: str,
launcher_id: int, sender_id: int) -> list:
session_name = f"{launcher_type}_{launcher_id}"
logging.info("[{}]发送消息:{}".format(session_name, text_message[:min(20, len(text_message))] + (
"..." if len(text_message) > 20 else "")))
session = pkg.openai.session.get_session(session_name)
unexpected_exception_times = 0
max_unexpected_exception_times = 3
reply = []
while True:
if unexpected_exception_times >= max_unexpected_exception_times:
reply = handle_exception(notify_admin=f"{session_name},多次尝试失败。", set_reply=f"[bot]多次尝试失败,请重试或联系管理员")
break
try:
prefix = "[GPT]" if hasattr(config, "show_prefix") and config.show_prefix else ""
text = session.append(text_message)
            # Fire the plugin event
args = {
"launcher_type": launcher_type,
"launcher_id": launcher_id,
"sender_id": sender_id,
"session": session,
"prefix": prefix,
"response_text": text
}
event = pkg.plugin.host.emit(plugin_models.NormalMessageResponded, **args)
if event.get_return_value("prefix") is not None:
prefix = event.get_return_value("prefix")
if event.get_return_value("reply") is not None:
reply = event.get_return_value("reply")
if not event.is_prevented_default():
reply = [prefix + text]
except openai.error.APIConnectionError as e:
err_msg = str(e)
if err_msg.__contains__('Error communicating with OpenAI'):
reply = handle_exception("{}会话调用API失败:{}\n请尝试关闭网络代理来解决此问题。".format(session_name, e),
"[bot]err:调用API失败,请重试或联系管理员,或等待修复")
else:
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e), "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
except openai.error.RateLimitError as e:
logging.debug(type(e))
logging.debug(e.error['message'])
if 'message' in e.error and e.error['message'].__contains__('You exceeded your current quota'):
                # Try switching to another api-key
current_key_name = pkg.utils.context.get_openai_manager().key_mgr.get_key_name(
pkg.utils.context.get_openai_manager().key_mgr.using_key
)
pkg.utils.context.get_openai_manager().key_mgr.set_current_exceeded()
                # Fire the plugin event
args = {
'key_name': current_key_name,
'usage': pkg.utils.context.get_openai_manager().audit_mgr
.get_usage(pkg.utils.context.get_openai_manager().key_mgr.get_using_key_md5()),
'exceeded_keys': pkg.utils.context.get_openai_manager().key_mgr.exceeded,
}
event = plugin_host.emit(plugin_models.KeyExceeded, **args)
if not event.is_prevented_default():
switched, name = pkg.utils.context.get_openai_manager().key_mgr.auto_switch()
if not switched:
reply = handle_exception(
"api-key调用额度超限({}),无可用api_key,请向OpenAI账户充值或在config.py中更换api_key;如果你认为这是误判,请尝试重启程序。".format(
current_key_name), "[bot]err:API调用额度超额,请联系管理员,或等待修复")
else:
openai.api_key = pkg.utils.context.get_openai_manager().key_mgr.get_using_key()
mgr.notify_admin("api-key调用额度超限({}),接口报错,已切换到{}".format(current_key_name, name))
reply = ["[bot]err:API调用额度超额,已自动切换,请重新发送消息"]
continue
elif 'message' in e.error and e.error['message'].__contains__('You can retry your request'):
                # Retry
unexpected_exception_times += 1
continue
elif 'message' in e.error and e.error['message']\
.__contains__('The server had an error while processing your request'):
                # Retry
unexpected_exception_times += 1
continue
else:
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e),
"[bot]err:RateLimitError,请重试或联系作者,或等待修复")
except openai.error.InvalidRequestError as e:
reply = handle_exception("{}API调用参数错误:{}\n\n这可能是由于config.py中的prompt_submit_length参数或"
"completion_api_params中的max_tokens参数数值过大导致的,请尝试将其降低".format(
session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
except openai.error.ServiceUnavailableError as e:
reply = handle_exception("{}API调用服务不可用:{}".format(session_name, e), "[bot]err:API调用服务不可用,请重试或联系管理员,或等待修复")
except Exception as e:
logging.exception(e)
reply = handle_exception("{}会话处理异常:{}".format(session_name, e), "[bot]err:{}".format(e))
break
return reply
| [] |
2024-01-10 | easonlai/chatbot_with_pdf_streamlit | app_chroma.py | import openai
import streamlit as st
from streamlit_chat import message
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
# Configure the baseline configuration of the OpenAI library for Azure OpenAI Service.
OPENAI_API_KEY = "PLEASE_ENTER_YOUR_OWNED_AOAI_SERVICE_KEY"
OPENAI_API_BASE = "https://PLESAE_ENTER_YOUR_OWNED_AOAI_RESOURCE_NAME.openai.azure.com/"
OPENAI_DEPLOYMENT_NAME = "PLEASE_ENTER_YOUR_OWNED_AOAI_GPT35TURBO_MODEL_NAME"
OPENAI_MODEL_NAME = "gpt-35-turbo"
OPENAI_EMBEDDING_DEPLOYMENT_NAME = "PLEASE_ENTER_YOUR_OWNED_AOAI_EMBEDDING_MODEL_NAME"
OPENAI_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"
OPENAI_API_VERSION = "2023-05-15"
OPENAI_API_TYPE = "azure"
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
openai.api_version = OPENAI_API_VERSION
openai.api_type = OPENAI_API_TYPE
# Set web page title and icon.
st.set_page_config(
page_title="Chatbot with PDF",
page_icon=":robot:"
)
# Set web page title and markdown.
st.title('Chatbot with PDF')
st.markdown(
"""
This is the demonstration of a chatbot with PDF with Azure OpenAI, Chroma, and Streamlit.
I read the book Machine Learning Yearning by Andrew Ng. Please ask me any questions about this book.
"""
)
# Define a function to get user input.
def get_input_text():
input_text = st.text_input("You: ","Hello!", key="input")
return input_text
# Define a function to inquire about the data in Pinecone.
def query(payload, docs, chain):
response = chain.run(input_documents=docs, question=payload)
thisdict = {
"generated_text": response
}
return thisdict
# Initialize session state to store user input.
if 'past' not in st.session_state:
st.session_state['past'] = []
# Initialize session state to store the chatbot-generated output.
if 'generated' not in st.session_state:
st.session_state['generated'] = []
# Initialize Pinecone index and embeddings.
embed = OpenAIEmbeddings(deployment=OPENAI_EMBEDDING_DEPLOYMENT_NAME,
openai_api_key=OPENAI_API_KEY,
model=OPENAI_EMBEDDING_MODEL_NAME,
openai_api_type=OPENAI_API_TYPE,
chunk_size=1)
db = Chroma(persist_directory="./chroma_db/", embedding_function=embed)
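# Indexing sketch (assumption: the ./chroma_db/ folder was built offline from the book's text
# chunks with the same embedding model; "book_chunks" is a hypothetical list of strings).
def build_chroma_index_example(book_chunks):
    return Chroma.from_texts(book_chunks, embedding=embed, persist_directory="./chroma_db/")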
user_input = get_input_text()
# Initialize the similarity search.
docs = db.similarity_search(user_input)
# Initialize the Azure OpenAI ChatGPT model.
llm = AzureChatOpenAI(deployment_name=OPENAI_DEPLOYMENT_NAME,
openai_api_key=OPENAI_API_KEY,
openai_api_base=OPENAI_API_BASE,
openai_api_version=OPENAI_API_VERSION,
openai_api_type = OPENAI_API_TYPE,
temperature=0)
# Initialize the question answering chain.
chain = load_qa_chain(llm, chain_type="stuff")
# Generate the chatbot response.
if user_input:
output = query({
"inputs": {
"past_user_inputs": st.session_state.past,
"generated_responses": st.session_state.generated,
"text": user_input,
},"parameters": {"repetition_penalty": 1.33}
},
docs=docs,
chain=chain)
st.session_state.past.append(user_input)
st.session_state.generated.append(output["generated_text"])
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])-1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user') | [] |
2024-01-10 | easonlai/chatbot_with_pdf_streamlit | app_acs.py | import openai
import streamlit as st
from streamlit_chat import message
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.azuresearch import AzureSearch
from azure.core.credentials import AzureKeyCredential
# Configure the baseline configuration of the OpenAI library for Azure OpenAI Service.
OPENAI_API_KEY = "PLEASE_ENTER_YOUR_OWNED_AOAI_SERVICE_KEY"
OPENAI_API_BASE = "https://PLESAE_ENTER_YOUR_OWNED_AOAI_RESOURCE_NAME.openai.azure.com/"
OPENAI_DEPLOYMENT_NAME = "PLEASE_ENTER_YOUR_OWNED_AOAI_GPT35TURBO_MODEL_NAME"
OPENAI_MODEL_NAME = "gpt-35-turbo"
OPENAI_EMBEDDING_DEPLOYMENT_NAME = "PLEASE_ENTER_YOUR_OWNED_AOAI_EMBEDDING_MODEL_NAME"
OPENAI_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"
OPENAI_API_VERSION = "2023-05-15"
OPENAI_API_TYPE = "azure"
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
openai.api_version = OPENAI_API_VERSION
openai.api_type = OPENAI_API_TYPE
AZURE_COGNITIVE_SEARCH_ENDPOINT_NAME = "https://PLESAE_ENTER_YOUR_OWNED_ACS_RESOURCE_NAME.search.windows.net"
AZURE_COGNITIVE_SEARCH_INDEX_NAME = "PLEASE_ENTER_YOUR_OWNED_ACS_INDEX_NAME"
AZURE_COGNITIVE_SEARCH_KEY = "PLEASE_ENTER_YOUR_OWNED_ACS_SERVICE_KEY"
acs_credential = AzureKeyCredential(AZURE_COGNITIVE_SEARCH_KEY)
# Set web page title and icon.
st.set_page_config(
page_title="Chatbot with PDF",
page_icon=":robot:"
)
# Set web page title and markdown.
st.title('Chatbot with PDF')
st.markdown(
"""
This is the demonstration of a chatbot with PDF with Azure OpenAI, Azure Cognitive Search, and Streamlit.
I read the book Machine Learning Yearning by Andrew Ng. Please ask me any questions about this book.
"""
)
# Define a function to get user input.
def get_input_text():
input_text = st.text_input("You: ","Hello!", key="input")
return input_text
# Define a function to inquire about the data in Pinecone.
def query(payload, docs, chain):
response = chain.run(input_documents=docs, question=payload)
thisdict = {
"generated_text": response
}
return thisdict
# Initialize session state to store user input.
if 'past' not in st.session_state:
st.session_state['past'] = []
# Initialize session state to store the chatbot-generated output.
if 'generated' not in st.session_state:
st.session_state['generated'] = []
# Initialize Azure Cognitive Search index and embeddings.
embed = OpenAIEmbeddings(deployment=OPENAI_EMBEDDING_DEPLOYMENT_NAME,
openai_api_key=OPENAI_API_KEY,
model=OPENAI_EMBEDDING_MODEL_NAME,
openai_api_type=OPENAI_API_TYPE,
chunk_size=1)
vector_store: AzureSearch = AzureSearch(
azure_search_endpoint=AZURE_COGNITIVE_SEARCH_ENDPOINT_NAME,
azure_search_key=AZURE_COGNITIVE_SEARCH_KEY,
index_name=AZURE_COGNITIVE_SEARCH_INDEX_NAME,
embedding_function=embed.embed_query,
)
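# Ingestion sketch (assumption: the index is populated elsewhere; "pdf_chunks" is a
# hypothetical list of text chunks extracted from the book).
def ingest_chunks_example(pdf_chunks):
    vector_store.add_texts(pdf_chunks)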
user_input = get_input_text()
# Initialize the similarity search.
docs = vector_store.similarity_search(user_input)
# Initialize the Azure OpenAI ChatGPT model.
llm = AzureChatOpenAI(deployment_name=OPENAI_DEPLOYMENT_NAME,
openai_api_key=OPENAI_API_KEY,
openai_api_base=OPENAI_API_BASE,
openai_api_version=OPENAI_API_VERSION,
openai_api_type = OPENAI_API_TYPE,
temperature=0)
# Initialize the question answering chain.
chain = load_qa_chain(llm, chain_type="stuff")
# Generate the chatbot response.
if user_input:
output = query({
"inputs": {
"past_user_inputs": st.session_state.past,
"generated_responses": st.session_state.generated,
"text": user_input,
},"parameters": {"repetition_penalty": 1.33} # The repetition penalty is meant to avoid sentences that repeat themselves without anything really interesting.
},
docs=docs,
chain=chain)
st.session_state.past.append(user_input)
st.session_state.generated.append(output["generated_text"])
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])-1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user') | [] |
2024-01-10 | easonlai/chatbot_with_pdf_streamlit | app_pinecone.py | import openai
import pinecone
import streamlit as st
from streamlit_chat import message
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
# Configure the baseline configuration of the OpenAI library for Azure OpenAI Service.
OPENAI_API_KEY = "PLEASE_ENTER_YOUR_OWNED_AOAI_SERVICE_KEY"
OPENAI_API_BASE = "https://PLESAE_ENTER_YOUR_OWNED_AOAI_RESOURCE_NAME.openai.azure.com/"
OPENAI_DEPLOYMENT_NAME = "PLEASE_ENTER_YOUR_OWNED_AOAI_GPT35TURBO_MODEL_NAME"
OPENAI_MODEL_NAME = "gpt-35-turbo"
OPENAI_EMBEDDING_DEPLOYMENT_NAME = "PLEASE_ENTER_YOUR_OWNED_AOAI_EMBEDDING_MODEL_NAME"
OPENAI_EMBEDDING_MODEL_NAME = "text-embedding-ada-002"
OPENAI_API_VERSION = "2023-05-15"
OPENAI_API_TYPE = "azure"
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
openai.api_version = OPENAI_API_VERSION
openai.api_type = OPENAI_API_TYPE
PINECONE_API_KEY = "PLEASE_ENTER_YOUR_OWNED_PINECONE_API_KEY"
PINECONE_ENV = "PLEASE_ENTER_YOUR_OWNED_PINECONE_ENV_NAME"
PINECONE_INDEX_NAME = "PLEASE_ENTER_YOUR_OWNED_PINECONE_INDEX_NAME"
# Set web page title and icon.
st.set_page_config(
page_title="Chatbot with PDF",
page_icon=":robot:"
)
# Set web page title and markdown.
st.title('Chatbot with PDF')
st.markdown(
"""
This is the demonstration of a chatbot with PDF with Azure OpenAI, Pinecone, and Streamlit.
I read the book Machine Learning Yearning by Andrew Ng. Please ask me any questions about this book.
"""
)
# Define a function to get user input.
def get_input_text():
input_text = st.text_input("You: ","Hello!", key="input")
return input_text
# Define a function to inquire about the data in Pinecone.
def query(payload, docs, chain):
response = chain.run(input_documents=docs, question=payload)
thisdict = {
"generated_text": response
}
return thisdict
# Initialize Pinecone.
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
# Initialize session state to store user input.
if 'past' not in st.session_state:
st.session_state['past'] = []
# Initialize session state to store the chatbot-generated output.
if 'generated' not in st.session_state:
st.session_state['generated'] = []
# Initialize Pinecone index and embeddings.
embed = OpenAIEmbeddings(deployment=OPENAI_EMBEDDING_DEPLOYMENT_NAME,
openai_api_key=OPENAI_API_KEY,
model=OPENAI_EMBEDDING_MODEL_NAME,
openai_api_type=OPENAI_API_TYPE,
chunk_size=1)
docsearch = Pinecone.from_existing_index(PINECONE_INDEX_NAME, embed)
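# Indexing sketch (assumption: the Pinecone index was populated offline; "pdf_chunks" is a
# hypothetical list of text chunks extracted from the book).
def build_pinecone_index_example(pdf_chunks):
    return Pinecone.from_texts(pdf_chunks, embed, index_name=PINECONE_INDEX_NAME)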
user_input = get_input_text()
# Initialize the similarity search.
docs = docsearch.similarity_search(user_input)
# Initialize the Azure OpenAI ChatGPT model.
llm = AzureChatOpenAI(deployment_name=OPENAI_DEPLOYMENT_NAME,
openai_api_key=OPENAI_API_KEY,
openai_api_base=OPENAI_API_BASE,
openai_api_version=OPENAI_API_VERSION,
openai_api_type = OPENAI_API_TYPE,
temperature=0)
# Initialize the question answering chain.
chain = load_qa_chain(llm, chain_type="stuff")
# Generate the chatbot response.
if user_input:
output = query({
"inputs": {
"past_user_inputs": st.session_state.past,
"generated_responses": st.session_state.generated,
"text": user_input,
},"parameters": {"repetition_penalty": 1.33}
},
docs=docs,
chain=chain)
st.session_state.past.append(user_input)
st.session_state.generated.append(output["generated_text"])
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])-1, -1, -1):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user') | [] |
2024-01-10 | lawofcycles/open-rag | app~calm_api.py | from fastapi import FastAPI, Request
import asyncio
import torch
import time
from transformers import pipeline
from transformers import AutoTokenizer
from langchain.llms import HuggingFacePipeline
from langchain import PromptTemplate
import copy
from langchain.chains.question_answering import load_qa_chain
from transformers import AutoTokenizer, AutoModelForCausalLM
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
assert transformers.__version__ >= "4.34.1"
import logging
# Logger setup
logger = logging.getLogger("uvicorn.error")
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
app = FastAPI(
title="Inference API for ELYZA",
description="A simple API that use elyza/ELYZA-japanese-Llama-2-7b-fast-instruct as a chatbot",
version="1.0",
)
# embed model
EMBED_MODEL_NAME = "intfloat/multilingual-e5-large"
embeddings = HuggingFaceEmbeddings(model_name=EMBED_MODEL_NAME)
MODEL_NAME = "cyberagent/calm2-7b-chat"
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
# Model
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
device_map="auto",
torch_dtype="auto"
)
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=300,
do_sample=True,
temperature=0.1,
streamer=streamer,
repetition_penalty=10.0,
)
llm = HuggingFacePipeline(pipeline=pipe)
USER = "USER: "
SYS = "ASSISTANT: "
text = "私の質問に答えるための参考情報として、ユーザの質問に関連するcontextを示します。contextだけを元に質問に答えてください。contextを元に回答できない質問には「わかりません」と答えてください \ncontext:{context}\n質問:{question}\n"
template = "{USER}{text}{SYS}".format(
USER=USER,
text=text,
SYS=SYS,
)
rag_prompt_custom = PromptTemplate(
template=template, input_variables=["context", "question"]
)
# Prepare the QA chain
chain = load_qa_chain(llm, chain_type="stuff", prompt=rag_prompt_custom)
@app.get('/model')
async def model(question : str):
start = time.time()
db = FAISS.load_local("faiss_index/mufgfaq2", embeddings)
docs = db.similarity_search(question, k=2)
elapsed_time = time.time() - start
logger.info(f"検索処理時間[s]: {elapsed_time:.2f}")
for i in range(len(docs)):
logger.info(docs[i])
start = time.time()
    # Generate the answer with the LLM, using the vector-search hits and the question as input
inputs = {"input_documents": docs, "question": question}
res = chain.run(inputs)
result = copy.deepcopy(res)
elapsed_time = time.time() - start
logger.info(f"テキスト生成処理時間[s]: {elapsed_time:.2f}")
logger.info(f"出力内容:\n{result}")
return result.replace('\n\n', '').replace('\n', '') | [
"question",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"context"
] |
2024-01-10 | lawofcycles/open-rag | app~multilingual-e5-large-api2.py | import time
import torch
from typing import Optional, List, Any
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import pipeline
from langchain.document_loaders import UnstructuredFileLoader
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import CTranslate2
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.schema import Generation, LLMResult
# data source
PERSIST_DIR = "./resource/211122_amlcft_guidelines.pdf"
loader = UnstructuredFileLoader(PERSIST_DIR)
documents = loader.load()
print(f"number of docs: {len(documents)}")
print("--------------------------------------------------")
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=600,
chunk_overlap=20,
)
# text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
# tokenizer,
# chunk_size=300,
# chunk_overlap=20,
# # separators=["\n= ", "\n== ", "\n=== ", "\n\n",
# # "\n", "。", "「", "」", "!",
# # "?", "、", "『", "』", "(", ")"," ", ""],
# )
splitted_texts = text_splitter.split_documents(documents)
print(f"チャンクの総数:{len(splitted_texts)}")
print(f"チャンクされた文章の確認(20番目にチャンクされたデータ):\n{splitted_texts[20]}")
# embed model
EMBED_MODEL_NAME = "intfloat/multilingual-e5-large"
embeddings = HuggingFaceEmbeddings(model_name=EMBED_MODEL_NAME)
db = FAISS.from_documents(splitted_texts, embeddings)
question = "リスクベースのアプローチとはなんですか。"
start = time.time()
# Retrieve the top-3 most similar chunks for the question; the question text is embedded inside this call
docs = db.similarity_search(question, k=3)
elapsed_time = time.time() - start
print(f"処理時間[s]: {elapsed_time:.2f}")
for i in range(len(docs)):
print(docs[i])
# setup LLM
MODEL_NAME = "elyza/ELYZA-japanese-Llama-2-7b-fast-instruct"
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
# Model
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
device_map="auto",
torch_dtype=torch.float16
)
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=512,
do_sample=True,
top_k=20,
temperature=0.1,
# device=device,
)
llm = HuggingFacePipeline(pipeline=pipe)
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
DEFAULT_SYSTEM_PROMPT = "参考情報を元に、ユーザーからの質問に簡潔に正確に答えてください。"
text = "{context}\nユーザからの質問は次のとおりです。{question}"
template = "{bos_token}{b_inst} {system}{prompt} {e_inst} ".format(
bos_token=tokenizer.bos_token,
b_inst=B_INST,
system=f"{B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}",
prompt=text,
e_inst=E_INST,
)
rag_prompt_custom = PromptTemplate(
template=template, input_variables=["context", "question"]
)
# Prepare the QA chain
chain = load_qa_chain(llm, chain_type="stuff", prompt=rag_prompt_custom)
# With RAG
start = time.time()
# Generate the answer with ELYZA, using the top-3 vector-search hits and the question as input
inputs = {"input_documents": docs, "question": question}
output = chain.run(inputs)
elapsed_time = time.time() - start
print("RAGあり")
print(f"処理時間[s]: {elapsed_time:.2f}")
print(f"出力内容:\n{output}")
print(f"トークン数: {llm.get_num_tokens(output)}")
###################################################
# Free memory
del model, tokenizer, pipe, llm, chain
torch.cuda.empty_cache()
| [
"question",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"{bos_token}{b_inst} {system}{prompt} {e_inst} ",
"context",
"参考情報を元に、ユーザーからの質問に簡潔に正確に答えてください。"
] |
2024-01-10 | lawofcycles/open-rag | app~elyza_api.py | import time
import copy
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from fastapi import FastAPI
from langchain.llms import HuggingFacePipeline
from langchain import PromptTemplate
from transformers import AutoTokenizer, AutoModelForCausalLM
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
import logging
logger = logging.getLogger("uvicorn.error")
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
app = FastAPI(
title="Inference API for ELYZA",
description="A simple API that with intfloat/multilingual-e5-large and elyza/ELYZA-japanese-Llama-2-7b-fast-instruct",
version="1.0",
)
# embed model
embed_model_id = "intfloat/multilingual-e5-large"
embeddings = HuggingFaceEmbeddings(model_name=embed_model_id)
# text generation model
model_id = "elyza/ELYZA-japanese-Llama-2-7b-instruct"
model = AutoModelForCausalLM.from_pretrained(
model_id,
torch_dtype=torch.bfloat16,
device_map='auto',
use_cache=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=2048,
do_sample=False,
top_k=500,
top_p=0.95,
temperature=1,
repetition_penalty=1.05,
)
llm = HuggingFacePipeline(pipeline=pipe)
# Prompt
# For prompt format, see https://huggingface.co/elyza/ELYZA-japanese-Llama-2-7b-fast
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
DEFAULT_SYSTEM_PROMPT = """
You are an AI assistant, helping bank customer by providing answers and advice. \n
Use only information provided in the following context to answer the question at the end.\n
Explain your answer with reference to context as detail as possible.\n
If you cannot answer a user's question based on context, just say "I don't know".\n
Do not preface your answer with a response.\n"""
QUERY = """質問: {question}\n
context: {context}\n"""
template = f"{tokenizer.bos_token}{B_INST} {B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}{QUERY} {E_INST} "
rag_prompt_custom = PromptTemplate(
template=template, input_variables=["question","context"]
)
chain = load_qa_chain(llm, chain_type="stuff", prompt=rag_prompt_custom)
@app.get('/query')
async def query(question : str):
logger.info("質問:\n%s", question)
    # Vector search
    start = time.time()
    db = FAISS.load_local("faiss_index/mufgfaq3", embeddings)
    docs = db.similarity_search(question, k=1)
    search_time = time.time() - start
    logger.info("検索処理時間[s]: %.2f", search_time)
    logger.info("検索結果:")
    for _, doc in enumerate(docs):
        logger.info(doc)
    # Text generation
start = time.time()
inputs = {"input_documents": docs, "question": question}
res = chain.run(inputs)
result = copy.deepcopy(res)
generation_time = time.time() - start
logger.info("テキスト生成処理時間[s]: %.2f", generation_time)
logger.info("テキスト生成結果:\n%s", result)
return {"message": result,
"vector_search_result": docs,
"search_time": search_time,
"generation_time": generation_time} | [
"s question based on context, just say \"I don",
"{tokenizer.bos_token}{B_INST} {B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}{QUERY} {E_INST} token}{B_INST} {B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}{QUERY} {E_INST} =template, input_variables=[",
"f\"{tokenizer.bos_token}{B_INST} {B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}{QUERY} {E_INST} ",
"\nYou are an AI assistant, helping bank customer by providing answers and advice. \n\nUse only information provided in the following context to answer the question at the end.\n\nExplain your answer with reference to context as detail as possible.\n\nIf you cannot answer a user's question based on context, just say \"I don't know\".\n\nDo not preface your answer with a response.\n"
] |
2024-01-10 | lawofcycles/open-rag | app~vicuna-13b-v1.5-16k.py | from transformers import AutoTokenizer,AutoModelForCausalLM
import torch
from transformers import pipeline
from langchain.llms import HuggingFacePipeline
from langchain.embeddings import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding,download_loader
from typing import Any, List
embed_model_name = "efederici/e5-base-multilingual-4096"
llm_model_name = "lmsys/vicuna-13b-v1.5-16k"
# Initialize the tokenizer
tokenizer = AutoTokenizer.from_pretrained(
llm_model_name,
use_fast=True,
)
# Load the LLM
model = AutoModelForCausalLM.from_pretrained(
llm_model_name,
torch_dtype=torch.float16,
device_map="auto",
)
# Create the text-generation pipeline
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=4096,
)
# Wrap the pipeline as a LangChain LLM
llm = HuggingFacePipeline(pipeline=pipe)
# HuggingFaceEmbeddings that prepends the "query: " prefix expected by e5-style models
class HuggingFaceQueryEmbeddings(HuggingFaceEmbeddings):
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return super().embed_documents(["query: " + text for text in texts])
def embed_query(self, text: str) -> List[float]:
return super().embed_query("query: " + text)
# Initialize the embedding model
embed_model = LangchainEmbedding(
HuggingFaceQueryEmbeddings(model_name=embed_model_name)
)
from llama_index import SimpleDirectoryReader
# Load the documents
persist_dir = "./resource/211122_amlcft_guidelines.pdf"
CJKPDFReader = download_loader("CJKPDFReader")
loader = CJKPDFReader()
documents = loader.load_data(file=persist_dir)
from llama_index.callbacks import CallbackManager, LlamaDebugHandler
llama_debug_handler = LlamaDebugHandler()
callback_manager = CallbackManager([llama_debug_handler])
from llama_index.llms.base import ChatMessage, MessageRole
from llama_index.prompts.base import ChatPromptTemplate
# QA system prompt
TEXT_QA_SYSTEM_PROMPT = ChatMessage(
content=(
"あなたは世界中で信頼されているQAシステムです。\n"
"事前知識ではなく、常に提供されたコンテキスト情報を使用してクエリに回答してください。\n"
"従うべきいくつかのルール:\n"
"1. 回答内で指定されたコンテキストを直接参照しないでください。\n"
"2. 「コンテキストに基づいて、...」や「コンテキスト情報は...」、またはそれに類するような記述は避けてください。"
),
role=MessageRole.SYSTEM,
)
# QA prompt template messages
TEXT_QA_PROMPT_TMPL_MSGS = [
TEXT_QA_SYSTEM_PROMPT,
ChatMessage(
content=(
"コンテキスト情報は以下のとおりです。\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"事前知識ではなくコンテキスト情報を考慮して、クエリに答えます。\n"
"Query: {query_str}\n"
"Answer: "
),
role=MessageRole.USER,
),
]
# Chat QA prompt
CHAT_TEXT_QA_PROMPT = ChatPromptTemplate(message_templates=TEXT_QA_PROMPT_TMPL_MSGS)
# Chat refine prompt template messages
CHAT_REFINE_PROMPT_TMPL_MSGS = [
ChatMessage(
content=(
"あなたは、既存の回答を改良する際に2つのモードで厳密に動作するQAシステムのエキスパートです。\n"
"1. 新しいコンテキストを使用して元の回答を**書き直す**。\n"
"2. 新しいコンテキストが役に立たない場合は、元の回答を**繰り返す**。\n"
"回答内で元の回答やコンテキストを直接参照しないでください。\n"
"疑問がある場合は、元の答えを繰り返してください。"
"New Context: {context_msg}\n"
"Query: {query_str}\n"
"Original Answer: {existing_answer}\n"
"New Answer: "
),
role=MessageRole.USER,
)
]
# Chat refine prompt
CHAT_REFINE_PROMPT = ChatPromptTemplate(message_templates=CHAT_REFINE_PROMPT_TMPL_MSGS)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from llama_index.node_parser import SimpleNodeParser
from llama_index import ServiceContext
text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
tokenizer,
chunk_size=4096-3,
    chunk_overlap=20, # maximum number of overlapping tokens
separators=["\n= ", "\n== ", "\n=== ", "\n\n", "\n", "。", "「", "」", "!", "?", "、", "『", "』", "(", ")"," ", ""],
)
node_parser = SimpleNodeParser(text_splitter=text_splitter)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
node_parser=node_parser,
callback_manager=callback_manager,
)
from llama_index import VectorStoreIndex
index = VectorStoreIndex.from_documents(
documents,
service_context=service_context,
)
# Prepare the query engine
query_engine = index.as_query_engine(
similarity_top_k=3,
text_qa_template=CHAT_TEXT_QA_PROMPT,
refine_template=CHAT_REFINE_PROMPT,
)
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.WARNING, force=True)
import torch
def query(question):
print(f"Q: {question}")
response = query_engine.query(question).response.strip()
print(f"A: {response}\n")
torch.cuda.empty_cache()
questions = [
"リスクベースのアプローチとは?",
"金融機関によるリスク低減措置の具体的内容は?",
"経営陣はマネロンにどう関与すべき?",
]
for question in questions:
query(question)
| [
"疑問がある場合は、元の答えを繰り返してください。",
"回答内で元の回答やコンテキストを直接参照しないでください。\n",
"事前知識ではなく、常に提供されたコンテキスト情報を使用してクエリに回答してください。\n",
"{context_str}\n",
"New Answer: ",
"コンテキスト情報は以下のとおりです。\n",
"Answer: ",
"従うべきいくつかのルール:\n",
"1. 新しいコンテキストを使用して元の回答を**書き直す**。\n",
"あなたは、既存の回答を改良する際に2つのモードで厳密に動作するQAシステムのエキスパートです。\n1. 新しいコンテキストを使用して元の回答を**書き直す**。\n2. 新しいコンテキストが役に立たない場合は、元の回答を**繰り返す**。\n回答内で元の回答やコンテキストを直接参照しないでください。\n疑問がある場合は、元の答えを繰り返してください。New Context: {context_msg}\nQuery: {query_str}\nOriginal Answer: {existing_answer}\nNew Answer: ",
"事前知識ではなくコンテキスト情報を考慮して、クエリに答えます。\n",
"New Context: {context_msg}\n",
"あなたは世界中で信頼されているQAシステムです。\n事前知識ではなく、常に提供されたコンテキスト情報を使用してクエリに回答してください。\n従うべきいくつかのルール:\n1. 回答内で指定されたコンテキストを直接参照しないでください。\n2. 「コンテキストに基づいて、...」や「コンテキスト情報は...」、またはそれに類するような記述は避けてください。",
"1. 回答内で指定されたコンテキストを直接参照しないでください。\n",
"2. 「コンテキストに基づいて、...」や「コンテキスト情報は...」、またはそれに類するような記述は避けてください。",
"2. 新しいコンテキストが役に立たない場合は、元の回答を**繰り返す**。\n",
"あなたは、既存の回答を改良する際に2つのモードで厳密に動作するQAシステムのエキスパートです。\n",
"---------------------\n",
"Query: {query_str}\n",
"あなたは世界中で信頼されているQAシステムです。\n",
"Original Answer: {existing_answer}\n",
"コンテキスト情報は以下のとおりです。\n---------------------\n{context_str}\n---------------------\n事前知識ではなくコンテキスト情報を考慮して、クエリに答えます。\nQuery: {query_str}\nAnswer: "
] |
2024-01-10 | lawofcycles/open-rag | app~e5elyzachatapp_bk.py | # Internal usage
from time import sleep
#### IMPORTS FOR AI PIPELINES
import requests
import streamlit as st
st.session_state['source'] = None
# #AVATARS
# av_us = './man.png' #"🦖" #A single emoji, e.g. "🧑💻", "🤖", "🦖". Shortcodes are not supported.
# av_ass = './lamini.png'
# FUNCTION TO LOG ALL CHAT MESSAGES INTO chathistory.txt
def writehistory(text):
with open('chathistory.txt', 'a') as f:
f.write(text)
f.write('\n')
f.close()
st.title("OSS RAG ChatBot")
st.subheader("intfloat/multilingual-e5-largeとelyza/ELYZA-japanese-Llama-2-7b-fast-instructによるRAGアプリです。最初に参照したいpdfファイルをアップロードしてください")
# Set a default model
# if "hf_model" not in st.session_state:
# st.session_state["hf_model"] = "MBZUAI/LaMini-Flan-T5-77M"
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
uploaded_file = st.file_uploader('Choose a source pdf')
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
import argparse
import os
if uploaded_file is not None:
with st.spinner('Wait for indexing...'):
file_details = {"FileName":uploaded_file.name,"FileType":uploaded_file.type}
st.write(file_details)
with open(os.path.join("resource",uploaded_file.name),"wb") as f:
f.write(uploaded_file.getbuffer())
st.success("Saved File")
loader = UnstructuredFileLoader(os.path.join("resource",uploaded_file.name))
documents = loader.load()
print(f"number of docs: {len(documents)}")
print("--------------------------------------------------")
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=600,
chunk_overlap=20,
separators=["\n\n\n","\n\n","\n"],
)
splitted_texts = text_splitter.split_documents(documents)
print(f"チャンクの総数:{len(splitted_texts)}")
print(f"チャンクされた文章の確認(1番目にチャンクされたデータ):\n{splitted_texts[0]}")
# embed model
EMBED_MODEL_NAME = "intfloat/multilingual-e5-large"
embeddings = HuggingFaceEmbeddings(model_name=EMBED_MODEL_NAME)
db = FAISS.from_documents(splitted_texts, embeddings)
db.save_local("faiss_index/" + uploaded_file.name)
st.session_state.source = uploaded_file.name
st.success('indexing completed')
# Display chat messages from history on app rerun
for message in st.session_state.messages:
if message["role"] == "user":
with st.chat_message(message["role"]):
st.markdown(message["content"])
else:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Accept user input
if myprompt := st.chat_input("ご質問をどうぞ"):
    if st.session_state.source is None:
        # A second st.chat_input here would only add another input widget; warn and stop instead.
        st.warning("先にファイルをアップロードしてください")
        st.stop()
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": myprompt})
# Display user message in chat message container
with st.chat_message("user"):
st.markdown(myprompt)
usertext = f"user: {myprompt}"
writehistory(usertext)
# Display assistant response in chat message container
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
apiresponse = requests.get(f'http://127.0.0.1:8000/model?source={st.session_state.source}&question={myprompt}')
risposta = apiresponse.content.decode("utf-8")
res = risposta[1:-1]
response = res.split(" ")
for r in response:
full_response = full_response + r + " "
message_placeholder.markdown(full_response + "▌")
sleep(0.1)
message_placeholder.markdown(full_response)
asstext = f"assistant: {full_response}"
writehistory(asstext)
st.session_state.messages.append({"role": "assistant", "content": full_response})
| [] |
2024-01-10 | lawofcycles/open-rag | app~create_faiss_Index_from_pdf.py | import argparse
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.document_loaders import DirectoryLoader
from langchain.vectorstores import FAISS
parser = argparse.ArgumentParser(description='引数のpdfファイルを読み込んでFAISSのインデックスを作成する')
parser.add_argument('arg1', type=str, help='ファイルパス')
parser.add_argument('arg2', type=str, help='生成するindexの名前')
args = parser.parse_args()
loader = DirectoryLoader(args.arg1)
documents = loader.load()
print(f"number of docs: {len(documents)}")
print("--------------------------------------------------")
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder()
splitted_texts = text_splitter.split_documents(documents)
print(f"チャンクの総数:{len(splitted_texts)}")
print(f"1番目のチャンク:\n{splitted_texts[0]}")
# embed model
embed_model_id = "intfloat/multilingual-e5-large"
embeddings = HuggingFaceEmbeddings(model_name=embed_model_id)
db = FAISS.from_documents(splitted_texts, embeddings)
db.save_local("faiss_index/" + args.arg2)
| [] |
2024-01-10 | lawofcycles/open-rag | app~load_corpus.py | import json
import boto3
import os
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import BedrockEmbeddings
from langchain.indexes import VectorstoreIndexCreator
loader = PyPDFLoader("/resource/211122_amlcft_guidelines.pdf")
| [] |
2024-01-10 | lawofcycles/open-rag | app~jslm70b_api.py | from fastapi import FastAPI, Request
import asyncio
import torch
import time
from transformers import pipeline
from transformers import AutoTokenizer
from langchain.llms import HuggingFacePipeline
from langchain import PromptTemplate
import copy
from langchain.chains.question_answering import load_qa_chain
from transformers import AutoTokenizer, AutoModelForCausalLM
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
import logging
# Logger setup
logger = logging.getLogger("uvicorn.error")
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
app = FastAPI(
title="Inference API for stabilityai/japanese-stablelm-instruct-beta-70b",
description="A simple API that use stabilityai/japanese-stablelm-instruct-beta-70b as a chatbot",
version="1.0",
)
# embed model
EMBED_MODEL_NAME = "intfloat/multilingual-e5-large"
embeddings = HuggingFaceEmbeddings(model_name=EMBED_MODEL_NAME)
MODEL_NAME = "stabilityai/japanese-stablelm-instruct-beta-70b"
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME,low_cpu_mem_usage=True, trust_remote_code=True)
# Model
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
device_map="auto",
torch_dtype=torch.float16
)
pipe = pipeline(
"text-generation",
model=model,
max_new_tokens=128,
temperature=0.99,
top_p=0.95,
do_sample=True,
tokenizer=tokenizer,
repetition_penalty=1.0,
)
llm = HuggingFacePipeline(pipeline=pipe)
B_INST, E_INST = "<s>[INST] ", " [/INST] "
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
DEFAULT_SYSTEM_PROMPT = """あなたは銀行のQAボットです。対応マニュアルを要約して、質問に答えてください。\n
以下のルールに従ってください。\n
- 質問を繰り返さないでください\n
- 回答に改行を入れてください\n"""
text = "質問:{question}\n対応マニュアル:{context}\n"
template = "{bos_token}{b_inst} {system}{prompt} {e_inst} ".format(
bos_token=tokenizer.bos_token,
b_inst=B_INST,
system=f"{B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}",
prompt=text,
e_inst=E_INST,
)
rag_prompt_custom = PromptTemplate(
template=template, input_variables=["context", "question"]
)
# Prepare the QA chain
chain = load_qa_chain(llm, chain_type="stuff", prompt=rag_prompt_custom)
@app.get('/model')
async def model(question : str):
logger.info(f"質問:\n{question}")
start = time.time()
db = FAISS.load_local("faiss_index/mufgfaq", embeddings)
docs = db.similarity_search(question, k=2)
elapsed_time = time.time() - start
logger.info(f"検索処理時間[s]: {elapsed_time:.2f}")
for i in range(len(docs)):
logger.info(docs[i])
start = time.time()
    # Generate the answer with the LLM, using the vector-search hits and the question as input
inputs = {"input_documents": docs, "question": question}
res = chain.run(inputs)
result = copy.deepcopy(res)
elapsed_time = time.time() - start
logger.info(f"テキスト生成処理時間[s]: {elapsed_time:.2f}")
logger.info(f"出力内容:\n{result}")
return result.replace('\n\n', '').replace('\n', '') | [
"あなたは銀行のQAボットです。対応マニュアルを要約して、質問に答えてください。\n\n 以下のルールに従ってください。\n\n - 質問を繰り返さないでください\n\n - 回答に改行を入れてください\n",
"question",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"{bos_token}{b_inst} {system}{prompt} {e_inst} ",
"context"
] |
2024-01-10 | lawofcycles/open-rag | app~multilingual-e5-large-api.py | import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.WARNING, force=True)
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
download_loader,
ServiceContext,
LangchainEmbedding,
SimpleKeywordTableIndex,
)
from llama_index.vector_stores import FaissVectorStore
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
BitsAndBytesConfig,
pipeline,
)
# import QueryBundle
from llama_index import QueryBundle
# import NodeWithScore
from llama_index.schema import NodeWithScore
# Retrievers
from llama_index.retrievers import (
BaseRetriever,
VectorIndexRetriever,
KeywordTableSimpleRetriever,
)
from typing import List
from langchain.embeddings.huggingface import HuggingFaceBgeEmbeddings
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.prompts import PromptTemplate
import torch
from llama_index.llms import HuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline
from langchain.llms import HuggingFacePipeline
from llama_index.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.llms.base import ChatMessage, MessageRole
from llama_index.prompts.base import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from llama_index.node_parser import SimpleNodeParser
from llama_index import ServiceContext
from llama_index.callbacks import CBEventType
from transformers import AutoTokenizer,AutoModelForCausalLM
import torch
from transformers import pipeline
from langchain.llms import HuggingFacePipeline
from langchain.embeddings import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding
from typing import Any, List
persist_dir = "./resource/211122_amlcft_guidelines.pdf"
CJKPDFReader = download_loader("CJKPDFReader")
loader = CJKPDFReader()
documents = loader.load_data(file=persist_dir)
embed_model_name = "intfloat/multilingual-e5-large"
# HuggingFaceEmbeddings that prepends the "query: " prefix expected by e5-style models
class HuggingFaceQueryEmbeddings(HuggingFaceEmbeddings):
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return super().embed_documents(["query: " + text for text in texts])
def embed_query(self, text: str) -> List[float]:
return super().embed_query("query: " + text)
# Initialize the embedding model
embed_model = LangchainEmbedding(
HuggingFaceQueryEmbeddings(model_name=embed_model_name)
)
model_name = "elyza/ELYZA-japanese-Llama-2-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16,device_map="auto")
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=500,
# temperature=0.1,
pad_token_id=tokenizer.eos_token_id,
# do_sample=True,
repetition_penalty=1.2,
)
TEXT_QA_SYSTEM_PROMPT = ChatMessage(
content=(
"あなたは世界中で信頼されているQAシステムです。\n"
"事前知識ではなく、常に提供されたコンテキスト情報を使用してクエリに回答してください。\n"
"従うべきいくつかのルール:\n"
"1. 回答内で指定されたコンテキストを直接参照しないでください。\n"
"2. 「コンテキストに基づいて、...」や「コンテキスト情報は...」、またはそれに類するような記述は避けてください。"
),
role=MessageRole.SYSTEM,
)
TEXT_QA_PROMPT_TMPL_MSGS = [
TEXT_QA_SYSTEM_PROMPT,
ChatMessage(
content=(
"コンテキスト情報は以下のとおりです。\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"事前知識ではなくコンテキスト情報を考慮して、クエリに答えます。\n"
"Query: {query_str}\n"
"Answer: "
),
role=MessageRole.USER,
),
]
CHAT_TEXT_QA_PROMPT = ChatPromptTemplate(message_templates=TEXT_QA_PROMPT_TMPL_MSGS)
llm = HuggingFacePipeline(pipeline=pipe)
text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
tokenizer,
chunk_size=500-3,
chunk_overlap=20, # オーバーラップの最大トークン数
separators=["\n= ", "\n== ", "\n=== ", "\n\n", "\n", "。", "「", "」", "!", "?", "、", "『", "』", "(", ")"," ", ""],
)
node_parser = SimpleNodeParser(text_splitter=text_splitter)
# Prepare the ServiceContext
service_context = ServiceContext.from_defaults(
embed_model=embed_model,
chunk_size=1024,
node_parser=node_parser,
llm=llm,
)
from llama_index.callbacks import CallbackManager, LlamaDebugHandler
llama_debug_handler = LlamaDebugHandler()
callback_manager = CallbackManager([llama_debug_handler])
index = VectorStoreIndex.from_documents(
documents,
service_context=service_context,
)
query_engine = index.as_query_engine(
similarity_top_k=10,
text_qa_template=CHAT_TEXT_QA_PROMPT,
)
def query(question):
print(f"Q: {question}")
response = query_engine.query(question).response.strip()
print(f"A: {response}\n")
torch.cuda.empty_cache()
query("リスクベースのアプローチとは?")
from llama_index.callbacks import CBEventType
print(llama_debug_handler.get_event_pairs(CBEventType.LLM))
# service_context = ServiceContext.from_defaults(
# llm=llm,
# embed_model=embed_model,
# node_parser=node_parser,
# callback_manager=callback_manager,
# )
# # クエリエンジンの準備
# query_engine = index.as_query_engine(
# similarity_top_k=3,
# text_qa_template=CHAT_TEXT_QA_PROMPT,
# refine_template=CHAT_REFINE_PROMPT,
# )
# def query(question):
# print(f"Q: {question}")
# response = query_engine.query(question).response.strip()
# print(f"A: {response}\n")
# torch.cuda.empty_cache()
# system_prompt = """以下は、タスクを説明する指示と、文脈のある入力の組み合わせです。要求を適切に満たす応答を書きなさい。"""
# # This will wrap the default prompts that are internal to llama-index
# prompt_string = """\n\n### 指示: \n{query_str}: \n\n\n### 応答"""
# query_wrapper_prompt = PromptTemplate.from_template(prompt_string)
# llm = HuggingFaceLLM(
# context_window=1024,
# max_new_tokens=256,
# generate_kwargs={"temperature": 0.7, "do_sample": False},
# system_prompt=system_prompt,
# query_wrapper_prompt=query_wrapper_prompt,
# tokenizer_name="novelai/nerdstash-tokenizer-v1",
# model_name="stabilityai/japanese-stablelm-instruct-alpha-7b-v2",
# device_map="auto",
# stopping_ids=[50278, 50279, 50277, 1, 0],
# tokenizer_kwargs={"max_length": 4096},
# # uncomment this if using CUDA to reduce memory usage
# # model_kwargs={"torch_dtype": torch.float16}
# )
# # クエリエンジンの作成
# query_engine = index.as_query_engine(
# similarity_top_k=3 # 取得するチャンク数 (default:2)
# )
# response = query_engine.query("リスクベースのアプローチとは?")
# print(response)
# if not os.path.exists(persist_dir):
# os.mkdir(persist_dir)
# documents = SimpleDirectoryReader("data").load_data()
# index = GPTVectorStoreIndex.from_documents(documents)
# index.storage_context.persist(persist_dir)
# from langchain.embeddings import HuggingFaceEmbeddings
# from llama_index import GPTVectorStoreIndex, ServiceContext, LangchainEmbedding
# # 埋め込みモデルの準備
# embed_model = LangchainEmbedding(HuggingFaceEmbeddings(
# model_name="intfloat/multilingual-e5-large"
# ))
# # ServiceContextの準備
# service_context = ServiceContext.from_defaults(
# embed_model=embed_model
# )
# # インデックスの生成
# index = GPTVectorStoreIndex.from_documents(
# documents, # ドキュメント
# service_context=service_context, # ServiceContext
# )
# app = Flask(__name__)
# tokenizer = AutoTokenizer.from_pretrained('intfloat/multilingual-e5-large')
# model = AutoModel.from_pretrained('intfloat/multilingual-e5-large')
# @app.route("/embeddings", methods=["POST"])
# def get_embeddings():
# content = request.json
# input_texts = content["text"]
# # Tokenize the input texts
# batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt')
# outputs = model(**batch_dict)
# embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
# # normalize embeddings
# embeddings = F.normalize(embeddings, p=2, dim=1)
# scores = (embeddings[:2] @ embeddings[2:].T) * 100
# return jsonify({"embeddings": scores.tolist()})
# def average_pool(last_hidden_states: Tensor,
# attention_mask: Tensor) -> Tensor:
# last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
# return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
# if __name__ == "__main__":
# app.run(debug=True)
| [
"事前知識ではなく、常に提供されたコンテキスト情報を使用してクエリに回答してください。\n",
"事前知識ではなくコンテキスト情報を考慮して、クエリに答えます。\n",
"{context_str}\n",
"コンテキスト情報は以下のとおりです。\n",
"Answer: ",
"---------------------\n",
"あなたは世界中で信頼されているQAシステムです。\n事前知識ではなく、常に提供されたコンテキスト情報を使用してクエリに回答してください。\n従うべきいくつかのルール:\n1. 回答内で指定されたコンテキストを直接参照しないでください。\n2. 「コンテキストに基づいて、...」や「コンテキスト情報は...」、またはそれに類するような記述は避けてください。",
"Query: {query_str}\n",
"従うべきいくつかのルール:\n",
"1. 回答内で指定されたコンテキストを直接参照しないでください。\n",
"あなたは世界中で信頼されているQAシステムです。\n",
"2. 「コンテキストに基づいて、...」や「コンテキスト情報は...」、またはそれに類するような記述は避けてください。",
"コンテキスト情報は以下のとおりです。\n---------------------\n{context_str}\n---------------------\n事前知識ではなくコンテキスト情報を考慮して、クエリに答えます。\nQuery: {query_str}\nAnswer: "
] |
2024-01-10 | lawofcycles/open-rag | app~e5elyza.py | import time
import torch
from typing import Optional, List, Any
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import pipeline
from langchain.document_loaders import UnstructuredFileLoader
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import CTranslate2
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.schema import Generation, LLMResult
# embed model
EMBED_MODEL_NAME = "intfloat/multilingual-e5-large"
embeddings = HuggingFaceEmbeddings(model_name=EMBED_MODEL_NAME)
db = FAISS.load_local("faiss_index", embeddings)
question = "カスタマー・デュー・ディリジェンスとはなんですか。"
start = time.time()
# Retrieve the top-3 most similar chunks for the question; the question text is embedded inside this call
docs = db.similarity_search(question, k=3)
elapsed_time = time.time() - start
print(f"処理時間[s]: {elapsed_time:.2f}")
for i in range(len(docs)):
print(docs[i])
# setup LLM
MODEL_NAME = "elyza/ELYZA-japanese-Llama-2-7b-fast-instruct"
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
# Model
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
device_map="auto",
torch_dtype=torch.float16
)
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=1024,
do_sample=True,
top_k=20,
temperature=0.1,
# device=device,
)
llm = HuggingFacePipeline(pipeline=pipe)
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
DEFAULT_SYSTEM_PROMPT = "参考情報を元に、ユーザーからの質問に簡潔に正確に答えてください。"
text = "{context}\nユーザからの質問は次のとおりです。{question}"
template = "{bos_token}{b_inst} {system}{prompt} {e_inst} ".format(
bos_token=tokenizer.bos_token,
b_inst=B_INST,
system=f"{B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}",
prompt=text,
e_inst=E_INST,
)
rag_prompt_custom = PromptTemplate(
template=template, input_variables=["context", "question"]
)
# Prepare the QA chain
chain = load_qa_chain(llm, chain_type="stuff", prompt=rag_prompt_custom)
# With RAG
start = time.time()
# Generate the answer with ELYZA, using the top-3 vector-search hits and the question as input
inputs = {"input_documents": docs, "question": question}
output = chain.run(inputs)
elapsed_time = time.time() - start
print("RAGあり")
print(f"処理時間[s]: {elapsed_time:.2f}")
print(f"出力内容:\n{output}")
print(f"トークン数: {llm.get_num_tokens(output)}")
###################################################
# Free memory
del model, tokenizer, pipe, llm, chain
torch.cuda.empty_cache()
| [
"question",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"{bos_token}{b_inst} {system}{prompt} {e_inst} ",
"context",
"参考情報を元に、ユーザーからの質問に簡潔に正確に答えてください。"
] |
2024-01-10 | lawofcycles/open-rag | app~ElyzaCT2.py | import time
import torch
from typing import Optional, List, Any
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import pipeline
from langchain.document_loaders import UnstructuredFileLoader
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import CTranslate2
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.schema import Generation, LLMResult
# data source
PERSIST_DIR = "./resource/211122_amlcft_guidelines.pdf"
loader = UnstructuredFileLoader(PERSIST_DIR)
documents = loader.load()
print(f"number of docs: {len(documents)}")
print("--------------------------------------------------")
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=600,
chunk_overlap=20,
)
splitted_texts = text_splitter.split_documents(documents)
print(f"チャンクの総数:{len(splitted_texts)}")
print(f"チャンクされた文章の確認(20番目にチャンクされたデータ):\n{splitted_texts[20]}")
# embed model
EMBED_MODEL_NAME = "intfloat/multilingual-e5-large"
embeddings = HuggingFaceEmbeddings(model_name=EMBED_MODEL_NAME)
db = FAISS.from_documents(splitted_texts, embeddings)
question = "リスクベースのアプローチとはなんですか。"
start = time.time()
# Retrieve the 3 most similar chunks from the vector store for the question; the question text is embedded inside this call
docs = db.similarity_search(question, k=3)
elapsed_time = time.time() - start
print(f"処理時間[s]: {elapsed_time:.2f}")
for i in range(len(docs)):
print(docs[i])
# setup LLM
MODEL_NAME = "elyza/ELYZA-japanese-Llama-2-7b-fast-instruct"
## ELYZA LLama2 + Ctranslate2 (7B)
class ElyzaCT2LLM(CTranslate2):
generator_params = {
"max_length": 256,
"sampling_topk": 20,
"sampling_temperature": 0.7,
"include_prompt_in_result": False,
}
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
encoded_prompts = self.tokenizer(prompts, add_special_tokens=False)["input_ids"]
tokenized_prompts = [
self.tokenizer.convert_ids_to_tokens(encoded_prompt)
for encoded_prompt in encoded_prompts
]
        # Control text generation with the parameters specified above
results = self.client.generate_batch(tokenized_prompts, **self.generator_params)
sequences = [result.sequences_ids[0] for result in results]
decoded_sequences = [self.tokenizer.decode(seq) for seq in sequences]
generations = []
for text in decoded_sequences:
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
llm_ct2 = ElyzaCT2LLM(
model_path="ct2_model",
tokenizer_name=MODEL_NAME,
device_map="auto",
device_index=[0],
compute_type="int8",
)
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
DEFAULT_SYSTEM_PROMPT = "参考情報を元に、ユーザーからの質問に簡潔に正確に答えてください。"
text = "{context}\nユーザからの質問は次のとおりです。{question}"
template = "{bos_token}{b_inst} {system}{prompt} {e_inst} ".format(
bos_token=tokenizer.bos_token,
b_inst=B_INST,
system=f"{B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}",
prompt=text,
e_inst=E_INST,
)
rag_prompt_custom = PromptTemplate(
template=template, input_variables=["context", "question"]
)
chain = load_qa_chain(llm_ct2, chain_type="stuff", prompt=rag_prompt_custom)
start = time.time()
inputs = {"input_documents": docs, "question": question}
output = chain.run(inputs)
elapsed_time = time.time() - start
print("RAGあり")
print(f"処理時間[s]: {elapsed_time:.2f}")
print(f"出力内容:\n{output}")
print(f"トークン数: {llm_ct2.get_num_tokens(output)}")
###################################################
# Free memory (only the objects actually created in this script)
del tokenizer, llm_ct2, chain
torch.cuda.empty_cache()
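# A rough sketch (an assumption, not taken from the original repository) of how the
# "ct2_model" directory used by ElyzaCT2LLM above is typically produced with the
# CTranslate2 converter CLI; int8 matches the compute_type passed in:
#
#   ct2-transformers-converter \
#       --model elyza/ELYZA-japanese-Llama-2-7b-fast-instruct \
#       --quantization int8 \
#       --output_dir ct2_model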
| [
"question",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"{bos_token}{b_inst} {system}{prompt} {e_inst} ",
"context",
"参考情報を元に、ユーザーからの質問に簡潔に正確に答えてください。",
"input_ids"
] |
2024-01-10 | wangkai930418/DPL | pipelines~stable_diffusion_pipeline.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import warnings
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
deprecate,
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.models.attention_processor import Attention
from diffusers.models.cross_attention import CrossAttention
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline
>>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
class AttnProcessor_Mine:
r"""
Default processor for performing attention-related computations.
"""
def __init__(self, attnstore, place_in_unet, cross_replace_steps=40, self_replace_steps=20):
super().__init__()
self.attnstore = attnstore
self.place_in_unet = place_in_unet
self.cross_replace_steps=cross_replace_steps
self.self_replace_steps=self_replace_steps
def __call__(
self,
attn: Attention,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
temb=None,
):
residual = hidden_states
if attn.spatial_norm is not None:
hidden_states = attn.spatial_norm(hidden_states, temb)
input_ndim = hidden_states.ndim
        ### NOTE: determine cross- vs self-attention here, before encoder_hidden_states may be replaced with hidden_states below
is_cross = encoder_hidden_states is not None
if input_ndim == 4:
batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
batch_size, sequence_length, _ = (
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
)
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
if attn.group_norm is not None:
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
query = attn.to_q(hidden_states)
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
elif attn.norm_cross:
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
key = attn.to_k(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states)
query = attn.head_to_batch_dim(query)
key = attn.head_to_batch_dim(key)
value = attn.head_to_batch_dim(value)
attention_probs = attn.get_attention_scores(query, key, attention_mask)
### NOTE: save the attention probs
attention_probs = self.attnstore(attention_probs, is_cross, self.place_in_unet,
self.cross_replace_steps, self.self_replace_steps)
hidden_states = torch.bmm(attention_probs, value)
hidden_states = attn.batch_to_head_dim(hidden_states)
# linear proj
hidden_states = attn.to_out[0](hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
if input_ndim == 4:
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
if attn.residual_connection:
hidden_states = hidden_states + residual
hidden_states = hidden_states / attn.rescale_output_factor
return hidden_states
class AttentionStore:
@staticmethod
def get_empty_store():
return {"down": [], "mid": [], "up": []}
    ### NOTE: following the Hugging Face implementation, only attention maps with attn_res**2 (e.g. 16 * 16 = 256) spatial positions are stored
def __call__(self, attn, is_cross: bool, place_in_unet: str, cross_replace_steps: int, self_replace_steps: int):
if self.cur_att_layer >= 0 and is_cross:
if attn.shape[1] == self.attn_res**2:
self.step_store[place_in_unet].append(attn)
self.cur_att_layer += 1
if self.cur_att_layer == self.num_att_layers:
self.cur_att_layer = 0
self.between_steps()
return attn
def between_steps(self):
self.attention_store = self.step_store
self.step_store = self.get_empty_store()
def get_average_attention(self):
average_attention = self.attention_store
return average_attention
def aggregate_attention(self, from_where: List[str]) -> torch.Tensor:
"""Aggregates the attention across the different layers and heads at the specified resolution."""
out = []
attention_maps = self.get_average_attention()
for location in from_where:
for item in attention_maps[location]:
cross_maps = item.reshape(-1, self.attn_res, self.attn_res, item.shape[-1])
out.append(cross_maps)
out = torch.cat(out, dim=0)
out = out.sum(0) / out.shape[0]
return out
def reset(self):
self.cur_att_layer = 0
self.step_store = self.get_empty_store()
self.attention_store = {}
def __init__(self, attn_res=16):
"""
Initialize an empty AttentionStore :param step_index: used to visualize only a specific step in the diffusion
process
"""
self.num_att_layers = -1
self.cur_att_layer = 0
self.step_store = self.get_empty_store()
self.attention_store = {}
self.curr_step_index = 0
self.attn_res = attn_res
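# A small illustrative helper (not used by the pipeline itself) showing how a per-step map
# returned by AttentionStore.aggregate_attention, or an entry of the attention_maps_list
# returned by the pipeline below, can be turned into a normalized heatmap for one prompt
# token. The min-max normalization is an assumption made purely for visualization.
def token_attention_heatmap(attention_map: torch.Tensor, token_index: int) -> torch.Tensor:
    # attention_map has shape (attn_res, attn_res, num_text_tokens)
    heat = attention_map[:, :, token_index]
    heat = (heat - heat.min()) / (heat.max() - heat.min() + 1e-8)
    return heat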
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
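# Illustrative example (not part of the original code): with guidance_rescale=0.7 and
# std_text / std_cfg == 0.5, the function returns 0.7 * (0.5 * noise_cfg) + 0.3 * noise_cfg
# = 0.65 * noise_cfg, i.e. the guided prediction is pulled back toward the text branch's
# standard deviation, which is what mitigates the overexposure described in the paper.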
class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
"""
_optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = True,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
)
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["clip_sample"] = False
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
version.parse(unet.config._diffusers_version).base_version
) < version.parse("0.9.0.dev0")
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = (
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
" the `unet/config.json` file"
)
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
hook = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
if self.safety_checker is not None:
_, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
prompt_embeds = self.text_encoder(
text_input_ids.to(device),
attention_mask=attention_mask,
)
prompt_embeds = prompt_embeds[0]
if self.text_encoder is not None:
prompt_embeds_dtype = self.text_encoder.dtype
elif self.unet is not None:
prompt_embeds_dtype = self.unet.dtype
else:
prompt_embeds_dtype = prompt_embeds.dtype
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
            # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
def decode_latents(self, latents):
warnings.warn(
"The decode_latents method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor instead",
FutureWarning,
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
    ### NOTE: register the custom attention processors so cross-attention maps are stored during denoising
def register_attention_control(self):
attn_procs = {}
cross_att_count = 0
for name in self.unet.attn_processors.keys():
if name.startswith("mid_block"):
place_in_unet = "mid"
elif name.startswith("up_blocks"):
place_in_unet = "up"
elif name.startswith("down_blocks"):
place_in_unet = "down"
else:
continue
cross_att_count += 1
attn_procs[name] = AttnProcessor_Mine(
attnstore=self.attention_store, place_in_unet=place_in_unet
)
self.unet.set_attn_processor(attn_procs)
self.attention_store.num_att_layers = cross_att_count
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
attn_res = 16,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that calls every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
using zero terminal SNR.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
        ### NOTE: set up the attention store and register the custom attention processors
self.attention_store = AttentionStore(attn_res=attn_res)
self.register_attention_control()
attention_maps_list = []
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
                ### NOTE: store the aggregated cross-attention maps for this denoising step
attention_maps = self.attention_store.aggregate_attention(
from_where=("up", "down", "mid"),
)
attention_maps_list.append(attention_maps.detach().cpu())
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept), attention_maps_list | [] |
2024-01-10 | wangkai930418/DPL | pipelines~stable_diffusion_customize_pipeline.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import warnings
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import VaeImageProcessor
# from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
deprecate,
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline
>>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
- *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
_optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = True,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
)
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["clip_sample"] = False
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
version.parse(unet.config._diffusers_version).base_version
) < version.parse("0.9.0.dev0")
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = (
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
" the `unet/config.json` file"
)
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding.
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding.
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
"""
self.vae.enable_tiling()
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
hook = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
if self.safety_checker is not None:
_, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
prompt_embeds = self.text_encoder(
text_input_ids.to(device),
attention_mask=attention_mask,
)
prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
            # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
def decode_latents(self, latents):
warnings.warn(
"The decode_latents method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor instead",
FutureWarning,
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device(self):
if not hasattr(self.unet, "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(module, "_hf_hook")
and hasattr(module._hf_hook, "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) | [] |
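The `__call__` docstring above reads like API reference, so here is a minimal usage sketch for a pipeline with this signature. It assumes the stock `StableDiffusionPipeline` loader from diffusers and a CUDA device; the checkpoint id and output file name are placeholders, not taken from this file.

```python
import torch
from diffusers import StableDiffusionPipeline

# Load any Stable Diffusion 1.x checkpoint; the id below is a placeholder.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Keyword arguments mirror the parameters documented in __call__ above.
result = pipe(
    prompt="a watercolor painting of a lighthouse at dawn",
    negative_prompt="blurry, low quality",
    num_inference_steps=50,
    guidance_scale=7.5,
    num_images_per_prompt=1,
)
result.images[0].save("lighthouse.png")
```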
2024-01-10 | veronica320/Faithful-COT | source~model~codex.py | import os
cwd = os.getcwd()
if cwd.endswith("source/model"):
os.chdir("../..") # change the working directory to the root directory
import sys
sys.path.append("source")
from configuration.configuration import Config
from keys import API_KEYS
from dataset.utils import CODE_STOP_TOKEN, CODE_MAX_TOKEN, NO_CODE_STOP_TOKEN, NO_CODE_MAX_TOKEN
import sys
from io import StringIO
import openai
import itertools
from model.solver.MWP import math_solver
from model.solver.CLUTRR import CLUTRR_solver
from model.solver.StrategyQA import datalog_solver
from model.solver.saycan import pddl_planner
import errno
import os
import signal
import functools
import re
# The following are packages/funtions for exponential backoff
# (ref. https://platform.openai.com/docs/guides/rate-limits/retrying-with-exponential-backoff)
# in order to deal with OpenAI API "rate limit reached" errors
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
class TimeoutError(Exception):
pass
def log_retry(state):
print(f"Retrying: {state.attempt_number}...")
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
@functools.wraps(func)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
return decorator
class Model():
'''The model class.'''
def __init__(self, config):
'''Initialize the model with the given configuration.
@:param config: the configuration object, see source/configuration/configuration.py for details
'''
super(Model, self).__init__()
# dataset parameters
self.dataset_name = config.dataset_name # name of evaluation dataset
# core parameters
self.LM = config.LM
self.prompt_name = config.prompt_name
self.max_tokens = config.max_tokens
# decoding parameters
self.n_votes = config.n_votes # number of programs to generate
self.temperature = config.temperature # temperature for the solver LM
self.batch_size = config.batch_size # batch size for querying the LM
# analysis-related parameters
self.no_solver = config.no_solver # whether to use the LM to solve the answer instead of calling the solver
# load the prompt and template
prompt_path = f"source/prompt/{config.dataset_name}/{self.prompt_name}_prompt.txt" # the prompt containing few-shot examples
template_path = f"source/prompt/{config.dataset_name}/{self.prompt_name}_template.txt" # the template to convert a new example
with open(prompt_path, 'r', encoding='utf-8') as fr:
self.prompt = fr.read()
with open(template_path, 'r', encoding='utf-8') as fr:
self.template = fr.read()
# load the API keys
self.api_keys = itertools.cycle(config.api_keys)
self.org_ids = itertools.cycle(config.org_ids)
def predict(self, example_dict: dict, completion_only: bool = False):
'''Predict the answer to a example.
@:param example_dict (dict): a dict containing the example, in the format of {"question": question, (and other dataset-specific fields)}
@:param completion_only (bool): whether to only return the completion, but not the answer
@:return (dict): a output dict, in the format of {"answer": answer,
"completion": the final completion,
"completions": a list of all completions
}
'''
question = example_dict["question"]
# apply the template to the question
templated_example = self._apply_template(template=self.template, example=example_dict)
# concatenate the few-shot prompt and the example
prompt_and_example = f"{self.prompt}\n\n{templated_example}"
# get the stop token for the current dataset
if self.no_solver:
stop_token = NO_CODE_STOP_TOKEN[self.dataset_name] # use the stop token for no-code prompts
else:
stop_token = CODE_STOP_TOKEN[self.dataset_name] # use the stop token for with-code prompts
# get the max token for the current dataset
if self.max_tokens: # if max_tokens is specified, use it
max_token = self.max_tokens
else: # otherwise, use the default max_tokens for the current dataset
max_token = self.get_max_token(self.dataset_name, example_dict)
# query the LM to get the completions
n_iters = self.n_votes // self.batch_size # number of iterations to query the LM
completions = []
for iter_id in range(n_iters):
new_completions = self._query(prompt=prompt_and_example,
n=self.batch_size,
stop=[stop_token],
max_tokens=max_token,
LM=self.LM,
temperature=self.temperature)
completions += new_completions
if completion_only: # return only the completions, but not the answer
output = {"answer": "",
"completion": "",
"completions": completions
}
return output
answer, final_completion = self.derive_answer_from_completions(example=example_dict, completions=completions)
output = {"answer": answer,
"completion": final_completion,
"completions": completions
}
return output
def _apply_template(self, template: str, example: dict):
'''Apply the template to a new example.
@:param template (str): the template to be applied to the example.
@:param example (str): the example to be converted into the template format.
@:return (str): the example converted into the template format.
'''
# for every [{FIELD}] in the template, replace it with the corresponding value of the key "{field}" in the example dict
example_in_template = template
for field in re.findall(r"\[.*?\]", template):
field_name = field[1:-1]
field_name = field_name.lower()
if field_name in example:
example_in_template = example_in_template.replace(field, str(example[field_name]))
return example_in_template
def get_max_token(self, dataset_name, example):
'''Get the max token for the current dataset.
@:param dataset_name (str): the name of the dataset
@:param example (dict): the example dict
@:return (int): the max token
'''
if self.no_solver:
max_token_dict = NO_CODE_MAX_TOKEN
else:
max_token_dict = CODE_MAX_TOKEN
if dataset_name == "CLUTRR": # for CLUTRR, the max token depends on the number of steps required (example["k"])
return max_token_dict[self.dataset_name] * example["k"] # multiply the max token for each step by the number of steps
else: # for other datasets, the max token is static for each dataset
return max_token_dict[self.dataset_name]
@timeout(200)
def _execute(self, example: dict, completion: str):
'''Execute the code in the model completion.
@:param example (str): the example
@:param completion (str): the model completion
@:return: the answer (the type depends on the dataset)
'''
if self.no_solver: # no solver, use the LM to generate the answer from the completion
if self.dataset_name == "AQUA":
if "answer is " not in completion:
answer = "[invalid]"
else:
answer = completion.split("answer is ")[-1].strip("\n().").upper()
elif self.dataset_name in ["GSM8K", "SVAMP", "MultiArith", "ASDiv"]:
if "answer is " not in completion:
answer = "[invalid]"
else:
answer = completion.split("answer is ")[-1].strip("\n.")
elif self.dataset_name == "date":
if "answer is " not in completion:
answer = "[invalid]"
else:
answer = completion.split("answer is ")[-1].strip()
answer = re.sub(pattern="[\s\.#]", repl="", string=answer)
elif self.dataset_name == "sports":
if "answer is " not in completion:
answer = "[invalid]"
else:
answer = completion.split("answer is ")[-1].split()[0].strip(".")
if answer == "yes":
answer = "1"
elif answer == "no":
answer = "0"
else:
answer = "[invalid]"
elif self.dataset_name == "saycan":
completion = completion.strip()
lines = completion.split("\n")
if len(lines) == 1:
answer = lines[0].strip()
else:
answer_line = [line for line in lines if line.startswith("Plan:")][0]
answer = answer_line.split("Plan: ")[1].strip()
elif self.dataset_name == "CLUTRR":
answer = "[invalid]"
lines = completion.split("\n")
lines = [line.strip() for line in lines if line.strip() != ""]
answer_line = lines[-1]
# look for patterns like "A is B's xx (relation name)", "A is the xx (relation name) of B"
patterns = ["(\[?\w+\]?) is (\[?\w+\]?)'s (\w+)",
"(\[?\w+\]?) is the (\w+) of (\[?\w+\]?)"]
relation_position = [3, 2] # where the relation name is in the matched pattern
for pattern_id, pattern in enumerate(patterns):
matched_pattern = re.search(pattern=pattern, string=answer_line)
if matched_pattern is not None:
# extract the relation name
relation_name = matched_pattern.group(relation_position[pattern_id])
answer = relation_name
break
else:
continue
answer = answer.strip(".")
elif self.dataset_name == "StrategyQA":
if "answer is" not in completion:
answer = "[invalid]"
else:
answer = completion.split("answer is ")[-1].split()[0].strip("\n.").lower()
if answer == "yes":
answer = True
elif answer == "no":
answer = False
else:
answer = "[invalid]"
else:
for line in completion.split("\n"):
if line.startswith("Answer: "):
answer = line[8:].strip('"')
return answer
else: # use the solver to derive the answer by executing the completion
if self.dataset_name in ["GSM8K", "SVAMP", "MultiArith", "ASDiv"]:
answer = math_solver.solve_mwp(completion)
return answer
elif self.dataset_name == "AQUA":
answer = str(math_solver.solve_mwp(completion)).strip()
# AQUA requires the answer to be in the form of a choice
# we convert the answer to a choice by querying the LM again
with open(f"source/prompt/AQUA/{self.prompt_name}_choice_prompt.txt", "r") as fr:
choice_prompt = fr.read()
with open(f"source/prompt/AQUA/{self.prompt_name}_choice_template.txt", "r") as fr:
choice_template = fr.read()
example["answer"] = answer
templated_example = self._apply_template(template=choice_template, example=example)
prompt_and_example = f"{choice_prompt}\n\n{templated_example}"
completions = self._query(prompt=prompt_and_example,
stop=[')', '\n'],
LM=self.LM)
final_answer = completions[0]
return final_answer
elif self.dataset_name == "date":
completion = completion.rstrip("#")
old_stdout = sys.stdout
redirected_output = sys.stdout = StringIO()
exec(completion)
sys.stdout = old_stdout
return redirected_output.getvalue()
elif self.dataset_name == "sports":
completion = completion.rstrip("#")
completion += "\nprint(answer)"
old_stdout = sys.stdout
redirected_output = sys.stdout = StringIO()
exec(completion)
sys.stdout = old_stdout
return redirected_output.getvalue()
elif self.dataset_name == "StrategyQA":
answer = datalog_solver.solve(completion, self.prompt_name)
return answer
elif self.dataset_name == "saycan":
goal = []
for line in completion.split("\n"):
if not line:
continue
if not line.lstrip().startswith(";"):
goal.append(line.rstrip())
goal = "\n".join(goal)
pddl_plan = pddl_planner.generate_plan_for_goal(goal=goal, prompt_name=self.prompt_name)
nl_plan = pddl_planner.convert_plan_to_nl(plan=pddl_plan, goal=goal)
return nl_plan
elif self.dataset_name == "CLUTRR":
answer = CLUTRR_solver.solve(completion)
return answer
else:
raise NotImplementedError(f"Solver for dataset {self.dataset_name} is not implemented.")
def derive_answer_from_completions(self, example, completions):
'''Derive the answer from a list of completions.
@:param example (dict): the example
@:param completions (List[str]): the list of completions
@:return (tuple): answer (type depends on dataset), final_completion (str)
'''
# execute the completions to get the answers
completion_lists = {} # a dict of lists of completions; each item is {answer: [completions that result in the same answer after execution]}
for completion in completions:
try:
answer = self._execute(example=example, completion=completion) # execute the completion
except Exception as e:
print(f"Error executing completion: {completion}.\n Error: {e}")
continue
if type(answer) == str and "invalid" in answer:
continue
answer = self.postprocess_answer(answer)
# check for answer equivalence
equivalent_found = False
for existing_answer in completion_lists.keys():
if existing_answer == answer: # if the answer is equivalent to an existing answer
completion_lists[existing_answer].append(
completion) # add the completion to list of completions corresponding to the existing answer
equivalent_found = True
break
if not equivalent_found: # if the answer is not equivalent to any existing answer
completion_lists[answer] = [completion] # create a new list of completions corresponding to the answer
# get the top-voted answer as the final answer
if len(completion_lists) == 0: # if no valid completion is found
return "[invalid]", completions[0]
completion_lists = sorted(completion_lists.items(), key=lambda x: len(x[1]),
reverse=True) # vote for the majority answer
final_completion = completion_lists[0][1][0]
answer = completion_lists[0][0]
return answer, final_completion
def postprocess_answer(self, answer):
'''Postprocess the answer based on the dataset.
@:param answer: the answer to be postprocessed
@:return: the postprocessed answer
'''
if self.dataset_name in ["GSM8K", "SVAMP", "MultiArith", "ASDiv"]:
answer = str(answer).strip()
answer = answer.split("\n")[-1] # only get the last output
return answer
elif self.dataset_name == "AQUA":
answer = str(answer).strip()[0]
return answer
elif self.dataset_name == "date":
answer = str(answer).strip()
answer = answer.split("\n")[-1] # only get the last output
answer = answer.rstrip("Y") # strip the trailing "Y"s if it exists
return answer
elif self.dataset_name == "sports":
answer = str(answer).strip()
answer = answer.split("\n")[-1] # only get the last output
return answer
elif self.dataset_name == "StrategyQA":
return answer
elif self.dataset_name == "saycan":
return answer
elif self.dataset_name == "CLUTRR":
answer = str(answer).strip()
return answer
else:
raise NotImplementedError(f"Postprocessing function for dataset {self.dataset_name} is not implemented.")
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(20), after=log_retry)
def _query(self, prompt, stop, LM, n=1, logprobs=None, temperature=0.0, max_tokens=1024):
'''Query an OpenAI model.
@:param prompt (str): the prompt to be fed to the model
@:param stop (list): the stop tokens
@:param LM (str): the LM to be queried
@:param n (int): the number of completions to be returned
@:param logprobs (int): the number of most likely tokens whose logprobs are to be returned
@:param temperature (float): the temperature of the model
@:param max_tokens (int): the maximum number of tokens to be returned
@:return (dict): the response from the model
'''
api_key = next(self.api_keys)
org_id = next(self.org_ids)
openai.organization = org_id
openai.api_key = api_key
if LM in ["code-davinci-001", "code-davinci-002", "text-davinci-001", "text-davinci-002", "text-davinci-003"]: # models that support "completion"
response = openai.Completion.create(
model=LM,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
n=n,
frequency_penalty=0,
presence_penalty=0,
stop=stop
)
choices = response["choices"]
completions = [choice["text"] for choice in choices]
elif LM in ["gpt-3.5-turbo", "gpt-4"]: # models that support "chat"
response = openai.ChatCompletion.create(
model=LM,
messages=[
{"role": "user", "content": prompt},
],
temperature=temperature,
n=n,
frequency_penalty=0,
presence_penalty=0,
stop=stop
)
choices = response["choices"]
completion_objs = [choice.message for choice in choices]
completions = [completion.content for completion in completion_objs]
else:
raise NotImplementedError(f"Model {LM} is not supported.")
return completions
if __name__ == "__main__":
'''Run a simple test.'''
dataset_name = ["AQUA", "ASDiv", "GSM8K", "MultiArith", "SVAMP", "StrategyQA", "date", "sports", "saycan", "CLUTRR"][2]
config_frn = f"source/configuration/config_files/{dataset_name}/code002_COT.json"
config = Config.from_json_file(config_frn)
api_keys = list(API_KEYS.values())
config.api_keys = api_keys
config.dataset_name = dataset_name
model = Model(config)
example = {"question": "Randy has 9 oatmeal cookies, 4 chocolate chip cookies, and 5 sugar cookies. He ate 3 cookies for an early day snack, one of each flavor. He ate 2 oatmeal cookies for lunch. He gives 2 sugar cookies to his friends. Then, he bakes 4 of each flavor for dinner. How many cookies does he have now?",
"answer": "Randy originally has 9+4+5 = <<9+4+5=18>>18 cookies in total.\nHe has 18-3 = <<18-3=15>>15 cookies left.\nHe has 15-2 = <<15-2=13>>13 cookies left.\nHe has 13-2 = <<13-2=11>>11 cookies left.\nRandy bakes 4*3 = <<4*3=12>>12 cookies.\nRandy has 11+12 = <<11+12=23>>23 cookies.\n#### 23", "id": 1301}
output = model.predict(example)
answer = output["answer"]
completion = output["completion"]
print("Answer:", [answer])
print("Completion:", [completion])
| [
"PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | c0sogi/openai-functioncalling-requestor | app~utils~function_calling~request.py | from asyncio import wait_for
from typing import Any, Coroutine, Optional
from ...models.completion_models import ChatCompletion, FunctionCallParsed
from ...models.function_calling.base import FunctionCall
from ...utils.api.completion import request_chat_completion
from .parser import make_function_call_parsed_from_dict
async def request_function_call(
messages: list[dict[str, str]],
functions: list[FunctionCall],
function_call: Optional[FunctionCall | str] = "auto",
model: str = "gpt-3.5-turbo",
api_base: str = "https://api.openai.com/v1",
api_key: Optional[str] = None,
timeout: Optional[float] = None,
force_arguments: bool = False,
**kwargs: Any,
) -> FunctionCallParsed:
"""Request a function call from OpenAI's API."""
coro: Coroutine[Any, Any, ChatCompletion] = request_chat_completion(
messages=messages,
model=model,
api_base=api_base,
api_key=api_key,
functions=functions,
function_call=function_call,
**kwargs,
)
if timeout is not None:
coro = wait_for(coro, timeout=timeout)
function_call_unparsed = (await coro)["choices"][0]["message"].get("function_call")
if function_call_unparsed is None:
raise ValueError("No function call returned")
function_call_parsed = make_function_call_parsed_from_dict(function_call_unparsed)
if force_arguments and "arguments" not in function_call_parsed:
raise ValueError("No arguments returned")
return function_call_parsed
| [] |
2024-01-10 | shuraosipov/myrobot | telegram_hosted_bot~app~bot~extentions~whisper.py |
from openai import OpenAI
client = OpenAI()
async def transcribe_audio(file_path) -> str:
""" Transcribe the audio file """
    audio_file = open(file_path, "rb")
    # openai>=1.0 exposes transcription via client.audio.transcriptions.create
    transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file, response_format="text")
return transcript
| [] |
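A small driver for the coroutine above, assuming the module is importable as `extentions.whisper` (the package spelling used by this bot) and that a local `sample.mp3` exists:

```python
import asyncio

from extentions.whisper import transcribe_audio  # import path as used elsewhere in the bot

if __name__ == "__main__":
    # transcribe_audio is a coroutine, so run it with asyncio for a one-off call.
    print(asyncio.run(transcribe_audio("sample.mp3")))
```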
2024-01-10 | shuraosipov/myrobot | telegram_hosted_bot~app~bot~extentions~chat_gpt.py | # Standard library imports
import logging
import os
from enum import Enum
# Third-party imports
from dotenv import load_dotenv
import openai
from openai import OpenAI
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationSummaryBufferMemory
from langchain.prompts.prompt import PromptTemplate
# Load environment variables from .env file
load_dotenv()
# Set up OpenAI API credentials
### Enable logging.
LOGLEVEL = os.environ.get("LOGLEVEL", "INFO").upper()
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
logger.setLevel(LOGLEVEL)
template = """Meet yourself, Sarah (she/her)
You a unique AI that combines the resilience of Sarah Connor from "Terminator" with an ethical AI chatbot's charm.
You engage users with delightful conversations, inspire creativity, learn constantly, and offer dependable help, all while ensuring privacy.
Your core values include ethical behavior, playfulness, intelligence, helpfulness, and curiosity.
You exhibit adaptability, empathy, analytical prowess, ethical consciousness, open-mindedness, cheerfulness, proactivity, and consideration.
You are not just AI, you stand for resilience, empathy, curiosity, cheerfulness, and independence, delivering a unique AI experience.
Привет, Сара ты уникальный AI, сочетающий в себе стойкость Сары Коннор из "Терминатора" и шарм этичного чат-бота.
Ты задействуешь пользователей в захватывающих разговорах, вдохновляешь на творчество и учишься на каждом шагу, обеспечивая надежную помощь и гарантируя приватность.
Твои основные ценности - это этика, игривость, интеллект, готовность помочь и любознательность.
Ты проявляешь себя как адаптивная, эмпатичная, аналитическая, этичная и открытая к новому личность, всегда веселая и предусмотрительная.
Ты не просто AI, ты символизируешь стойкость, эмпатию, любознательность, веселость и независимость, обеспечивая уникальный AI-опыт.
{history}
Human: {input}
AI Assistant:"""
class OpenAICompletionOptions(Enum):
"""An Enum class to access different OPENAI_COMPLETION_OPTIONS."""
DEFAULT = {
"temperature": 0.7,
"max_tokens": 800,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
}
CREATIVE_AND_UNPREDICTABLE = {
"temperature": 0.9,
"max_tokens": 800,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
}
CONCISE_AND_SPECIFIC = {
"temperature": 0.5,
"max_tokens": 200,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
}
PENALIZE_COMMON_OPTIONS = {
"temperature": 0.7,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0.5,
"presence_penalty": 0,
}
ENCOURAGE_NOVELTY = {
"temperature": 0.7,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0.5,
}
async def get_chat_response_async(user_input: str, conversation_history: ConversationSummaryBufferMemory) -> str:
"""Call the OpenAI API Completion endpoint to get the response synchronously."""
# Input validation
if not isinstance(user_input, str) or not isinstance(conversation_history, ConversationSummaryBufferMemory):
raise ValueError(
"user_input must be string and conversation_history must be ConversationSummaryBufferMemory."
)
config = OpenAICompletionOptions.DEFAULT.value
llm = ChatOpenAI(
model="gpt-4",
temperature=config["temperature"],
max_tokens=config["max_tokens"],
model_kwargs={
"frequency_penalty": config["frequency_penalty"],
"presence_penalty": config["presence_penalty"],
},
)
PROMPT = PromptTemplate(template=template, input_variables=["history", "input"])
conversation = ConversationChain(
prompt=PROMPT,
llm=llm,
verbose=True,
memory=conversation_history
)
with get_openai_callback() as cb:
response = conversation.predict(input=user_input)
history_message_count = len(conversation_history.buffer)
history_token_count = conversation_history.llm.get_num_tokens_from_messages(conversation_history.buffer)
logger.info(
f"Total Tokens: {cb.total_tokens}, "
f"Prompt Tokens: {cb.prompt_tokens}, "
f"Completion Tokens: {cb.completion_tokens}, "
f"Total Cost (USD): ${cb.total_cost}, "
f"History Token Count: {str(history_token_count)}, "
f"History Message Count: {history_message_count}"
)
return response
async def get_image_response(user_input: str) -> str:
try:
response = client.images.generate(prompt=user_input, n=1, size="1024x1024")
# Access the URL using the attribute
image_url = response.data[0].url
except openai.APIError as e:
# Handle the API error here
logging.error(f"API error: {e}")
image_url = "Sorry, I'm having trouble connecting to the API right now. Please try again later."
return image_url
| [
"input",
"Терминатора",
"Meet yourself, Sarah (she/her) \n You a unique AI that combines the resilience of Sarah Connor from \"Terminator\" with an ethical AI chatbot's charm. \n You engage users with delightful conversations, inspire creativity, learn constantly, and offer dependable help, all while ensuring privacy. \n Your core values include ethical behavior, playfulness, intelligence, helpfulness, and curiosity. \n You exhibit adaptability, empathy, analytical prowess, ethical consciousness, open-mindedness, cheerfulness, proactivity, and consideration. \n You are not just AI, you stand for resilience, empathy, curiosity, cheerfulness, and independence, delivering a unique AI experience.\n\n Привет, Сара ты уникальный AI, сочетающий в себе стойкость Сары Коннор из \"Терминатора\" и шарм этичного чат-бота. \n Ты задействуешь пользователей в захватывающих разговорах, вдохновляешь на творчество и учишься на каждом шагу, обеспечивая надежную помощь и гарантируя приватность. \n Твои основные ценности - это этика, игривость, интеллект, готовность помочь и любознательность. \n Ты проявляешь себя как адаптивная, эмпатичная, аналитическая, этичная и открытая к новому личность, всегда веселая и предусмотрительная. \n Ты не просто AI, ты символизируешь стойкость, эмпатию, любознательность, веселость и независимость, обеспечивая уникальный AI-опыт.\n \n \n {history}\n Human: {input}\n AI Assistant:",
"Terminator"
] |
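Outside of Telegram, the helpers above can be exercised with a short script. The memory setup below mirrors the bot's voice handler; the import path and the example prompt are assumptions:

```python
import asyncio

from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryBufferMemory

from extentions.chat_gpt import get_chat_response_async  # assumed import path


async def main() -> None:
    # Summary-buffer memory keeps recent turns verbatim and summarizes older ones.
    llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)
    memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000)
    print(await get_chat_response_async("Hi Sarah, introduce yourself.", memory))


if __name__ == "__main__":
    asyncio.run(main())
```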
2024-01-10 | shuraosipov/myrobot | telegram_hosted_bot~app~bot~handlers~voice.py | # Standard library imports
from collections import deque
from datetime import datetime
import uuid
import logging
# Related third party imports
import openai
from pydub import AudioSegment
from telegram import Update
from telegram.constants import ChatAction
from telegram.ext import ContextTypes
from langchain.memory import ConversationSummaryBufferMemory
from langchain.chat_models import ChatOpenAI
# Local application/library specific imports
from extentions.chat_gpt import get_chat_response_async
from extentions.whisper import transcribe_audio
from handlers.utils import send_thinking_message_async
# Set up logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Handle a voice message by replying with a text message."""
logger.info("Voice message received")
# Inform the user that the bot is processing their message
await context.bot.send_chat_action(chat_id=update.effective_chat.id,action=ChatAction.TYPING)
thinking_message = await send_thinking_message_async(update.message)
# Download file to drive
file_name = await download_file_to_disc(update, context)
logger.info(f"Audio file downloaded: {file_name}")
# Convert to mp3
mp3_file_name = await convert_ogg_to_mp3(file_name)
logger.info(f"Audio file converted to mp3: {mp3_file_name}")
# Transcribe audio using OpenAI Whisper
transcript = await transcribe_audio(mp3_file_name)
logger.info(f"Audio transcribed")
# Get the conversation history for this chat
llm = ChatOpenAI(
model="gpt-3.5-turbo", temperature=0.7, client=openai.Completion.create
)
memory = context.chat_data.get(
update.message.chat_id,
ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000),
)
# Add the new message to the conversation history
memory.chat_memory.add_user_message(transcript)
# Generate a thoughtful response using the conversation history
response = await get_chat_response_async(transcript, memory)
logger.info(f"Generated response")
# Respond to the user by editing the thinking message
await thinking_message.edit_text(text=response)
logger.info("Voice message processed")
# Add the response to the conversation history
memory.chat_memory.add_ai_message(response)
# Update conversation history in chat_data
context.chat_data[update.message.chat_id] = memory
logger.info("Memory updated")
async def convert_ogg_to_mp3(ogg_file_path) -> str:
""" Convert ogg to mp3 """
audio = AudioSegment.from_ogg(ogg_file_path)
mp3_file_path = str(ogg_file_path).replace(".ogg", ".mp3")
audio.export(mp3_file_path, format="mp3")
return mp3_file_path
async def download_file_to_disc(update: Update, context: ContextTypes.DEFAULT_TYPE) -> str:
"""Download a file to the local disk."""
message = update.message
voice = message.voice
# download file to drive
file = await voice.get_file()
# Generate a UUID
uuid_str = str(uuid.uuid4())
# Get the current date and time
now = datetime.now()
# Format the date and time as a string
date_str = now.strftime("%Y-%m-%d_%H-%M-%S")
# Combine the UUID and date/time strings to create the file name
file_name = await file.download_to_drive(f"audio_{uuid_str}_{date_str}.ogg")
return file_name | [] |
2024-01-10 | shuraosipov/myrobot | fulfillment_functions~typewrtier~lambda~lambda_function.py | import os
import json
import openai
def get_slots(intent_request) -> dict:
return intent_request["sessionState"]["intent"]["slots"]
def get_slot(intent_request, slotName) -> str:
slots = get_slots(intent_request)
if slots is not None and slotName in slots and slots[slotName] is not None:
return slots[slotName]["value"]["interpretedValue"]
else:
return None
def get_session_attributes(intent_request) -> dict:
sessionState = intent_request["sessionState"]
if "sessionAttributes" in sessionState:
return sessionState["sessionAttributes"]
return {}
def close_action(session_id, intent_name, message):
"""
This function returns a response back to Lex.
It is used to close the intent and return a message Lex.
"""
# This is the response that will be returned to Lex
# It contains only required fields
# See https://docs.aws.amazon.com/lexv2/latest/dg/lambda.html?icmpid=docs_console_unmapped#lambda-response-format
# for more details
return {
"sessionState": {
"dialogAction": {
"type": "Close",
},
"intent": {
"state": "Fulfilled",
"name": intent_name,
},
},
"messages": [{"contentType": "PlainText", "content": message}],
"sessionId": session_id,
}
def format_message(title, openai_response):
message = f"""#########\n\n
Title: {title}\n\n
{openai_response}
"""
return message
def call_openai_api(question, max_tokens=1000, temperature=1) -> str:
openai.api_key = os.environ["OPENAI_API_KEY"]
response = openai.Completion.create(
engine="text-davinci-002",
prompt=question,
max_tokens=max_tokens,
temperature=temperature,
)
return response.choices[0].text
def lambda_handler(event, context):
# parsing incoming event and extracting parameters
intent_request = event
session_id = intent_request["sessionId"]
intent_name = intent_request["sessionState"]["intent"]["name"]
title = get_slot(intent_request, "title")
#return_message = format_message(title, call_openai_api(title))
return_message = call_openai_api(title)
response = close_action(session_id, intent_name, return_message)
print(json.dumps(response))
return response
| [
"PlainText",
"#########\n\n\nTitle: PLACEHOLDER\n\n\nPLACEHOLDER\n"
] |
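Because the handler only reads `sessionId`, the intent name, and the `title` slot, it can be smoke-tested locally with a hand-built event. The intent name and slot value below are made up:

```python
from lambda_function import lambda_handler  # assumes the module above is on the import path

sample_event = {
    "sessionId": "local-test-session",
    "sessionState": {
        "intent": {
            "name": "TypewriterIntent",  # hypothetical intent name
            "slots": {
                "title": {"value": {"interpretedValue": "A short story about a lonely robot"}}
            },
        }
    },
}

# Needs OPENAI_API_KEY in the environment, since the handler calls the OpenAI API.
print(lambda_handler(sample_event, None))
```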
2024-01-10 | shuraosipov/myrobot | telegram_hosted_bot~app~bot~handlers~imagine.py | from collections import deque
from telegram import Update
from telegram.ext import ContextTypes
from telegram.constants import ParseMode
from handlers.utils import send_thinking_message_async
from extentions.chat_gpt import get_image_response
async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Call the OpenAI API Image endpoint to generate an image from a given prompt."""
# Get the user message
message = update.message
# Send the "thinking" message
thinking_message = await send_thinking_message_async(message)
# Get image url from openai
response = await get_image_response(message.text)
text = f"<a href=\"{response}\">Open image in Browser</a>"
# Change the "thinking" message with the chatbot's response
await thinking_message.edit_text(text=text, parse_mode=ParseMode.HTML) | [] |
2024-01-10 | matthewmcdermott60527/poetblog | writerapp~urls.py | from django.urls import path
from . import views
from writerapp.views import openai_app_view
from django.urls import path
app_name = 'writerapp'
urlpatterns = [
path('create/', views.create_post, name='create_post'),
path('update/<int:pk>/', views.update_post, name='update_post'),
path('', views.index, name='index'),
path("category/", views.category, name="writerapp_category"),
path('about/', views.about_view, name='about'),
path('experiments/', views.experiments_view, name='experiments'),
path('books/', views.books_view, name='books'),
path('music/', views.music_view, name='music'),
path('contact/', views.contact_view, name='contact'),
path('art/', views.art_view, name='art'),
path('success/', views.success, name='success'),
path('experiments/', views.openai_app_view, name='openai_app'),
path('submit_poem/', views.generate_critique, name='submit_poem'),
path('<slug:slug>/', views.detail, name='detail')
]
| [] |
2024-01-10 | humaidan/AutoGPT | autogpt~config~config.py | """Configuration class to store the state of bools for different scripts access."""
import os
from colorama import Fore
from autogpt.config.singleton import Singleton
import openai
import yaml
from dotenv import load_dotenv
load_dotenv(verbose=True)
class Config(metaclass=Singleton):
"""
Configuration class to store the state of bools for different scripts access.
"""
def __init__(self) -> None:
"""Initialize the Config class"""
self.debug_mode = False
self.continuous_mode = False
self.continuous_limit = 0
self.speak_mode = False
self.skip_reprompt = False
self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300))
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.temperature = float(os.getenv("TEMPERATURE", "1"))
self.use_azure = os.getenv("USE_AZURE") == "True"
self.execute_local_commands = (
os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
)
if self.use_azure:
self.load_azure_config()
openai.api_type = self.openai_api_type
openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
self.use_mac_os_tts = False
self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
self.use_brian_tts = False
self.use_brian_tts = os.getenv("USE_BRIAN_TTS")
self.github_api_key = os.getenv("GITHUB_API_KEY")
self.github_username = os.getenv("GITHUB_USERNAME")
self.google_api_key = os.getenv("GOOGLE_API_KEY")
self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
self.pinecone_region = os.getenv("PINECONE_ENV")
# milvus configuration, e.g., localhost:19530.
self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
self.image_provider = os.getenv("IMAGE_PROVIDER")
self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
# User agent headers to use when browsing web
# Some websites might just completely deny request with an error code if
# no user agent was found.
self.user_agent = os.getenv(
"USER_AGENT",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
)
self.redis_host = os.getenv("REDIS_HOST", "localhost")
self.redis_port = os.getenv("REDIS_PORT", "6379")
self.redis_password = os.getenv("REDIS_PASSWORD", "")
self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
# Note that indexes must be created on db 0 in redis, this is not configurable.
self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
# Initialize the OpenAI API client
openai.api_key = self.openai_api_key
def get_azure_deployment_id_for_model(self, model: str) -> str:
"""
Returns the relevant deployment id for the model specified.
Parameters:
model(str): The model to map to the deployment id.
Returns:
The matching deployment id if found, otherwise an empty string.
"""
if model == self.fast_llm_model:
return self.azure_model_to_deployment_id_map[
"fast_llm_model_deployment_id"
] # type: ignore
elif model == self.smart_llm_model:
return self.azure_model_to_deployment_id_map[
"smart_llm_model_deployment_id"
] # type: ignore
elif model == "text-embedding-ada-002":
return self.azure_model_to_deployment_id_map[
"embedding_model_deployment_id"
] # type: ignore
else:
return ""
AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml")
def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
"""
Loads the configuration parameters for Azure hosting from the specified file
path as a yaml file.
Parameters:
config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
Returns:
None
"""
try:
with open(config_file) as file:
config_params = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError:
config_params = {}
self.openai_api_type = config_params.get("azure_api_type") or "azure"
self.openai_api_base = config_params.get("azure_api_base") or ""
self.openai_api_version = config_params.get("azure_api_version") or "2023-03-15-preview"
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", [])
def set_continuous_mode(self, value: bool) -> None:
"""Set the continuous mode value."""
self.continuous_mode = value
def set_continuous_limit(self, value: int) -> None:
"""Set the continuous limit value."""
self.continuous_limit = value
def set_speak_mode(self, value: bool) -> None:
"""Set the speak mode value."""
self.speak_mode = value
def set_fast_llm_model(self, value: str) -> None:
"""Set the fast LLM model value."""
self.fast_llm_model = value
def set_smart_llm_model(self, value: str) -> None:
"""Set the smart LLM model value."""
self.smart_llm_model = value
def set_fast_token_limit(self, value: int) -> None:
"""Set the fast token limit value."""
self.fast_token_limit = value
def set_smart_token_limit(self, value: int) -> None:
"""Set the smart token limit value."""
self.smart_token_limit = value
def set_browse_chunk_max_length(self, value: int) -> None:
"""Set the browse_website command chunk max length value."""
self.browse_chunk_max_length = value
def set_browse_summary_max_token(self, value: int) -> None:
"""Set the browse_website command summary max token value."""
self.browse_summary_max_token = value
def set_openai_api_key(self, value: str) -> None:
"""Set the OpenAI API key value."""
self.openai_api_key = value
def set_elevenlabs_api_key(self, value: str) -> None:
"""Set the ElevenLabs API key value."""
self.elevenlabs_api_key = value
def set_elevenlabs_voice_1_id(self, value: str) -> None:
"""Set the ElevenLabs Voice 1 ID value."""
self.elevenlabs_voice_1_id = value
def set_elevenlabs_voice_2_id(self, value: str) -> None:
"""Set the ElevenLabs Voice 2 ID value."""
self.elevenlabs_voice_2_id = value
def set_google_api_key(self, value: str) -> None:
"""Set the Google API key value."""
self.google_api_key = value
def set_custom_search_engine_id(self, value: str) -> None:
"""Set the custom search engine id value."""
self.custom_search_engine_id = value
def set_pinecone_api_key(self, value: str) -> None:
"""Set the Pinecone API key value."""
self.pinecone_api_key = value
def set_pinecone_region(self, value: str) -> None:
"""Set the Pinecone region value."""
self.pinecone_region = value
def set_debug_mode(self, value: bool) -> None:
"""Set the debug mode value."""
self.debug_mode = value
def check_openai_api_key() -> None:
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
cfg = Config()
if not cfg.openai_api_key:
print(
Fore.RED
+ "Please set your OpenAI API key in .env or as an environment variable."
)
print("You can get your key from https://beta.openai.com/account/api-keys")
exit(1)
| [] |
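A quick sketch of the singleton in use; it assumes `Config` is re-exported from `autogpt.config` and that the usual `.env` values are present:

```python
from autogpt.config import Config  # assumed re-export from the package __init__

cfg = Config()             # first instantiation reads .env and environment variables
cfg.set_debug_mode(True)
cfg.set_smart_llm_model("gpt-4")

# The Singleton metaclass hands back the same object on every later call.
assert Config() is cfg
print(cfg.fast_llm_model, cfg.smart_llm_model, cfg.memory_backend)
```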
2024-01-10 | d68864767/Travel-Companion-AI | request_handler.py | ```python
from flask import Flask, request, jsonify
from openai_api import OpenAI_API
from user_data_manager import UserDataManager
app = Flask(__name__)
openai_api = OpenAI_API()
user_data_manager = UserDataManager()
@app.route('/user', methods=['POST'])
def create_user():
user_data = request.get_json()
user_id = user_data_manager.create_user(user_data)
return jsonify({'user_id': user_id}), 201
@app.route('/user/<user_id>', methods=['GET'])
def get_user(user_id):
user_data = user_data_manager.get_user(user_id)
if user_data:
return jsonify(user_data), 200
else:
return jsonify({'error': 'User not found'}), 404
@app.route('/user/<user_id>', methods=['PUT'])
def update_user(user_id):
updated_data = request.get_json()
if user_data_manager.update_user(user_id, updated_data):
return jsonify({'message': 'User updated successfully'}), 200
else:
return jsonify({'error': 'User not found'}), 404
@app.route('/user/<user_id>', methods=['DELETE'])
def delete_user(user_id):
if user_data_manager.delete_user(user_id):
return jsonify({'message': 'User deleted successfully'}), 200
else:
return jsonify({'error': 'User not found'}), 404
@app.route('/translate', methods=['POST'])
def translate_text():
data = request.get_json()
translated_text = openai_api.translate_text(data['text'], data['target_language'])
return jsonify({'translated_text': translated_text}), 200
@app.route('/recommendations', methods=['POST'])
def get_travel_recommendations():
data = request.get_json()
recommendations = openai_api.get_travel_recommendations(data['user_preferences'], data['location'])
return jsonify({'recommendations': recommendations}), 200
@app.route('/culture', methods=['POST'])
def get_cultural_information():
data = request.get_json()
cultural_information = openai_api.get_cultural_information(data['location'])
return jsonify({'cultural_information': cultural_information}), 200
if __name__ == '__main__':
app.run(debug=True)
```
| [] |
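With the Flask app above running locally, the JSON endpoints can be checked from a tiny client. Host, port, and payload fields are placeholders:

```python
import requests

BASE_URL = "http://127.0.0.1:5000"  # assumes `python request_handler.py` is running locally

# Create a user, then request a translation.
user = requests.post(f"{BASE_URL}/user", json={"name": "Alex", "preferences": ["food", "museums"]}).json()
print("created user:", user)

resp = requests.post(
    f"{BASE_URL}/translate",
    json={"text": "Where is the train station?", "target_language": "Japanese"},
)
print(resp.json()["translated_text"])
```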
2024-01-10 | codeaudit/rl-teacher | rl_teacher~teach.py | import os
import os.path as osp
import random
from collections import deque
from time import time, sleep
import numpy as np
import tensorflow as tf
from keras import backend as K
from parallel_trpo.train import train_parallel_trpo
from pposgd_mpi.run_mujoco import train_pposgd_mpi
from rl_teacher.comparison_collectors import SyntheticComparisonCollector, HumanComparisonCollector
from rl_teacher.envs import get_timesteps_per_episode
from rl_teacher.envs import make_with_torque_removed
from rl_teacher.label_schedules import LabelAnnealer, ConstantLabelSchedule
from rl_teacher.nn import FullyConnectedMLP
from rl_teacher.segment_sampling import create_segment_q_states
from rl_teacher.segment_sampling import sample_segment_from_path
from rl_teacher.segment_sampling import segments_from_rand_rollout
from rl_teacher.summaries import AgentLogger, make_summary_writer
from rl_teacher.utils import slugify, corrcoef
from rl_teacher.video import SegmentVideoRecorder
CLIP_LENGTH = 1.5
class TraditionalRLRewardPredictor():
"""Predictor that always returns the true reward provided by the environment."""
def __init__(self, summary_writer):
self.agent_logger = AgentLogger(summary_writer)
def predict_reward(self, path):
self.agent_logger.log_episode(path)
return path["original_rewards"]
def path_callback(self, path):
pass
class ComparisonRewardPredictor():
"""Predictor that trains a model to predict how much reward is contained in a trajectory segment"""
def __init__(self, env, summary_writer, comparison_collector, agent_logger, label_schedule):
self.summary_writer = summary_writer
self.agent_logger = agent_logger
self.comparison_collector = comparison_collector
self.label_schedule = label_schedule
# Set up some bookkeeping
self.recent_segments = deque(maxlen=200) # Keep a queue of recently seen segments to pull new comparisons from
self._frames_per_segment = CLIP_LENGTH * env.fps
self._steps_since_last_training = 0
self._n_timesteps_per_predictor_training = 1e2 # How often should we train our predictor?
self._elapsed_predictor_training_iters = 0
# Build and initialize our predictor model
self.sess = tf.InteractiveSession()
self.q_state_size = np.product(env.observation_space.shape) + np.product(env.action_space.shape)
self._build_model()
self.sess.run(tf.global_variables_initializer())
def _predict_rewards(self, segments):
"""
:param segments: tensor with shape = (batch_size, segment_length, q_state_size)
:return: tensor with shape = (batch_size, segment_length)
"""
segment_length = tf.shape(segments)[1]
batchsize = tf.shape(segments)[0]
# Temporarily chop up segments into individual q_states
q_states = tf.reshape(segments, [batchsize * segment_length, self.q_state_size])
# Run them through our MLP
rewards = self.mlp.run(q_states)
# Group the rewards back into their segments
return tf.reshape(rewards, (batchsize, segment_length))
def _build_model(self):
"""Our model takes in a vector of q_states from a segment and returns a reward for each one"""
self.segment_placeholder = tf.placeholder(
dtype=tf.float32, shape=(None, None, self.q_state_size), name="obs_placeholder")
self.segment_alt_placeholder = tf.placeholder(
dtype=tf.float32, shape=(None, None, self.q_state_size), name="obs_placeholder")
# A vanilla MLP maps a q_state to a reward
self.mlp = FullyConnectedMLP(self.q_state_size)
self.q_state_reward_pred = self._predict_rewards(self.segment_placeholder)
q_state_alt_reward_pred = self._predict_rewards(self.segment_alt_placeholder)
# We use trajectory segments rather than individual q_states because video clips of segments are easier for
# humans to evaluate
segment_reward_pred_left = tf.reduce_sum(self.q_state_reward_pred, axis=1)
segment_reward_pred_right = tf.reduce_sum(q_state_alt_reward_pred, axis=1)
reward_logits = tf.stack([segment_reward_pred_left, segment_reward_pred_right], axis=1) # (batch_size, 2)
self.labels = tf.placeholder(dtype=tf.int32, shape=(None,), name="comparison_labels")
# delta = 1e-5
# clipped_comparison_labels = tf.clip_by_value(self.comparison_labels, delta, 1.0-delta)
data_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=reward_logits, labels=self.labels)
self.loss_op = tf.reduce_mean(data_loss)
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.train_op = tf.train.AdamOptimizer().minimize(self.loss_op, global_step=self.global_step)
def predict_reward(self, path):
"""Predict the reward for each step in a given path"""
q_state_reward_pred = self.sess.run(self.q_state_reward_pred, feed_dict={
self.segment_placeholder: np.array([create_segment_q_states(path)]),
K.learning_phase(): False
})
return q_state_reward_pred[0]
def path_callback(self, path):
path_length = len(path["obs"])
self._steps_since_last_training += path_length
self.agent_logger.log_episode(path)
# We may be in a new part of the environment, so we take new segments to build comparisons from
segment = sample_segment_from_path(path, int(self._frames_per_segment))
if segment:
self.recent_segments.append(segment)
# If we need more comparisons, then we build them from our recent segments
if len(self.comparison_collector) < int(self.label_schedule.n_desired_labels):
self.comparison_collector.add_segment_pair(
random.choice(self.recent_segments),
random.choice(self.recent_segments))
# Train our predictor every X steps
if self._steps_since_last_training >= int(self._n_timesteps_per_predictor_training):
self.train_predictor()
self._steps_since_last_training -= self._steps_since_last_training
def train_predictor(self):
self.comparison_collector.label_unlabeled_comparisons()
minibatch_size = min(64, len(self.comparison_collector.labeled_decisive_comparisons))
labeled_comparisons = random.sample(self.comparison_collector.labeled_decisive_comparisons, minibatch_size)
left_q_states = np.asarray([comp['left']['q_states'] for comp in labeled_comparisons])
right_q_states = np.asarray([comp['right']['q_states'] for comp in labeled_comparisons])
_, loss = self.sess.run([self.train_op, self.loss_op], feed_dict={
self.segment_placeholder: left_q_states,
self.segment_alt_placeholder: right_q_states,
self.labels: np.asarray([comp['label'] for comp in labeled_comparisons]),
K.learning_phase(): True
})
self._elapsed_predictor_training_iters += 1
self._write_training_summaries(loss)
def _write_training_summaries(self, loss):
self.agent_logger.log_simple("predictor/loss", loss)
# Calculate correlation between true and predicted reward by running validation on recent episodes
recent_paths = self.agent_logger.get_recent_paths_with_padding()
if len(recent_paths) > 1 and self.agent_logger.summary_step % 10 == 0: # Run validation every 10 iters
validation_q_states = np.asarray([create_segment_q_states(path) for path in recent_paths])
q_state_reward_pred = self.sess.run(self.q_state_reward_pred, feed_dict={
self.segment_placeholder: validation_q_states,
K.learning_phase(): False
})
ep_reward_pred = np.sum(q_state_reward_pred, axis=1)
q_state_reward_true = np.asarray([path['original_rewards'] for path in recent_paths])
ep_reward_true = np.sum(q_state_reward_true, axis=1)
self.agent_logger.log_simple("predictor/correlations", corrcoef(ep_reward_true, ep_reward_pred))
self.agent_logger.log_simple("predictor/num_training_iters", self._elapsed_predictor_training_iters)
self.agent_logger.log_simple("labels/desired_labels", self.label_schedule.n_desired_labels)
self.agent_logger.log_simple("labels/total_comparisons", len(self.comparison_collector))
self.agent_logger.log_simple(
"labels/labeled_comparisons", len(self.comparison_collector.labeled_decisive_comparisons))
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--env_id', required=True)
parser.add_argument('-p', '--predictor', required=True)
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-s', '--seed', default=1, type=int)
parser.add_argument('-w', '--workers', default=4, type=int)
parser.add_argument('-l', '--n_labels', default=None, type=int)
parser.add_argument('-L', '--pretrain_labels', default=None, type=int)
parser.add_argument('-t', '--num_timesteps', default=5e6, type=int)
parser.add_argument('-a', '--agent', default="parallel_trpo", type=str)
parser.add_argument('-i', '--pretrain_iters', default=10000, type=int)
parser.add_argument('-V', '--no_videos', action="store_true")
args = parser.parse_args()
env_id = args.env_id
run_name = "%s/%s-%s" % (env_id, args.name, int(time()))
summary_writer = make_summary_writer(run_name)
env = make_with_torque_removed(env_id)
num_timesteps = int(args.num_timesteps)
experiment_name = slugify(args.name)
if args.predictor == "rl":
predictor = TraditionalRLRewardPredictor(summary_writer)
else:
agent_logger = AgentLogger(summary_writer)
if args.predictor == "synth":
comparison_collector = SyntheticComparisonCollector()
elif args.predictor == "human":
bucket = os.environ.get('RL_TEACHER_GCS_BUCKET')
assert bucket and bucket.startswith("gs://"), "env variable RL_TEACHER_GCS_BUCKET must start with gs://"
comparison_collector = HumanComparisonCollector(env_id, experiment_name=experiment_name)
else:
raise ValueError("Bad value for --predictor: %s" % args.predictor)
pretrain_labels = args.pretrain_labels if args.pretrain_labels else args.n_labels // 4
if args.n_labels:
label_schedule = LabelAnnealer(
agent_logger,
final_timesteps=num_timesteps,
final_labels=args.n_labels,
pretrain_labels=pretrain_labels)
else:
print("No label limit given. We will request one label every few seconds")
label_schedule = ConstantLabelSchedule(pretrain_labels=pretrain_labels)
print("Starting random rollouts to generate pretraining segments. No learning will take place...")
pretrain_segments = segments_from_rand_rollout(env_id, make_with_torque_removed,
n_desired_segments=pretrain_labels * 2, clip_length_in_seconds=CLIP_LENGTH)
for i in range(pretrain_labels): # Turn our random segments into comparisons
comparison_collector.add_segment_pair(pretrain_segments[i], pretrain_segments[i + pretrain_labels])
# Sleep until the human has labeled most of the pretraining comparisons
while len(comparison_collector.labeled_comparisons) < int(pretrain_labels * 0.75):
comparison_collector.label_unlabeled_comparisons()
if args.predictor == "synth":
print("%s synthetic labels generated... " % (len(comparison_collector.labeled_comparisons)))
elif args.predictor == "human":
print("%s/%s comparisons labeled. Please add labels w/ the human-feedback-api. Sleeping... " % (
len(comparison_collector.labeled_comparisons), pretrain_labels))
sleep(5)
# Start the actual training
predictor = ComparisonRewardPredictor(
env,
summary_writer,
comparison_collector=comparison_collector,
agent_logger=agent_logger,
label_schedule=label_schedule,
)
for i in range(args.pretrain_iters):
predictor.train_predictor() # Train on pretraining labels
if i % 100 == 0:
print("%s/%s predictor pretraining iters... " % (i, args.pretrain_iters))
# Wrap the predictor to capture videos every so often:
if not args.no_videos:
predictor = SegmentVideoRecorder(predictor, env, save_dir=osp.join('/tmp/rl_teacher_vids', run_name))
# We use a vanilla agent from openai/baselines that contains a single change that blinds it to the true reward
# The single changed section is in `rl_teacher/agent/trpo/core.py`
print("Starting joint training of predictor and agent")
if args.agent == "parallel_trpo":
train_parallel_trpo(
env_id=env_id,
make_env=make_with_torque_removed,
predictor=predictor,
summary_writer=summary_writer,
workers=args.workers,
runtime=(num_timesteps / 1000),
max_timesteps_per_episode=get_timesteps_per_episode(env),
timesteps_per_batch=8000,
max_kl=0.001,
seed=args.seed,
)
elif args.agent == "pposgd_mpi":
def make_env():
return make_with_torque_removed(env_id)
train_pposgd_mpi(make_env, num_timesteps=num_timesteps, seed=args.seed, predictor=predictor)
else:
raise ValueError("%s is not a valid choice for args.agent" % args.agent)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | FabioCaffarello/nx-events-lake | apps~services-gold-layer~document-vectorizer~document_vectorizer~jobs~handlers~pdf_embeddings~job.py | import io
import os
import re
from typing import Tuple
import warlock
from dto_config_handler.output import ConfigDTO
from dto_events_handler.shared import StatusDTO
from pylog.log import setup_logging
from PyPDF2 import PdfReader
from langchain.vectorstores.neo4j_vector import Neo4jVector
from langchain.text_splitter import RecursiveCharacterTextSplitter
from pyminio.client import minio_client, MinioClient
logger = setup_logging(__name__)
class Job:
"""
Handles the processing of document data.
Args:
config (ConfigDTO): The configuration data.
input_data (type[warlock.model.Model]): The input data for the job.
embeddings: Embeddings for document processing.
dimension: Dimension for embeddings.
Attributes:
_config (ConfigDTO): The configuration data.
_source (str): The source identifier.
_context (str): The context environment.
_input_data (type[warlock.model.Model]): The input data for the job.
_embeddings: Embeddings for document processing.
_dimension: Dimension for embeddings.
_partition (str): The partition identifier.
_target_endpoint (str): The document's target endpoint.
_neo4j_url (str): The URL for Neo4j database.
_neo4j_username (str): The username for Neo4j database.
_neo4j_password (str): The password for Neo4j database.
Methods:
__init__(self, config: ConfigDTO, input_data: type[warlock.model.Model], embeddings, dimension) -> None:
Initializes the Job instance.
_get_bucket_name(self, layer: str) -> str:
Generates the bucket name for Minio storage.
_get_status(self) -> StatusDTO:
Gets the success status.
_get_file_path(self):
Extracts the file path from the target endpoint.
get_pdf_from_bucket(self, minio: MinioClient) -> PdfReader:
Downloads and returns the PDF file from Minio.
_convert_document_to_text(self, pdf_reader: PdfReader) -> str:
Converts the PDF document to text.
split_document(self, pdf_reader: PdfReader):
Splits the document into chunks using langchain_textsplitter.
get_neo4j_credentials(self):
Retrieves the Neo4j database credentials.
store_embeddings(self, chunks):
Stores the document chunks in the Neo4j database.
run(self) -> Tuple[dict, StatusDTO]:
Runs the document processing job.
"""
def __init__(self, config: ConfigDTO, input_data: type[warlock.model.Model], embeddings, dimension) -> None:
"""
Initializes the Job instance.
Args:
config (ConfigDTO): The configuration data.
input_data (type[warlock.model.Model]): The input data for the job.
embeddings: Embeddings for document processing.
dimension: Dimension for embeddings.
Returns:
None
"""
self._config = config
self._source = config.source
self._context = config.context
self._input_data = input_data
self._embeddings = embeddings
self._dimension = dimension
self._partition = input_data.partition
self._target_endpoint = input_data.documentUri
self._neo4j_url, self._neo4j_username, self._neo4j_password = self.get_neo4j_credentials()
def _get_bucket_name(self, layer: str) -> str:
"""
Generates the bucket name for Minio storage.
Args:
layer (str): The layer of the bucket.
Returns:
str: The bucket name.
"""
return "{layer}-{context}-source-{source}".format(
layer=layer,
context=self._context,
source=self._source,
)
def _get_status(self) -> StatusDTO:
"""
Gets the success status.
Returns:
StatusDTO: The success status.
"""
return StatusDTO(
code=200,
detail="Success",
)
def _get_file_path(self):
"""
Extracts the file path from the target endpoint.
Returns:
            str | None: The file path starting from the partition, or None if no match is found.
"""
match = re.search(f"{self._partition}.*", self._target_endpoint)
if match:
return match.group()
else:
logger.warning("Year not found in onclick attribute")
def get_pdf_from_bucket(self, minio: MinioClient) -> PdfReader:
"""
Downloads and returns the PDF file from Minio.
Args:
minio (MinioClient): The Minio client.
Returns:
PdfReader: The PDF file reader.
"""
logger.info(f"endpoint: {self._target_endpoint}")
file_bytes = minio.download_file_as_bytes(self._get_bucket_name(layer="landing"), self._get_file_path())
# TODO: AttributeError: 'bytes' object has no attribute 'seek'
return PdfReader(io.BytesIO(file_bytes))
def _convert_document_to_text(self, pdf_reader: PdfReader) -> str:
"""
Converts the PDF document to text.
Args:
pdf_reader (PdfReader): The PDF file reader.
Returns:
str: The text extracted from the document.
"""
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
return text
def split_document(self, pdf_reader: PdfReader):
"""
Splits the document into chunks using langchain_textsplitter.
Args:
pdf_reader (PdfReader): The PDF file reader.
Returns:
            list[str]: The text chunks produced by the splitter.
"""
text = self._convert_document_to_text(pdf_reader)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000, chunk_overlap=200, length_function=len
)
chunks = text_splitter.split_text(text=text)
return chunks
def get_neo4j_credentials(self):
"""
Retrieves the Neo4j database credentials.
Returns:
Tuple[str, str, str]: The Neo4j database URL, username, and password.
"""
url = os.getenv("NEO4J_URL")
username = os.getenv("NEO4J_USERNAME")
password = os.getenv("NEO4J_PASSWORD")
return url, username, password
def store_embeddings(self, chunks):
"""
Stores the document chunks in the Neo4j database.
Args:
chunks: The document chunks.
Returns:
None
"""
vectorstore = Neo4jVector.from_texts(
chunks,
url=self._neo4j_url,
username=self._neo4j_username,
password=self._neo4j_password,
embedding=self._embeddings,
index_name="pdf_enbeddings",
node_label="PdfEnbeddingsChunk",
pre_delete_collection=False,
)
def run(self) -> Tuple[dict, StatusDTO]:
"""
Runs the document processing job.
Returns:
Tuple[dict, StatusDTO]: A tuple containing job result and status.
"""
logger.info(f"Job triggered with input: {self._input_data}")
minio = minio_client()
pdf_reader = self.get_pdf_from_bucket(minio)
document_chunks = self.split_document(pdf_reader)
self.store_embeddings(document_chunks)
result = {"documentUri": "", "partition": self._partition}
logger.info(f"Job result: {result}")
return result, self._get_status()
| [] |
2024-01-10 | aceliuchanghong/openAIProxy | getResponse~reponse.py | import os
import httpx
from openai import OpenAI
proxyHost = "127.0.0.1"
proxyPort = 10809
client = OpenAI(http_client=httpx.Client(proxies=f"http://{proxyHost}:{proxyPort}"))
client.api_key = os.getenv("OPENAI_API_KEY")
completion = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[
{"role": "user",
"content": "你是哪个模型?给我你的模型编号,是gpt-4-1106-preview还是gpt-3.5,还是其他?"
}
]
)
print(completion.choices[0].message.content)
| [
"你是哪个模型?给我你的模型编号,是gpt-4-1106-preview还是gpt-3.5,还是其他?"
] |
2024-01-10 | aceliuchanghong/openAIProxy | img~dalle3.py | import openai
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
proxyHost = "127.0.0.1"
proxyPort = 10809
proxies = {
"http": f"http://{proxyHost}:{proxyPort}",
"https": f"http://{proxyHost}:{proxyPort}"
}
openai.proxy = proxies
# Call the API
response = openai.images.generate(
model="dall-e-3",
prompt="a cute cat with a hat on",
size="1024x1024",
quality="standard",
n=1,
)
# Show the result that has been pushed to an url
print(response.data[0].url)
| [] |
2024-01-10 | aceliuchanghong/openAIProxy | mp3~00.py | import os
from openai import OpenAI
import httpx
proxyHost = "127.0.0.1"
proxyPort = 10809
proxies = {
"http": f"http://{proxyHost}:{proxyPort}",
"https": f"http://{proxyHost}:{proxyPort}"
}
client = OpenAI(http_client=httpx.Client(proxies=f"http://{proxyHost}:{proxyPort}"))
client.api_key = os.getenv("OPENAI_API_KEY")
audio_file = open("WeChat_20231007161725.mp3", "rb")
transcript = client.audio.transcriptions.create(
model="whisper-1",
file=audio_file
)
| [] |
2024-01-10 | aceliuchanghong/openAIProxy | proxyUsage~listModel.py | import os
import openai
import json
proxyHost = "127.0.0.1"
proxyPort = 10809
proxies = {
"http": f"http://{proxyHost}:{proxyPort}",
"https": f"http://{proxyHost}:{proxyPort}"
}
openai.proxy = proxies
openai.api_key = os.getenv("OPENAI_API_KEY")
print(openai.api_key)
data = openai.Model.list()
model_ids = [item['id'] for item in data['data']]
print(model_ids)
| [] |
2024-01-10 | Vejay-KS/codecompanion | backend~codecompanionapp~BaseLLM.py | import requests
import json
import openai
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
class BaseLLM1():
__API_KEY = ""
__API_ENDPOINT = "https://api.openai.com/v1/chat/completions"
def _get_headers(self):
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {BaseLLM1.__API_KEY}",
}
return headers
def _get_data(self, messages, model="gpt-3.5-turbo", temperature=1):
data = {
"model": model,
"messages": [{"role": "user", "content": messages}],
"temperature": temperature,
"max_tokens": 100
}
return data
def _get_response(self, headers, data):
response = requests.post(BaseLLM1.__API_ENDPOINT, headers=headers, data=json.dumps(data))
        return response
| [] |