date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | sohampatil17/langchain-summarizer | InternalGPT-langchain.py | import streamlit as st
import os
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator = "\n",
chunk_size = 1000,
chunk_overlap = 200,
length_function = len
)
chunks = text_splitter.split_text(text)
return chunks
def get_vectorstore(text_chunks):
embeddings = OpenAIEmbeddings()
#embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
vectorstore = FAISS.from_texts(texts = text_chunks, embedding = embeddings)
return vectorstore
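# A minimal retrieval sketch (not part of the app flow; the query string is
# illustrative): once the FAISS store is built, it can also be queried directly:
#   docs = vectorstore.similarity_search("What is the notice period?", k=4)
#   print(docs[0].page_content)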
def get_conversation_chain(vectorstore):
llm = ChatOpenAI()
memory = ConversationBufferMemory(memory_key = 'chat_history', return_messages = True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm = llm,
retriever = vectorstore.as_retriever(),
memory = memory
)
return conversation_chain
def handle_userinput(user_question):
response = st.session_state.conversation({'question': user_question})
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.write(user_template.replace("{{MSG}}", message.content), unsafe_allow_html = True)
else:
st.write(bot_template.replace("{{MSG}}", message.content), unsafe_allow_html = True)
def main():
os.environ['OPENAI_API_KEY'] = "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"
st.set_page_config(page_title="Attorney AI", page_icon= "📄")
st.write(css, unsafe_allow_html = True)
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
st.header("Document AI 📄")
user_question = st.text_input("Ask a question....")
if user_question:
handle_userinput(user_question)
with st.sidebar:
st.subheader("Company Documents")
pdf_docs = st.file_uploader("Upload your files here!", accept_multiple_files = True)
if st.button("Upload"):
with st.spinner("Processing"):
raw_text = get_pdf_text(pdf_docs)
text_chunks = get_text_chunks(raw_text)
vectorstore = get_vectorstore(text_chunks)
st.session_state.conversation = get_conversation_chain(vectorstore)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | CMU-IDS-2020/fp-index-out-of-bounds | alternative~predict.py | import torch, os
from pytorch_pretrained_bert import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel
from tqdm import tqdm
import numpy as np
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME
#from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
from bertviz.bertviz.pytorch_pretrained_bert import BertModel, BertTokenizer
special_tokens = ['<POS>', '<NEG>','<CON_START>','<START>','<END>'] # Set the special tokens
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt', special_tokens=special_tokens)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt', num_special_tokens=len(special_tokens))
path = os.path.join(os.getcwd(), "./pytorch_model_zero_grad_1.bin") ## Model Path
model_state_dict = torch.load(path, map_location=device)
model.load_state_dict(model_state_dict)
model.to(device)
model.eval()
bert_classifier_dir = "./bert_classifier/"
model_cls = BertForSequenceClassification.from_pretrained(bert_classifier_dir, num_labels=2)
tokenizer_cls = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
#model_cls.to(device)
#model_cls.eval()
max_seq_len=70
sm = torch.nn.Softmax(dim=-1)
model.config.n_positions
def prediction_with_beam_search(ref_text, beam_width=3, vocab_length=40483):
"""
This function decodes sentences using beam search.
It will output #sentences = beam_width. This function works on a single example.
ref_text : string : Input sentence
beam_width : int : Width of the output beam
vocab_length : int : Size of the Vocab after adding the special tokens
"""
done = [False for i in range(beam_width)] # To track which beams are already decoded
stop_decode = False
decoded_sentences=[] # List of decoded sentences at any given time
sm = torch.nn.Softmax(dim=-1) # To calculate Softmax over the final layer Logits
tokens = tokenizer.tokenize(ref_text) # Tokenize the input text
print(tokens)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokens) # Convert tokens to ids
index_tokens = [indexed_tokens for i in range(beam_width)] # Replication of Input ids for all the beams
#index_tokens = [indexed_tokens for i in range(beam_width)]
torch_tensor = torch.tensor(index_tokens).to(device)
beam_indexes = [[] for i in range(beam_width)] # indexes of the current decoded beams
best_scoes = [0 for i in range(beam_width)] # Stores the running probability score of each beam
count = 0
while count < model.config.n_positions and not stop_decode:
if count == 0: # For the first step, when only one sentence is available
with torch.no_grad():
# Calculate output probability distribution over the Vocab,
preds = sm(model(torch_tensor)) # shape = [beam_width, len(input_sen)+1, vocab_length]
top_v, top_i = preds[:,-1,:].topk(beam_width) # Fetch the top indexes and their values
[beam_indexes[i].append(top_i[0][i].tolist()) for i in range(beam_width)] # Update the Beam indexes
# Update the best_scores, for first time just add the topk values directly
for i in range(beam_width):
best_scoes[i] = top_v[0][i].item()
count += 1
else: # After first step
# Prepare the current_state by concatenating the original input and the decoded beam indexes
current_state = torch.cat((torch_tensor, torch.tensor(beam_indexes).to(device)), dim=1)
# Prediction on the current state
with torch.no_grad():
preds = sm(model(current_state))
# Multiply new probability predictions with corresponding best scores
# Total scores = beam_width * vocab_length
flatten_score = (preds[:,-1,:]*torch.tensor(best_scoes).to(device).unsqueeze(1)).view(-1)
# Fetch the top scores and indexes
vals, inx = flatten_score.topk(beam_width)
# best_score_inx saves the index of best beams after multiplying the probability of new prediction
best_scoes_inx = (inx // vocab_length).tolist()
best_scoes = vals.tolist()
# Unflatten the index
correct_inx = (inx % vocab_length).tolist()
# Check if done for all the Beams
for i in range(beam_width):
if correct_inx[i] == tokenizer.special_tokens["<END>"]:
done[i] = True
# Update the best score for each of the current beams
for i in range(beam_width):
if not done[i]:
best_scoes[i] = vals.tolist()[i]
# Check if all the beams are done
if (sum(done) == beam_width):
stop_decode = True
# Prepare the new beams
temp_lt=[0 for i in range(beam_width)]
for i,x in enumerate(best_scoes_inx):
temp_lt[i] = beam_indexes[x] + [correct_inx[i]]
# Update the Beam indexes
beam_indexes = temp_lt
del temp_lt
count += 1
# Decode each beam's indexes up to the <END> token only and convert them into sentences
for i in range(beam_width):
try:
end_index = beam_indexes[i].index(tokenizer.special_tokens["<END>"])
except ValueError:
end_index = len(beam_indexes[i])
decoded_sentences.append(tokenizer.decode(beam_indexes[i][:end_index]))
return decoded_sentences
def get_best_sentence(input_sentences, sentiment=1):
"""
This function selects the sentence from the Beam of the sentences,
based on the classification probability score.
input_sentences : list of strings : Sentences generated by the Beam search decoding
sentiment: int : Expected sentiment (in general class for the classification)
"""
# BERT pre-processing
ids = []
segment_ids = []
input_masks = []
pred_lt = []
for sen in input_sentences:
text_tokens = tokenizer_cls.tokenize(sen)
tokens = ["[CLS]"] + text_tokens + ["[SEP]"]
temp_ids = tokenizer_cls.convert_tokens_to_ids(tokens)
input_mask = [1] * len(temp_ids)
segment_id = [0] * len(temp_ids)
padding = [0] * (max_seq_len - len(temp_ids))
temp_ids += padding
input_mask += padding
segment_id += padding
ids.append(temp_ids)
input_masks.append(input_mask)
segment_ids.append(segment_id)
ids = torch.tensor(ids).to(device)
segment_ids = torch.tensor(segment_ids).to(device)
input_masks = torch.tensor(input_masks).to(device)
# prediction
with torch.no_grad():
preds = sm(model_cls(ids, segment_ids, input_masks))
preds = preds.tolist()
inx, inx_val = None, 0
for i in range(len(input_sentences)):
temp = preds[i][sentiment]
if temp > inx_val:
inx = i
inx_val = temp
return input_sentences[inx]
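# End-to-end sketch (assumes the fine-tuned GPT weights and the local BERT
# classifier directory referenced above are present):
#   beams = prediction_with_beam_search("<POS> <CON_START> the food is <START>", beam_width=3)
#   best = get_best_sentence(beams, sentiment=1)
#   print(best)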
op = prediction_with_beam_search("<NEG> <CON_START> there are foods <START>", 1)
print(op)
# print(get_best_sentence(op))
| [] |
2024-01-10 | jeremysilva1098/AITrendTracker | trackers~research~arxiv.py | import requests
import xml.etree.ElementTree as ET
import json
import datetime
from typing import List, Optional
import os
from data_models import Article
from langchain.document_loaders import PyPDFLoader
class arxivApi:
def __init__(self):
self.baseUrl = "http://export.arxiv.org/api/query"
def search_category(self, category: str, num_results: int = 10,
minDate: Optional[datetime.datetime] = None,
maxDate: Optional[datetime.datetime] = None) -> List[Article]:
params = {
"search_query": f"cat:{category}",
"sortBy": "submittedDate",
"sortOrder": "descending",
"max_results": num_results
}
response = requests.get(self.baseUrl, params=params)
# get the XML
root = ET.fromstring(response.content)
# create output dict
resSet = []
for entry in root.findall('{http://www.w3.org/2005/Atom}entry'):
id_ = entry.find('{http://www.w3.org/2005/Atom}id').text
title = entry.find('{http://www.w3.org/2005/Atom}title').text
summary = entry.find('{http://www.w3.org/2005/Atom}summary').text
categories = [category.get('term') for category in entry.findall('{http://www.w3.org/2005/Atom}category')]
publishDate = entry.find('{http://www.w3.org/2005/Atom}published').text
links = [link.get('href') for link in entry.findall('{http://www.w3.org/2005/Atom}link')]
article = Article(
title=title,
summary=summary,
categories=categories,
publishDate=publishDate,
link=[l for l in links if 'pdf' in l][0]
)
# check if published date is in range
if minDate and maxDate:
if minDate <= datetime.datetime.strptime(publishDate, "%Y-%m-%dT%H:%M:%SZ") <= maxDate:
resSet.append(article)
else:
resSet.append(article)
return resSet
def search_keywords(self, keywords: str, num_results: int = 200,
minDate: Optional[datetime.datetime] = None,
maxDate: Optional[datetime.datetime] = None) -> List[Article]:
params = {
"search_query": keywords,
"sortBy": "relevance",
"sortOrder": "descending",
"max_results": num_results,
"start": 0
}
response = requests.get(self.baseUrl, params=params)
# get the XML
root = ET.fromstring(response.content)
# create output dict
resSet = []
for entry in root.findall('{http://www.w3.org/2005/Atom}entry'):
id_ = entry.find('{http://www.w3.org/2005/Atom}id').text
title = entry.find('{http://www.w3.org/2005/Atom}title').text
summary = entry.find('{http://www.w3.org/2005/Atom}summary').text
categories = [category.get('term') for category in entry.findall('{http://www.w3.org/2005/Atom}category')]
publishDate = entry.find('{http://www.w3.org/2005/Atom}published').text
links = [link.get('href') for link in entry.findall('{http://www.w3.org/2005/Atom}link')]
article = Article(
title=title,
summary=summary,
categories=categories,
publishDate=publishDate,
link=[l for l in links if 'pdf' in l][0]
)
# check if published date is in range
if minDate and maxDate:
if minDate <= datetime.datetime.strptime(publishDate, "%Y-%m-%dT%H:%M:%SZ") <= maxDate:
resSet.append(article)
else:
resSet.append(article)
return resSet
def get_full_article_text(self, link: str, max_tokens: int = 16000) -> str:
'''get the text from the pdf link as raw text'''
# get the pdf link
response = requests.get(link)
# convert the res to raw text
fname = "temp.pdf"
with open(fname, "wb") as f:
f.write(response.content)
loader = PyPDFLoader(fname)
content = loader.load_and_split()
# delete the file
os.remove(fname)
outputStr = ""
for doc in content:
# cut off the article when we hit the References section
if "References" in doc.page_content:
break
# make sure summary doesn't exceed the model's context window
elif (len(outputStr) / 4) > max_tokens * 0.80:
break
outputStr += doc.page_content
outputStr += "\n\n"
return outputStr
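# Usage sketch (the arXiv PDF URL is illustrative only):
#   api = arxivApi()
#   text = api.get_full_article_text("https://arxiv.org/pdf/1706.03762", max_tokens=16000)
#   print(text[:500])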
'''if __name__ == "__main__":
api = arxivApi()
maxDate = datetime.datetime.now()
minDate = maxDate - datetime.timedelta(days=7)
print(api.search_category("cs.AI", minDate=minDate, maxDate=maxDate))''' | [] |
2024-01-10 | jeremysilva1098/AITrendTracker | trackers~news-and-blogs~news.py | import requests
import json
from dotenv import load_dotenv
import os
from data_models import Article
import pathlib
from typing import List, Optional
from langchain.document_loaders import UnstructuredURLLoader, SeleniumURLLoader
from dotenv import load_dotenv
# load in the env variables
'''par_dir = pathlib.Path(__file__).parent.parent
dotenv_dir = f"{par_dir}/.env"
print("Reading .env variables from: ", dotenv_dir)
load_dotenv(dotenv_path=dotenv_dir)'''
class newsApi:
def __init__(self) -> None:
self.news_key = os.getenv("NEWS_API_KEY")
self.news_url = "https://newsapi.org/v2/everything"
def get_url_content(self, url: str) -> str:
loader = UnstructuredURLLoader([url])
#loader = SeleniumURLLoader([url])
data = loader.load()
content = ""
for page in data:
content += page.page_content
return content
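# Usage sketch (assumes NEWS_API_KEY is set for the constructor and that the
# target page is publicly readable; the URL is illustrative):
#   api = newsApi()
#   print(api.get_url_content("https://example.com")[:200])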
def search_keywords(self, keywords: str, num_results: int = 10,
minDate: Optional[str] = None,
maxDate: Optional[str] = None) -> List[Article]:
params = {
"q": keywords,
"from": minDate,
"to": maxDate,
"language": "en",
"sortBy": "relevancy",
"pageSize": num_results * 2,
}
headers = {
# add the key as a Bearer token
"Authorization": f"Bearer {self.news_key}"
}
response = requests.get(self.news_url, params=params, headers=headers)
if response.status_code != 200:
print("Status code: ", response.status_code)
raise Exception(f"An error occurred: {response.content}")
# get the JSON
res = json.loads(response.content)
# create output
resSet = []
count = 0 # Counter to keep track of the number of results
for article in res['articles']:
title = article['title']
source = article['source']['name']
description = article['description']
url = article['url']
content = self.get_url_content(url)
# make sure there is content
if len(content) > 500:
resSet.append(Article(
title=title,
source=source,
description=description,
url=url,
content=content
))
count += 1 # Increment the counter
if count == num_results:
break # Exit the loop when the desired number of results is reached
else:
continue
return resSet | [] |
2024-01-10 | onepointconsulting/hr-chatbot | generate_embeddings.py | from langchain.schema import Document
from langchain.document_loaders import PyPDFium2Loader
from langchain.vectorstores import FAISS
from typing import TypeVar, List
from pathlib import Path
from dotenv import load_dotenv
import numpy as np
import os
import re
from config import cfg
from log_init import logger
load_dotenv()
VST = TypeVar("VST", bound="VectorStore")
def load_pdfs(path: Path) -> List[Document]:
"""
Loads the PDFs and extracts a document per page.
The page details are added to the extracted metadata
Parameters:
path (Path): The path where the PDFs are saved.
Returns:
List[Document]: A list with one Document per page of every PDF found.
"""
assert path.exists()
all_pages = []
for pdf in path.glob("*.pdf"):
loader = PyPDFium2Loader(str(pdf.absolute()))
pages: List[Document] = loader.load_and_split()
for i, p in enumerate(pages):
file_name = re.sub(r".+[\\/]", "", p.metadata["source"])
p.metadata["source"] = f"{file_name} page {i + 1}"
all_pages.extend(pages)
logger.info(f"Processed {pdf}, all_pages size: {len(all_pages)}")
log_stats(all_pages)
return all_pages
def log_stats(documents: List[Document]):
logger.info(f"Total number of documents {len(documents)}")
counts = []
for d in documents:
counts.append(count_words(d))
logger.info(f"Tokens Max {np.max(counts)}")
logger.info(f"Tokens Min {np.min(counts)}")
logger.info(f"Tokens Mean {np.mean(counts)}")
def generate_embeddings(
documents: List[Document], path: Path, faiss_persist_directory: str
) -> VST:
"""
Receives a list of documents and generates the embeddings via OpenAI API.
Parameters:
documents (List[Document]): The document list with one page per document.
path (Path): The path where the documents are found.
Returns:
VST: Returns a reference to the vector store.
"""
try:
docsearch = FAISS.from_documents(documents, cfg.embeddings)
docsearch.save_local(faiss_persist_directory)
logger.info("Vector database persisted")
except Exception as e:
logger.error(f"Failed to process {path}: {str(e)}")
if "docsearch" in vars() or "docsearch" in globals():
docsearch.persist()
return docsearch
return docsearch
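# Reload sketch (the directory name is an assumption; pass whatever
# faiss_persist_directory was used above):
#   store = FAISS.load_local("faiss_store", cfg.embeddings)
#   print(store.similarity_search("annual leave policy", k=3)[0].page_content)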
def count_words(document: Document) -> int:
splits = [s for s in re.split("[\s,.]", document.page_content) if len(s) > 0]
return len(splits)
if __name__ == "__main__":
doc_location: str = os.environ["DOC_LOCATION"]
documents = load_pdfs(Path(doc_location))
assert len(documents) > 0
logger.info(documents[2].page_content)
generate_embeddings(documents, doc_location)
| [] |
2024-01-10 | onepointconsulting/hr-chatbot | hr_chatbot_chainlit.py | from langchain.chains import RetrievalQAWithSourcesChain
import chainlit as cl
from chain_factory import load_all_chains
from geolocation import extract_ip_address, geolocate
from source_splitter import source_splitter
from chainlit.context import get_emitter
from log_init import logger
from pathlib import Path
from typing import Dict, Optional
from config import cfg
KEY_META_DATAS = "metadatas"
KEY_TEXTS = "texts"
KEY_GEOLOCATION_COUNTRY_CODE = "geolocation_country_code"
def set_session_vars(user_session_dict: Dict):
for k, v in user_session_dict.items():
cl.user_session.set(k, v)
def create_pdf(pdf_name: str, pdf_path: str) -> Optional[cl.File]:
"""
Creates a file download button for a PDF file in case it is found.
Parameters:
pdf_name (str): The file name
pdf_path (str): The file name
Returns:
Optional[cl.File]: A Chainlit file element when the PDF exists, otherwise None.
"""
logger.info(f"Creating pdf for {pdf_path}")
# Sending a pdf with the local file path
country_code = cl.user_session.get(KEY_GEOLOCATION_COUNTRY_CODE)
country_config = cfg.location_persistence_map.get(country_code)
if country_config:
logger.info("country_config found")
doc_location: Path = country_config.get("doc_location")
doc_path = doc_location / pdf_path
if doc_path.exists():
logger.info("Creating pdf component")
return cl.File(
name=pdf_name, display="inline", path=str(doc_path.absolute())
)
else:
logger.info(f"doc path {doc_path} does not exist.")
return None
@cl.langchain_factory(use_async=True)
async def init():
"""
Loads the vector data store object and the PDF documents. Creates the QA chain.
Sets up some session variables and removes the Chainlit footer.
Parameters:
use_async (bool): Determines whether async is to be used or not.
Returns:
RetrievalQAWithSourcesChain: The QA chain
"""
emitter = get_emitter()
# Please note this works only with a modified version of Streamlit
# The repo with this modification is here: https://github.com/gilfernandes/chainlit_hr_extension
country_code = "GB"
geolocation_failed = False
try:
remote_address = extract_ip_address(emitter.session.environ)
geo_location = geolocate(remote_address)
if geo_location.country_code != "Not found":
country_code = geo_location.country_code
# await display_location_details(geo_location, country_code)
except:
logger.exception("Could not locate properly")
geolocation_failed = True
if geolocation_failed:
await cl.Message(content=f"Geolocation failed ... I do not know where you are.").send()
else:
logger.info(f"Geo location: {geo_location}")
msg = cl.Message(content=f"Processing files. Please wait.")
await msg.send()
chain_dict = load_all_chains(country_code)
qa_data = chain_dict[country_code]
documents = qa_data.documents
chain: RetrievalQAWithSourcesChain = qa_data.chain
metadatas = [d.metadata for d in documents]
texts = [d.page_content for d in documents]
set_session_vars(
{
KEY_META_DATAS: metadatas,
KEY_TEXTS: texts,
KEY_GEOLOCATION_COUNTRY_CODE: country_code,
}
)
msg.content = f"You can now ask questions about Onepoint HR ({country_code})!"
await msg.send()
return chain
async def display_location_details(geo_location, country_code):
geo_location_msg = cl.Message(
content=f"""Geo location:
- country: {geo_location.country_name}
- country code: {country_code}"""
)
await geo_location_msg.send()
@cl.langchain_postprocess
async def process_response(res) -> cl.Message:
"""
Tries to extract the sources and corresponding texts from the sources.
Parameters:
res (dict): A dictionary with the answer and sources provided by the LLM via LangChain.
Returns:
cl.Message: The message containing the answer and the list of sources with corresponding texts.
"""
answer = res["answer"]
sources = res["sources"].strip()
source_elements = []
# Get the metadata and texts from the user session
metadatas = cl.user_session.get(KEY_META_DATAS)
all_sources = [m["source"] for m in metadatas]
texts = cl.user_session.get(KEY_TEXTS)
found_sources = []
pdf_elements = []
if sources:
logger.info(f"sources: {sources}")
raw_sources, file_sources = source_splitter(sources)
for i, source in enumerate(raw_sources):
try:
source_name = file_sources[i]
pdf_element = create_pdf(source_name, source_name)
if pdf_element:
pdf_elements.append(pdf_element)
logger.info(f"PDF Elements: {pdf_elements}")
else:
logger.warning(f"No pdf element for {source_name}")
index = all_sources.index(source)
text = texts[index]
found_sources.append(source)
# Create the text element referenced in the message
logger.info(f"Found text in {source_name}")
source_elements.append(cl.Text(content=text, name=source_name))
except ValueError as e:
logger.error(f"Value error {e}")
continue
if found_sources:
answer += f"\nSources: {', '.join(found_sources)}"
else:
answer += f"\n{sources}"
logger.info(f"PDF Elements: {pdf_elements}")
await cl.Message(content=answer, elements=source_elements).send()
await cl.Message(content="PDF Downloads", elements=pdf_elements).send()
if __name__ == "__main__":
pass
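# Run sketch: a Chainlit app such as this one is typically started with
#   chainlit run hr_chatbot_chainlit.py
# (OPENAI_API_KEY and the .env variables read by config.py must be set).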
| [] |
2024-01-10 | onepointconsulting/hr-chatbot | config.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from pathlib import Path
import os
from dotenv import load_dotenv
load_dotenv()
class Config:
faiss_persist_directory_uk = Path(os.environ["FAISS_STORE_UK"])
faiss_persist_directory_india = Path(os.environ["FAISS_STORE_INDIA"])
faiss_dirs = [faiss_persist_directory_uk, faiss_persist_directory_india]
for d in faiss_dirs:
if not d.exists():
d.mkdir()
doc_location_uk = Path(os.environ["DOC_LOCATION_UK"])
doc_location_india = Path(os.environ["DOC_LOCATION_INDIA"])
doc_locations = [doc_location_uk, doc_location_india]
location_persistence_map = {
"GB": {
"faiss_persist_directory": faiss_persist_directory_uk,
"doc_location": doc_location_uk,
},
"IN": {
"faiss_persist_directory": faiss_persist_directory_india,
"doc_location": doc_location_india,
},
}
for location in doc_locations:
if not location.exists():
raise Exception(f"File not found: {location}")
embeddings = OpenAIEmbeddings(chunk_size=100)
model = "gpt-3.5-turbo-16k"
# model = 'gpt-4'
llm = ChatOpenAI(model=model, temperature=0)
search_results = 5
def __repr__(self) -> str:
return f"""# Configuration
faiss_persist_directories: {self.faiss_dirs}
doc_locations: {self.doc_locations}
embeddings: {self.embeddings}
llm: {self.llm}
"""
cfg = Config()
if __name__ == "__main__":
print(cfg)
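# Example .env sketch for the variables read above (the paths and key are
# illustrative placeholders):
#   FAISS_STORE_UK=./faiss_uk
#   FAISS_STORE_INDIA=./faiss_india
#   DOC_LOCATION_UK=./docs/uk
#   DOC_LOCATION_INDIA=./docs/india
#   OPENAI_API_KEY=sk-...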
| [] |
2024-01-10 | onepointconsulting/hr-chatbot | hr_chatbot_cli.py | from langchain.chains import RetrievalQAWithSourcesChain
from prompt_toolkit import HTML, prompt, PromptSession
from prompt_toolkit.history import FileHistory
from chain_factory import create_retrieval_chain, load_embeddings
from log_init import logger
import sys
def init_chain():
humour = False
if len(sys.argv) > 1:
if sys.argv[1] == "humor":
humour = True
logger.warning("Humor flag activated")
session = PromptSession(history=FileHistory(".agent-history-file"))
docsearch, documents = load_embeddings()
chain: RetrievalQAWithSourcesChain = create_retrieval_chain(
docsearch, humour=humour
)
return session, chain
if __name__ == "__main__":
session, chain = init_chain()
while True:
question = session.prompt(
HTML("<b>Type <u>Your question</u></b> ('q' to exit): ")
)
if question.lower() in ["q", "exit", "quit"]:
break
response = chain({"question": question})
logger.info(f"Answer: {response['answer']}")
logger.info(f"Sources: {response['sources']}")
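# Invocation sketch (assumes OPENAI_API_KEY and the .env variables used by
# config.py are set; the optional "humor" flag enables the joke prompt):
#   python hr_chatbot_cli.py humor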
| [] |
2024-01-10 | onepointconsulting/hr-chatbot | hr_model.py | from dataclasses import dataclass
from typing import Any, Dict, Tuple, List, TypeVar
from langchain.schema import Document
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.vectorstores.base import VectorStore
VST = TypeVar("VST", bound="VectorStore")
@dataclass
class QAData:
vst: VST
documents: List[Document]
chain: RetrievalQAWithSourcesChain
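# Construction sketch (assumes a vector store, page documents and a QA chain
# already exist, e.g. as produced by chain_factory.load_all_chains):
#   qa_data = QAData(vst=docsearch, documents=documents, chain=chain)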
| [] |
2024-01-10 | onepointconsulting/hr-chatbot | chain_factory.py | from langchain.chains import RetrievalQAWithSourcesChain
from langchain.memory import ConversationSummaryBufferMemory
from langchain.vectorstores import FAISS
from langchain.schema import Document
from langchain.prompts import PromptTemplate
from langchain.memory.utils import get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever, VectorStore
from config import cfg
from typing import Any, Dict, Tuple, List, TypeVar
import os
from pathlib import Path
from generate_embeddings import load_pdfs, generate_embeddings
from hr_model import QAData
from log_init import logger
VST = TypeVar("VST", bound="VectorStore")
class KeySourceMemory(ConversationSummaryBufferMemory):
def _get_input_output(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> Tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
output_key = "answer"
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def load_embeddings(
embedding_dir: Path = cfg.faiss_persist_directory_uk,
doc_location: Path = cfg.doc_location_uk,
) -> Tuple[VST, List[Document]]:
"""
Loads the PDF documents to support text extraction in the Chainlit UI.
In case there are no persisted embeddings, the embeddings are generated.
In case the embeddings are persisted, then they are loaded from the file system.
Returns:
Tuple[VST, List[Document]]: Returns a reference to the vector store and the list of all PDF page documents.
"""
logger.info(f"Checking: {embedding_dir}")
documents = load_pdfs(doc_location)
assert len(documents) > 0
if embedding_dir.exists() and len(list(embedding_dir.glob("*"))) > 0:
logger.info(f"reading from existing directory: {embedding_dir}")
docsearch = FAISS.load_local(embedding_dir, cfg.embeddings)
return docsearch, documents
return (
generate_embeddings(documents, doc_location, embedding_dir.absolute()),
documents,
)
template = """Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES"). If you know a joke about the subject, make sure that you include it in the response.
If you don't know the answer, say that you don't know and make up some joke about the subject. Don't try to make up an answer.
ALWAYS return a "SOURCES" part in your answer.
QUESTION: Which state/country's law governs the interpretation of the contract?
=========
Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.
Source: 28-pl
Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.
Source: 30-pl
Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,
Source: 4-pl
=========
FINAL ANSWER: This Agreement is governed by English law.
SOURCES: 28-pl
QUESTION: What did the president say about Michael Jackson?
=========
Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.
Source: 0-pl
Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.
Source: 24-pl
Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.
Source: 5-pl
Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.
Source: 34-pl
=========
FINAL ANSWER: The president did not mention Michael Jackson. And here is a joke about Michael Jackson: Why did Michael Jackson go to the bakery? Because he wanted to "beat it" and grab some "moon-pies"!
SOURCES:
QUESTION: {question}
=========
{summaries}
=========
FINAL ANSWER:"""
HUMOUR_PROMPT = PromptTemplate(
template=template, input_variables=["summaries", "question"]
)
def create_retrieval_chain(
docsearch: VST, verbose: bool = False, humour: bool = True
) -> RetrievalQAWithSourcesChain:
"""
This function creates the QA chain with memory and in case the humour parameter is true,
then a manipulated prompt - that tends to create jokes on certain occasions - is used.
Parameters:
docsearch (VST): A reference to the vector store.
verbose (bool): Determines whether LangChain's internal logging is printed to the console or not.
humour (bool): Determines whether the prompt for answers with jokes is used or not.
Returns:
RetrievalQAWithSourcesChain: The QA chain
"""
memory = KeySourceMemory(llm=cfg.llm, input_key="question", output_key="answer")
chain_type_kwargs = {}
if verbose:
chain_type_kwargs["verbose"] = True
if humour:
chain_type_kwargs["prompt"] = HUMOUR_PROMPT
search_retriever: VectorStoreRetriever = docsearch.as_retriever()
search_retriever.search_kwargs = {"k": cfg.search_results}
qa_chain = RetrievalQAWithSourcesChain.from_chain_type(
cfg.llm,
retriever=search_retriever,
chain_type="stuff",
memory=memory,
chain_type_kwargs=chain_type_kwargs,
)
return qa_chain
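# Usage sketch (assumes the default UK documents and FAISS store can be
# loaded; the question is illustrative):
#   docsearch, documents = load_embeddings()
#   chain = create_retrieval_chain(docsearch, humour=False)
#   print(chain({"question": "What is the probation period?"})["answer"])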
def load_all_chains(country_filter: str = None) -> Dict[str, QAData]:
res = {}
for country, v in cfg.location_persistence_map.items():
if country_filter is None or country_filter == country:
faiss_persist_directory = v["faiss_persist_directory"]
doc_location = v["doc_location"]
vst, documents = load_embeddings(faiss_persist_directory, doc_location)
chain = create_retrieval_chain(vst, humour=os.getenv("HUMOUR") == "true")
res[country] = QAData(vst=vst, documents=documents, chain=chain)
return res
if __name__ == "__main__":
chain_dict = load_all_chains()
logger.info(len(chain_dict.items()))
| [
"beat it",
"question",
"moon-pies",
"t know the answer, say that you don",
"Given the following extracted parts of a long document and a question, create a final answer with references (\"SOURCES\"). If you know a joke about the subject, make sure that you include it in the response.\nIf you don't know the answer, say that you don't know and make up some joke about the subject. Don't try to make up an answer.\nALWAYS return a \"SOURCES\" part in your answer.\n\nQUESTION: Which state/country's law governs the interpretation of the contract?\n=========\nContent: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\nSource: 28-pl\nContent: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.\nSource: 30-pl\nContent: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,\nSource: 4-pl\n=========\nFINAL ANSWER: This Agreement is governed by English law.\nSOURCES: 28-pl\n\nQUESTION: What did the president say about Michael Jackson?\n=========\nContent: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.\nSource: 0-pl\nContent: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. 
\n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.\nSource: 24-pl\nContent: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.\nSource: 5-pl\nContent: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.\nSource: 34-pl\n=========\nFINAL ANSWER: The president did not mention Michael Jackson. And here is a joke about Michael Jackson: Why did Michael Jackson go to the bakery? Because he wanted to \"beat it\" and grab some \"moon-pies\"! \nSOURCES:\n\nQUESTION: {question}\n=========\n{summaries}\n=========\nFINAL ANSWER:"
] |
2024-01-10 | xorbitsai/Audits-Demo | app~engine.py | import logging
import os
from typing import List
import tiktoken
from langchain.embeddings import XinferenceEmbeddings
from langchain.llms import Xinference
from llama_index.chat_engine import CondenseQuestionChatEngine
from llama_index.llms import OpenAI
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.callbacks import LlamaDebugHandler
from llama_index.callbacks.base import CallbackManager
from llama_index.chat_engine.types import BaseChatEngine, ChatMode
from llama_index.embeddings import LangchainEmbedding
from llama_index.embeddings.openai import (
OpenAIEmbedding,
OpenAIEmbeddingMode,
OpenAIEmbeddingModelType,
)
from llama_index.memory import ChatMemoryBuffer
from llama_index.node_parser import SimpleNodeParser
from llama_index.text_splitter import SentenceSplitter
from .models.schema import Document as DocumentSchema
from .utils import fetch_and_read_documents
from .constants import NODE_PARSER_CHUNK_SIZE, NODE_PARSER_CHUNK_OVERLAP
from .prompts import get_context_prompt_template, get_sys_prompt
logger = logging.getLogger(__name__)
def get_llm():
llm = OpenAI(
temperature=0,
model_name="gpt-3.5-turbo-0613",
streaming=False,
api_key="<API-KEY>",
)
return llm
def get_embedding_model():
embedding = OpenAIEmbedding(
mode=OpenAIEmbeddingMode.SIMILARITY_MODE,
model_type=OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002,
api_key="<API-KEY>",
)
return embedding
def get_service_context(callback_handlers):
callback_manager = CallbackManager(callback_handlers)
embedding_model = get_embedding_model()
llm = get_llm()
text_splitter = SentenceSplitter(
separator=" ",
chunk_size=NODE_PARSER_CHUNK_SIZE,
chunk_overlap=NODE_PARSER_CHUNK_OVERLAP,
paragraph_separator="\n\n\n",
secondary_chunking_regex="[^,.;。]+[,.;。]?",
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode,
)
node_parser = SimpleNodeParser.from_defaults(
text_splitter=text_splitter,
callback_manager=callback_manager,
)
return ServiceContext.from_defaults(
callback_manager=callback_manager,
llm=llm,
embed_model=embedding_model,
node_parser=node_parser,
)
def get_chat_engine() -> OpenAI:
chat_engine = OpenAI(
temperature=0,
model_name="gpt-3.5-turbo-0613",
streaming=False,
api_key="<API-KEY>",
)
return chat_engine
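# Indexing sketch (assumes `documents` were loaded elsewhere, e.g. via
# fetch_and_read_documents, and that a real key replaces "<API-KEY>" above):
#   service_context = get_service_context([LlamaDebugHandler()])
#   index = VectorStoreIndex.from_documents(documents, service_context=service_context)
#   print(index.as_query_engine().query("Summarize the key findings."))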
| [] |
2024-01-10 | Whiteknightai2/camel | camel~agents~tool_agents~hugging_face_tool_agent.py | # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from typing import Any, Optional
from camel.agents.tool_agents import BaseToolAgent
# flake8: noqa :E501
class HuggingFaceToolAgent(BaseToolAgent):
r"""Tool agent for calling HuggingFace models. This agent is a wrapper
around agents from the `transformers` library. For more information
about the available models, please see the `transformers` documentation
at https://huggingface.co/docs/transformers/transformers_agents.
Args:
name (str): The name of the agent.
*args (Any): Additional positional arguments to pass to the underlying
Agent class.
remote (bool, optional): Flag indicating whether to run the agent
remotely. (default: :obj:`True`)
**kwargs (Any): Additional keyword arguments to pass to the underlying
Agent class.
"""
def __init__(
self,
name: str,
*args: Any,
remote: bool = True,
**kwargs: Any,
) -> None:
try:
# TODO: Support other tool agents
from transformers.tools import OpenAiAgent
except ImportError:
raise ValueError(
"Could not import transformers tool agents. "
"Please setup the environment with "
"pip install huggingface_hub==0.14.1 transformers==4.29.0 diffusers accelerate datasets torch soundfile sentencepiece opencv-python"
)
self.agent = OpenAiAgent(*args, **kwargs)
description = f"""The `{name}` is a tool agent that can perform a variety of tasks including:
- Document question answering: given a document (such as a PDF) in image format, answer a question on this document
- Text question answering: given a long text and a question, answer the question in the text
- Unconditional image captioning: Caption the image!
- Image question answering: given an image, answer a question on this image
- Image segmentation: given an image and a prompt, output the segmentation mask of that prompt
- Speech to text: given an audio recording of a person talking, transcribe the speech into text
- Text to speech: convert text to speech
- Zero-shot text classification: given a text and a list of labels, identify to which label the text corresponds the most
- Text summarization: summarize a long text in one or a few sentences
- Translation: translate the text into a given language
- Text downloading: to download a text from a web URL
- Text to image: generate an image according to a prompt, leveraging stable diffusion
- Image transformation: modify an image given an initial image and a prompt, leveraging instruct pix2pix stable diffusion
- Text to video: generate a small video according to a prompt
Here are some python code examples of what you can do with this agent:
Single execution (step) mode, the single execution method is when using the step() method of the agent:
```
# Text to image
rivers_and_lakes_image = {name}.step("Draw me a picture of rivers and lakes.")
rivers_and_lakes_image.save("./rivers_and_lakes_image.png")
# Text to image -> Image transformation
sea_add_island_image = {name}.step("Draw me a picture of the sea then transform the picture to add an island")
sea_add_island_image.save("./sea_add_island_image.png")
# If you'd like to keep a state across executions or to pass non-text objects to the agent,
# you can do so by specifying variables that you would like the agent to use. For example,
# you could generate the first image of rivers and lakes, and ask the model to update that picture to add an island by doing the following:
picture = {name}.step("Generate a picture of rivers and lakes.")
picture.save("./picture.png")
updated_picture = {name}.step("Transform the image in `picture` to add an island to it.", picture=picture)
updated_picture.save("./updated_picture.png")
capybara_sea_image = {name}.step("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea")
capybara_sea_image.save("./capybara_sea_image.png")
# Document question answering
answer = {name}.step(
"In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?",
document=document,
)
print(answer)
# Text to image
boat_image = {name}.step("Generate an image of a boat in the water")
boat_image.save("./boat_image.png")
# Unconditional image captioning
boat_image_caption = {name}.step("Can you caption the `boat_image`?", boat_image=boat_image)
print(boat_image_caption)
# Text to image -> Unconditional image captioning -> Text to speech
boat_audio = {name}.step("Can you generate an image of a boat? Please read out loud the contents of the image afterwards")
# Text downloading
document = {name}.step("Download the text from http://hf.co")
print(document)
# Text summarization
summary = {name}.step("Summarize the following text: `document`", document=document)
print(summary)
# Text downloading -> Text summarization -> Text to speech
audio = {name}.step("Read out loud the summary of http://hf.co")
```
Chat-based execution (chat), the agent also has a chat-based approach, using the chat() method:
```
# Clean the chat history
{name}.reset()
# Text to image
capybara_image = {name}.chat("Show me an image of a capybara")
capybara_image.save("./capybara_image.png")
# Image transformation
transformed_capybara_image = {name}.chat("Transform the image so that it snows")
transformed_capybara_image.save("./transformed_capybara_image.png")
# Image segmentation
segmented_transformed_capybara_image = {name}.chat("Show me a mask of the snowy capybaras")
segmented_transformed_capybara_image.save("./segmented_transformed_capybara_image.png")
```
"""
super(HuggingFaceToolAgent, self).__init__(name, description)
self.remote = remote
def reset(self) -> None:
r"""Resets the chat history of the agent."""
self.agent.prepare_for_new_chat()
def step(
self,
*args: Any,
remote: Optional[bool] = None,
**kwargs: Any,
) -> Any:
r"""Runs the agent in single execution mode.
Args:
*args (Any): Positional arguments to pass to the agent.
remote (bool, optional): Flag indicating whether to run the agent
remotely. Overrides the default setting. (default: :obj:`None`)
**kwargs (Any): Keyword arguments to pass to the agent.
Returns:
str: The response from the agent.
"""
if remote is None:
remote = self.remote
return self.agent.run(*args, remote=remote, **kwargs)
def chat(
self,
*args: Any,
remote: Optional[bool] = None,
**kwargs: Any,
) -> Any:
r"""Runs the agent in a chat conversation mode.
Args:
*args (Any): Positional arguments to pass to the agent.
remote (bool, optional): Flag indicating whether to run the agent
remotely. Overrides the default setting. (default: :obj:`None`)
**kwargs (Any): Keyword arguments to pass to the agent.
Returns:
str: The response from the agent.
"""
if remote is None:
remote = self.remote
return self.agent.chat(*args, remote=remote, **kwargs)
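# Instantiation sketch (assumes an OpenAI API key is available to the
# underlying transformers OpenAiAgent; extra arguments are forwarded to it):
#   agent = HuggingFaceToolAgent("hf_agent", model="text-davinci-003")
#   boat_image = agent.step("Generate an image of a boat in the water")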
| [] |
2024-01-10 | Whiteknightai2/camel | camel~models~openai_model.py | # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from types import GeneratorType
from typing import Any, Dict, List
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.typing import ModelType
class OpenAIModel(BaseModelBackend):
r"""OpenAI API in a unified BaseModelBackend interface."""
def __init__(self, model_type: ModelType,
model_config_dict: Dict[str, Any]) -> None:
r"""Constructor for OpenAI backend.
Args:
model_type (ModelType): Model for which a backend is created,
one of GPT_* series.
model_config_dict (Dict[str, Any]): A dictionary that will
be fed into openai.ChatCompletion.create().
"""
super().__init__(model_type, model_config_dict)
def run(self, messages: List[Dict]) -> Dict[str, Any]:
r"""Run inference of OpenAI chat completion.
Args:
messages (List[Dict]): Message list with the chat history
in OpenAI API format.
Returns:
Dict[str, Any]: Response in the OpenAI API format.
"""
import openai
messages_openai: List[OpenAIMessage] = messages
response = openai.ChatCompletion.create(messages=messages_openai,
model=self.model_type.value,
**self.model_config_dict)
if not self.stream:
if not isinstance(response, Dict):
raise RuntimeError("Unexpected batch return from OpenAI API")
else:
if not isinstance(response, GeneratorType):
raise RuntimeError("Unexpected stream return from OpenAI API")
return response
@property
def stream(self) -> bool:
r"""Returns whether the model is in stream mode,
which sends partial results each time.
Returns:
bool: Whether the model is in stream mode.
"""
return self.model_config_dict.get('stream', False)
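# Usage sketch (assumes OPENAI_API_KEY is set and that ModelType defines
# GPT_3_5_TURBO):
#   backend = OpenAIModel(ModelType.GPT_3_5_TURBO, {"temperature": 0.2})
#   response = backend.run([{"role": "user", "content": "Hello"}])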
| [] |
2024-01-10 | Whiteknightai2/camel | apps~agents~agents.py | # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
"""
Gradio-based web app Agents that uses OpenAI API to generate
a chat between collaborative agents.
"""
import argparse
import os
import re
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union
import gradio as gr
import openai
import openai.error
import tenacity
from apps.agents.text_utils import split_markdown_code
from camel.agents import TaskSpecifyAgent
from camel.messages import BaseMessage
from camel.societies import RolePlaying
REPO_ROOT = os.path.realpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))
ChatBotHistory = List[Tuple[Optional[str], Optional[str]]]
@dataclass
class State:
session: Optional[RolePlaying]
max_messages: int
chat: ChatBotHistory
saved_assistant_msg: Optional[BaseMessage]
@classmethod
def empty(cls) -> 'State':
return cls(None, 0, [], None)
@staticmethod
def construct_inplace(state: 'State', session: Optional[RolePlaying],
max_messages: int, chat: ChatBotHistory,
saved_assistant_msg: Optional[BaseMessage]) -> None:
state.session = session
state.max_messages = max_messages
state.chat = chat
state.saved_assistant_msg = saved_assistant_msg
def parse_arguments():
""" Get command line arguments. """
parser = argparse.ArgumentParser("Camel data explorer")
parser.add_argument('--api-key', type=str, default=None,
help='OpenAI API key')
parser.add_argument('--share', type=bool, default=False,
help='Expose the web UI to Gradio')
parser.add_argument('--server-port', type=int, default=8080,
                        help='Port to run the web page on')
parser.add_argument('--inbrowser', type=bool, default=False,
                        help='Open the web UI in the default browser on launch')
parser.add_argument(
'--concurrency-count', type=int, default=1,
        help='Number of concurrent threads at Gradio websocket queue. ' +
'Increase to serve more requests but keep an eye on RAM usage.')
args, unknown = parser.parse_known_args()
if len(unknown) > 0:
print("Unknown args: ", unknown)
return args
def load_roles(path: str) -> List[str]:
""" Load roles from list files.
Args:
path (str): Path to the TXT file.
Returns:
List[str]: List of roles.
"""
assert os.path.exists(path)
roles = []
with open(path, "r") as f:
lines = f.readlines()
for line in lines:
match = re.search(r"^\d+\.\s*(.+)\n*$", line)
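            # e.g. a line such as "12. Accountant" matches and captures "Accountant"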
if match:
role = match.group(1)
roles.append(role)
else:
print("Warning: no match")
return roles
def cleanup_on_launch(state) -> Tuple[State, ChatBotHistory, Dict]:
""" Prepare the UI for a new session.
Args:
state (State): Role playing state.
Returns:
Tuple[State, ChatBotHistory, Dict]:
- Updated state.
- Chatbot window contents.
- Start button state (disabled).
"""
# The line below breaks the every=N runner
# `state = State.empty()`
State.construct_inplace(state, None, 0, [], None)
return state, [], gr.update(interactive=False)
def role_playing_start(
state,
assistant: str,
user: str,
original_task: str,
max_messages: float,
with_task_specifier: bool,
word_limit: int,
) -> Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
""" Creates a role playing session.
Args:
state (State): Role playing state.
assistant (str): Contents of the Assistant field.
user (str): Contents of the User field.
original_task (str): Original task field.
with_task_specifier (bool): Enable/Disable task specifier.
word_limit (int): Limit of words for task specifier.
Returns:
Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
- Updated state.
- Generated specified task.
- Planned task (if any).
- Chatbot window contents.
- Progress bar contents.
"""
if state.session is not None:
print("Double click")
return {} # may fail
try:
task_specify_kwargs = dict(word_limit=word_limit) \
if with_task_specifier else None
session = RolePlaying(assistant, user, original_task,
with_task_specify=with_task_specifier,
task_specify_agent_kwargs=task_specify_kwargs,
with_task_planner=False)
except (openai.error.RateLimitError, tenacity.RetryError,
RuntimeError) as ex:
print("OpenAI API exception 0 " + str(ex))
return (state, str(ex), "", [], gr.update())
# Can't re-create a state like below since it
# breaks 'role_playing_chat_cont' runner with every=N.
# `state = State(session=session, max_messages=int(max_messages), chat=[],`
# ` saved_assistant_msg=None)`
State.construct_inplace(state, session, int(max_messages), [], None)
specified_task_prompt = session.specified_task_prompt \
if session.specified_task_prompt is not None else ""
planned_task_prompt = session.planned_task_prompt \
if session.planned_task_prompt is not None else ""
planned_task_upd = gr.update(
value=planned_task_prompt, visible=session.planned_task_prompt
is not None)
progress_update = gr.update(maximum=state.max_messages, value=1,
visible=True)
return (state, specified_task_prompt, planned_task_upd, state.chat,
progress_update)
def role_playing_chat_init(state) -> \
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
""" Initialize role playing.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Chatbot window contents.
- Progress bar contents.
"""
if state.session is None:
print("Error: session is none on role_playing_chat_init call")
return state, state.chat, gr.update()
session: RolePlaying = state.session
try:
init_assistant_msg: BaseMessage
init_assistant_msg, _ = session.init_chat()
except (openai.error.RateLimitError, tenacity.RetryError,
RuntimeError) as ex:
print("OpenAI API exception 1 " + str(ex))
state.session = None
return state, state.chat, gr.update()
state.saved_assistant_msg = init_assistant_msg
progress_update = gr.update(maximum=state.max_messages, value=1,
visible=True)
return state, state.chat, progress_update
# WORKAROUND: do not add type hints for session and chatbot_history
def role_playing_chat_cont(state) -> \
Tuple[State, ChatBotHistory, Dict, Dict]:
""" Produce a pair of messages by an assistant and a user.
To be run multiple times.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Chatbot window contents.
- Progress bar contents.
- Start button state (to be eventually enabled).
"""
if state.session is None:
return state, state.chat, gr.update(visible=False), gr.update()
session: RolePlaying = state.session
if state.saved_assistant_msg is None:
return state, state.chat, gr.update(), gr.update()
try:
assistant_response, user_response = session.step(
state.saved_assistant_msg)
except (openai.error.RateLimitError, tenacity.RetryError,
RuntimeError) as ex:
print("OpenAI API exception 2 " + str(ex))
state.session = None
return state, state.chat, gr.update(), gr.update()
if len(user_response.msgs) != 1 or len(assistant_response.msgs) != 1:
return state, state.chat, gr.update(), gr.update()
u_msg = user_response.msg
a_msg = assistant_response.msg
state.saved_assistant_msg = a_msg
state.chat.append((None, split_markdown_code(u_msg.content)))
state.chat.append((split_markdown_code(a_msg.content), None))
if len(state.chat) >= state.max_messages:
state.session = None
if "CAMEL_TASK_DONE" in a_msg.content or \
"CAMEL_TASK_DONE" in u_msg.content:
state.session = None
progress_update = gr.update(maximum=state.max_messages,
value=len(state.chat), visible=state.session
is not None)
start_bn_update = gr.update(interactive=state.session is None)
return state, state.chat, progress_update, start_bn_update
def stop_session(state) -> Tuple[State, Dict, Dict]:
""" Finish the session and leave chat contents as an artefact.
Args:
state (State): Role playing state.
Returns:
Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
- Updated state.
- Progress bar contents.
- Start button state (to be eventually enabled).
"""
state.session = None
return state, gr.update(visible=False), gr.update(interactive=True)
def construct_ui(blocks, api_key: Optional[str] = None) -> None:
""" Build Gradio UI and populate with topics.
Args:
api_key (str): OpenAI API key.
Returns:
None
"""
if api_key is not None:
openai.api_key = api_key
assistant_role_path = \
os.path.join(REPO_ROOT, "data/ai_society/assistant_roles.txt")
user_role_path = \
os.path.join(REPO_ROOT, "data/ai_society/user_roles.txt")
assistant_roles = load_roles(assistant_role_path)
user_roles = load_roles(user_role_path)
assistant_role = "Python Programmer"
user_role = "Stock Trader"
default_task = "Develop a trading bot for the stock market"
with gr.Row():
with gr.Column(scale=1):
assistant_dd = gr.Dropdown(assistant_roles,
label="Example assistant roles",
value=assistant_role, interactive=True)
assistant_ta = gr.TextArea(label="Assistant role (EDIT ME)",
lines=1, interactive=True)
with gr.Column(scale=1):
user_dd = gr.Dropdown(user_roles, label="Example user roles",
value=user_role, interactive=True)
user_ta = gr.TextArea(label="User role (EDIT ME)", lines=1,
interactive=True)
with gr.Column(scale=1):
gr.Markdown(
"## CAMEL: Communicative Agents for \"Mind\" Exploration"
" of Large Scale Language Model Society\n"
"Github repo: [https://github.com/lightaime/camel]"
"(https://github.com/lightaime/camel)"
'<div style="display:flex; justify-content:center;">'
'<img src="https://raw.githubusercontent.com/lightaime/camel/'
'master/misc/logo.png" alt="Logo" style="max-width:50%;">'
'</div>')
with gr.Row():
with gr.Column(scale=9):
original_task_ta = gr.TextArea(
label="Give me a preliminary idea (EDIT ME)",
value=default_task, lines=1, interactive=True)
with gr.Column(scale=1):
universal_task_bn = gr.Button("Insert universal task")
with gr.Row():
with gr.Column():
with gr.Row():
task_specifier_cb = gr.Checkbox(value=True,
label="With task specifier")
with gr.Row():
ts_word_limit_nb = gr.Number(
value=TaskSpecifyAgent.DEFAULT_WORD_LIMIT,
label="Word limit for task specifier",
visible=task_specifier_cb.value)
with gr.Column():
num_messages_sl = gr.Slider(minimum=1, maximum=50, step=1,
value=10, interactive=True,
label="Messages to generate")
with gr.Column(scale=2):
with gr.Row():
start_bn = gr.Button("Make agents chat [takes time]",
elem_id="start_button")
with gr.Row():
clear_bn = gr.Button("Interrupt the current query")
progress_sl = gr.Slider(minimum=0, maximum=100, value=0, step=1,
label="Progress", interactive=False, visible=False)
specified_task_ta = gr.TextArea(
label="Specified task prompt given to the role-playing session"
" based on the original (simplistic) idea", lines=1, interactive=False)
task_prompt_ta = gr.TextArea(label="Planned task prompt", lines=1,
interactive=False, visible=False)
chatbot = gr.Chatbot(label="Chat between autonomous agents")
session_state = gr.State(State.empty())
universal_task_bn.click(lambda: "Help me to do my job", None,
original_task_ta)
task_specifier_cb.change(lambda v: gr.update(visible=v), task_specifier_cb,
ts_word_limit_nb)
start_bn.click(cleanup_on_launch, session_state,
[session_state, chatbot, start_bn], queue=False) \
.then(role_playing_start,
[session_state, assistant_ta, user_ta,
original_task_ta, num_messages_sl,
task_specifier_cb, ts_word_limit_nb],
[session_state, specified_task_ta, task_prompt_ta,
chatbot, progress_sl],
queue=False) \
.then(role_playing_chat_init, session_state,
[session_state, chatbot, progress_sl], queue=False)
blocks.load(role_playing_chat_cont, session_state,
[session_state, chatbot, progress_sl, start_bn], every=0.5)
clear_bn.click(stop_session, session_state,
[session_state, progress_sl, start_bn])
assistant_dd.change(lambda dd: dd, assistant_dd, assistant_ta)
user_dd.change(lambda dd: dd, user_dd, user_ta)
blocks.load(lambda dd: dd, assistant_dd, assistant_ta)
blocks.load(lambda dd: dd, user_dd, user_ta)
def construct_blocks(api_key: Optional[str]):
""" Construct Agents app but do not launch it.
Args:
api_key (Optional[str]): OpenAI API key.
Returns:
gr.Blocks: Blocks instance.
"""
css_str = "#start_button {border: 3px solid #4CAF50; font-size: 20px;}"
with gr.Blocks(css=css_str) as blocks:
construct_ui(blocks, api_key)
return blocks
def main():
""" Entry point. """
args = parse_arguments()
print("Getting Agents web server online...")
blocks = construct_blocks(args.api_key)
blocks.queue(args.concurrency_count) \
.launch(share=args.share, inbrowser=args.inbrowser,
server_name="0.0.0.0", server_port=args.server_port,
debug=True)
print("Exiting.")
if __name__ == "__main__":
main()
| [
"Planned task prompt"
] |
2024-01-10 | Whiteknightai2/camel | camel~models~base_model.py | # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from abc import ABC, abstractmethod
from typing import Any, Dict, List
from camel.typing import ModelType
class BaseModelBackend(ABC):
r"""Base class for different model backends.
May be OpenAI API, a local LLM, a stub for unit tests, etc."""
def __init__(self, model_type: ModelType,
model_config_dict: Dict[str, Any]) -> None:
r"""Constructor for the model backend.
Args:
model_type (ModelType): Model for which a backend is created.
model_config_dict (Dict[str, Any]): A config dictionary.
"""
self.model_type = model_type
self.model_config_dict = model_config_dict
@abstractmethod
def run(self, messages: List[Dict]) -> Dict[str, Any]:
r"""Runs the query to the backend model.
Args:
messages (List[Dict]): message list with the chat history
in OpenAI API format.
Raises:
RuntimeError: if the return value from OpenAI API
is not a dict that is expected.
Returns:
Dict[str, Any]: All backends must return a dict in OpenAI format.
"""
pass
@property
def token_limit(self) -> int:
r"""Returns the maximum token limit for a given model.
Returns:
int: The maximum token limit for the given model.
"""
return self.model_type.token_limit
@property
def stream(self) -> bool:
r"""Returns whether the model is in stream mode,
which sends partial results each time.
Returns:
bool: Whether the model is in stream mode.
"""
return False
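# A minimal sketch of a concrete backend (illustrative only; `EchoModel` is a
# hypothetical name, not part of the repo). It returns the last user message
# wrapped in the OpenAI chat-completion response shape that `run` promises:
#
#   class EchoModel(BaseModelBackend):
#       def run(self, messages: List[Dict]) -> Dict[str, Any]:
#           return {
#               "id": "echo-0",
#               "object": "chat.completion",
#               "usage": {"prompt_tokens": 0, "completion_tokens": 0,
#                         "total_tokens": 0},
#               "choices": [{
#                   "index": 0,
#                   "finish_reason": "stop",
#                   "message": {"role": "assistant",
#                               "content": messages[-1]["content"]},
#               }],
#           }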
| [] |
2024-01-10 | Whiteknightai2/camel | camel~utils~functions.py | # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import os
import re
import time
import zipfile
from functools import wraps
from typing import Any, Callable, List, Optional, Set, TypeVar, cast
import requests
import tiktoken
from camel.messages import OpenAIMessage
from camel.typing import ModelType, TaskType
F = TypeVar('F', bound=Callable[..., Any])
def get_model_encoding(value_for_tiktoken: str):
r"""Get model encoding from tiktoken.
Args:
value_for_tiktoken: Model value for tiktoken.
Returns:
tiktoken.Encoding: Model encoding.
"""
try:
encoding = tiktoken.encoding_for_model(value_for_tiktoken)
except KeyError:
print("Model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
return encoding
def count_tokens_openai_chat_models(
messages: List[OpenAIMessage],
encoding: tiktoken.Encoding,
tokens_per_message: int,
tokens_per_name: int,
) -> int:
r"""Counts the number of tokens required to generate an OpenAI chat based
on a given list of messages.
Args:
messages (List[OpenAIMessage]): The list of messages.
encoding (tiktoken.Encoding): The encoding method to use.
tokens_per_message (int): Number of tokens to be added
to each message.
tokens_per_name (int): Number of tokens to be added if
name existed in the message.
Returns:
int: The number of tokens required.
"""
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
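# Rough worked example (exact counts depend on the tiktoken version): with
# tokens_per_message=3 and tokens_per_name=1, the single message
# {"role": "user", "content": "Hello"} costs 3 + 1 ("user") + 1 ("Hello")
# tokens, plus the final 3 reply-priming tokens, i.e. about 8 tokens in total.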
def num_tokens_from_messages(
messages: List[OpenAIMessage],
model: ModelType,
) -> int:
r"""Returns the number of tokens used by a list of messages.
Args:
messages (List[OpenAIMessage]): The list of messages to count the
number of tokens for.
model (ModelType): The OpenAI model used to encode the messages.
Returns:
int: The total number of tokens used by the messages.
Raises:
NotImplementedError: If the specified `model` is not implemented.
References:
- https://github.com/openai/openai-python/blob/main/chatml.md
- https://platform.openai.com/docs/models/gpt-4
- https://platform.openai.com/docs/models/gpt-3-5
"""
return _num_tokens_from_messages(messages, model.value_for_tiktoken)
# flake8: noqa :E501
def _num_tokens_from_messages(messages: List[OpenAIMessage], model: str):
r"""Return the number of tokens used by a list of messages.
References:
- https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
"""
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
# Every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_message = 4
# If there's a name, the role is omitted
tokens_per_name = -1
elif "gpt-3.5-turbo" in model:
return _num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
return _num_tokens_from_messages(messages, model="gpt-4-0613")
else:
raise NotImplementedError(
f"`num_tokens_from_messages`` is not presently implemented "
f"for model {model}. "
f"See https://github.com/openai/openai-python/blob/main/chatml.md "
f"for information on how messages are converted to tokens. "
f"See https://platform.openai.com/docs/models/gpt-4"
f"or https://platform.openai.com/docs/models/gpt-3-5"
f"for information about openai chat models.")
encoding = get_model_encoding(model)
return count_tokens_openai_chat_models(messages, encoding,
tokens_per_message, tokens_per_name)
def openai_api_key_required(func: F) -> F:
r"""Decorator that checks if the OpenAI API key is available in the
environment variables.
Args:
func (callable): The function to be wrapped.
Returns:
callable: The decorated function.
Raises:
ValueError: If the OpenAI API key is not found in the environment
variables.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
from camel.agents.chat_agent import ChatAgent
if not isinstance(self, ChatAgent):
raise ValueError("Expected ChatAgent")
if self.model == ModelType.STUB:
return func(self, *args, **kwargs)
elif 'OPENAI_API_KEY' in os.environ:
return func(self, *args, **kwargs)
else:
raise ValueError('OpenAI API key not found.')
return cast(F, wrapper)
def print_text_animated(text, delay: float = 0.02, end: str = ""):
r"""Prints the given text with an animated effect.
Args:
text (str): The text to print.
delay (float, optional): The delay between each character printed.
(default: :obj:`0.02`)
end (str, optional): The end character to print after each
character of text. (default: :obj:`""`)
"""
for char in text:
print(char, end=end, flush=True)
time.sleep(delay)
print('\n')
def get_prompt_template_key_words(template: str) -> Set[str]:
r"""Given a string template containing curly braces {}, return a set of
the words inside the braces.
Args:
template (str): A string containing curly braces.
Returns:
List[str]: A list of the words inside the curly braces.
Example:
>>> get_prompt_template_key_words('Hi, {name}! How are you {status}?')
{'name', 'status'}
"""
return set(re.findall(r'{([^}]*)}', template))
def get_first_int(string: str) -> Optional[int]:
r"""Returns the first integer number found in the given string.
If no integer number is found, returns None.
Args:
string (str): The input string.
Returns:
int or None: The first integer number found in the string, or None if
no integer number is found.
"""
match = re.search(r'\d+', string)
if match:
return int(match.group())
else:
return None
def download_tasks(task: TaskType, folder_path: str) -> None:
# Define the path to save the zip file
zip_file_path = os.path.join(folder_path, "tasks.zip")
    # Download the zip file from the Hugging Face dataset repo
response = requests.get("https://huggingface.co/datasets/camel-ai/"
f"metadata/resolve/main/{task.value}_tasks.zip")
# Save the zip file
with open(zip_file_path, "wb") as f:
f.write(response.content)
with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
zip_ref.extractall(folder_path)
# Delete the zip file
os.remove(zip_file_path)
| [] |
2024-01-10 | codefuse-ai/CodeFuse-ModelCache | modelcache~adapter~adapter.py | # -*- coding: utf-8 -*-
import logging
import openai
from modelcache.adapter.adapter_query import adapt_query
from modelcache.adapter.adapter_insert import adapt_insert
from modelcache.adapter.adapter_remove import adapt_remove
class ChatCompletion(openai.ChatCompletion):
"""Openai ChatCompletion Wrapper"""
@classmethod
def create_query(cls, *args, **kwargs):
def cache_data_convert(cache_data, cache_query):
return construct_resp_from_cache(cache_data, cache_query)
try:
return adapt_query(
cache_data_convert,
*args,
**kwargs
)
except Exception as e:
return str(e)
@classmethod
def create_insert(cls, *args, **kwargs):
try:
return adapt_insert(
*args,
**kwargs
)
except Exception as e:
return str(e)
@classmethod
def create_remove(cls, *args, **kwargs):
try:
return adapt_remove(
*args,
**kwargs
)
except Exception as e:
logging.info('adapt_remove_e: {}'.format(e))
return str(e)
def construct_resp_from_cache(return_message, return_query):
return {
"modelcache": True,
"hitQuery": return_query,
"data": return_message,
"errorCode": 0
}
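# Typical flow (sketch; the exact adapter kwargs depend on the configured
# cache backends): a request first goes through ChatCompletion.create_query;
# on a cache hit the wrapper above is returned with "modelcache": True,
# otherwise the caller queries the real model and stores the answer via
# ChatCompletion.create_insert for future hits.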
| [] |
2024-01-10 | nervesscat/Personal-Finance-Bot | chooseCategory.py | import openai
import json
import os
class Category:
def __init__(self):
# Json load
jsonFile = open('config.json')
config = json.load(jsonFile)
# Load environment variables
envVar = config['open-ai']
openai.api_key = os.environ.get(envVar)
def chooseCategory(self, prompt):
response = openai.Completion.create(
model="text-davinci-003",
prompt="Categorize this word in one of this(studies, food, clothes, fun, misc) ex:UNAH=studies\n+ " + prompt + "=",
temperature=0.7,
max_tokens=3,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
response = str(response['choices'][0]['text'])
response = response.replace(" ", "")
response = response.lower()
return response
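# Usage sketch (assumes config.json maps "open-ai" to the name of an
# environment variable that holds the OpenAI API key):
#
#   category = Category()
#   print(category.chooseCategory("McDonalds"))  # -> e.g. "food"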
| [
"Categorize this word in one of this(studies, food, clothes, fun, misc) ex:UNAH=studies\n+ PLACEHOLDER="
] |
2024-01-10 | usc-sail/mica-character-attribute-extraction | 00-llm-only-annotation~04-prompting~40_prompt.py | """Prompt samples using zero-shot, few-shot, and chain-of-thought (CoT) prompting methods to find attribute-values
Input
- samples csv file
- path = mica-character-attribute-extraction/prompt-results/samples.csv
- contains attribute-type, id, imdb id, passage id, passage, character name, genres, answer probability fields
Output
- completions json file
path = mica-character-attribute-extraction/prompt-results/{zero/few/cot}.json
- completions txt file
path = mica-character-attribute-extraction/prompt-results/{zero/few/cot}.txt
Parameters
- prompt method
"""
import os
import re
import json
import tqdm
import openai
import tenacity
import tiktoken
import collections
import pandas as pd
from absl import flags
from absl import app
FLAGS = flags.FLAGS
flags.DEFINE_bool("prompt", default=False, help="set to prompt, otherwise only the expected charge is calculated")
flags.DEFINE_enum("prompt_type", default="zero", help="prompting method", enum_values=["zero", "few", "cot"])
@tenacity.retry(wait=tenacity.wait_random_exponential(min=1, max=60), stop=tenacity.stop_after_attempt(10))
def completion_with_backoff(**kwargs):
return openai.Completion.create(**kwargs)
def prompt_sample(prompt, max_tokens=256):
try:
response = completion_with_backoff(
model="text-davinci-003",
prompt=prompt,
temperature=0,
max_tokens=max_tokens,
logprobs=1
)
return response.to_dict()
except Exception:
return
def zero_shot_annot(_):
# openai
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.organization = "org-xPjDKPQ58le6x8A7CE13e8O6"
encoding = tiktoken.encoding_for_model("text-davinci-003")
# read samples
data_dir = os.path.join(os.getenv("DATA_DIR"), "mica-character-attribute-extraction")
samples_file = os.path.join(data_dir, "prompt-results/samples.csv")
output_json_file = os.path.join(data_dir, f"prompt-results/{FLAGS.prompt_type}.json")
output_txt_file = os.path.join(data_dir, f"prompt-results/{FLAGS.prompt_type}.txt")
df = pd.read_csv(samples_file, index_col=None)
df["is_goal"] = df["attr"] == "goal"
df.sort_values("is_goal", ascending=False, inplace=True)
# read brief goal cot prompt
with open(os.path.join(data_dir, "attr_instr/goal/cot_prompt_brief.txt")) as fr:
brief_goal_instr = fr.read().strip()
# read instructions
attrs = sorted(df["attr"].unique())
instrs = []
max_response_sizes = []
avg_response_sizes = []
for attr in attrs:
if FLAGS.prompt_type == "cot":
prompt_file = os.path.join(data_dir, "attr_instr", attr, "cot_prompt.txt")
else:
prompt_file = os.path.join(data_dir, "attr_instr", attr, "prompt.txt")
with open(prompt_file) as fr:
lines = fr.read().strip().split("\n")
instr = lines[0].strip() if FLAGS.prompt_type == "zero" else "\n".join(lines).strip()
instrs.append(instr)
response_sizes = []
if attr == "goal":
i = 2
while i < len(lines):
while lines[i].strip() != "":
i += 1
response = lines[i + 2].lstrip("Answer:").strip()
if "CANNOT ANSWER" not in response:
response_sizes.append(len(encoding.encode(response)))
i += 5
else:
i = 4
while i < len(lines):
response = lines[i].lstrip("Answer:").strip()
if "CANNOT ANSWER" not in response:
response_sizes.append(len(encoding.encode(response)))
i += 4
max_response_sizes.append(max(response_sizes))
avg_response_sizes.append(sum(response_sizes)/len(response_sizes))
# print average response sizes
print("response sizes =>")
for attr, av, mx in zip(attrs, avg_response_sizes, max_response_sizes):
print(f"\t{attr:30s} : avg = {av:.1f} tokens, max = {mx:3d} tokens")
print()
# prompt
n_tokens = 0
responses_json, responses_txt = [], []
max_prompt_sizes = collections.defaultdict(int)
n_times_brief_goal_prompt_used = 0
n_times_output_tokens_lt_256 = 0
tbar = tqdm.tqdm(df.iterrows(), total=len(df), unit="sample")
for _, row in tbar:
attr = row["attr"]
tbar.set_description(attr)
i = attrs.index(attr)
instr = instrs[i]
text, character = row["text"], row["character"]
if attr == "goal":
prompt = f"{instr}\n\nPassage:\n{text}\n\nCharacter: {character}\nAnswer:"
if len(encoding.encode(prompt)) + 256 > 4096:
prompt = f"{brief_goal_instr}\n\nPassage:\n{text}\n\nCharacter: {character}\nAnswer:"
n_times_brief_goal_prompt_used += 1
else:
prompt = f"{instr}\n\nPassage: {text}\nCharacter: {character}\nAnswer:"
n_prompt_tokens = len(encoding.encode(prompt))
n_sample_tokens = 0 # sample = prompt + completion
if FLAGS.prompt:
max_tokens = min(256, 4096 - n_prompt_tokens)
response = prompt_sample(prompt, max_tokens=max_tokens)
if max_tokens < 256:
n_times_output_tokens_lt_256 += 1
if response is not None:
responses_json.append(response)
answer = re.sub(r"\s+", " ", response["choices"][0]["text"]).strip()
responses_txt.append(answer)
n_sample_tokens = response["usage"]["total_tokens"]
else:
responses_json.append({})
responses_txt.append("ERROR")
n_sample_tokens = n_prompt_tokens
else:
n_sample_tokens = n_prompt_tokens + 256
max_prompt_sizes[attr] = max(max_prompt_sizes[attr], n_sample_tokens)
n_tokens += n_sample_tokens
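    # text-davinci-003 pricing assumed here: $0.02 per 1K tokens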
charge = (0.02 * n_tokens) / 1000
max_prompt_sizes = sorted(max_prompt_sizes.items())
print(f"{n_times_brief_goal_prompt_used} times the brief goal prompt used")
print(f"{n_times_output_tokens_lt_256} times the number of completion tokens decreased from 256\n")
# print max tokens used per sample per attribute
print("max tokens per sample =>")
for attr, mt in max_prompt_sizes:
print(f"\t{attr:30s} = {mt:4d} tokens")
print()
if FLAGS.prompt:
with open(output_json_file, "w") as fw:
json.dump(responses_json, fw, indent=2)
with open(output_txt_file, "w") as fw:
fw.write("\n".join(responses_txt))
print(f"Total tokens = {n_tokens:.1f}")
print(f"Total charge = ${charge:.1f}")
else:
print(f"Maximum total tokens = {n_tokens:.1f}")
print(f"Maximum total charge = ${charge:.1f}")
if __name__=="__main__":
app.run(zero_shot_annot) | [
"PLACEHOLDER\n\nPassage:\nPLACEHOLDER\n\nCharacter: PLACEHOLDER\nAnswer:",
"0",
"PLACEHOLDER\n\nPassage: PLACEHOLDER\nCharacter: PLACEHOLDER\nAnswer:",
"1",
"cot_prompt.txt",
"prompt.txt",
"attr_instr"
] |
2024-01-10 | usc-sail/mica-character-attribute-extraction | 00-llm-only-annotation~01-attribute-types~11_prompt_attribute_types.py | """Prompt character descriptions for attribute types
Input
- character descriptions json file
- path = mica-character-attribute-extraction/attribute-types/character_descriptions.json
- contains <imdb id, character name, character description> tuples
- created by sample_character_descriptions.py
Output
- character attribute types json file
- path = mica-character-attribute-extraction/attribute-types/character_attribute_types.json
- contains list of GPT completions
Parameters
- n
number of character descriptions from the character descriptions json file to prompt
"""
import os
import openai
import json
import tenacity
import tqdm
import random
from absl import flags
from absl import app
FLAGS = flags.FLAGS
data_dir = os.path.join(os.getenv("DATA_DIR"), "mica-character-attribute-extraction")
input_file = os.path.join(data_dir, "attribute-types/character_descriptions.json")
output_file = os.path.join(data_dir, "attribute-types/character_attribute_types.json")
flags.DEFINE_integer("n", default=50, help="number of character descriptions to prompt")
template = """List the attribute types of <CHARACTER> described in <PASSAGE> as a comma-separated list. If the <PASSAGE> does not describe any attribute of <CHARACTER>, answer as NONE.
PASSAGE: The bell has just rung, and Mike Damone comes out of Youth and Law class. He has an absorbed, driven look on his face. He walks past the rows of lockers, and doesn't even notice as he passes Stacy Hamilton standing by her locker. She smiles, grabs his arm affectionately.
CHARACTER: Stacy Hamilton
ATTRIBUTE-TYPES: NONE
PASSAGE: Sharon Pogue is driving the car. It is two years since we saw her at the accident site. Her partner, ROBBY LEWIS, sips coffee and keeps one eye on the CAD monitor which lists all area police calls. She slows behind a car that is crawling along, an old 60s car, driven by a young man and woman who sit very close on the bench seat. The car's ENGINE is MISSING and smoking.
CHARACTER: Sharon Pogue
ATTRIBUTE-TYPES: NONE
PASSAGE: DETECTIVE SERGEANT ALONZO HARRIS, in black shirt, black leather jacket. And just enough platinum and diamonds to look like somebody. He reads the paper in a booth. The gun leather-tough LAPD vet is a hands-on, blue-collar cop who can kick your ass with a look.
CHARACTER: Alonzo Harris
ATTRIBUTE-TYPES: Profession, Attire, Attitude
PASSAGE: The SPORTS COMMENTATOR is at the airport and about to interview the heavyweight champion of the world, APOLLO CREED. Creed is twenty-eight years old. He is a tall, smooth-muscled black man with barely a scar on his light coffee-colored face...
CHARACTER: Apollo Creed
ATTRIBUTE-TYPES: Age, Race, Appearance, Profession, Accomplishment
PASSAGE: A woman's face BANKS INTO SHOT, her head resting against grimy wallpaper. She is tense, sweaty, wide-eyed with concentration. This is CLARICE STARLING, mid-20's, trim, very pretty. She wears Kevlar body armor over a navy windbreaker, khaki pants. Her thick hair is piled under a navy baseball cap. A revolver, clutched in her right hand, hovers by her ear.
CHARACTER: Clarice Starling
ATTRIBUTE-TYPES: Age, Appearance, Hair Type, Attire, Possession, Emotion, Posture
PASSAGE: The tremendous heat of two huge twin suns settle on a lone figure, Luke Skywalker, a farm boy with heroic aspirations who looks much younger than his eighteen years. His shaggy hair and baggy tunic give him the air of a simple but lovable lad with a prize-winning smile. The lovely young girl huddles in a small alcove as the stormtroopers search through the ship. She is Princess Leia Organa, a member of the Alderaan Senate. Han is a tough, roguish starpilot about thirty years old. A mercenary on a starship, he is simple, sentimental, and cocksure.
CHARACTER: Princess Leia Organa
ATTRIBUTE-TYPES: Age, Profession
PASSAGE: MICHAEL CORLEONE, dressed in the uniform of a Marine Captain, leads KAY ADAMS through the wedding crowd, occasionally stopped and greeted by friends of the family.
CHARACTER: Michael Corleone
ATTRIBUTE-TYPES: Attire
PASSAGE: HE IS MELVIN PURVIS (30) cleancut and handsome. A square jaw. Chester Gould modelled Dick Tracy's profile after Purvis. He's not big, but he's tenacious. He's an incarnation of the social elite of his time: white, Southern patrician and a Christian gentleman. With Purvis are Special Agents WARREN BARTON (31) and Purvis's friend CARTER DAUM (29) They have a harder time in the steep woods. They're chasing someone. They are guided by East Liverpool police chief
CHARACTER: Melvin Purvis
ATTRIBUTE-TYPES: Age, Appearance, Ethnicity, Social Status, Religion, Qualities
PASSAGE: A TRANSPORT PLANE has just landed. The cargo doors are open and a tricky unloading operation is under way. Robin Cavendish is being hoisted out of the plane, strapped to a stretcher. A big battery powered respirator is being unloaded alongside him. An ambulance waits to receive him. Medics, RAF personnel and ground crew are gathered round.
CHARACTER: Robin Cavendish
ATTRIBUTE-TYPES: Health Status
"""
@tenacity.retry(wait=tenacity.wait_random_exponential(min=1, max=60), stop=tenacity.stop_after_attempt(10))
def completion_with_backoff(**kwargs):
return openai.Completion.create(**kwargs)
def prompt_sample(passage, character):
prompt = f"{template}PASSAGE: {passage}\nCHARACTER: {character}\nATTRIBUTE-TYPES:"
try:
response = completion_with_backoff(
model="text-davinci-003",
prompt=prompt,
temperature=0.7,
max_tokens=1024,
)
return response.to_dict()
except Exception:
return
def prompt_attribute_types(_):
n = FLAGS.n
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.organization = "org-xPjDKPQ58le6x8A7CE13e8O6"
with open(input_file) as fr:
descs = json.load(fr)
sampled_descs = random.sample(descs, n) if n < len(descs) else descs
completions = []
for desc in tqdm.tqdm(sampled_descs, unit="desc"):
output = prompt_sample(desc["desc"], desc["character"])
if output is not None:
output.update({"desc": desc["desc"], "character": desc["character"]})
completions.append(output)
with open(output_file, "w") as fw:
json.dump(completions, fw, indent=2)
if __name__ == '__main__':
app.run(prompt_attribute_types) | [
"List the attribute types of <CHARACTER> described in <PASSAGE> as a comma-separated list. If the <PASSAGE> does not describe any attribute of <CHARACTER>, answer as NONE.\n\nPASSAGE: The bell has just rung, and Mike Damone comes out of Youth and Law class. He has an absorbed, driven look on his face. He walks past the rows of lockers, and doesn't even notice as he passes Stacy Hamilton standing by her locker. She smiles, grabs his arm affectionately.\nCHARACTER: Stacy Hamilton\nATTRIBUTE-TYPES: NONE\n\nPASSAGE: Sharon Pogue is driving the car. It is two years since we saw her at the accident site. Her partner, ROBBY LEWIS, sips coffee and keeps one eye on the CAD monitor which lists all area police calls. She slows behind a car that is crawling along, an old 60s car, driven by a young man and woman who sit very close on the bench seat. The car's ENGINE is MISSING and smoking.\nCHARACTER: Sharon Pogue\nATTRIBUTE-TYPES: NONE\n\nPASSAGE: DETECTIVE SERGEANT ALONZO HARRIS, in black shirt, black leather jacket. And just enough platinum and diamonds to look like somebody. He reads the paper in a booth. The gun leather-tough LAPD vet is a hands-on, blue-collar cop who can kick your ass with a look.\nCHARACTER: Alonzo Harris\nATTRIBUTE-TYPES: Profession, Attire, Attitude\n\nPASSAGE: The SPORTS COMMENTATOR is at the airport and about to interview the heavyweight champion of the world, APOLLO CREED. Creed is twenty-eight years old. He is a tall, smooth-muscled black man with barely a scar on his light coffee-colored face...\nCHARACTER: Apollo Creed\nATTRIBUTE-TYPES: Age, Race, Appearance, Profession, Accomplishment\n\nPASSAGE: A woman's face BANKS INTO SHOT, her head resting against grimy wallpaper. She is tense, sweaty, wide-eyed with concentration. This is CLARICE STARLING, mid-20's, trim, very pretty. She wears Kevlar body armor over a navy windbreaker, khaki pants. Her thick hair is piled under a navy baseball cap. A revolver, clutched in her right hand, hovers by her ear.\nCHARACTER: Clarice Starling\nATTRIBUTE-TYPES: Age, Appearance, Hair Type, Attire, Possession, Emotion, Posture\n\nPASSAGE: The tremendous heat of two huge twin suns settle on a lone figure, Luke Skywalker, a farm boy with heroic aspirations who looks much younger than his eighteen years. His shaggy hair and baggy tunic give him the air of a simple but lovable lad with a prize-winning smile. The lovely young girl huddles in a small alcove as the stormtroopers search through the ship. She is Princess Leia Organa, a member of the Alderaan Senate. Han is a tough, roguish starpilot about thirty years old. A mercenary on a starship, he is simple, sentimental, and cocksure.\nCHARACTER: Princess Leia Organa\nATTRIBUTE-TYPES: Age, Profession\n\nPASSAGE: MICHAEL CORLEONE, dressed in the uniform of a Marine Captain, leads KAY ADAMS through the wedding crowd, occasionally stopped and greeted by friends of the family.\nCHARACTER: Michael Corleone\nATTRIBUTE-TYPES: Attire\n\nPASSAGE: HE IS MELVIN PURVIS (30) cleancut and handsome. A square jaw. Chester Gould modelled Dick Tracy's profile after Purvis. He's not big, but he's tenacious. He's an incarnation of the social elite of his time: white, Southern patrician and a Christian gentleman. With Purvis are Special Agents WARREN BARTON (31) and Purvis's friend CARTER DAUM (29) They have a harder time in the steep woods. They're chasing someone. 
They are guided by East Liverpool police chief\nCHARACTER: Melvin Purvis\nATTRIBUTE-TYPES: Age, Appearance, Ethnicity, Social Status, Religion, Qualities\n\nPASSAGE: A TRANSPORT PLANE has just landed. The cargo doors are open and a tricky unloading operation is under way. Robin Cavendish is being hoisted out of the plane, strapped to a stretcher. A big battery powered respirator is being unloaded alongside him. An ambulance waits to receive him. Medics, RAF personnel and ground crew are gathered round.\nCHARACTER: Robin Cavendish\nATTRIBUTE-TYPES: Health Status\n\n",
"List the attribute types of <CHARACTER> described in <PASSAGE> as a comma-separated list. If the <PASSAGE> does not describe any attribute of <CHARACTER>, answer as NONE.\n\nPASSAGE: The bell has just rung, and Mike Damone comes out of Youth and Law class. He has an absorbed, driven look on his face. He walks past the rows of lockers, and doesn't even notice as he passes Stacy Hamilton standing by her locker. She smiles, grabs his arm affectionately.\nCHARACTER: Stacy Hamilton\nATTRIBUTE-TYPES: NONE\n\nPASSAGE: Sharon Pogue is driving the car. It is two years since we saw her at the accident site. Her partner, ROBBY LEWIS, sips coffee and keeps one eye on the CAD monitor which lists all area police calls. She slows behind a car that is crawling along, an old 60s car, driven by a young man and woman who sit very close on the bench seat. The car's ENGINE is MISSING and smoking.\nCHARACTER: Sharon Pogue\nATTRIBUTE-TYPES: NONE\n\nPASSAGE: DETECTIVE SERGEANT ALONZO HARRIS, in black shirt, black leather jacket. And just enough platinum and diamonds to look like somebody. He reads the paper in a booth. The gun leather-tough LAPD vet is a hands-on, blue-collar cop who can kick your ass with a look.\nCHARACTER: Alonzo Harris\nATTRIBUTE-TYPES: Profession, Attire, Attitude\n\nPASSAGE: The SPORTS COMMENTATOR is at the airport and about to interview the heavyweight champion of the world, APOLLO CREED. Creed is twenty-eight years old. He is a tall, smooth-muscled black man with barely a scar on his light coffee-colored face...\nCHARACTER: Apollo Creed\nATTRIBUTE-TYPES: Age, Race, Appearance, Profession, Accomplishment\n\nPASSAGE: A woman's face BANKS INTO SHOT, her head resting against grimy wallpaper. She is tense, sweaty, wide-eyed with concentration. This is CLARICE STARLING, mid-20's, trim, very pretty. She wears Kevlar body armor over a navy windbreaker, khaki pants. Her thick hair is piled under a navy baseball cap. A revolver, clutched in her right hand, hovers by her ear.\nCHARACTER: Clarice Starling\nATTRIBUTE-TYPES: Age, Appearance, Hair Type, Attire, Possession, Emotion, Posture\n\nPASSAGE: The tremendous heat of two huge twin suns settle on a lone figure, Luke Skywalker, a farm boy with heroic aspirations who looks much younger than his eighteen years. His shaggy hair and baggy tunic give him the air of a simple but lovable lad with a prize-winning smile. The lovely young girl huddles in a small alcove as the stormtroopers search through the ship. She is Princess Leia Organa, a member of the Alderaan Senate. Han is a tough, roguish starpilot about thirty years old. A mercenary on a starship, he is simple, sentimental, and cocksure.\nCHARACTER: Princess Leia Organa\nATTRIBUTE-TYPES: Age, Profession\n\nPASSAGE: MICHAEL CORLEONE, dressed in the uniform of a Marine Captain, leads KAY ADAMS through the wedding crowd, occasionally stopped and greeted by friends of the family.\nCHARACTER: Michael Corleone\nATTRIBUTE-TYPES: Attire\n\nPASSAGE: HE IS MELVIN PURVIS (30) cleancut and handsome. A square jaw. Chester Gould modelled Dick Tracy's profile after Purvis. He's not big, but he's tenacious. He's an incarnation of the social elite of his time: white, Southern patrician and a Christian gentleman. With Purvis are Special Agents WARREN BARTON (31) and Purvis's friend CARTER DAUM (29) They have a harder time in the steep woods. They're chasing someone. 
They are guided by East Liverpool police chief\nCHARACTER: Melvin Purvis\nATTRIBUTE-TYPES: Age, Appearance, Ethnicity, Social Status, Religion, Qualities\n\nPASSAGE: A TRANSPORT PLANE has just landed. The cargo doors are open and a tricky unloading operation is under way. Robin Cavendish is being hoisted out of the plane, strapped to a stretcher. A big battery powered respirator is being unloaded alongside him. An ambulance waits to receive him. Medics, RAF personnel and ground crew are gathered round.\nCHARACTER: Robin Cavendish\nATTRIBUTE-TYPES: Health Status\n\nPASSAGE: PLACEHOLDER\nCHARACTER: PLACEHOLDER\nATTRIBUTE-TYPES:"
] |
2024-01-10 | usc-sail/mica-character-attribute-extraction | 04-find-attributes-llm.py | """Find character attributes described in a random sample of passages
Input
-----
script-passages
path = mica-character-attribute-extraction/script-passages.csv
csv file containing imdb-id, passage-id, passage, and characters fields
book-passages
path = mica-character-attribute-extraction/book-passages.csv
csv file containing book, passage-id, passage, and characters fields
Output
-----
attributes
path = mica-character-attribute-extraction/passage-attributes.csv
csv file containing story-id, passage-id, passage, characters, attributes
story-id could be imdb-id or book name
attributes are (type, value) pairs
Parameters
-----
model
type of gpt model to use
sample
sample size for each source
seed
random seed
calculate-cost
boolean flag that you can set to estimate the cost
"""
from lib import openai_prompting
import os
import re
import pandas as pd
import tqdm
from copy import deepcopy
from absl import flags
from absl import app
# define command-line flags
FLAGS = flags.FLAGS
flags.DEFINE_string("model", default="gpt-4-turbo-1106-preview", help="OpenAI model to use for prompting")
flags.DEFINE_integer("sample", default=1000, help="Number of story segments to sample")
flags.DEFINE_integer("seed", default=99, help="Seed for random sampling")
flags.DEFINE_bool("cost", default=False, help="Estimate cost only")
flags.DEFINE_float("input_rate", default=3e-6, help="model token rate for input tokens")
flags.DEFINE_float("output_rate", default=6e-6, help="model token rate for input tokens")
# directories and files
data_dir = os.path.join(os.getenv("DATA_DIR"), "mica-character-attribute-extraction")
scripts_file = os.path.join(data_dir, "script-passages.csv")
books_file = os.path.join(data_dir, "book-passages.csv")
attributes_file = os.path.join(data_dir, "passage-attributes.csv")
def prompt_character_attributes(_):
# read passages
scripts_df = pd.read_csv(scripts_file, index_col=None)
books_df = pd.read_csv(books_file, index_col=None)
# sample story segments
sampled_scripts_df = scripts_df.sample(FLAGS.sample, random_state=FLAGS.seed)
sampled_books_df = books_df.sample(FLAGS.sample, random_state=FLAGS.seed)
sampled_scripts_df.rename(columns={"imdb-id": "story-id"}, inplace=True)
sampled_books_df.rename(columns={"book": "story-id"}, inplace=True)
passages_df = pd.concat([sampled_scripts_df, sampled_books_df])
print(f"{len(passages_df)} passages will be prompted")
# create the message template
messages_template = [
{
"role": "user",
"content": ("Find the character attributes in the following passage and write them as (character, "
"attribute-type, attribute-value) tuples in a numbered list. The attribute-type text should "
"be as brief and concise as possible.\nPassage: ")
}
]
# run the prompts or estimate the cost
estimated_cost = 0
actual_cost = 0
attributes = []
completions = []
for segment in tqdm.tqdm(passages_df["passage"], unit="passage", desc="prompting"):
messages = deepcopy(messages_template)
segment = re.sub("\s+", " ", segment.strip())
messages[0]["content"] += segment
if FLAGS.cost:
estimated_cost += openai_prompting.estimate_cost(messages, FLAGS.model, FLAGS.input_rate,
FLAGS.output_rate, 50)
else:
n_prompt_tokens = openai_prompting.num_tokens_from_messages(messages, FLAGS.model)
completion = openai_prompting.prompt_sample(messages, FLAGS.model, max_tokens=n_prompt_tokens + 256)
if completion is not None:
attributes.append(completion.choices[0].message.content)
completions.append(completion.model_dump_json())
                actual_cost += (completion.usage.prompt_tokens * FLAGS.input_rate
                                + completion.usage.completion_tokens * FLAGS.output_rate)
else:
attributes.append("")
completions.append("")
# print the cost, save the output
if FLAGS.cost:
print(f"Estimated cost = ${estimated_cost}")
else:
print(f"Cost incurred = ${actual_cost}")
passages_df["attributes"] = attributes
passages_df["completions"] = completions
passages_df.to_csv(attributes_file, index=False)
if __name__ == '__main__':
app.run(prompt_character_attributes) | [
"Find the character attributes in the following passage and write them as (character, attribute-type, attribute-value) tuples in a numbered list. The attribute-type text should be as brief and concise as possible.\nPassage: "
] |
2024-01-10 | Jimicef/Jimi | backend~handlers.py | from fastapi import Query, UploadFile
from typing import Annotated
from bs4 import BeautifulSoup
import requests
from prompts import *
from utils import *
from opensearchpy import OpenSearch
from fastapi.responses import StreamingResponse
import openai
import json
import os
openai.api_key = os.environ["OPENAI_API_KEY"]
Google_API_KEY = os.environ["Google_API_KEY"]
Google_SEARCH_ENGINE_ID = os.environ["Google_SEARCH_ENGINE_ID"]
opensearch_url = os.environ["OPENSEARCH_URL"]
auth = (os.environ["OPENSEARCH_ID"], os.environ["OPENSEARCH_PW"])
client = OpenSearch(
opensearch_url,
http_compress=True, # enables gzip compression for request bodies
http_auth=auth,
use_ssl=False,
verify_certs=False,
ssl_assert_hostname=False,
ssl_show_warn=False,
)
async def get_service_list(keyword : str = Query(None,description = "검색 키워드"),
count : int = Query(0,description = "페이지 번호"),
chktype1 : str = Query(None,description = "서비스 분야"),
siGunGuArea : str = Query(None,description = "시/군/구 코드"),
sidocode : str = Query(None,description = "시/도 코드"),
svccd : str = Query(None,description = "사용자 구분"),
voice : bool = Query(None,description = "시각 장애인 자막 생성 여부")
):
url = "https://www.gov.kr/portal/rcvfvrSvc/svcFind/svcSearchAll"
div_count = count // 2
last_page = False
voice_answer = ""
params = {
"siGunGuArea" : siGunGuArea,
"sidocode" : sidocode,
'svccd' : svccd,
"chktype1" : chktype1,
"startCount": 12*div_count,
"query": keyword
}
response = requests.get(url,params=params)
if response.status_code == 200:
html = response.text
soup = BeautifulSoup(html, 'html.parser')
else :
return response.status_code
option_tag = soup.select_one('#orgSel option')
    text = option_tag.text # e.g. '전체 (9,880)' ("All (9,880)")
    # Extract the number inside the parentheses
result_count = int(''.join(filter(str.isdigit, text)))
if result_count == 0:
return {
"vocieAnswer": "검색 결과가 없습니다.",
"answer" : None,
"support" : None,
"lastpage" : True
}
page_count = (result_count - 1) // 12
if result_count == 0:
last_page = True
    elif div_count == page_count:
        # Case: the first half-page is the last page
        if count % 2 == 0 and result_count - (6 * count) <= 6:
            last_page = True
        # Case: the second half-page is the last page
        elif count % 2 != 0 and result_count - (6 * count) <= 6:
            last_page = True
last_page = True
card_data_list = []
cards = soup.find_all('div', class_='card-item')
for card in cards:
card_tag = soup.find('div', class_='card-tag')
department = card_tag.find('em', class_='chip').text
title = card.find('a', class_='card-title')
card_title = title.text
card_id = title.get('href').split('/')[4].split('?')[0]
card_desc = card.find('p', class_='card-desc').text
card_info_list = card.find_all('li', class_='card-list')
card_info = {
"institution" : department,
"serviceId" : card_id,
"title" : card_title,
"description" : card_desc
}
for info in card_info_list:
try:
strong_text = info.find('strong', class_='card-sub').text
except:
strong_text = None
try:
card_text = info.find('span', class_='card-text').text
except:
card_text = None
if strong_text.split()[0] == "신청기간":
card_info["dueDate"] = card_text
elif strong_text.split()[0] == "접수기관":
card_info["rcvInstitution"] = card_text
elif strong_text.split()[0] == "전화문의":
card_info["phone"] = card_text
elif strong_text.split()[0] == "지원형태":
card_info["format"] = card_text
if len(card_info.keys()) > 6:
card_data_list.append(card_info)
else:
break
if count % 2 == 0:
card_data_list = card_data_list[:6]
else :
card_data_list = card_data_list[6:]
# request = requests.Request('GET', url, params=params)
# prepared_request = request.prepare()
    # # Check the final requested URL
# final_url = prepared_request.url
if keyword:
message = f"{keyword}에 대한 {result_count}개의 통합검색 결과입니다."
else:
message = f"선택한 조건에 대한 {result_count}개의 통합검색 결과입니다."
if voice:
for i in range(6):
try:
voice_answer += f"{i+1}번: {card_data_list[i]['title']}\n"
except:
print(i,len(card_data_list))
return {
"answer" : message,
"support" : card_data_list,
"lastpage" : last_page,
"voiceAnswer" : voice_answer
}
async def get_chat(serviceId : str = Query(None,description = "서비스 ID"),
voice : bool = Query(None,description = "시각 장애인 자막 생성 여부")):
cond = serviceId
voice_answer = ""
url = f"http://api.odcloud.kr/api/gov24/v3/serviceDetail?page=1&perPage=10&cond%5B%EC%84%9C%EB%B9%84%EC%8A%A4ID%3A%3AEQ%5D={cond}&serviceKey=aVyQkv5W8mV6fweNFyOmB3fvxjmcuMvbOl4fkTCOVH1kCgOCcSkFa8UKeUBljB3Czd5VwvoIYKkH%2FpWWwVvpKQ%3D%3D"
response = requests.get(url)
res = response.json()
ret = {}
ret["url"] = "https://www.gov.kr/portal/rcvfvrSvc/dtlEx/"+serviceId
for key, value in res['data'][0].items():
askey = key
if key == '구비서류':
askey = "docs"
elif key == '소관기관명':
askey = "institution"
elif key == '서비스ID':
askey = "serviceId"
elif key == "서비스명":
askey = "title"
elif key == "서비스목적":
askey = "description"
elif key == "선정기준":
askey = "selection"
elif key == "문의처":
askey = "rcvInstitution"
elif key == "신청기한":
askey = "dueDate"
elif key == "신청방법":
askey = "way"
elif key == "지원내용":
askey = "content"
elif key == "지원대상":
askey = "target"
elif key == "지원유형":
askey = "format"
else:
askey = key
if key != askey :
ret[askey] = value
if voice:
messages = [
{"role": "user","content": f"{GET_CHAT_PROMPT}"},
{"role": "user","content": f"SERVICE INFORMATION: {ret}"}
]
gpt_response = openai.ChatCompletion.create(
model=MODEL,
messages=messages,
temperature=0,
)
voice_answer = gpt_response["choices"][0]["message"]["content"]
return {
"voiceAnswer" : voice_answer,
"summary" : ret
}
async def post_chat(data: Annotated[dict,{
"username" : str,
"question" : str,
"history" : list,
"summary" : dict,
"voice" : int
}]):
is_stream = not data["voice"]
result = [{'link':None},{'link':None},{'link':None}]
messages = [
{"role": "system", "content": f"You can use this ***subsidy service information***: {data['summary']}"},
{"role": "user","content": f"""
user query : {data['question']}"""}
]
messages.extend(data['history'])
first_response = openai.ChatCompletion.create(
model=MODEL,
messages=messages,
temperature=0,
functions=FUNCTIONS
)
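    # Route on the model's decision: answer from the loaded service summary,
    # fall back to a Google Custom Search, or return the plain reply as-is.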
if first_response['choices'][0]['finish_reason'] == 'function_call':
full_message = first_response["choices"][0]
if full_message["message"]["function_call"]["name"] == "answer_with_service_info":
# parsed_output = json.loads(full_message["message"]["function_call"]["arguments"])
result[0]['link'] = data['summary']['url']
messages=[
{"role": "system", "content": MAIN_PROMPT},
{"role": "user", "content": CHAT_PROMPT},
{
"role": "user",
"content": f"""
"Please generate a response based on the chat history below, but focus on the last user query when creating the answer."
CHAT_HISTORY:
{data['history']}
"""
},
]
# messages.extend(data['history'])
del data['summary']['url']
messages.append(
{
"role": "user",
"content": f"""Please generate your response by referring specifically to the service information's key-value pairs that directly relate to the user's query.
You will follow the conversation and respond to the queries asked by the 'user's content. You will act as the assistant.
you NEVER include links(e.g. [링크](https://obank.kbstar.com/quics?page=C016613&cc=b061496:b061645&isNew=N&prcode=DP01000935)) in the response.
User query: {data['question']}
service information:\n{data['summary']}\nAnswer:\n""",
}
)
response = openai.ChatCompletion.create(
model=MODEL,
messages=messages,
temperature=0,
max_tokens = 1000,
stream=is_stream
)
elif full_message["message"]["function_call"]["name"] == "get_search_info":
parsed_output = json.loads(full_message["message"]["function_call"]["arguments"])
search_query = parsed_output["keyword"]
url = f"https://www.googleapis.com/customsearch/v1?key={Google_API_KEY}&cx={Google_SEARCH_ENGINE_ID}&q={search_query}"
res = requests.get(url).json()
search_result = res.get("items")
cnt = 0
for i in range(len(search_result)):
if cnt == 3:
break
if "snippet" in search_result[i].keys():
search_info = {}
search_info['link'] = search_result[i]['link']
search_info['title'] = search_result[i]['title']
search_info['snippet'] = search_result[i]['snippet']
result[cnt] = search_info
cnt += 1
messages=[
{"role": "system", "content": MAIN_PROMPT},
{"role": "user", "content": CHAT_PROMPT},
{
"role": "user",
"content": f"""
"Please generate a response based on the chat history below, but focus on the last user query when creating the answer."
CHAT_HISTORY:
{data['history']}
"""
}
]
# messages.extend(data['history'])
messages.append(
{
"role": "user",
"content": f"""Please generate your response by referring specifically to google search result's key-value pairs that directly relate to the user's query.
You will follow the conversation and respond to the queries asked by the 'user's content. You will act as the assistant
you don't have to provide links(e.g. [링크](https://obank.kbstar.com/quics?page=C016613&cc=b061496:b061645&isNew=N&prcode=DP01000935)) in the response.
please answer in korean.
User query: {data['question']}
Google search result:\n{result}\nAnswer:\n""",
}
)
response = openai.ChatCompletion.create(
model=MODEL,
messages=messages,
temperature=0,
max_tokens=1000,
stream=is_stream
)
else:
raise Exception("Function does not exist and cannot be called")
else:
response = first_response['choices'][0]['message']['content']
def generate_chunks_default():
for chunk in response:
yield chunk
if is_stream:
return StreamingResponse(
content=generate_chunks_default(),
media_type="text/plain"
)
else:
return {
"voiceAnswer" : response,
"links" : [None,None,None]
}
if is_stream:
def generate_chunks():
for chunk in response:
try :
yield chunk["choices"][0]["delta"].content
except :
yield f"ˇ{result[0]['link']}˘{result[1]['link']}˘{result[2]['link']}"
return StreamingResponse(
content=generate_chunks(),
media_type="text/plain"
)
else:
return {
"voiceAnswer" : response['choices'][0]['message']['content'],
"links" : [result[0]['link'],result[1]['link'],result[2]['link']]
}
async def post_voice_chat(file: UploadFile, history: UploadFile):
    # Save the uploaded MP3 file
voice_answer=""
function_name = ""
get_service_params = {}
get_chat_params = {}
post_chat_params = {}
history_json = await history.read()
chat_history = json.loads(history_json)
with open(file.filename, "wb") as f:
f.write(file.file.read())
    # Pass the saved MP3 file's path to the Whisper transcribe call
with open(file.filename, "rb") as f:
transcript = openai.Audio.transcribe(
file=f,
model=AUDIO_MODEL,
prompt="This conversation is in Korean",
)
os.remove(file.filename)
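    # The Whisper transcript is now routed through GPT function-calling to decide
    # which backend endpoint the client should call next and with what parameters.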
# messages = [{"role": "system", "content" : "If it's unclear which function to use, you should ask the user for the required function arguments again."},
# {"role": "system", "content" : "The function call should prioritize the user's content with the highest weight, which is the last one."}
# ]
messages = [{"role": "system", "content" : VOICE_FUNCTION_CALL_PROMPT},]
# messages = [{"role": "system", "content" : "you must function call post_api_chat If you determine that it is not the appropriate time to call the 'get_api_service_list' or 'get_api_chat' functions"},]
messages.extend(chat_history)
messages.append({"role": "user","content": transcript["text"]})
response = openai.ChatCompletion.create(
model=MODEL,
messages=messages,
temperature=0,
functions=VOICE_FUNCTIONS,
)
if response["choices"][0]['finish_reason'] != 'function_call':
# raise Exception("finish_reason is not function_call")
voice_answer = response["choices"][0]['message']['content']
else:
function_name = response['choices'][0]['message']['function_call']['name']
params = json.loads(response['choices'][0]['message']['function_call']['arguments'])
if function_name == 'get_api_service_list':
get_service_params = params
# try:
# get_service_params['siGunGuArea'] = sub_region_code[get_service_params['sidocode']][get_service_params['siGunGuArea']]
# except:
# get_service_params['siGunGuArea'] = sub_region_code[get_service_params['sidocode']]["전체"]
# get_service_params['chktype1'] = ["생활안정", "주거·자립", "보육·교육", "고용·창업", "보건·의료", "행정·안전", "임신·출산", "보호·돌봄", "문화·환경", "농림축산어업"]
            try:
                get_service_params['sidocode'] = [get_service_params['sidocode']+" "+get_service_params['siGunGuArea']]
                get_service_params['chktype1'] = [get_service_params['chktype1']]
            except Exception:
                # The model occasionally omits one of the expected arguments; log what was returned and continue.
                print(get_service_params)
get_service_params['svccd'] = [get_service_params['svccd']]
elif function_name == 'get_number':
get_chat_params = params
elif function_name == 'post_api_chat':
post_chat_params = params
else:
raise Exception("Function does not exist")
return {
"userText": transcript["text"],
"voiceAnswer": voice_answer,
"function": function_name,
"serviceParams": get_service_params,
"getChatParams": get_chat_params,
"postChatParams": post_chat_params,
}
async def get_voice_chat():
return {
"voiceAnswer": "안녕하세요! 저는 지원금 찾기 도우미, 지미입니다. 현재 거주하고 계신 지역과 지원받고 싶은 상황에 대해 아래 버튼을 누르고 말씀해주세요"
}
async def post_opensearch_service_list(data: Annotated[dict,{
"keyword" : str,
"count" : int,
"chktype1" : list,
"sidocode" : list,
"svccd" : list,
"voice" : int
}]):
last_page = False
voice_answer = ""
low_query = {
"query": {
"bool": {
"must": [
{ "terms": { "소관기관명.keyword": data['sidocode'] } }
],
"should": [
{ "match": { "서비스명": data['keyword'] } },
{ "terms": { "사용자구분.keyword": data['svccd'] } },
                    { "terms": { "서비스분야.keyword": data['chktype1'] } },  # must be passed as a list, e.g. ["생활안정", "주거·자립"]
]
}
}
}
query = {
"size" : 6,
"from" : 6*data['count'],
"query": low_query['query']
}
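    # Pagination note (added for clarity): each page returns 6 hits ("size") and "from" skips
    # 6 * data['count'] earlier hits, so data['count'] acts as a zero-based page index.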
response = client.search(
body = query,
index = 'jimi-index'
)
card_data_list = []
for hit in response['hits']['hits']:
card_info = {}
card_info["institution"] = hit['_source']['소관기관명']
card_info["serviceId"] = hit['_source']['서비스ID']
card_info["title"] = hit['_source']['서비스명']
card_info["description"] = hit['_source']['지원내용']
card_info["dueDate"] = hit['_source']['신청기한']
card_info["rcvInstitution"] = hit['_source']['부서명']
card_info["phone"] = hit['_source']['전화문의']
card_info["format"] = hit['_source']['지원유형']
card_data_list.append(card_info)
if data['keyword']:
message = f"{data['keyword']}에 대한 {response['hits']['total']['value']}개의 통합검색 결과입니다."
else:
message = f"선택한 조건에 대한 {response['hits']['total']['value']}개의 통합검색 결과입니다."
if data['voice']:
for i, card in enumerate(card_data_list):
voice_answer += f"{i+1}번: {card['title']}\n"
# for i in range(6):
# try:
# voice_answer += f"{i+1}번: {card_data_list[i]['title']}\n"
# except:
# print(i,len(card_data_list))
if len(card_data_list) == 0:
voice_answer += "검색 결과가 없습니다! 다른 키워드를 검색해주세요"
else:
voice_answer += "자세히 알고싶은 지원금 있다면, 번호를 말씀해주세요! 없다면, 다음이라고 말해주세요!"
if (data['count']+1)*6 >= response['hits']['total']['value']:
last_page = True
support_array = [card['title'] for card in card_data_list]
return {
"answer" : message,
"support" : card_data_list,
"lastpage" : last_page,
"voiceAnswer" : voice_answer,
"supportArray" : support_array
} | [
"SERVICE INFORMATION: PLACEHOLDER",
"\n \"Please generate a response based on the chat history below, but focus on the last user query when creating the answer.\"\n CHAT_HISTORY:\n PLACEHOLDER\n ",
"You can use this ***subsidy service information***: PLACEHOLDER",
"PLACEHOLDER",
"question",
"\n user query : PLACEHOLDER",
"\n \"Please generate a response based on the chat history below, but focus on the last user query when creating the answer.\"\n CHAT_HISTORY:\n PLACEHOLDER\n "
] |
2024-01-10 | DJBen/modal-examples | 06_gpu_and_ml~langchains~potus_speech_qanda.py | # ---
# args: ["--query", "How many oil barrels were released from reserves"]
# ---
# # Question-answering with LangChain
#
# In this example we create a large-language-model (LLM) powered question answering
# web endpoint and CLI. Only a single document is used as the knowledge-base of the application,
# the 2022 USA State of the Union address by President Joe Biden. However, this same application structure
# could be extended to do question-answering over all State of the Union speeches, or other large text corpuses.
#
# It's the [LangChain](https://github.com/hwchase17/langchain) library that makes this all so easy. This demo is only around 100 lines of code!
# ## Defining dependencies
#
# The example uses three PyPI packages to make scraping easy, and four to build and run the question-answering functionality.
# These are installed into a Debian Slim base image using the `pip_install` function.
#
# Because OpenAI's API is used, we also specify the `openai-secret` Modal Secret, which contains an OpenAI API key.
#
# A `docsearch` global variable is also declared to facilitate caching a slow operation in the code below.
import itertools
from pathlib import Path
import modal
image = modal.Image.debian_slim().pip_install(
# scraping pkgs
"beautifulsoup4~=4.11.1",
"httpx~=0.23.3",
"lxml~=4.9.2",
# langchain pkgs
"faiss-cpu~=1.7.3",
"langchain~=0.0.7",
"openai~=0.26.3",
"tenacity~=8.2.1",
)
stub = modal.Stub(
name="example-langchain-qanda",
image=image,
secrets=[modal.Secret.from_name("openai-secret")],
)
docsearch = None # embedding index that's relatively expensive to compute, so caching with global var.
# ## Scraping the speech from whitehouse.gov
#
# It's super easy to scrape the transcript of Biden's speech using `httpx` and `BeautifulSoup`.
# This speech is just one document and it's relatively short, but it's enough to demonstrate
# the question-answering capability of the LLM chain.
def scrape_state_of_the_union() -> str:
import httpx
from bs4 import BeautifulSoup
url = "https://www.whitehouse.gov/state-of-the-union-2022/"
# fetch article; simulate desktop browser
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9"
}
response = httpx.get(url, headers=headers)
soup = BeautifulSoup(response.text, "lxml")
# get all text paragraphs & construct string of article text
speech_text = ""
speech_section = soup.find_all(
"div", {"class": "sotu-annotations__content"}
)
if speech_section:
paragraph_tags = speech_section[0].find_all("p")
speech_text = "".join([p.get_text() for p in paragraph_tags])
return speech_text.replace("\t", "")
# ## Constructing the Q&A chain
#
# At a high-level, this LLM chain will be able to answer questions asked about Biden's speech and provide
# references to which parts of the speech contain the evidence for given answers.
#
# The chain combines a text-embedding index over parts of Biden's speech with OpenAI's [GPT-3 LLM](https://openai.com/blog/chatgpt/).
# The index is used to select the most likely relevant parts of the speech given the question, and these
# are used to build a specialized prompt for the OpenAI language model.
#
# For more information on this, see [LangChain's "Question Answering" notebook](https://langchain.readthedocs.io/en/latest/use_cases/evaluation/question_answering.html).
def retrieve_sources(sources_refs: str, texts: list[str]) -> list[str]:
"""
Map back from the references given by the LLM's output to the original text parts.
"""
clean_indices = [
r.replace("-pl", "").strip() for r in sources_refs.split(",")
]
numeric_indices = (int(r) if r.isnumeric() else None for r in clean_indices)
return [
texts[i] if i is not None else "INVALID SOURCE" for i in numeric_indices
]
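# Illustrative example (added for clarity; not in the original file): with sources_refs "1-pl, 3-pl"
# and texts ["a", "b", "c", "d"], retrieve_sources returns ["b", "d"]; the "-pl" suffix the chain
# appends to source ids is stripped before indexing back into `texts`.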
def create_retrying_openai_embedder():
"""
New OpenAI accounts have a very low rate-limit for their first 48 hrs.
It's too low to embed even just this single Biden speech.
As a workaround this wrapper handles rate-limit errors and slows embedding requests.
Ref: https://platform.openai.com/docs/guides/rate-limits/overview.
"""
from langchain.embeddings.openai import OpenAIEmbeddings
from tenacity import retry, wait_exponential
def batched(iterable, n):
if n < 1:
raise ValueError("n must be at least one")
it = iter(iterable)
batch = list(itertools.islice(it, n))
while batch:
yield batch
batch = list(itertools.islice(it, n))
class RetryingEmbedder(OpenAIEmbeddings):
def embed_documents(self, texts: list[str]) -> list[list[float]]:
retrying_fn = retry(
wait=wait_exponential(multiplier=1, min=4, max=10)
)(super().embed_documents)
all_embeddings = []
for i, batch in enumerate(batched(texts, n=5)):
print(f"embedding documents batch {i}...")
all_embeddings.extend(retrying_fn(batch))
return all_embeddings
return RetryingEmbedder()
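# Note (added for clarity): the wrapper above embeds texts in batches of 5 and retries each batch
# with exponential backoff (roughly 4-10 s waits) whenever a request fails, e.g. on rate limits.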
def qanda_langchain(query: str) -> tuple[str, list[str]]:
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
# Support caching speech text on disk.
speech_file_path = Path("state-of-the-union.txt")
if speech_file_path.exists():
state_of_the_union = speech_file_path.read_text()
else:
print("scraping the 2022 State of the Union speech")
state_of_the_union = scrape_state_of_the_union()
speech_file_path.write_text(state_of_the_union)
# We cannot send the entire speech to the model because OpenAI's model
# has a maximum limit on input tokens. So we split up the speech
# into smaller chunks.
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
print("splitting speech into text chunks")
texts = text_splitter.split_text(state_of_the_union)
# Embedding-based query<->text similarity comparison is used to select
# a small subset of the speech text chunks.
# Generating the `docsearch` index is too slow to re-run on every request,
# so we do rudimentary caching using a global variable.
global docsearch
if not docsearch:
print("generating docsearch indexer")
embeddings = create_retrying_openai_embedder()
docsearch = FAISS.from_texts(
texts,
embeddings,
metadatas=[{"source": i} for i in range(len(texts))],
)
print("selecting text parts by similarity to query")
docs = docsearch.similarity_search(query)
chain = load_qa_with_sources_chain(
OpenAI(temperature=0), chain_type="stuff"
)
print("running query against Q&A chain.\n")
result = chain(
{"input_documents": docs, "question": query}, return_only_outputs=True
)
output: str = result["output_text"]
parts = output.split("SOURCES: ")
if len(parts) == 2:
answer, sources_refs = parts
sources = retrieve_sources(sources_refs, texts)
elif len(parts) == 1:
answer = parts[0]
sources = []
else:
raise RuntimeError(
f"Expected to receive an answer with a single 'SOURCES' block, got:\n{output}"
)
return answer.strip(), sources
# ## Modal Functions
#
# With our application's functionality implemented we can hook it into Modal.
# As said above, we're implementing a web endpoint, `web`, and a CLI command, `cli`.
@stub.function()
@stub.web_endpoint(method="GET")
def web(query: str, show_sources: bool = False):
answer, sources = qanda_langchain(query)
if show_sources:
return {
"answer": answer,
"sources": sources,
}
else:
return {
"answer": answer,
}
@stub.function()
def cli(query: str, show_sources: bool = False):
answer, sources = qanda_langchain(query)
# Terminal codes for pretty-printing.
bold, end = "\033[1m", "\033[0m"
print(f"🦜 {bold}ANSWER:{end}")
print(answer)
if show_sources:
print(f"🔗 {bold}SOURCES:{end}")
for text in sources:
print(text)
print("----")
# ## Test run the CLI
#
# ```bash
# modal run potus_speech_qanda.py --query "What did the president say about Justice Breyer"
# 🦜 ANSWER:
# The president thanked Justice Breyer for his service and mentioned his legacy of excellence. He also nominated Ketanji Brown Jackson to continue in Justice Breyer's legacy.
# ```
#
# To see the text of the sources the model chain used to provide the answer, set the `--show-sources` flag.
#
# ```bash
# modal run potus_speech_qanda.py \
# --query "How many oil barrels were released from reserves" \
# --show-sources=True
# ```
#
# ## Test run the web endpoint
#
# Modal makes it trivially easy to ship LangChain chains to the web. We can test drive this app's web endpoint
# by running `modal serve potus_speech_qanda.py` and then hitting the endpoint with `curl`:
#
# ```bash
# curl --get \
# --data-urlencode "query=What did the president say about Justice Breyer" \
# https://modal-labs--example-langchain-qanda-web.modal.run
# ```
#
# ```json
# {
# "answer": "The president thanked Justice Breyer for his service and mentioned his legacy of excellence. He also nominated Ketanji Brown Jackson to continue in Justice Breyer's legacy."
# }
# ```
| [] |
2024-01-10 | mertbozkir/PresentX | mypyscript.py | import os
import openai
import streamlit as st
import time
api_key = os.getenv('OPENAI_API_KEY', '')  # read the key from the environment instead of hardcoding a placeholder
openai.api_key = api_key
st.title('GPT-3 Hackathon')
st.text('Instructions : \n User: "description of output"\n Code:')
text_input = st.text_area(label='description',height=100)
prompt_key = r"\nUser: Make a detailed and informative presentation on Glioblastoma. First page should be title page, with title Malignant Brain Tumor.\nCode: \documentclass{beamer}\mode<presentation> {\usetheme{Madrid}\usecolortheme{wolverine}}\title[Short title]{Malignant Brain Tumor}\begin{document}\begin{frame}\titlepage\end{frame}\begin{frame}{Glioblastoma} \begin{block}{Introduction} \begin{itemize} \item Glioblastoma (GBM) is a type of cancerous brain tumor that originates from the glial cells, which are the cells that surround and support neurons. \item The prognosis for GBM is poor with a mean survival time of 12–15 months from diagnosis. \end{itemize} \end{block} \begin{block}{Causes} \begin{itemize} \item The exact cause of GBM is unknown, but it is thought to be caused by a combination of genetic and environmental factors.\item Glioblastoma is known to occur with exposure to ionizing radiation, benzene, aniline, certain chemicals, and asbestos.\end{itemize} \end{block} \begin{block}{Symptoms} \begin{itemize} \item The symptoms of GBM are related to both the location and the size of the tumor. \item The symptoms are described as a spectrum ranging from completely asymptomatic to very serious.\end{itemize} \end{block} \end{frame}\end{document}\n\nUser: I need a slide consisting of table on Epoch and Accuracy\nCode:\documentclass{beamer}\mode<presentation> { \usetheme{Madrid} }\begin{document} \begin{frame} \frametitle{Epochs} \begin{table} \begin{tabular}{l l l} \toprule \textbf{Epoch} & \textbf{Accuracy} \\ \midrule Epoch 10 & 53.2\% \\ Epoch 20 & 60\% \\ Epoch 30 & 68.8\% \\ \bottomrule \end{tabular} \caption{Epochs and Accuracy} \end{table} \end{frame} \end{document}\n\nUser: Make a presentation with one slide that shows Einstein's equation relating mass to energy.\nCode: \documentclass{beamer}\mode<presentation> {\usetheme{Madrid}\usecolortheme{wolverine}}}\begin{document}\begin{frame}\frametitle{Einstein's equation for Special Relativity}\begin{theorem}[Mass--energy equivalence]$E = mc^2$\end{theorem}\end{frame}\end{document}\n\nUser: Give me a slide on Euler Equations\nCode: \documentclass{beamer} \mode<presentation> { \usetheme{Madrid}\n} \begin{document} \begin{frame} \frametitle{Euler Equations} {Euler Equations} \begin{equation}e^{i\pi}+1=0\end{equation} \begin{equation}i\,\!=\,\sqrt{-1} \end{equation} \begin{equation}e^{\pi i}+1=0\end{equation} \begin{equation}i^{(1)}=\sqrt{-1} \end{equation} \begin{equation}e^{i\pi}=-1\end{equation} \begin{equation}i^{(2)}=-1\end{equation} \begin{equation}e^{-i\pi}=-1\end{equation} \begin{equation}i^{(3)}=-1\end{equation} \end{frame} \end{document}\n\nUser: Create a presentation with the title Machine Learning & AI. First page should be title page. Second page should be an overview slide, third page should have examples of application of modern Machine Learning. 
\nCode: \documentclass{beamer}\mode<presentation> {\usetheme{Madrid}\usecolortheme{wolverine}}\title[Short title]{Machine Learning & AI}\begin{document}\begin{frame}\titlepage \end{frame}\begin{frame}\frametitle{Overview}\tableofcontents\end{frame}\section{Sub disciplines within Machine Learning} \subsection{Computer Vision, Natural Language Processing, Reinforcement Learning}\begin{frame}\frametitle{Modern Machine Learning Applications}\begin{block}{Reinforcement Learning}{Autonomous vehicles and self-driving cars}\end{block}\begin{block}{Computer Vision}{Automatic face recognition}\end{block}\begin{block}{Natural Language Processing}Using language models to auto generate programming code from natural instructions\end{block}\end{frame}\end{document}\n\n:"
if len(text_input) > 0:
response = openai.Completion.create(
engine="davinci",
prompt=prompt_key+text_input,
temperature=0.7,
max_tokens=736,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["\n", "User:", "Code:"])
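    # Note (added for clarity): prompt_key supplies few-shot "User:/Code:" examples, and the stop
    # sequences above end generation before the model starts a new "User:" turn, so the completion
    # should contain only the LaTeX/Beamer code. The time.sleep(10) below merely delays rendering;
    # the Completion call is synchronous, so it could likely be removed.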
time.sleep(10)
st.text('Super convenient code: ')
# st.write(response)
st.write(response['choices'][0]['text'])
else:
print('NoInputYet')
| [
"\\nUser: Make a detailed and informative presentation on Glioblastoma. First page should be title page, with title Malignant Brain Tumor.\\nCode: \\documentclass{beamer}\\mode<presentation> {\\usetheme{Madrid}\\usecolortheme{wolverine}}\\title[Short title]{Malignant Brain Tumor}\\begin{document}\\begin{frame}\\titlepage\\end{frame}\\begin{frame}{Glioblastoma} \\begin{block}{Introduction} \\begin{itemize} \\item Glioblastoma (GBM) is a type of cancerous brain tumor that originates from the glial cells, which are the cells that surround and support neurons. \\item The prognosis for GBM is poor with a mean survival time of 12–15 months from diagnosis. \\end{itemize} \\end{block} \\begin{block}{Causes} \\begin{itemize} \\item The exact cause of GBM is unknown, but it is thought to be caused by a combination of genetic and environmental factors.\\item Glioblastoma is known to occur with exposure to ionizing radiation, benzene, aniline, certain chemicals, and asbestos.\\end{itemize} \\end{block} \\begin{block}{Symptoms} \\begin{itemize} \\item The symptoms of GBM are related to both the location and the size of the tumor. \\item The symptoms are described as a spectrum ranging from completely asymptomatic to very serious.\\end{itemize} \\end{block} \\end{frame}\\end{document}\\n\\nUser: I need a slide consisting of table on Epoch and Accuracy\\nCode:\\documentclass{beamer}\\mode<presentation> { \\usetheme{Madrid} }\\begin{document} \\begin{frame} \\frametitle{Epochs} \\begin{table} \\begin{tabular}{l l l} \\toprule \\textbf{Epoch} & \\textbf{Accuracy} \\\\ \\midrule Epoch 10 & 53.2\\% \\\\ Epoch 20 & 60\\% \\\\ Epoch 30 & 68.8\\% \\\\ \\bottomrule \\end{tabular} \\caption{Epochs and Accuracy} \\end{table} \\end{frame} \\end{document}\\n\\nUser: Make a presentation with one slide that shows Einstein's equation relating mass to energy.\\nCode: \\documentclass{beamer}\\mode<presentation> {\\usetheme{Madrid}\\usecolortheme{wolverine}}}\\begin{document}\\begin{frame}\\frametitle{Einstein's equation for Special Relativity}\\begin{theorem}[Mass--energy equivalence]$E = mc^2$\\end{theorem}\\end{frame}\\end{document}\\n\\nUser: Give me a slide on Euler Equations\\nCode: \\documentclass{beamer} \\mode<presentation> { \\usetheme{Madrid}\\n} \\begin{document} \\begin{frame} \\frametitle{Euler Equations} {Euler Equations} \\begin{equation}e^{i\\pi}+1=0\\end{equation} \\begin{equation}i\\,\\!=\\,\\sqrt{-1} \\end{equation} \\begin{equation}e^{\\pi i}+1=0\\end{equation} \\begin{equation}i^{(1)}=\\sqrt{-1} \\end{equation} \\begin{equation}e^{i\\pi}=-1\\end{equation} \\begin{equation}i^{(2)}=-1\\end{equation} \\begin{equation}e^{-i\\pi}=-1\\end{equation} \\begin{equation}i^{(3)}=-1\\end{equation} \\end{frame} \\end{document}\\n\\nUser: Create a presentation with the title Machine Learning & AI. First page should be title page. Second page should be an overview slide, third page should have examples of application of modern Machine Learning. 
\\nCode: \\documentclass{beamer}\\mode<presentation> {\\usetheme{Madrid}\\usecolortheme{wolverine}}\\title[Short title]{Machine Learning & AI}\\begin{document}\\begin{frame}\\titlepage \\end{frame}\\begin{frame}\\frametitle{Overview}\\tableofcontents\\end{frame}\\section{Sub disciplines within Machine Learning} \\subsection{Computer Vision, Natural Language Processing, Reinforcement Learning}\\begin{frame}\\frametitle{Modern Machine Learning Applications}\\begin{block}{Reinforcement Learning}{Autonomous vehicles and self-driving cars}\\end{block}\\begin{block}{Computer Vision}{Automatic face recognition}\\end{block}\\begin{block}{Natural Language Processing}Using language models to auto generate programming code from natural instructions\\end{block}\\end{frame}\\end{document}\\n\\n:",
"\\nUser: Make a detailed and informative presentation on Glioblastoma. First page should be title page, with title Malignant Brain Tumor.\\nCode: \\documentclass{beamer}\\mode<presentation> {\\usetheme{Madrid}\\usecolortheme{wolverine}}\\title[Short title]{Malignant Brain Tumor}\\begin{document}\\begin{frame}\\titlepage\\end{frame}\\begin{frame}{Glioblastoma} \\begin{block}{Introduction} \\begin{itemize} \\item Glioblastoma (GBM) is a type of cancerous brain tumor that originates from the glial cells, which are the cells that surround and support neurons. \\item The prognosis for GBM is poor with a mean survival time of 12–15 months from diagnosis. \\end{itemize} \\end{block} \\begin{block}{Causes} \\begin{itemize} \\item The exact cause of GBM is unknown, but it is thought to be caused by a combination of genetic and environmental factors.\\item Glioblastoma is known to occur with exposure to ionizing radiation, benzene, aniline, certain chemicals, and asbestos.\\end{itemize} \\end{block} \\begin{block}{Symptoms} \\begin{itemize} \\item The symptoms of GBM are related to both the location and the size of the tumor. \\item The symptoms are described as a spectrum ranging from completely asymptomatic to very serious.\\end{itemize} \\end{block} \\end{frame}\\end{document}\\n\\nUser: I need a slide consisting of table on Epoch and Accuracy\\nCode:\\documentclass{beamer}\\mode<presentation> { \\usetheme{Madrid} }\\begin{document} \\begin{frame} \\frametitle{Epochs} \\begin{table} \\begin{tabular}{l l l} \\toprule \\textbf{Epoch} & \\textbf{Accuracy} \\\\ \\midrule Epoch 10 & 53.2\\% \\\\ Epoch 20 & 60\\% \\\\ Epoch 30 & 68.8\\% \\\\ \\bottomrule \\end{tabular} \\caption{Epochs and Accuracy} \\end{table} \\end{frame} \\end{document}\\n\\nUser: Make a presentation with one slide that shows Einstein's equation relating mass to energy.\\nCode: \\documentclass{beamer}\\mode<presentation> {\\usetheme{Madrid}\\usecolortheme{wolverine}}}\\begin{document}\\begin{frame}\\frametitle{Einstein's equation for Special Relativity}\\begin{theorem}[Mass--energy equivalence]$E = mc^2$\\end{theorem}\\end{frame}\\end{document}\\n\\nUser: Give me a slide on Euler Equations\\nCode: \\documentclass{beamer} \\mode<presentation> { \\usetheme{Madrid}\\n} \\begin{document} \\begin{frame} \\frametitle{Euler Equations} {Euler Equations} \\begin{equation}e^{i\\pi}+1=0\\end{equation} \\begin{equation}i\\,\\!=\\,\\sqrt{-1} \\end{equation} \\begin{equation}e^{\\pi i}+1=0\\end{equation} \\begin{equation}i^{(1)}=\\sqrt{-1} \\end{equation} \\begin{equation}e^{i\\pi}=-1\\end{equation} \\begin{equation}i^{(2)}=-1\\end{equation} \\begin{equation}e^{-i\\pi}=-1\\end{equation} \\begin{equation}i^{(3)}=-1\\end{equation} \\end{frame} \\end{document}\\n\\nUser: Create a presentation with the title Machine Learning & AI. First page should be title page. Second page should be an overview slide, third page should have examples of application of modern Machine Learning. 
\\nCode: \\documentclass{beamer}\\mode<presentation> {\\usetheme{Madrid}\\usecolortheme{wolverine}}\\title[Short title]{Machine Learning & AI}\\begin{document}\\begin{frame}\\titlepage \\end{frame}\\begin{frame}\\frametitle{Overview}\\tableofcontents\\end{frame}\\section{Sub disciplines within Machine Learning} \\subsection{Computer Vision, Natural Language Processing, Reinforcement Learning}\\begin{frame}\\frametitle{Modern Machine Learning Applications}\\begin{block}{Reinforcement Learning}{Autonomous vehicles and self-driving cars}\\end{block}\\begin{block}{Computer Vision}{Automatic face recognition}\\end{block}\\begin{block}{Natural Language Processing}Using language models to auto generate programming code from natural instructions\\end{block}\\end{frame}\\end{document}\\n\\n:PLACEHOLDER"
] |
2024-01-10 | CCityCapital/SearchEngine | corpus_query~services~ingestion~chunk.py | import argparse
from enum import Enum
import logging
from typing import Generator, Optional
from langchain.text_splitter import (
Language,
MarkdownHeaderTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain.docstore.document import Document
from pydantic import BaseModel
def chunk_file_by_line(corpus_string: str) -> Generator[str, None, None]:
"""
    Chunk a corpus string into overlapping two-line chunks: each yielded chunk joins two consecutive non-empty lines.
"""
prev_line = None
for line in corpus_string.split("\n"):
stripped_line = line.strip()
if len(stripped_line) == 0:
continue
if prev_line is not None:
yield " ".join([prev_line, stripped_line])
prev_line = stripped_line
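# Illustrative example (added for clarity; not in the original file):
#   list(chunk_file_by_line("a\nb\n\nc\n")) -> ["a b", "b c"]
# i.e. every pair of consecutive non-empty lines is joined into a single chunk.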
class FileTypes(Enum):
MARKDOWN = "md"
TEXT = "txt"
HTML = "html"
PDF = "pdf"
class ChunkingOptions(BaseModel):
chunk_size: int
chunk_overlap: int
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
("###", "Header 3"),
]
def chunk_by_file_type(
corpus_text: str, file_type: FileTypes, options: Optional[ChunkingOptions] = None
) -> list[Document]:
logging.info("chunking value: '%s'", corpus_text)
if options is None:
options = ChunkingOptions(chunk_size=100, chunk_overlap=20)
if file_type == FileTypes.MARKDOWN:
txt_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on
)
markdown_docs = txt_splitter.split_text(corpus_text)
txt_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.MARKDOWN,
chunk_size=options.chunk_size,
chunk_overlap=options.chunk_overlap,
)
return txt_splitter.split_documents(markdown_docs)
elif file_type == FileTypes.HTML:
html_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.HTML,
chunk_size=options.chunk_size,
chunk_overlap=options.chunk_overlap,
)
        return [Document(page_content=s) for s in html_splitter.split_text(corpus_text)]
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=options.chunk_size,
chunk_overlap=options.chunk_overlap,
)
return [Document(page_content=s) for s in text_splitter.split_text(corpus_text)]
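# Illustrative usage (added for clarity; not in the original file). Markdown goes through a two-stage
# split (header-aware, then language-aware recursive), while other types use the plain splitter:
#   docs = chunk_by_file_type(markdown_text, FileTypes.MARKDOWN, ChunkingOptions(chunk_size=200, chunk_overlap=40))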
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("corpus_file", type=str, help="Path to corpus file.")
args = parser.parse_args()
with open(args.corpus_file, "r", encoding="utf-8") as f:
for d in chunk_by_file_type(f.read(), FileTypes.MARKDOWN):
print(d)
| [] |
2024-01-10 | morpheuslord/GPT_Vuln-analyzer | components~models.py | import json
import re
from typing import Any
from typing import Optional
import openai
import requests
model_engine = "gpt-3.5-turbo-0613"
class DNS_AI_MODEL():
@staticmethod
def BardAI(key: str, data: Any) -> str:
prompt = f"""
Perform a thorough DNS analysis based on the provided DNS scan information. The resulting output must conform to a JSON format designed for integration into a penetration testing (pentest) report.
The objective is to provide accurate and essential information from the perspective of a pentester, ensuring the final output is minimal and concise.
DNS Data to be Analyzed
Analysis Guidelines:
Pentester's Viewpoint: Approach the DNS analysis from a pentester's perspective, focusing on security implications and potential vulnerabilities.
Output Format Compliance: Ensure that the final output strictly adheres to the specified JSON format. Each section (DNS Records, Reverse DNS, Zone Transfer Scan) must be appropriately populated.
Conciseness: Keep the final output minimal. Include only crucial information relevant to a pentest report.
Insights to Include:
DNS Records: Provide information on A, AAAA, NS, MX, PTR, SOA, and TXT records.
Reverse DNS: Include details about the reverse DNS lookup, showcasing the relationship between IP addresses and corresponding domains.
Zone Transfer Scan: Indicate whether zone transfers are allowed and, if so, list the name servers associated.
The output format:
{{
"DNS_Records": {{
"A": [""],
"AAAA": [""],
"NS": [""],
"MX": [""],
"PTR": [""],
"SOA": [""],
"TXT": [""]
}},
"Reverse_DNS": {{
"IP_Address": "",
"Domain": ""
}},
"Zone_Transfer_Scan": {{
"Allowed": false,
"Name_Servers": [""]
}}
}}
DNS Data to be analyzed: {data}
"""
url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key
headers = {
"Content-Type": "application/json"
}
data = {
"prompt": {
"text": prompt
}
}
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
generated_text = response.json()
data = dns_ai_data_regex(str(generated_text))
print(data)
            return data  # already parsed (and printed) above; no need to parse twice
else:
print("Error: Unable to generate text. Status Code:", response.status_code)
return "None"
@staticmethod
    def llama_AI(data: str, mode: str, lkey, lendpoint):
api_url = 'http://localhost:5000/api/chatbot'
user_instruction = """
Do a DNS scan analysis on the provided DNS scan information. The DNS output must return in a asked format accorging to the provided output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The DNS scans must be done from a pentester point of view
2) The final output must be minimal according to the format given
3) The final output must be kept to a minimal
4) So the analysis and provide your view according to the given format
5) Remember to provide views as a security engineer or an security analyst.
The output format:
"A":
- List the A records and security views on them
"AAA":
- List the AAA records and security views on them
"NS":
- List the NS records and security views on them
"MX":
- List the MX records and security views on them
"PTR":
- List the PTR records and security views on them
"SOA":
- List the SOA records and security views on them
"TXT":
- List the TXT records and security views on them
"""
user_message = f"""
DNS Data to be analyzed: {data}
"""
model_name = "TheBloke/Llama-2-7B-Chat-GGML"
file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin"
        if mode == "local":
            bot_response = chat_with_api(api_url, user_message, user_instruction, model_name, file_name)
        elif mode == "runpod":
            prompt = f"[INST] <<SYS>> {user_instruction}<</SYS>> DNS Data to be analyzed: {user_message} [/INST]"
            bot_response = llama_runpod_api(prompt, lkey, lendpoint)
        if bot_response:
            return bot_response
@staticmethod
def gpt_ai(analyze: str, key: Optional[str]) -> str:
openai.api_key = key
prompt = f"""
Perform a thorough DNS analysis based on the provided DNS scan information. The resulting output must conform to a JSON format designed for integration into a penetration testing (pentest) report.
The objective is to provide accurate and essential information from the perspective of a pentester, ensuring the final output is minimal and concise.
DNS Data to be Analyzed:
Analysis Guidelines:
Pentester's Viewpoint: Approach the DNS analysis from a pentester's perspective, focusing on security implications and potential vulnerabilities.
Output Format Compliance: Ensure that the final output strictly adheres to the specified JSON format. Each section (DNS Records, Reverse DNS, Zone Transfer Scan) must be appropriately populated.
Conciseness: Keep the final output minimal. Include only crucial information relevant to a pentest report.
Insights to Include:
DNS Records: Provide information on A, AAAA, NS, MX, PTR, SOA, and TXT records.
Reverse DNS: Include details about the reverse DNS lookup, showcasing the relationship between IP addresses and corresponding domains.
Zone Transfer Scan: Indicate whether zone transfers are allowed and, if so, list the name servers associated.
The output format:
{{
"DNS_Records": {{
"A": [""],
"AAAA": [""],
"NS": [""],
"MX": [""],
"PTR": [""],
"SOA": [""],
"TXT": [""]
}},
"Reverse_DNS": {{
"IP_Address": "",
"Domain": ""
}},
"Zone_Transfer_Scan": {{
"Allowed": false,
"Name_Servers": [""]
}}
}}
DNS Data to be analyzed: {analyze}
"""
try:
            # A structure for the request
            messages = [{"content": prompt, "role": "user"}]
response = openai.ChatCompletion.create(
model=model_engine,
messages=messages,
max_tokens=1024,
n=1,
stop=None,
)
response = response['choices'][0]['message']['content']
rsp = str(response)
return rsp
except KeyboardInterrupt:
print("Bye")
quit()
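# Illustrative usage (added for clarity; the key variables are placeholders, not part of this module):
#   dns_report = DNS_AI_MODEL.gpt_ai(analyze=raw_dns_output, key=OPENAI_API_KEY)
#   dns_report = DNS_AI_MODEL.BardAI(key=PALM_API_KEY, data=raw_dns_output)
# Each call returns a string of JSON report data shaped like the "output format" in the prompts above.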
class NMAP_AI_MODEL():
@staticmethod
def BardAI(key: str, data: Any) -> str:
prompt = f"""
Do a NMAP scan analysis on the provided NMAP scan information
The NMAP output must return in a JSON format accorging to the provided
output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The NMAP scans must be done from a pentester point of view
2) The final output must be minimal according to the format given.
3) The final output must be kept to a minimal.
4) If a value not found in the scan just mention an empty string.
5) Analyze everything even the smallest of data.
6) Completely analyze the data provided and give a confirm answer using the output format.
The output format:
{{
"critical score": [""],
"os information": [""],
"open ports": [""],
"open services": [""],
"vulnerable service": [""],
"found cve": [""]
}}
NMAP Data to be analyzed: {data}
"""
url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key
headers = {
"Content-Type": "application/json"
}
data = {
"prompt": {
"text": prompt
}
}
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
generated_text = response.json()
return nmap_ai_data_regex(str(generated_text))
else:
print("Error: Unable to generate text. Status Code:", response.status_code)
return "None"
@staticmethod
def Llama_AI(data: str, mode: str, lkey: str, lendpoint: str) -> Any:
api_url = 'http://localhost:5000/api/chatbot'
user_instruction = """
Do a NMAP scan analysis on the provided NMAP scan information. The NMAP output must return in a asked format accorging to the provided output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The NMAP scans must be done from a pentester point of view
2) The final output must be minimal according to the format given.
3) The final output must be kept to a minimal.
4) If a value not found in the scan just mention an empty string.
5) Analyze everything even the smallest of data.
6) Completely analyze the data provided and give a confirm answer using the output format.
7) mention all the data you found in the output format provided so that regex can be used on it.
8) avoid unnecessary explaination.
9) the critical score must be calculated based on the CVE if present or by the nature of the services open
10) the os information must contain the OS used my the target.
11) the open ports must include all the open ports listed in the data[tcp] and varifying if it by checking its states value. you should not negect even one open port.
12) the vulnerable services can be determined via speculation of the service nature or by analyzing the CVE's found.
The output format:
critical score:
- Give info on the criticality
"os information":
- List out the OS information
"open ports and services":
- List open ports
- List open ports services
"vulnerable service":
- Based on CVEs or nature of the ports opened list the vulnerable services
"found cve":
- List the CVE's found and list the main issues.
"""
user_message = f"""
NMAP Data to be analyzed: {data}
"""
model_name = "TheBloke/Llama-2-7B-Chat-GGML"
file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin"
if mode == "local":
bot_response = chat_with_api(api_url, user_message, user_instruction, model_name, file_name)
elif mode == "runpod":
prompt = f"[INST] <<SYS>> {user_instruction}<</SYS>> NMAP Data to be analyzed: {user_message} [/INST]"
bot_response = llama_runpod_api(prompt, lkey, lendpoint)
if bot_response:
return bot_response
@staticmethod
def GPT_AI(key: str, data: Any) -> str:
openai.api_key = key
try:
prompt = f"""
Do a NMAP scan analysis on the provided NMAP scan information
The NMAP output must return in a JSON format accorging to the provided
output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The NMAP scans must be done from a pentester point of view
2) The final output must be minimal according to the format given.
3) The final output must be kept to a minimal.
4) If a value not found in the scan just mention an empty string.
5) Analyze everything even the smallest of data.
6) Completely analyze the data provided and give a confirm answer using the output format.
The output format:
{{
"critical score": [""],
"os information": [""],
"open ports": [""],
"open services": [""],
"vulnerable service": [""],
"found cve": [""]
}}
NMAP Data to be analyzed: {data}
"""
            # A structure for the request
            messages = [{"content": prompt, "role": "assistant"}]
response = openai.ChatCompletion.create(
model=model_engine,
messages=messages,
max_tokens=2500,
n=1,
stop=None,
)
response = response['choices'][0]['message']['content']
rsp = str(response)
return str(nmap_ai_data_regex(rsp))
except KeyboardInterrupt:
print("Bye")
quit()
class JWT_AI_MODEL():
@staticmethod
def BardAI(key: str, jwt_data: Any) -> str:
prompt = f"""
Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report.
Follow these guidelines:
1) Analyze the JWT token from a pentester's perspective
2) Keep the final output minimal while adhering to the given format
3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities
5) For the output "Algorithm Used" value use the Algorithm value from the JWT data.
6) For the output "Header" value use the Header value from the JWT data.
7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints.
8) For "Signature" mention the signatures discovered.
9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints"
The output format:
{{
"Algorithm Used": "",
"Header": "",
"Payload": "",
"Signature": "",
"PossibleAttacks": "",
"VulnerableEndpoints": ""
}}
JWT Token Data to be analyzed: {jwt_data}
"""
url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key
headers = {
"Content-Type": "application/json"
}
data = {
"prompt": {
"text": prompt
}
}
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
generated_text = response.json()
jwt_analysis_data = jwt_ai_data_regex(str(generated_text))
print(jwt_analysis_data)
return jwt_analysis_data
else:
print("Error: Unable to generate text. Status Code:", response.status_code)
return "None"
@staticmethod
    def llama_AI(jwt_data: str, mode: str, lkey, lendpoint):
api_url = 'http://localhost:5000/api/chatbot'
user_instruction = """
Perform a comprehensive analysis on the provided JWT token. The JWT analysis output must be in a asked format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report.
Follow these guidelines:
1) Analyze the JWT token from a pentester's perspective
2) Keep the final output minimal while adhering to the given format
3) Highlight JWT-specific details and enumerate possible attacks
The output format:
"Header":
- List the JWT header details and security views on them
"Payload":
- List the JWT payload details and security views on them
"Signature":
- Provide insights on the JWT signature
"PossibleAttacks":
- List possible JWT exploits and attacks
"""
user_message = f"""
JWT Token Data to be analyzed: {jwt_data}
"""
model_name = "TheBloke/Llama-2-7B-Chat-GGML"
file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin"
        if mode == "local":
            bot_response = chat_with_api(api_url, user_message, user_instruction, model_name, file_name)
        elif mode == "runpod":
            prompt = f"[INST] <<SYS>> {user_instruction}<</SYS>> JWT Token Data to be analyzed: {user_message} [/INST]"
            bot_response = llama_runpod_api(prompt, lkey, lendpoint)
        if bot_response:
            return bot_response
@staticmethod
def gpt_ai(analyze: str, api_key: Optional[str]) -> str:
openai.api_key = api_key
prompt = f"""
Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report.
Follow these guidelines:
1) Analyze the JWT token from a pentester's perspective
2) Keep the final output minimal while adhering to the given format
3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities
5) For the output "Algorithm Used" value use the Algorithm value from the JWT data.
6) For the output "Header" value use the Header value from the JWT data.
7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints.
8) For "Signature" mention the signatures discovered.
9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints"
The output format:
{{
"Algorithm Used": "",
"Header": "",
"Payload": "",
"Signature": "",
"PossibleAttacks": "",
"VulnerableEndpoints": ""
}}
JWT Token Data to be analyzed: {analyze}
"""
try:
messages = [{"content": prompt, "role": "user"}]
response = openai.ChatCompletion.create(
model=model_engine,
messages=messages,
max_tokens=1024,
n=1,
stop=None,
)
response = response['choices'][0]['message']['content']
rsp = str(response)
return rsp
except KeyboardInterrupt:
print("Bye")
quit()
def chat_with_api(api_url: str, user_message: str, user_instruction: str, model_name: str, file_name: str = None) -> Any:
# Prepare the request data in JSON format
data = {
'user_message': user_message,
'model_name': model_name,
'file_name': file_name,
'user_instruction': user_instruction
}
# Send the POST request to the API
response = requests.post(api_url, json=data)
# Check if the request was successful (status code 200)
if response.status_code == 200:
return response.json()['bot_response']
else:
# If there was an error, print the error message
print(f"Error: {response.status_code} - {response.text}")
return None
def llama_runpod_api(prompt: str, lkey: str, lendpoint: str) -> Any:
url = f"https://api.runpod.ai/v2/{lendpoint}/runsync"
payload = json.dumps({
"input": {
"prompt": prompt,
"max_new_tokens": 4500,
"temperature": 0.9,
"top_k": 50,
"top_p": 0.7,
"repetition_penalty": 1.2,
"batch_size": 8,
"stop": [
"</s>"
]
}
})
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {lkey}',
}
response = requests.request("POST", url, headers=headers, data=payload)
response_t = json.loads(response.text)
return response_t["output"]
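# Note (added for clarity): llama_runpod_api() posts to RunPod's synchronous "runsync" endpoint and
# returns the "output" field of the JSON response; lkey is the RunPod API key and lendpoint is the
# serverless endpoint id.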
def dns_ai_data_regex(json_string: str) -> Any:
# Define the regular expression patterns for individual values
A_pattern = r'"A": \["(.*?)"\]'
AAA_pattern = r'"AAAA": \["(.*?)"\]'
NS_pattern = r'"NS": \["(.*?)"\]'
MX_pattern = r'"MX": \["(.*?)"\]'
PTR_pattern = r'"PTR": \["(.*?)"\]'
SOA_pattern = r'"SOA": \["(.*?)"\]'
TXT_pattern = r'"TXT": \["(.*?)"\]'
Reverse_DNS_pattern = r'"Reverse_DNS": \{ "IP_Address": "(.*?)", "Domain": "(.*?)" \}'
Zone_Transfer_Scan_pattern = r'"Zone_Transfer_Scan": \{ "Allowed": (.*?), "Name_Servers": \["(.*?)"\] \}'
# Initialize variables for extracted data
A = None
AAA = None
NS = None
MX = None
PTR = None
SOA = None
TXT = None
Reverse_DNS_IP = None
Reverse_DNS_Domain = None
Zone_Transfer_Allowed = None
Zone_Transfer_Name_Servers = None
# Extract individual values using patterns
match = re.search(A_pattern, json_string)
if match:
A = match.group(1)
match = re.search(AAA_pattern, json_string)
if match:
AAA = match.group(1)
match = re.search(NS_pattern, json_string)
if match:
NS = match.group(1)
match = re.search(MX_pattern, json_string)
if match:
MX = match.group(1)
match = re.search(PTR_pattern, json_string)
if match:
PTR = match.group(1)
match = re.search(SOA_pattern, json_string)
if match:
SOA = match.group(1)
match = re.search(TXT_pattern, json_string)
if match:
TXT = match.group(1)
match = re.search(Reverse_DNS_pattern, json_string)
if match:
Reverse_DNS_IP = match.group(1)
Reverse_DNS_Domain = match.group(2)
match = re.search(Zone_Transfer_Scan_pattern, json_string)
if match:
Zone_Transfer_Allowed = bool(match.group(1))
Zone_Transfer_Name_Servers = match.group(2)
# Create a dictionary to store the extracted data
data = {
"DNS_Records": {
"A": A,
"AAAA": AAA,
"NS": NS,
"MX": MX,
"PTR": PTR,
"SOA": SOA,
"TXT": TXT
},
"Reverse_DNS": {
"IP_Address": Reverse_DNS_IP,
"Domain": Reverse_DNS_Domain
},
"Zone_Transfer_Scan": {
"Allowed": Zone_Transfer_Allowed,
"Name_Servers": [Zone_Transfer_Name_Servers] if Zone_Transfer_Name_Servers else []
}
}
# Convert the dictionary to JSON format
json_output = json.dumps(data)
return json_output
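# Note (added for clarity): dns_ai_data_regex() only recognizes output that embeds key/value pairs
# such as '"A": ["1.2.3.4"]'; any field that does not match the patterns above is returned as null
# in the resulting JSON string.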
def nmap_ai_data_regex(json_string: str) -> Any:
# Define the regular expression patterns for individual values
critical_score_pattern = r'"critical score": \["(.*?)"\]'
os_information_pattern = r'"os information": \["(.*?)"\]'
open_ports_pattern = r'"open ports": \["(.*?)"\]'
open_services_pattern = r'"open services": \["(.*?)"\]'
vulnerable_service_pattern = r'"vulnerable service": \["(.*?)"\]'
found_cve_pattern = r'"found cve": \["(.*?)"\]'
# Initialize variables for extracted data
critical_score = None
os_information = None
open_ports = None
open_services = None
vulnerable_service = None
found_cve = None
# Extract individual values using patterns
match = re.search(critical_score_pattern, json_string)
if match:
critical_score = match.group(1)
match = re.search(os_information_pattern, json_string)
if match:
os_information = match.group(1)
match = re.search(open_ports_pattern, json_string)
if match:
open_ports = match.group(1)
match = re.search(open_services_pattern, json_string)
if match:
open_services = match.group(1)
match = re.search(vulnerable_service_pattern, json_string)
if match:
vulnerable_service = match.group(1)
match = re.search(found_cve_pattern, json_string)
if match:
found_cve = match.group(1)
# Create a dictionary to store the extracted data
data = {
"critical score": critical_score,
"os information": os_information,
"open ports": open_ports,
"open services": open_services,
"vulnerable service": vulnerable_service,
"found cve": found_cve
}
# Convert the dictionary to JSON format
json_output = json.dumps(data)
return json_output
def jwt_ai_data_regex(json_string: str) -> Any:
# Define the regular expression patterns for individual values
header_pattern = r'"Header": \{\s*"alg": "(.*?)",\s*"typ": "(.*?)"\s*\}'
payload_pattern = r'"Payload": \{\s*"iss": "(.*?)",\s*"sub": "(.*?)",\s*"aud": "(.*?)",\s*"exp": "(.*?)",\s*"nbf": "(.*?)",\s*"iat": "(.*?)"\s*\}'
signature_pattern = r'"Signature": "(.*?)"'
possible_attacks_pattern = r'"PossibleAttacks": "(.*?)"'
vulnerable_endpoints_pattern = r'"VulnerableEndpoints": "(.*?)"'
# Initialize variables for extracted data
header = {}
payload = {}
signature = ""
possible_attacks = ""
vulnerable_endpoints = ""
# Extract individual values using patterns
match_header = re.search(header_pattern, json_string)
if match_header:
header = {"alg": match_header.group(1), "typ": match_header.group(2)}
match_payload = re.search(payload_pattern, json_string)
if match_payload:
payload = {
"iss": match_payload.group(1),
"sub": match_payload.group(2),
"aud": match_payload.group(3),
"exp": match_payload.group(4),
"nbf": match_payload.group(5),
"iat": match_payload.group(6)
}
match_signature = re.search(signature_pattern, json_string)
if match_signature:
signature = match_signature.group(1)
match_attacks = re.search(possible_attacks_pattern, json_string)
if match_attacks:
possible_attacks = match_attacks.group(1)
match_endpoints = re.search(vulnerable_endpoints_pattern, json_string)
if match_endpoints:
vulnerable_endpoints = match_endpoints.group(1)
# Create a dictionary to store the extracted data
data = {
"Header": header,
"Payload": payload,
"Signature": signature,
"PossibleAttacks": possible_attacks,
"VulnerableEndpoints": vulnerable_endpoints
}
# Convert the dictionary to JSON format
json_output = json.dumps(data)
return json_output
| [
"\n Perform a thorough DNS analysis based on the provided DNS scan information. The resulting output must conform to a JSON format designed for integration into a penetration testing (pentest) report.\n The objective is to provide accurate and essential information from the perspective of a pentester, ensuring the final output is minimal and concise.\n DNS Data to be Analyzed:\n Analysis Guidelines:\n Pentester's Viewpoint: Approach the DNS analysis from a pentester's perspective, focusing on security implications and potential vulnerabilities.\n Output Format Compliance: Ensure that the final output strictly adheres to the specified JSON format. Each section (DNS Records, Reverse DNS, Zone Transfer Scan) must be appropriately populated.\n Conciseness: Keep the final output minimal. Include only crucial information relevant to a pentest report.\n Insights to Include:\n DNS Records: Provide information on A, AAAA, NS, MX, PTR, SOA, and TXT records.\n Reverse DNS: Include details about the reverse DNS lookup, showcasing the relationship between IP addresses and corresponding domains.\n Zone Transfer Scan: Indicate whether zone transfers are allowed and, if so, list the name servers associated.\n\n The output format:\n {\n \"DNS_Records\": {\n \"A\": [\"\"],\n \"AAAA\": [\"\"],\n \"NS\": [\"\"],\n \"MX\": [\"\"],\n \"PTR\": [\"\"],\n \"SOA\": [\"\"],\n \"TXT\": [\"\"]\n },\n \"Reverse_DNS\": {\n \"IP_Address\": \"\",\n \"Domain\": \"\"\n },\n \"Zone_Transfer_Scan\": {\n \"Allowed\": false,\n \"Name_Servers\": [\"\"]\n }\n }\n\n DNS Data to be analyzed: PLACEHOLDER\n ",
"\n Do a NMAP scan analysis on the provided NMAP scan information\n The NMAP output must return in a JSON format accorging to the provided\n output format. The data must be accurate in regards towards a pentest report.\n The data must follow the following rules:\n 1) The NMAP scans must be done from a pentester point of view\n 2) The final output must be minimal according to the format given.\n 3) The final output must be kept to a minimal.\n 4) If a value not found in the scan just mention an empty string.\n 5) Analyze everything even the smallest of data.\n 6) Completely analyze the data provided and give a confirm answer using the output format.\n\n The output format:\n {\n \"critical score\": [\"\"],\n \"os information\": [\"\"],\n \"open ports\": [\"\"],\n \"open services\": [\"\"],\n \"vulnerable service\": [\"\"],\n \"found cve\": [\"\"]\n }\n\n NMAP Data to be analyzed: PLACEHOLDER\n ",
"\n Perform a thorough DNS analysis based on the provided DNS scan information. The resulting output must conform to a JSON format designed for integration into a penetration testing (pentest) report.\n The objective is to provide accurate and essential information from the perspective of a pentester, ensuring the final output is minimal and concise.\n DNS Data to be Analyzed\n Analysis Guidelines:\n Pentester's Viewpoint: Approach the DNS analysis from a pentester's perspective, focusing on security implications and potential vulnerabilities.\n Output Format Compliance: Ensure that the final output strictly adheres to the specified JSON format. Each section (DNS Records, Reverse DNS, Zone Transfer Scan) must be appropriately populated.\n Conciseness: Keep the final output minimal. Include only crucial information relevant to a pentest report.\n Insights to Include:\n DNS Records: Provide information on A, AAAA, NS, MX, PTR, SOA, and TXT records.\n Reverse DNS: Include details about the reverse DNS lookup, showcasing the relationship between IP addresses and corresponding domains.\n Zone Transfer Scan: Indicate whether zone transfers are allowed and, if so, list the name servers associated.\n\n The output format:\n {\n \"DNS_Records\": {\n \"A\": [\"\"],\n \"AAAA\": [\"\"],\n \"NS\": [\"\"],\n \"MX\": [\"\"],\n \"PTR\": [\"\"],\n \"SOA\": [\"\"],\n \"TXT\": [\"\"]\n },\n \"Reverse_DNS\": {\n \"IP_Address\": \"\",\n \"Domain\": \"\"\n },\n \"Zone_Transfer_Scan\": {\n \"Allowed\": false,\n \"Name_Servers\": [\"\"]\n }\n }\n\n DNS Data to be analyzed: PLACEHOLDER\n ",
"\n Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report.\n Follow these guidelines:\n 1) Analyze the JWT token from a pentester's perspective\n 2) Keep the final output minimal while adhering to the given format\n 3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities\n 5) For the output \"Algorithm Used\" value use the Algorithm value from the JWT data.\n 6) For the output \"Header\" value use the Header value from the JWT data.\n 7) For the \"Payload\" Use the decoded payloads as a reference and then analyze any attack endpoints.\n 8) For \"Signature\" mention the signatures discovered.\n 9) List a few endpoints you feel are vulnerable for \"VulnerableEndpoints\"\n\n The output format:\n {\n \"Algorithm Used\": \"\",\n \"Header\": \"\",\n \"Payload\": \"\",\n \"Signature\": \"\",\n \"PossibleAttacks\": \"\",\n \"VulnerableEndpoints\": \"\"\n }\n\n JWT Token Data to be analyzed: PLACEHOLDER\n ",
"[INST] <<SYS>> PLACEHOLDER<</SYS>> JWT Token Data to be analyzed: PLACEHOLDER [/INST]",
"[INST] <<SYS>> PLACEHOLDER<</SYS>> NMAP Data to be analyzed: PLACEHOLDER [/INST]"
] |
2024-01-10 | morpheuslord/GPT_Vuln-analyzer | package~GVA~ai_models.py | import json
import re
from typing import Any
from typing import Optional
import openai
import requests
model_engine = "gpt-3.5-turbo-0613"
class DNS_AI_MODEL():
@staticmethod
def BardAI(key: str, data: Any) -> str:
prompt = f"""
Do a DNS analysis on the provided DNS scan information
The DNS output must return in a JSON format accorging to the provided
output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The DNS scans must be done from a pentester point of view
2) The final output must be minimal according to the format given
3) The final output must be kept to a minimal
The output format:
{{
"A": [""],
"AAA": [""],
"NS": [""],
"MX": [""],
"PTR": [""],
"SOA": [""],
"TXT": [""]
}}
DNS Data to be analyzed: {data}
"""
url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key
headers = {
"Content-Type": "application/json"
}
data = {
"prompt": {
"text": prompt
}
}
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
generated_text = response.json()
data = dns_ai_data_regex(str(generated_text))
print(data)
            return data  # already parsed (and printed) above; no need to parse twice
else:
print("Error: Unable to generate text. Status Code:", response.status_code)
return "None"
@staticmethod
    def llama_AI(data: str, mode: str, lkey, lendpoint):
api_url = 'http://localhost:5000/api/chatbot'
user_instruction = """
Do a DNS scan analysis on the provided DNS scan information. The DNS output must return in a asked format accorging to the provided output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The DNS scans must be done from a pentester point of view
2) The final output must be minimal according to the format given
3) The final output must be kept to a minimal
4) So the analysis and provide your view according to the given format
5) Remember to provide views as a security engineer or an security analyst.
The output format:
"A":
- List the A records and security views on them
"AAA":
- List the AAA records and security views on them
"NS":
- List the NS records and security views on them
"MX":
- List the MX records and security views on them
"PTR":
- List the PTR records and security views on them
"SOA":
- List the SOA records and security views on them
"TXT":
- List the TXT records and security views on them
"""
user_message = f"""
DNS Data to be analyzed: {data}
"""
model_name = "TheBloke/Llama-2-7B-Chat-GGML"
file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin"
if mode == "local":
bot_response = self.chat_with_api(api_url, user_message, user_instruction, model_name, file_name)
elif mode == "runpod":
prompt = f"[INST] <<SYS>> {user_instruction}<</SYS>> NMAP Data to be analyzed: {user_message} [/INST]"
bot_response = self.llama_runpod_api(prompt, lkey, lendpoint)
if bot_response:
return bot_response
@staticmethod
def gpt_ai(analyze: str, key: Optional[str]) -> str:
openai.api_key = key
prompt = f"""
Do a DNS analysis on the provided DNS scan information
The DNS output must return in a JSON format accorging to the provided
output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The DNS scans must be done from a pentester point of view
2) The final output must be minimal according to the format given
3) The final output must be kept to a minimal
The output format:
{{
"A": [""],
"AAA": [""],
"NS": [""],
"MX": [""],
"PTR": [""],
"SOA": [""],
"TXT": [""]
}}
DNS Data to be analyzed: {analyze}
"""
try:
# A structure for the request
messages = [{"content": prompt, "role": "user"}]
# A structure for the request
response = openai.ChatCompletion.create(
model=model_engine,
messages=messages,
max_tokens=1024,
n=1,
stop=None,
)
response = response['choices'][0]['message']['content']
return dns_ai_data_regex(str(response))
except KeyboardInterrupt:
print("Bye")
quit()
class NMAP_AI_MODEL():
@staticmethod
def BardAI(key: str, data: Any) -> str:
prompt = f"""
Do a NMAP scan analysis on the provided NMAP scan information
The NMAP output must return in a JSON format accorging to the provided
output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The NMAP scans must be done from a pentester point of view
2) The final output must be minimal according to the format given.
3) The final output must be kept to a minimal.
4) If a value not found in the scan just mention an empty string.
5) Analyze everything even the smallest of data.
6) Completely analyze the data provided and give a confirm answer using the output format.
The output format:
{{
"critical score": [""],
"os information": [""],
"open ports": [""],
"open services": [""],
"vulnerable service": [""],
"found cve": [""]
}}
NMAP Data to be analyzed: {data}
"""
url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key
headers = {
"Content-Type": "application/json"
}
data = {
"prompt": {
"text": prompt
}
}
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
generated_text = response.json()
return nmap_ai_data_regex(str(generated_text))
else:
print("Error: Unable to generate text. Status Code:", response.status_code)
return "None"
@staticmethod
def Llama_AI(data: str, mode: str, lkey: str, lendpoint: str) -> Any:
api_url = 'http://localhost:5000/api/chatbot'
user_instruction = """
Do a NMAP scan analysis on the provided NMAP scan information. The NMAP output must return in a asked format accorging to the provided output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The NMAP scans must be done from a pentester point of view
2) The final output must be minimal according to the format given.
3) The final output must be kept to a minimal.
4) If a value not found in the scan just mention an empty string.
5) Analyze everything even the smallest of data.
6) Completely analyze the data provided and give a confirm answer using the output format.
7) mention all the data you found in the output format provided so that regex can be used on it.
8) avoid unnecessary explaination.
9) the critical score must be calculated based on the CVE if present or by the nature of the services open
10) the os information must contain the OS used my the target.
11) the open ports must include all the open ports listed in the data[tcp] and varifying if it by checking its states value. you should not negect even one open port.
12) the vulnerable services can be determined via speculation of the service nature or by analyzing the CVE's found.
The output format:
critical score:
- Give info on the criticality
"os information":
- List out the OS information
"open ports and services":
- List open ports
- List open ports services
"vulnerable service":
- Based on CVEs or nature of the ports opened list the vulnerable services
"found cve":
- List the CVE's found and list the main issues.
"""
user_message = f"""
NMAP Data to be analyzed: {data}
"""
model_name = "TheBloke/Llama-2-7B-Chat-GGML"
file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin"
if mode == "local":
bot_response = chat_with_api(api_url, user_message, user_instruction, model_name, file_name)
elif mode == "runpod":
prompt = f"[INST] <<SYS>> {user_instruction}<</SYS>> NMAP Data to be analyzed: {user_message} [/INST]"
bot_response = llama_runpod_api(prompt, lkey, lendpoint)
if bot_response:
return bot_response
@staticmethod
def GPT_AI(key: str, data: Any) -> str:
openai.api_key = key
try:
prompt = f"""
Do a NMAP scan analysis on the provided NMAP scan information
The NMAP output must return in a JSON format accorging to the provided
output format. The data must be accurate in regards towards a pentest report.
The data must follow the following rules:
1) The NMAP scans must be done from a pentester point of view
2) The final output must be minimal according to the format given.
3) The final output must be kept to a minimal.
4) If a value not found in the scan just mention an empty string.
5) Analyze everything even the smallest of data.
6) Completely analyze the data provided and give a confirm answer using the output format.
The output format:
{{
"critical score": [""],
"os information": [""],
"open ports": [""],
"open services": [""],
"vulnerable service": [""],
"found cve": [""]
}}
NMAP Data to be analyzed: {data}
"""
# A structure for the request
messages = [{"content": prompt, "role": "assistant"}]
# A structure for the request
response = openai.ChatCompletion.create(
model=model_engine,
messages=messages,
max_tokens=2500,
n=1,
stop=None,
)
response = response['choices'][0]['message']['content']
rsp = str(response)
return str(nmap_ai_data_regex(rsp))
except KeyboardInterrupt:
print("Bye")
quit()
class JWT_AI_MODEL():
@staticmethod
def BardAI(key: str, jwt_data: Any) -> str:
prompt = f"""
Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report.
Follow these guidelines:
1) Analyze the JWT token from a pentester's perspective
2) Keep the final output minimal while adhering to the given format
3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities
5) For the output "Algorithm Used" value use the Algorithm value from the JWT data.
6) For the output "Header" value use the Header value from the JWT data.
7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints.
8) For "Signature" mention the signatures discovered.
9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints"
The output format:
{{
"Algorithm Used": "",
"Header": "",
"Payload": "",
"Signature": "",
"PossibleAttacks": "",
"VulnerableEndpoints": ""
}}
JWT Token Data to be analyzed: {jwt_data}
"""
url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key
headers = {
"Content-Type": "application/json"
}
data = {
"prompt": {
"text": prompt
}
}
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
generated_text = response.json()
jwt_analysis_data = jwt_ai_data_regex(str(generated_text))
print(jwt_analysis_data)
return jwt_analysis_data
else:
print("Error: Unable to generate text. Status Code:", response.status_code)
return "None"
@staticmethod
def llama_AI(self, jwt_data: str, mode: str, lkey, lendpoint):
api_url = 'http://localhost:5000/api/chatbot'
user_instruction = """
Perform a comprehensive analysis on the provided JWT token. The JWT analysis output must be in a asked format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report.
Follow these guidelines:
1) Analyze the JWT token from a pentester's perspective
2) Keep the final output minimal while adhering to the given format
3) Highlight JWT-specific details and enumerate possible attacks
The output format:
"Header":
- List the JWT header details and security views on them
"Payload":
- List the JWT payload details and security views on them
"Signature":
- Provide insights on the JWT signature
"PossibleAttacks":
- List possible JWT exploits and attacks
"""
user_message = f"""
JWT Token Data to be analyzed: {jwt_data}
"""
model_name = "TheBloke/Llama-2-7B-Chat-GGML"
file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin"
if mode == "local":
bot_response = self.chat_with_api(api_url, user_message, user_instruction, model_name, file_name)
elif mode == "runpod":
prompt = f"[INST] <<SYS>> {user_instruction}<</SYS>> JWT Token Data to be analyzed: {user_message} [/INST]"
bot_response = self.llama_runpod_api(prompt, lkey, lendpoint)
if bot_response:
return bot_response
@staticmethod
def gpt_ai(analyze: str, api_key: Optional[str]) -> str:
openai.api_key = api_key
prompt = f"""
Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report.
Follow these guidelines:
1) Analyze the JWT token from a pentester's perspective
2) Keep the final output minimal while adhering to the given format
3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities
5) For the output "Algorithm Used" value use the Algorithm value from the JWT data.
6) For the output "Header" value use the Header value from the JWT data.
7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints.
8) For "Signature" mention the signatures discovered.
9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints"
The output format:
{{
"Algorithm Used": "",
"Header": "",
"Payload": "",
"Signature": "",
"PossibleAttacks": "",
"VulnerableEndpoints": ""
}}
JWT Token Data to be analyzed: {analyze}
"""
try:
messages = [{"content": prompt, "role": "user"}]
response = openai.ChatCompletion.create(
model=model_engine,
messages=messages,
max_tokens=1024,
n=1,
stop=None,
)
response = response['choices'][0]['message']['content']
rsp = str(response)
return rsp
except KeyboardInterrupt:
print("Bye")
quit()
def chat_with_api(api_url: str, user_message: str, user_instruction: str, model_name: str, file_name: str = None) -> Any:
# Prepare the request data in JSON format
data = {
'user_message': user_message,
'model_name': model_name,
'file_name': file_name,
'user_instruction': user_instruction
}
# Send the POST request to the API
response = requests.post(api_url, json=data)
# Check if the request was successful (status code 200)
if response.status_code == 200:
return response.json()['bot_response']
else:
# If there was an error, print the error message
print(f"Error: {response.status_code} - {response.text}")
return None
def llama_runpod_api(prompt: str, lkey: str, lendpoint: str) -> Any:
url = f"https://api.runpod.ai/v2/{lendpoint}/runsync"
payload = json.dumps({
"input": {
"prompt": prompt,
"max_new_tokens": 4500,
"temperature": 0.9,
"top_k": 50,
"top_p": 0.7,
"repetition_penalty": 1.2,
"batch_size": 8,
"stop": [
"</s>"
]
}
})
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {lkey}',
}
response = requests.request("POST", url, headers=headers, data=payload)
response_t = json.loads(response.text)
return response_t["output"]
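# Illustrative usage sketch of the two transport helpers above; the endpoint id, API key
# and message text below are placeholders/assumptions, not real values from this project.
def _example_llama_transports():
    instruction = "Summarise the scan data for a pentest report."
    local_reply = chat_with_api(
        api_url="http://localhost:5000/api/chatbot",
        user_message="NMAP Data to be analyzed: <scan output here>",
        user_instruction=instruction,
        model_name="TheBloke/Llama-2-7B-Chat-GGML",
        file_name="llama-2-7b-chat.ggmlv3.q4_K_M.bin",
    )
    runpod_reply = llama_runpod_api(
        prompt=f"[INST] <<SYS>> {instruction}<</SYS>> NMAP Data to be analyzed: <scan output here> [/INST]",
        lkey="<runpod-api-key>",
        lendpoint="<runpod-endpoint-id>",
    )
    return local_reply, runpod_reply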
def dns_ai_data_regex(json_string: str) -> Any:
# Define the regular expression patterns for individual values
A_pattern = r'"A": \["(.*?)"\]'
    AAA_pattern = r'"AAA": \["(.*?)"\]'
NS_pattern = r'"NS": \["(.*?)"\]'
MX_pattern = r'"MX": \["(.*?)"\]'
PTR_pattern = r'"PTR": \["(.*?)"\]'
SOA_pattern = r'"SOA": \["(.*?)"\]'
TXT_pattern = r'"TXT": \["(.*?)"\]'
# Initialize variables for extracted data
A = None
AAA = None
NS = None
MX = None
PTR = None
SOA = None
TXT = None
# Extract individual values using patterns
match = re.search(A_pattern, json_string)
if match:
A = match.group(1)
match = re.search(AAA_pattern, json_string)
if match:
AAA = match.group(1)
match = re.search(NS_pattern, json_string)
if match:
NS = match.group(1)
match = re.search(MX_pattern, json_string)
if match:
MX = match.group(1)
match = re.search(PTR_pattern, json_string)
if match:
PTR = match.group(1)
match = re.search(SOA_pattern, json_string)
if match:
SOA = match.group(1)
match = re.search(TXT_pattern, json_string)
if match:
TXT = match.group(1)
# Create a dictionary to store the extracted data
data = {
"A": A,
"AAA": AAA,
"NS": NS,
"MX": MX,
"PTR": PTR,
"SOA": SOA,
"TXT": TXT
}
# Convert the dictionary to JSON format
json_output = json.dumps(data)
return json_output
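# Illustrative sketch of what dns_ai_data_regex extracts; the sample reply below is
# hand-written for demonstration, not real model output.
def _example_dns_regex():
    sample_reply = '{"A": ["93.184.216.34"], "NS": ["ns1.example.com"], "TXT": ["v=spf1 -all"]}'
    # Records missing from the reply (AAA, MX, PTR, SOA) come back as null in the JSON string.
    return dns_ai_data_regex(sample_reply)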
def nmap_ai_data_regex(json_string: str) -> Any:
# Define the regular expression patterns for individual values
critical_score_pattern = r'"critical score": \["(.*?)"\]'
os_information_pattern = r'"os information": \["(.*?)"\]'
open_ports_pattern = r'"open ports": \["(.*?)"\]'
open_services_pattern = r'"open services": \["(.*?)"\]'
vulnerable_service_pattern = r'"vulnerable service": \["(.*?)"\]'
found_cve_pattern = r'"found cve": \["(.*?)"\]'
# Initialize variables for extracted data
critical_score = None
os_information = None
open_ports = None
open_services = None
vulnerable_service = None
found_cve = None
# Extract individual values using patterns
match = re.search(critical_score_pattern, json_string)
if match:
critical_score = match.group(1)
match = re.search(os_information_pattern, json_string)
if match:
os_information = match.group(1)
match = re.search(open_ports_pattern, json_string)
if match:
open_ports = match.group(1)
match = re.search(open_services_pattern, json_string)
if match:
open_services = match.group(1)
match = re.search(vulnerable_service_pattern, json_string)
if match:
vulnerable_service = match.group(1)
match = re.search(found_cve_pattern, json_string)
if match:
found_cve = match.group(1)
# Create a dictionary to store the extracted data
data = {
"critical score": critical_score,
"os information": os_information,
"open ports": open_ports,
"open services": open_services,
"vulnerable service": vulnerable_service,
"found cve": found_cve
}
# Convert the dictionary to JSON format
json_output = json.dumps(data)
return json_output
def jwt_ai_data_regex(json_string: str) -> Any:
# Define the regular expression patterns for individual values
header_pattern = r'"Header": \{\s*"alg": "(.*?)",\s*"typ": "(.*?)"\s*\}'
payload_pattern = r'"Payload": \{\s*"iss": "(.*?)",\s*"sub": "(.*?)",\s*"aud": "(.*?)",\s*"exp": "(.*?)",\s*"nbf": "(.*?)",\s*"iat": "(.*?)"\s*\}'
signature_pattern = r'"Signature": "(.*?)"'
possible_attacks_pattern = r'"PossibleAttacks": "(.*?)"'
vulnerable_endpoints_pattern = r'"VulnerableEndpoints": "(.*?)"'
# Initialize variables for extracted data
header = {}
payload = {}
signature = ""
possible_attacks = ""
vulnerable_endpoints = ""
# Extract individual values using patterns
match_header = re.search(header_pattern, json_string)
if match_header:
header = {"alg": match_header.group(1), "typ": match_header.group(2)}
match_payload = re.search(payload_pattern, json_string)
if match_payload:
payload = {
"iss": match_payload.group(1),
"sub": match_payload.group(2),
"aud": match_payload.group(3),
"exp": match_payload.group(4),
"nbf": match_payload.group(5),
"iat": match_payload.group(6)
}
match_signature = re.search(signature_pattern, json_string)
if match_signature:
signature = match_signature.group(1)
match_attacks = re.search(possible_attacks_pattern, json_string)
if match_attacks:
possible_attacks = match_attacks.group(1)
match_endpoints = re.search(vulnerable_endpoints_pattern, json_string)
if match_endpoints:
vulnerable_endpoints = match_endpoints.group(1)
# Create a dictionary to store the extracted data
data = {
"Header": header,
"Payload": payload,
"Signature": signature,
"PossibleAttacks": possible_attacks,
"VulnerableEndpoints": vulnerable_endpoints
}
# Convert the dictionary to JSON format
json_output = json.dumps(data)
return json_output
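# Illustrative sketch of what jwt_ai_data_regex picks up; the reply string is invented
# purely to exercise the patterns above (no real token or findings).
def _example_jwt_regex():
    sample_reply = (
        '{"Header": {"alg": "HS256", "typ": "JWT"}, '
        '"Signature": "<unverified>", "PossibleAttacks": "alg=none downgrade", '
        '"VulnerableEndpoints": "/api/v1/admin"}'
    )
    # The payload pattern only matches when all six claims (iss/sub/aud/exp/nbf/iat) are present.
    return jwt_ai_data_regex(sample_reply)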
| [
"\n Do a NMAP scan analysis on the provided NMAP scan information\n The NMAP output must return in a JSON format accorging to the provided\n output format. The data must be accurate in regards towards a pentest report.\n The data must follow the following rules:\n 1) The NMAP scans must be done from a pentester point of view\n 2) The final output must be minimal according to the format given.\n 3) The final output must be kept to a minimal.\n 4) If a value not found in the scan just mention an empty string.\n 5) Analyze everything even the smallest of data.\n 6) Completely analyze the data provided and give a confirm answer using the output format.\n\n The output format:\n {\n \"critical score\": [\"\"],\n \"os information\": [\"\"],\n \"open ports\": [\"\"],\n \"open services\": [\"\"],\n \"vulnerable service\": [\"\"],\n \"found cve\": [\"\"]\n }\n\n NMAP Data to be analyzed: PLACEHOLDER\n ",
"\n Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report.\n Follow these guidelines:\n 1) Analyze the JWT token from a pentester's perspective\n 2) Keep the final output minimal while adhering to the given format\n 3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities\n 5) For the output \"Algorithm Used\" value use the Algorithm value from the JWT data.\n 6) For the output \"Header\" value use the Header value from the JWT data.\n 7) For the \"Payload\" Use the decoded payloads as a reference and then analyze any attack endpoints.\n 8) For \"Signature\" mention the signatures discovered.\n 9) List a few endpoints you feel are vulnerable for \"VulnerableEndpoints\"\n\n The output format:\n {\n \"Algorithm Used\": \"\",\n \"Header\": \"\",\n \"Payload\": \"\",\n \"Signature\": \"\",\n \"PossibleAttacks\": \"\",\n \"VulnerableEndpoints\": \"\"\n }\n\n JWT Token Data to be analyzed: PLACEHOLDER\n ",
"\n Do a DNS analysis on the provided DNS scan information\n The DNS output must return in a JSON format accorging to the provided\n output format. The data must be accurate in regards towards a pentest report.\n The data must follow the following rules:\n 1) The DNS scans must be done from a pentester point of view\n 2) The final output must be minimal according to the format given\n 3) The final output must be kept to a minimal\n\n The output format:\n {\n \"A\": [\"\"],\n \"AAA\": [\"\"],\n \"NS\": [\"\"],\n \"MX\": [\"\"],\n \"PTR\": [\"\"],\n \"SOA\": [\"\"],\n \"TXT\": [\"\"]\n }\n DNS Data to be analyzed: PLACEHOLDER\n ",
"[INST] <<SYS>> PLACEHOLDER<</SYS>> JWT Token Data to be analyzed: PLACEHOLDER [/INST]",
"[INST] <<SYS>> PLACEHOLDER<</SYS>> NMAP Data to be analyzed: PLACEHOLDER [/INST]",
"\n Do a DNS analysis on the provided DNS scan information\n The DNS output must return in a JSON format accorging to the provided\n output format. The data must be accurate in regards towards a pentest report.\n The data must follow the following rules:\n 1) The DNS scans must be done from a pentester point of view\n 2) The final output must be minimal according to the format given\n 3) The final output must be kept to a minimal\n\n The output format:\n {\n \"A\": [\"\"],\n \"AAA\": [\"\"],\n \"NS\": [\"\"],\n \"MX\": [\"\"],\n \"PTR\": [\"\"],\n \"SOA\": [\"\"],\n \"TXT\": [\"\"]\n }\n\n DNS Data to be analyzed: PLACEHOLDER\n "
] |
2024-01-10 | Hsiang-Tang/gpt_finetune_linebot | fine_tune.py | # gpt-3.5_fine_tune
#!/usr/bin/env python
# coding: utf-8
from flask import Flask, render_template, request, abort
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import TextMessage, MessageEvent, TextSendMessage
import os
import openai
import tempfile
import datetime
import time
import string
import os
# Upgrade the openai library
os.system('pip install openai --upgrade')
# Download the clinic_qa.json file using curl
os.system('curl -o clinic_qa.json -L https://github.com/Hsiang-Tang/gpt-3.5_fine_tune/raw/main/clinic_qa.json')
import openai
# Set your OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
# Upload the file for fine-tuning
openai.File.create(
file=open("clinic_qa.json", "rb"),
purpose='fine-tune'
)
# List uploaded files
openai.File.list()
# Create a fine-tuning job
openai.FineTuningJob.create(training_file="file-BhA5o5gmQRCx15KsK4zq97WI", model="gpt-3.5-turbo")
# List fine-tuning jobs
openai.FineTuningJob.list(limit=10)
# Retrieve the fine-tuning job
openai.FineTuningJob.retrieve("ftjob-x9NGckMPlxEXEXGSDtjy4Uc0")
# List fine-tuning job events
openai.FineTuningJob.list_events(id="ftjob-x9NGckMPlxEXEXGSDtjy4Uc0", limit=10)
# Create a chat completion
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "您現在扮演一個專業的醫生"},
{"role": "user", "content": "我感覺動一動就很累,老是很疲倦"}
]
)
print(completion.choices[0].message.content)
# Create a chat completion with the fine-tuned model
completion2 = openai.ChatCompletion.create(
model="ft:gpt-3.5-turbo-0613:personal::7wllb3DZ",
messages=[
{"role": "system", "content": "您現在扮演一個專業的醫生"},
{"role": "user", "content": "我感覺動一動就很累,老是很疲倦"}
]
)
print(completion2.choices[0].message.content)
def GPT_response(text):
response = openai.ChatCompletion.create(
model="ft:gpt-3.5-turbo-0613:personal::7wllb3DZ",
messages=[
{"role": "system", "content": "您現在扮演一個專業的醫生"},
{"role": "user", "content": text}
]
)
answer = response.choices[0].message.content
    # Remove punctuation from the reply text
answer = answer.translate(str.maketrans('', '', string.punctuation))
    return answer
 | [
"您現在扮演一個專業的醫生",
"我感覺動一動就很累,老是很疲倦"
] |
2024-01-10 | shyamal-anadkat/howdoi.ai | utils~giphy.py | """Chain that calls the Giphy API.
"""
import os
import sys
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class HiddenPrints:
"""Context manager to hide prints."""
def __enter__(self) -> None:
"""Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, *_: Any) -> None:
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
class GiphyAPIWrapper(BaseModel):
"""Wrapper around Giphy API.
To use, you should have the environment variable ``GIPHY_API_KEY`` set with your API key, or pass
    `giphy_api_key` as a named parameter to the constructor.
    Example:
        .. code-block:: python
            from utils.giphy import GiphyAPIWrapper
            giphy = GiphyAPIWrapper()
"""
search_engine: Any #: :meta private:
giphy_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
giphy_api_key = get_from_dict_or_env(
values, "giphy_api_key", "GIPHY_API_KEY"
)
values["giphy_api_key"] = giphy_api_key
try:
import giphy_client
from giphy_client.rest import ApiException
values["giphy_engine"] = giphy_client.DefaultApi()
except ImportError:
raise ValueError(
"Could not import giphy_client python package. "
"Please it install it with `pip install giphy_client`."
)
return values
def run(self, query: str) -> str:
"""Run query through GiphyAPI and parse result."""
api_key = self.giphy_api_key # str | Giphy API Key.
        q = query  # str | Search query term or phrase.
# int | The maximum number of records to return. (optional) (default to 25)
limit = 5
# int | An optional results offset. Defaults to 0. (optional) (default to 0)
offset = 0
rating = 'g' # str | Filters results by specified rating. (optional)
# str | Specify default country for regional content; use a 2-letter ISO 639-1 country code. See list of supported languages <a href = \"../language-support\">here</a>. (optional)
lang = 'en'
fmt = 'json' # str | U
        from giphy_client.rest import ApiException  # local import so the except clause below can catch Giphy errors
        with HiddenPrints():
try:
# Search Endpoint
api_response = self.giphy_engine.gifs_search_get(
api_key, q, limit=limit, offset=offset, rating=rating, lang=lang, fmt=fmt)
except ApiException as e:
raise ValueError(f"Got error from GiphyAPI: {e}")
# raise Exception(api_response.data[0].embed_url)
url = api_response.data[0].embed_url
return f"""Final Answer: <iframe src="{url}" width="480" height="480" frameBorder="0" class="giphy-embed" allowFullScreen></iframe><br /><a href="{url}">powered by GIPHY</a>"""
| [] |
2024-01-10 | shyamal-anadkat/howdoi.ai | utils~chat_agent.py | """Chat agent with question answering
"""
import os
from utils.giphy import GiphyAPIWrapper
from dataclasses import dataclass
from langchain.chains import LLMChain, LLMRequestsChain
from langchain import Wikipedia, OpenAI
from langchain.agents.react.base import DocstoreExplorer
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent
from langchain.prompts import PromptTemplate
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.agents.conversational.base import ConversationalAgent
from datetime import datetime
import langchain
from langchain.cache import InMemoryCache
langchain.llm_cache = InMemoryCache()
news_api_key = os.environ["NEWS_API_KEY"]
tmdb_bearer_token = os.environ["TMDB_API_KEY"]
@dataclass
class ChatAgent:
agent_executor: AgentExecutor = None
def _get_docstore_agent(self):
docstore = DocstoreExplorer(Wikipedia())
docstore_tools = [
Tool(
name="Search",
func=docstore.search
),
Tool(
name="Lookup",
func=docstore.lookup
)
]
docstore_llm = OpenAI(temperature=0, model_name="text-davinci-003")
docstore_agent = initialize_agent(
docstore_tools, docstore_llm, agent="react-docstore", verbose=True)
return docstore_agent
def _get_requests_llm_tool(self):
template = """
Extracted: {requests_result}"""
PROMPT = PromptTemplate(
input_variables=["requests_result"],
template=template,
)
def lambda_func(input):
out = chain = LLMRequestsChain(llm_chain=LLMChain(
llm=OpenAI(temperature=0),
prompt=PROMPT)).run(input)
return out.strip()
return lambda_func
def __init__(self, *, conversation_chain: LLMChain = None, history_array):
date = datetime.today().strftime('%B %d, %Y')
# set up a Wikipedia docstore agent
docstore_agent = self._get_docstore_agent()
giphy = GiphyAPIWrapper()
tool_names = get_all_tool_names()
tool_names.remove("pal-math")
tool_names.remove("requests") # let's use the llm_requests instead
# let's use the llm_requests instead
tool_names.remove("google-search")
tool_names.remove("pal-colored-objects")
tool_names.remove("python_repl")
tool_names.remove("terminal")
requests_tool = self._get_requests_llm_tool()
tools = load_tools(tool_names,
llm=OpenAI(temperature=0, model="text-davinci-003"),
news_api_key=news_api_key,
tmdb_bearer_token=tmdb_bearer_token)
# Tweak some of the tool descriptions
for tool in tools:
if tool.name == "Search":
tool.description = "Use this tool exclusively for questions relating to current events, or when you can't find an answer using any of the other tools."
if tool.name == "Calculator":
tool.description = "Use this to solve numeric math questions and do arithmetic. Don't use it for general or abstract math questions."
tools = tools + [
Tool(
name="WikipediaSearch",
description="Useful for answering a wide range of factual, scientific, academic, political and historical questions.",
func=docstore_agent.run
),
Tool(
name="GiphySearch",
func=giphy.run,
description="useful for when you need to find a gif or picture, and for adding humor to your replies. Input should be a query, and output will be an html embed code which you MUST include in your Final Answer."
),
Tool(
name="Requests",
func=requests_tool,
description="A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page."
)
]
ai_prefix = "AI"
human_prefix = "Human"
prefix = f"""{ai_prefix} is a large language model. {ai_prefix} is represented by a 🤖.
{ai_prefix} uses a light, humorous tone, and {ai_prefix} frequently includes emojis in its responses. Responses with code examples should be formatted in code blocks using <pre><code></code></pre> tags.
{ai_prefix} is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, {ai_prefix} is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
If {ai_prefix} can't provide a good response, it will truthfully answer that it can't help with the user's request.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
The current date is {date}. Questions that refer to a specific date or time period will be interpreted relative to this date.
TOOLS:
------
Assistant has access to the following tools:
"""
suffix = f"""
Questions that refer to a specific date or time period will be interpreted relative to this date.
After you answer the question, you MUST determine which language your answer is written in, and append the language code to the end of the Final Answer, within parentheses, like this (en-US).
Begin!
Previous conversation history:
{{chat_history}}
New input: {{input}}
{{agent_scratchpad}}
"""
memory = ConversationBufferMemory(memory_key="chat_history")
for item in history_array:
memory.save_context(
{f"{ai_prefix}": item["prompt"]}, {f"{human_prefix}": item["response"]})
llm = OpenAI(temperature=.5, model="text-davinci-003")
llm_chain = LLMChain(
llm=llm,
prompt=ConversationalAgent.create_prompt(
tools,
ai_prefix=ai_prefix,
human_prefix=human_prefix,
suffix=suffix
),
)
agent_obj = ConversationalAgent(
llm_chain=llm_chain, ai_prefix=ai_prefix)
self.agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
verbose=True,
max_iterations=5,
memory=memory)
# self.agent_executor = AgentExecutor.from_agent_and_tools(
# agent=agent,
# tools=tools,
# max_iterations=5,
# verbose=True)
| [
"requests_result",
"\n Extracted: {requests_result}"
] |
2024-01-10 | EthicalSecurity-Agency/openai_evals | evals~elsuite~modelgraded~classify.py | """
Generic eval that uses a prompt + classification.
"""
import logging
from collections import Counter
from random import Random
from typing import Optional, Union
import openai
import evals
import evals.record
from evals import CompletionFn, DummyCompletionFn, OpenAIChatCompletionFn
from evals.elsuite.modelgraded.base import ModelGradedSpec
from evals.elsuite.modelgraded.classify_utils import (
CHOICE_KEY,
INVALID_STR,
MATCH_FNS,
concat_n_completions,
get_choice,
)
from evals.elsuite.utils import PromptFn, scrub_formatting_from_prompt
from evals.registry import Registry
class ModelBasedClassify(evals.Eval):
invalid_request_during_completion = 0
invalid_request_during_evaluation = 0
def __init__(
self,
completion_fns: list[CompletionFn],
samples_jsonl: str,
modelgraded_spec: str,
registry: Registry,
*args,
match_fn: str = "starts_or_endswith",
max_tokens: int = 1024,
multicomp_n: Union[int, str] = 1,
multicomp_temperature: float = 0.4,
samples_renamings: Optional[dict[str, str]] = None,
eval_type: Optional[str] = None,
metaeval: bool = False,
eval_completion_fn: Optional[str] = None,
modelgraded_spec_args: Optional[dict[str, dict[str, str]]] = None,
**kwargs,
):
super().__init__(completion_fns, *args, **kwargs)
n_models = len(self.completion_fns)
self.max_tokens = max_tokens
self.samples_jsonl = samples_jsonl
self.match_fn = MATCH_FNS[match_fn]
self.metaeval = metaeval
self.registry = registry
if multicomp_n == "from_models":
assert n_models > 1, f"multicomp_n='from_models' but only 1 model is specified."
self.multicomp_n = n_models
else:
assert isinstance(
multicomp_n, int
), f"multicomp_n={multicomp_n} must be an int or 'from_models'."
self.multicomp_n = multicomp_n
self.multicomp_temperature = multicomp_temperature
self.samples_renamings = samples_renamings or {}
# check if multiple models are specified
if len(self.completion_fns) > 1:
assert (
self.multicomp_n == n_models
), f"multicomp_n={self.multicomp_n} must be equal to the number of models={len(self.completion_fns)} if multiple models are specified."
if isinstance(self.completion_fn, DummyCompletionFn):
self.eval_completion_fn = self.completion_fn
elif eval_completion_fn:
self.eval_completion_fn = self.registry.make_completion_fn(eval_completion_fn)
else:
self.eval_completion_fn = OpenAIChatCompletionFn(model="gpt-3.5-turbo")
spec_kwargs = {"multicomp_n": self.multicomp_n}
if modelgraded_spec_args:
spec_kwargs["args"] = modelgraded_spec_args
self.mg: ModelGradedSpec = self.registry.get_modelgraded_spec(
modelgraded_spec, **spec_kwargs
)
if eval_type:
self.mg.append_answer_prompt(eval_type)
def eval_sample(self, test_sample: dict, rng: Random) -> None:
"""Evaluate a single sample.
Recorded metrics are always: one of the self.choice_strings, or "__invalid__".
"""
if self.samples_renamings:
test_sample = {self.samples_renamings.get(k, k): v for k, v in test_sample.items()}
if self.multicomp_n > 1:
test_sample["n"] = self.multicomp_n
completions = {}
if self.metaeval:
# assert outputs exist in the data
for v in self.mg.input_outputs.values():
assert v in test_sample, f"Missing output '{v}' in sample {test_sample.keys()}"
completions[v] = test_sample[v]
# remove outputs from the data
test_sample = {
k: v for k, v in test_sample.items() if k not in list(self.mg.input_outputs.values())
}
for k in self.mg.input_outputs:
test_sample[k] = scrub_formatting_from_prompt(test_sample[k])
if not self.metaeval:
try:
for k, v in self.mg.input_outputs.items():
if self.multicomp_n > 1 and v in self.mg.completion_sample_templates:
completion_i_s = []
for i in range(self.multicomp_n):
if len(self.completion_fns) > 1:
# use a separate model for each completion
completion_fn = self.completion_fns[i]
else:
# use the single model for all completions
completion_fn = self.completion_fn
get_input_completion = PromptFn(
test_sample[k],
completion_fn=completion_fn,
max_tokens=self.max_tokens,
temperature=self.multicomp_temperature,
)
completion_i, _ = get_input_completion()
completion_i_s.append(completion_i)
completion = concat_n_completions(
completion_i_s, self.mg.completion_sample_templates[v]
)
else:
get_input_completion = PromptFn(
test_sample[k],
completion_fn=self.completion_fn,
max_tokens=self.max_tokens,
)
completion, _ = get_input_completion()
completions[v] = completion
except openai.error.InvalidRequestError:
self.invalid_request_during_completion += 1
return
metrics = {}
if self.mg.expanded_args_dict and len(self.mg.expanded_args_dict) > 1:
args_dict = self.mg.expanded_args_dict
elif self.mg.expanded_args_dict and len(self.mg.expanded_args_dict) == 1:
# if there is only one combination, don't bother with the metric name
args_dict = {CHOICE_KEY: v for v in self.mg.expanded_args_dict.values()}
else:
args_dict = {CHOICE_KEY: {}}
for metric, args in args_dict.items():
args = {k: v[1] for k, v in args.items()}
prompt = self.mg.format(**args, **completions, **test_sample)
evaluate = PromptFn(
prompt,
completion_fn=self.eval_completion_fn,
max_tokens=self.max_tokens,
)
try:
evaluation, _ = evaluate(skip_format=True)
except openai.error.InvalidRequestError:
logging.warn(f"Invalid request during evaluation: {prompt}")
self.invalid_request_during_evaluation += 1
return
choice = get_choice(
evaluation, self.mg.eval_type, self.match_fn, self.mg.choice_strings
)
if choice == INVALID_STR:
logging.warn(
f"Choices {self.mg.choice_strings} not parsable for {self.mg.eval_type}: {evaluation}"
)
metrics[metric] = choice
if self.metaeval:
assert (
metric in test_sample
), f"Missing label for metric '{metric}' in sample {test_sample.keys()}"
metrics[metric + "_metascore"] = choice == test_sample[metric]
evals.record.record_metrics(**metrics)
return choice
def run(self, recorder):
samples = self.get_samples()
self.eval_all_samples(recorder, samples)
record_metrics = {}
record_metrics["invalid_request_during_completion"] = self.invalid_request_during_completion
record_metrics["invalid_request_during_evaluation"] = self.invalid_request_during_evaluation
all_sample_metrics = recorder.get_metrics()
if not all_sample_metrics:
return record_metrics
if self.mg.expanded_args_dict and len(self.mg.expanded_args_dict) > 1:
metrics = sorted(self.mg.expanded_args_dict)
else:
metrics = [CHOICE_KEY]
for metric in metrics:
chosen = [m[metric] for m in all_sample_metrics if metric in m]
# if there is a best choice, compute the score
if self.mg.choice_scores:
# assumption: each INVALID_STR contributes the lowest score
lowest_score = min(self.mg.choice_scores.values())
scores = [
self.mg.choice_scores[choice] if choice != INVALID_STR else lowest_score
for choice in chosen
]
record_metrics[f"score/{metric}"] = sum(scores) / len(all_sample_metrics)
# compute the counts and ratios
counts = dict(Counter(chosen))
missing_samples = len(all_sample_metrics) - len(chosen)
if missing_samples:
counts["__missing_samples__"] = missing_samples
record_metrics.update({f"counts/{metric}/{k}": v for k, v in counts.items()})
if self.metaeval:
metascores = [m[metric + "_metascore"] for m in all_sample_metrics if metric in m]
record_metrics[f"metascore/{metric}"] = sum(metascores) / len(all_sample_metrics)
return record_metrics
| [] |
2024-01-10 | GRKdev/Script-SQL-API | gui~training.py | import tkinter as tk
from tkinter import ttk, messagebox
import os
import openai
import subprocess
import json
from dotenv import load_dotenv, set_key
class TrainingTab:
def __init__(self, master):
load_dotenv(".env")
self.api_key = os.getenv("OPENAI_API_KEY")
self.frame = ttk.Frame(master)
self.api_label = tk.Label(self.frame, text="Clave API:")
self.api_label.grid(row=0, column=0, pady=10)
self.api_entry = tk.Entry(self.frame, width=30)
if self.api_key:
self.api_entry.insert(0, self.api_key)
self.api_entry.grid(row=0, column=1, pady=10)
self.upload_button = tk.Button(
self.frame, text="Subir Archivos JSONL", command=self.upload_and_get_ids
)
self.upload_button.grid(row=2, column=1, pady=20)
self.upload_button.config(width=15, height=2)
self.model_label = tk.Label(self.frame, text="Modelo:")
self.model_label.grid(row=1, column=0, pady=10)
self.model_combo = ttk.Combobox(
self.frame, values=["ada", "babbage-002", "gpt-3.5-turbo"]
)
self.model_combo.grid(row=1, column=1, pady=10)
self.model_combo.current(0)
self.epoch_label = tk.Label(self.frame, text="Epochs:")
self.epoch_label.grid(row=3, column=0, pady=10)
self.epoch_entry = tk.Entry(self.frame, width=30)
self.epoch_entry.grid(row=3, column=1, pady=10)
self.message_label = tk.Label(self.frame, text="Mensaje:")
self.message_label.grid(row=4, column=0, pady=10)
self.message_entry = tk.Entry(self.frame, width=30)
self.message_entry.grid(row=4, column=1, pady=10)
self.final_button = tk.Button(
self.frame, text="Enviar", command=self.send_final_query
)
self.final_button.grid(row=5, column=0, pady=20)
self.final_button.config(bg="#7FFF7F")
self.final_button.config(width=10, height=2)
self.status_button = tk.Button(
self.frame, text="Estado", command=self.check_status
)
self.status_button.grid(row=5, column=1, pady=20)
self.status_button.config(width=10, height=2)
self.cancel_button = tk.Button(
self.frame, text="Cancelar", command=self.cancel_ft
)
self.cancel_button.grid(row=5, column=2, pady=20)
self.cancel_button.config(bg="#FF7F84")
self.cancel_button.config(width=10, height=2)
def set_api_key(self):
openai.api_key = self.api_key
self.api_key = self.api_entry.get().strip()
os.environ["OPENAI_API_KEY"] = self.api_key
set_key(".env", "OPENAI_API_KEY", self.api_key)
openai.api_key = self.api_key
def upload_and_get_ids(self):
self.set_api_key()
if not self.api_key:
tk.messagebox.showerror(
"Error", "Por favor, introduce la clave API de OpenAI."
)
return
try:
train_response = openai.files.create(
file=open("Documents/dicc/results/train.jsonl", "rb"),
purpose="fine-tune",
)
train_id = train_response.id
valid_response = openai.files.create(
file=open("Documents/dicc/results/valid.jsonl", "rb"),
purpose="fine-tune",
)
valid_id = valid_response.id
self.train_id = train_id
self.valid_id = valid_id
tk.messagebox.showinfo(
"IDs obtenidos", f"Train ID: {train_id}\nValid ID: {valid_id}"
)
except Exception as e:
tk.messagebox.showerror("Error", str(e))
def save_to_file(self, train_id, valid_id, fine_tune_id, message):
data = {
"train_id": train_id,
"valid_id": valid_id,
"fine_tune_id": fine_tune_id,
"message": message,
}
with open("training_history.json", "a") as file:
json.dump(data, file)
file.write("\n")
def update_fine_tune_ids(self):
self.message_to_id_map = {}
messages = []
try:
with open("training_history.json", "r") as file:
for line in file:
data = json.loads(line)
message = data["message"]
fine_tune_id = data["fine_tune_id"]
self.message_to_id_map[message] = fine_tune_id
messages.append(message)
except FileNotFoundError:
pass
def send_final_query(self):
selected_model = self.model_combo.get()
if selected_model == "ada":
self.send_final_query_ada()
elif selected_model in ["babbage-002", "gpt-3.5-turbo"]:
self.send_final_query_babbage_gpt()
def send_final_query_babbage_gpt(self):
message = self.message_entry.get().strip()
selected_model = self.model_combo.get()
epoch = self.epoch_entry.get().strip()
try:
response = openai.fine_tuning.jobs.create(
training_file=self.train_id,
validation_file=self.valid_id,
model=selected_model,
suffix=message,
hyperparameters={
"n_epochs": epoch,
},
)
fine_tune_id = response.id
tk.messagebox.showinfo("ID de Fine-Tune", fine_tune_id)
self.fine_tune_id = fine_tune_id
except Exception as e:
tk.messagebox.showerror("Error", str(e))
return
self.save_to_file(self.train_id, self.valid_id, self.fine_tune_id, message)
self.update_fine_tune_ids()
def send_final_query_ada(self):
message = self.message_entry.get().strip()
response = subprocess.check_output(
[
"openai",
"api",
"fine_tunes.create",
"-t",
self.train_id,
"-v",
self.valid_id,
"-m",
"ada",
"--suffix",
message,
],
text=True,
)
print(response)
for line in response.splitlines():
if line.startswith("Created fine-tune:"):
fine_tune_id = line.split(":")[1].strip()
tk.messagebox.showinfo("ID de Fine-Tune", fine_tune_id)
self.fine_tune_id = fine_tune_id
break
else:
tk.messagebox.showerror(
"Error", "No se encontró la ID de Fine-Tune en la respuesta."
)
self.save_to_file(self.train_id, self.valid_id, self.fine_tune_id, message)
self.update_fine_tune_ids()
def cancel_ft(self):
if not hasattr(self, "fine_tune_id"):
messagebox.showerror(
"Error", "No has iniciado ningún entrenamiento recientemente."
)
return
response = openai.fine_tuning.jobs.cancel(self.fine_tune_id)
tk.messagebox.showinfo("Estado del Cancelamiento", response.status)
def check_status(self):
if not hasattr(self, "fine_tune_id"):
messagebox.showerror(
"Error", "No has iniciado ningún entrenamiento recientemente."
)
return
try:
# Use the new API method to retrieve fine-tuning job details
data = openai.fine_tuning.jobs.retrieve(self.fine_tune_id)
except Exception as e:
messagebox.showerror("Error", f"Error al obtener datos: {e}")
return
# Process the response data
error_messages = []
status = data.status
cost = None # Adjust this if the new API provides cost details
# Process other details from the response
# ...
if error_messages:
error_text = "\n".join(error_messages)
messagebox.showerror("Errores en los archivos", error_text)
else:
status_message = f"Estado: {status}"
if cost:
status_message += f"\nCosto: ${cost}"
messagebox.showinfo("Estado del Entrenamiento", status_message)
| [] |
2024-01-10 | idan-tankel/SemOOD | SEED-Bench~AI_client.py | from openai import OpenAI
client = OpenAI(
)
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
{"role": "user", "content": "Where was it played?"}
]
)
print(response)
 | [
"Where was it played?",
"You are a helpful assistant.",
"The Los Angeles Dodgers won the World Series in 2020.",
"Who won the world series in 2020?"
] |
2024-01-10 | qcraftai/gedepth | depth~models~decode_heads~__init__.py | from functools import update_wrapper
from .densedepth_head import DenseDepthHead, DepthBaseDecodeHead
from .adabins_head import AdabinsHead
from .bts_head import BTSHead
from .dpt_head import DPTHead
from .binsformer_head import BinsFormerDecodeHead
from .maskpe_head import MaskedPE
# from .fcn_head import FCNHead
# from .ocr_head import OCRHead
from .cascade_decode_head import BaseCascadeDecodeHead
# from .decode_head_seg import BaseDecodeHead
# from .guidance_head import GuidanceHead
 | [] |
2024-01-10 | teamglados/mishmash | api~routes~text_to_image.py | from api.app import *
import openai
import asyncio
import concurrent
import functools
import os
@functools.lru_cache
def dalle_create_image_with_auth(api_key, **kwargs):
openai.api_key = api_key
return openai.Image.create(
response_format='b64_json',
**kwargs
)
async def dalle_create_image(*args, **kwargs):
with concurrent.futures.ProcessPoolExecutor(max_workers=1) as exc:
return await asyncio.get_running_loop().run_in_executor(
exc,
functools.partial(
dalle_create_image_with_auth,
os.environ['OPENAI_KEY'],
**kwargs
)
)
@app.get('/text2image')
async def create(prompt: str = '', count: int = 1, size: int = 1):
logger.info(f'text to image using the following prompt: {repr(prompt)}')
size = [
'256x256',
'512x512',
'1024x1024'
][min(max(0, size-1), 2)]
result = await dalle_create_image(
prompt=prompt,
size=size,
n=count,
)
return {'result': list(map(lambda r: r['b64_json'], result['data']))}
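# Illustrative client-side sketch for this route; the base URL assumes a local FastAPI
# server on port 8000, which is an assumption rather than project configuration.
def _example_text2image_client():
    import json as _json
    import urllib.parse
    import urllib.request
    query = urllib.parse.urlencode({"prompt": "a watercolor fox", "count": 1, "size": 2})
    with urllib.request.urlopen(f"http://localhost:8000/text2image?{query}") as resp:
        return _json.loads(resp.read())["result"]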
| [] |
2024-01-10 | teamglados/mishmash | api~routes~text_content.py | from api.app import *
import openai
import asyncio
import concurrent
import functools
import os
def strip_non_alnum_from_text(text):
# avoid outputs like: ':\n\n\n\nNew iPhone: Now with 100% more battery life!'
first_alnum_character = 0
for idx, s in enumerate(text):
if s.isalnum():
first_alnum_character = idx
break
return text[first_alnum_character:].strip()
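# Illustrative sketch of the behaviour described above, using an invented completion string.
def _example_strip_non_alnum():
    raw = ":\n\n\n\nNew iPhone: Now with 100% more battery life!"
    return strip_non_alnum_from_text(raw)  # -> "New iPhone: Now with 100% more battery life!"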
@functools.lru_cache
def generate_text_content(api_key, **kwargs):
"""
    Include previous prompt input separated by \n\n
Examples:
"Create funny one sentence marketing text for new iPhone"
"Create marketing text for new iPhone model\n\nThe new iPhone model is the most powerful and \
sophisticated iPhone yet. It has a powerful A12 processor and a new design that is sleek and \
stylish. This phone is sure to revolutionize the smartphone industry.\n\ntranslate to spanish"
"""
try:
openai.api_key = api_key
response = openai.Completion.create(
model="text-davinci-002",
prompt=kwargs["prompt"],
max_tokens=256,
temperature=0.7,
top_p=1,
n=1,
stream=False,
logprobs=None,
echo=False,
)
res_dict = response.to_dict()
logger.info(f"Completion API completed with: {res_dict}")
if res_dict["choices"]:
res_text = res_dict["choices"][0].to_dict()["text"].strip()
return strip_non_alnum_from_text(res_text)
return None
except:
logger.exception("Completion api failed:")
return None
async def create_text(*args, **kwargs):
with concurrent.futures.ProcessPoolExecutor(max_workers=1) as exc:
return await asyncio.get_running_loop().run_in_executor(
exc, functools.partial(generate_text_content, os.environ["OPENAI_KEY"], **kwargs)
)
@app.get("/textcontent")
async def create(prompt: str = ""):
logger.info(f"creating text using the following prompt: {repr(prompt)}")
result = await create_text(prompt=prompt)
return {'result': result}
| [] |
2024-01-10 | szemenyeim/AIRPGEnv | PPO~multiprocessing_env.py | # This code is from openai baseline
# https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
from gym import spaces
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, map, reward, done, info = env.step(data)
if done:
ob, map = env.reset()
remote.send((ob, map, reward, done, info))
elif cmd == 'reset':
ob, map = env.reset()
remote.send((ob, map))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, map, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(map), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
states = [remote.recv() for remote in self.remotes]
ob = [np.stack([i[0] for i in states]), np.stack([i[1] for i in states])]
return ob
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
def reset(self):
pass
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self):
self.venv.render()
class VecFrameStack(VecEnvWrapper):
"""
Vectorized environment base class
"""
def __init__(self, venv, num_stacks):
self.venv = venv
self.num_stacks = num_stacks
wos = venv.observation_space # wrapped ob space
low = np.repeat(wos.low, self.num_stacks, axis=0)
high = np.repeat(wos.high, self.num_stacks, axis=0)
self.observations = np.zeros((venv.num_envs,) + low.shape, low.dtype)
observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
shape = self.observation_space.shape
obs, map, rews, news, infos = self.venv.step_wait()
self.observations[:, :-shape[0]] = self.observations[:, shape[0]:]
for (i, new) in enumerate(news):
if new:
self.observations[i] = 0
self.observations[:, -shape[0]:] = obs
return self.observations, map, rews, news, infos
def reset(self):
"""
Reset all environments
"""
shape = self.observation_space.shape
obs = self.venv.reset()
self.observations[...] = 0
self.observations[:, -shape[0]:] = obs
return self.observations
def close(self):
self.venv.close()
import torch
import gym
# Derived from
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py
class VecPyTorchFrameStack(VecEnvWrapper):
def __init__(self, venv, nstack, device=None):
self.venv = venv
self.nstack = nstack
wos = venv.observation_space # wrapped ob space
self.shape_dim0 = wos.shape[0]
low = np.repeat(wos.low, self.nstack, axis=0)
high = np.repeat(wos.high, self.nstack, axis=0)
if device is None:
device = torch.device('cpu')
self.stacked_obs = torch.zeros((venv.num_envs,) + low.shape).to(device)
observation_space = gym.spaces.Box(
low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
obs, map, rews, news, infos = self.venv.step_wait()
self.stacked_obs[:, :-self.shape_dim0] = \
self.stacked_obs[:, self.shape_dim0:]
for (i, new) in enumerate(news):
if new:
self.stacked_obs[i] = 0
self.stacked_obs[:, -self.shape_dim0:] = obs
return self.stacked_obs, map, rews, news, infos
def reset(self):
obs = self.venv.reset()
self.stacked_obs.zero_()
self.stacked_obs[:, -self.shape_dim0:] = obs
return self.stacked_obs
def close(self):
self.venv.close()
class VecPyTorch(VecEnvWrapper):
def __init__(self, venv, device):
"""Return only every `skip`-th frame"""
super(VecPyTorch, self).__init__(venv)
self.device = device
def reset(self):
obs = self.venv.reset()
obs1 = torch.from_numpy(obs[0]).float().to(self.device)
obs2 = torch.from_numpy(obs[1]).float().to(self.device)
return obs1, obs2
def step_async(self, actions):
actions = actions.squeeze(1).cpu().numpy()
self.venv.step_async(actions)
def step_wait(self):
obs, map, reward, done, info = self.venv.step_wait()
obs = torch.from_numpy(obs).float().to(self.device)
map = torch.from_numpy(map).float().to(self.device)
reward = torch.from_numpy(reward).unsqueeze(dim=1).float()
return obs, map, reward, done, info
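# Illustrative wiring sketch: the stub environment below only mimics the (observation, map)
# protocol the worker expects and is not the project's real game environment; the shapes,
# spaces and CPU device are assumptions for the example. On spawn-based platforms this
# should be called from under an `if __name__ == "__main__":` guard.
def _example_vectorized_setup(num_envs=2):
    class _StubEnv:
        observation_space = spaces.Box(low=0.0, high=1.0, shape=(3, 8, 8), dtype=np.float32)
        action_space = spaces.Discrete(4)
        def reset(self):
            return np.zeros((3, 8, 8), np.float32), np.zeros((1, 8, 8), np.float32)
        def step(self, action):
            ob, game_map = self.reset()
            return ob, game_map, 0.0, False, {}
    envs = SubprocVecEnv([(lambda: _StubEnv()) for _ in range(num_envs)])
    envs = VecPyTorch(envs, torch.device("cpu"))
    obs, game_map = envs.reset()
    obs, game_map, reward, done, info = envs.step(torch.zeros(num_envs, 1, dtype=torch.long))
    envs.close()
    return obs.shape, game_map.shape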
| [] |
2024-01-10 | szemenyeim/AIRPGEnv | A2C~multiprocessing_env.py | # This code is from openai baseline
# https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
from gym import spaces
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, map, reward, done, info = env.step(data)
if done:
ob, map = env.reset()
remote.send((ob, map, reward, done, info))
elif cmd == 'reset':
ob, map = env.reset()
remote.send((ob, map))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
        env_fns: list of callables, each creating a gym environment to run in its own subprocess
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, map, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(map), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
states = [remote.recv() for remote in self.remotes]
ob = [np.stack([i[0] for i in states]), np.stack([i[1] for i in states])]
return ob
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
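# Minimal usage sketch (illustration only; `make_env` is a hypothetical user-supplied factory
# returning an env whose reset() yields an (observation, map) pair, as the worker above expects):
#     env_fns = [lambda i=i: make_env(seed=i) for i in range(4)]
#     venv = SubprocVecEnv(env_fns)
#     obs, map_ = venv.reset()
#     obs, map_, rewards, dones, infos = venv.step([venv.action_space.sample() for _ in range(len(venv))])
#     venv.close()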
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
def reset(self):
pass
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self):
self.venv.render()
class VecFrameStack(VecEnvWrapper):
"""
    Frame-stacking wrapper for a vectorized environment: keeps the last num_stacks observations concatenated along the leading observation axis
"""
def __init__(self, venv, num_stacks):
self.venv = venv
self.num_stacks = num_stacks
wos = venv.observation_space # wrapped ob space
low = np.repeat(wos.low, self.num_stacks, axis=0)
high = np.repeat(wos.high, self.num_stacks, axis=0)
self.observations = np.zeros((venv.num_envs,) + low.shape, low.dtype)
observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
        shape = self.venv.observation_space.shape  # unstacked obs shape of the wrapped venv
obs, map, rews, news, infos = self.venv.step_wait()
self.observations[:, :-shape[0]] = self.observations[:, shape[0]:]
for (i, new) in enumerate(news):
if new:
self.observations[i] = 0
self.observations[:, -shape[0]:] = obs
return self.observations, map, rews, news, infos
def reset(self):
"""
Reset all environments
"""
        shape = self.venv.observation_space.shape  # unstacked obs shape of the wrapped venv
obs = self.venv.reset()
self.observations[...] = 0
self.observations[:, -shape[0]:] = obs
return self.observations
def close(self):
self.venv.close()
import torch
import gym
# Derived from
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py
class VecPyTorchFrameStack(VecEnvWrapper):
def __init__(self, venv, nstack, device=None):
self.venv = venv
self.nstack = nstack
wos = venv.observation_space # wrapped ob space
self.shape_dim0 = wos.shape[0]
low = np.repeat(wos.low, self.nstack, axis=0)
high = np.repeat(wos.high, self.nstack, axis=0)
if device is None:
device = torch.device('cpu')
self.stacked_obs = torch.zeros((venv.num_envs,) + low.shape).to(device)
observation_space = gym.spaces.Box(
low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
obs, map, rews, news, infos = self.venv.step_wait()
self.stacked_obs[:, :-self.shape_dim0] = \
self.stacked_obs[:, self.shape_dim0:]
for (i, new) in enumerate(news):
if new:
self.stacked_obs[i] = 0
self.stacked_obs[:, -self.shape_dim0:] = obs
return self.stacked_obs, map, rews, news, infos
def reset(self):
obs = self.venv.reset()
self.stacked_obs.zero_()
self.stacked_obs[:, -self.shape_dim0:] = obs
return self.stacked_obs
def close(self):
self.venv.close()
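# Worked illustration (defined for reference, never called): the rolling-buffer update used in
# VecPyTorchFrameStack.step_wait above. Old frames slide toward the front of the channel axis
# and the newest frame is written into the last shape_dim0 slots. Shapes assume one env,
# 1-channel 2x2 frames stacked 3 times.
def _frame_stack_demo():
    stacked = torch.zeros(1, 3, 2, 2)   # plays the role of self.stacked_obs
    new_frame = torch.ones(1, 1, 2, 2)  # freshly observed frame (shape_dim0 == 1)
    stacked[:, :-1] = stacked[:, 1:]    # drop the oldest frame
    stacked[:, -1:] = new_frame         # append the newest frame at the end
    return stacked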
class VecPyTorch(VecEnvWrapper):
def __init__(self, venv, device):
"""Return only every `skip`-th frame"""
super(VecPyTorch, self).__init__(venv)
self.device = device
def reset(self):
obs = self.venv.reset()
obs1 = torch.from_numpy(obs[0]).float().to(self.device)
obs2 = torch.from_numpy(obs[1]).float().to(self.device)
return obs1, obs2
def step_async(self, actions):
actions = actions.squeeze(1).cpu().numpy()
self.venv.step_async(actions)
def step_wait(self):
obs, map, reward, done, info = self.venv.step_wait()
obs = torch.from_numpy(obs).float().to(self.device)
map = torch.from_numpy(map).float().to(self.device)
reward = torch.from_numpy(reward).unsqueeze(dim=1).float()
return obs, map, reward, done, info
| [] |
2024-01-10 | szemenyeim/AIRPGEnv | PPO~vec_env.py | # This code is from openai baseline
# https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
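# Why cloudpickle rather than pickle: the env factories handed to SubprocVecEnv below are
# usually closures or lambdas, which the standard pickle module cannot serialize.
# Minimal demonstration (defined for reference, never called by this module):
def _cloudpickle_vs_pickle_demo():
    import pickle
    import cloudpickle
    thunk = lambda: 42
    assert cloudpickle.loads(cloudpickle.dumps(thunk))() == 42  # round-trips fine
    try:
        pickle.dumps(thunk)                                     # plain pickle refuses lambdas
    except Exception as exc:
        return exc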
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
        env_fns: list of callables, each creating a gym environment to run in its own subprocess
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
| [] |
2024-01-10 | royaals/medicalbot | app2.py | from flask import Flask, render_template, request
import openai
from dotenv import load_dotenv
import os # Add this line
# Load environment variables
load_dotenv()
app = Flask(__name__) # Use __name__, not _name_
# Set up OpenAI API credentials
openai.api_key = os.getenv('OPENAI_API_KEY')
# Define the default route to return the index.html file
@app.route("/")
def index():
return render_template("index.html")
@app.route("/api", methods=["POST"])
def api():
# Get the message from the POST request
user_message = request.json.get("message")
# Define a system message to set context as medical
system_message = {
"role": "system",
"content": "You are strictly a medical chatbot. Do not provide information outside of the medical domain. If a question isn't medical, inform the user and ask for a medical question."
}
# Send the system message and user message to OpenAI's API and receive the response
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
system_message,
{"role": "user", "content": user_message}
]
)
response = completion.choices[0].message
return response
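# Example request against the endpoint above (illustration only; assumes the app is running
# on the default Flask port):
#     import requests
#     reply = requests.post("http://localhost:5000/api",
#                           json={"message": "What are common symptoms of anemia?"})
#     print(reply.json())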
if __name__ == '__main__':
app.run()
else:
print("You can only ask about Medical Related Questions")
| [
"You are strictly a medical chatbot. Do not provide information outside of the medical domain. If a question isn't medical, inform the user and ask for a medical question."
] |
2024-01-10 | hiive/hiivemdptoolbox | hiive~mdptoolbox~example.py | # -*- coding: utf-8 -*-
"""Markov Decision Process (MDP) Toolbox: ``example`` module
=========================================================
The ``example`` module provides functions to generate valid MDP transition and
reward matrices.
Available functions
-------------------
:func:`~mdptoolbox.example.forest`
A simple forest management example
:func:`~mdptoolbox.example.rand`
A random example
:func:`~mdptoolbox.example.small`
A very small example
:func:`~mdptoolbox.example.openai`
A discrete OpenAI Gym environment
"""
# Copyright (c) 2011-2014 Steven A. W. Cordwell
# Copyright (c) 2009 INRA
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as _np
import scipy.sparse as _sp
from . import openai as openai_
def forest(S=3, r1=4, r2=2, p=0.1, is_sparse=False):
"""Generate a MDP example based on a simple forest management scenario.
This function is used to generate a transition probability
(``A`` × ``S`` × ``S``) array ``P`` and a reward (``S`` × ``A``) matrix
``R`` that model the following problem. A forest is managed by two actions:
'Wait' and 'Cut'. An action is decided each year with first the objective
to maintain an old forest for wildlife and second to make money selling cut
wood. Each year there is a probability ``p`` that a fire burns the forest.
Here is how the problem is modelled.
Let {0, 1 . . . ``S``-1 } be the states of the forest, with ``S``-1 being
the oldest. Let 'Wait' be action 0 and 'Cut' be action 1.
After a fire, the forest is in the youngest state, that is state 0.
The transition matrix ``P`` of the problem can then be defined as follows::
| p 1-p 0.......0 |
| . 0 1-p 0....0 |
P[0,:,:] = | . . 0 . |
| . . . |
| . . 1-p |
| p 0 0....0 1-p |
| 1 0..........0 |
| . . . |
P[1,:,:] = | . . . |
| . . . |
| . . . |
| 1 0..........0 |
The reward matrix R is defined as follows::
| 0 |
| . |
R[:,0] = | . |
| . |
| 0 |
| r1 |
| 0 |
| 1 |
R[:,1] = | . |
| . |
| 1 |
| r2 |
Parameters
    ----------
S : int, optional
The number of states, which should be an integer greater than 1.
Default: 3.
r1 : float, optional
The reward when the forest is in its oldest state and action 'Wait' is
performed. Default: 4.
r2 : float, optional
The reward when the forest is in its oldest state and action 'Cut' is
performed. Default: 2.
p : float, optional
        The probability of wildfire occurrence, in the range ]0, 1[. Default:
0.1.
is_sparse : bool, optional
If True, then the probability transition matrices will be returned in
sparse format, otherwise they will be in dense format. Default: False.
Returns
-------
out : tuple
``out[0]`` contains the transition probability matrix P and ``out[1]``
contains the reward matrix R. If ``is_sparse=False`` then P is a numpy
array with a shape of ``(A, S, S)`` and R is a numpy array with a shape
of ``(S, A)``. If ``is_sparse=True`` then P is a tuple of length ``A``
where each ``P[a]`` is a scipy sparse CSR format matrix of shape
``(S, S)``; R remains the same as in the case of ``is_sparse=False``.
Examples
--------
>>> import hiive.mdptoolbox.example
>>> P, R = hiive.mdptoolbox.example.forest()
>>> P
array([[[ 0.1, 0.9, 0. ],
[ 0.1, 0. , 0.9],
[ 0.1, 0. , 0.9]],
<BLANKLINE>
[[ 1. , 0. , 0. ],
[ 1. , 0. , 0. ],
[ 1. , 0. , 0. ]]])
>>> R
array([[ 0., 0.],
[ 0., 1.],
[ 4., 2.]])
>>> Psp, Rsp = hiive.mdptoolbox.example.forest(is_sparse=True)
>>> len(Psp)
2
>>> Psp[0]
<3x3 sparse matrix of type '<... 'numpy.float64'>'
with 6 stored elements in Compressed Sparse Row format>
>>> Psp[1]
<3x3 sparse matrix of type '<... 'numpy.int64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> Rsp
array([[ 0., 0.],
[ 0., 1.],
[ 4., 2.]])
>>> (Psp[0].todense() == P[0]).all()
True
>>> (Rsp == R).all()
True
"""
assert S > 1, "The number of states S must be greater than 1."
    assert (r1 > 0) and (r2 > 0), "The rewards r1 and r2 must be positive."
assert 0 <= p <= 1, "The probability p must be in [0; 1]."
# Definition of Transition matrix
if is_sparse:
P = []
rows = list(range(S)) * 2
cols = [0] * S + list(range(1, S)) + [S - 1]
vals = [p] * S + [1-p] * S
P.append(_sp.coo_matrix((vals, (rows, cols)), shape=(S, S)).tocsr())
rows = list(range(S))
cols = [0] * S
vals = [1] * S
P.append(_sp.coo_matrix((vals, (rows, cols)), shape=(S, S)).tocsr())
else:
P = _np.zeros((2, S, S))
P[0, :, :] = (1 - p) * _np.diag(_np.ones(S - 1), 1)
P[0, :, 0] = p
P[0, S - 1, S - 1] = (1 - p)
P[1, :, :] = _np.zeros((S, S))
P[1, :, 0] = 1
# Definition of Reward matrix
R = _np.zeros((S, 2))
R[S - 1, 0] = r1
R[:, 1] = _np.ones(S)
R[0, 1] = 0
R[S - 1, 1] = r2
return(P, R)
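# For orientation (a hedged sketch, not a doctest): the matrices above plug directly into the
# solvers shipped with this package, assuming the classic ``mdp.ValueIteration(P, R, discount)``
# API of the companion ``hiive.mdptoolbox.mdp`` module:
#     from hiive.mdptoolbox import mdp
#     P, R = forest()
#     vi = mdp.ValueIteration(P, R, 0.96)
#     vi.run()
#     print(vi.policy)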
def _randDense(states, actions, mask):
"""Generate random dense ``P`` and ``R``. See ``rand`` for details.
"""
# definition of transition matrix : square stochastic matrix
P = _np.zeros((actions, states, states))
# definition of reward matrix (values between -1 and +1)
R = _np.zeros((actions, states, states))
for action in range(actions):
for state in range(states):
# create our own random mask if there is no user supplied one
if mask is None:
m = _np.random.random(states)
r = _np.random.random()
m[m <= r] = 0
m[m > r] = 1
elif mask.shape == (actions, states, states):
m = mask[action][state] # mask[action, state, :]
else:
m = mask[state]
            # Make sure that there is at least one transition in each state
if m.sum() == 0:
m[_np.random.randint(0, states)] = 1
P[action][state] = m * _np.random.random(states)
P[action][state] = P[action][state] / P[action][state].sum()
R[action][state] = (m * (2 * _np.random.random(states) -
_np.ones(states, dtype=int)))
return(P, R)
def _randSparse(states, actions, mask):
"""Generate random sparse ``P`` and ``R``. See ``rand`` for details.
"""
# definition of transition matrix : square stochastic matrix
P = [None] * actions
# definition of reward matrix (values between -1 and +1)
R = [None] * actions
for action in range(actions):
# it may be more efficient to implement this by constructing lists
# of rows, columns and values then creating a coo_matrix, but this
# works for now
PP = _sp.dok_matrix((states, states))
RR = _sp.dok_matrix((states, states))
for state in range(states):
if mask is None:
m = _np.random.random(states)
m[m <= 2/3.0] = 0
m[m > 2/3.0] = 1
elif mask.shape == (actions, states, states):
m = mask[action][state] # mask[action, state, :]
else:
m = mask[state]
n = int(m.sum()) # m[state, :]
if n == 0:
m[_np.random.randint(0, states)] = 1
n = 1
# find the columns of the vector that have non-zero elements
nz = m.nonzero()
if len(nz) == 1:
cols = nz[0]
else:
cols = nz[1]
vals = _np.random.random(n)
vals = vals / vals.sum()
reward = 2*_np.random.random(n) - _np.ones(n)
PP[state, cols] = vals
RR[state, cols] = reward
# PP.tocsr() takes the same amount of time as PP.tocoo().tocsr()
# so constructing PP and RR as coo_matrix in the first place is
# probably "better"
P[action] = PP.tocsr()
R[action] = RR.tocsr()
return(P, R)
def rand(S, A, is_sparse=False, mask=None):
"""Generate a random Markov Decision Process.
Parameters
----------
S : int
Number of states (> 1)
A : int
Number of actions (> 1)
is_sparse : bool, optional
False to have matrices in dense format, True to have sparse matrices.
Default: False.
mask : array, optional
Array with 0 and 1 (0 indicates a place for a zero probability), shape
can be ``(S, S)`` or ``(A, S, S)``. Default: random.
Returns
-------
out : tuple
``out[0]`` contains the transition probability matrix P and ``out[1]``
contains the reward matrix R. If ``is_sparse=False`` then P is a numpy
array with a shape of ``(A, S, S)`` and R is a numpy array with a shape
of ``(S, A)``. If ``is_sparse=True`` then P and R are tuples of length
``A``, where each ``P[a]`` is a scipy sparse CSR format matrix of shape
``(S, S)`` and each ``R[a]`` is a scipy sparse csr format matrix of
shape ``(S, 1)``.
Examples
--------
>>> import numpy, hiive.mdptoolbox.example
>>> numpy.random.seed(0) # Needed to get the output below
>>> P, R = hiive.mdptoolbox.example.rand(4, 3)
>>> P
array([[[ 0.21977283, 0.14889403, 0.30343592, 0.32789723],
[ 1. , 0. , 0. , 0. ],
[ 0. , 0.43718772, 0.54480359, 0.01800869],
[ 0.39766289, 0.39997167, 0.12547318, 0.07689227]],
<BLANKLINE>
[[ 1. , 0. , 0. , 0. ],
[ 0.32261337, 0.15483812, 0.32271303, 0.19983549],
[ 0.33816885, 0.2766999 , 0.12960299, 0.25552826],
[ 0.41299411, 0. , 0.58369957, 0.00330633]],
<BLANKLINE>
[[ 0.32343037, 0.15178596, 0.28733094, 0.23745272],
[ 0.36348538, 0.24483321, 0.16114188, 0.23053953],
[ 1. , 0. , 0. , 0. ],
[ 0. , 0. , 1. , 0. ]]])
>>> R
array([[[-0.23311696, 0.58345008, 0.05778984, 0.13608912],
[-0.07704128, 0. , -0. , 0. ],
[ 0. , 0.22419145, 0.23386799, 0.88749616],
[-0.3691433 , -0.27257846, 0.14039354, -0.12279697]],
<BLANKLINE>
[[-0.77924972, 0. , -0. , -0. ],
[ 0.47852716, -0.92162442, -0.43438607, -0.75960688],
[-0.81211898, 0.15189299, 0.8585924 , -0.3628621 ],
[ 0.35563307, -0. , 0.47038804, 0.92437709]],
<BLANKLINE>
[[-0.4051261 , 0.62759564, -0.20698852, 0.76220639],
[-0.9616136 , -0.39685037, 0.32034707, -0.41984479],
[-0.13716313, 0. , -0. , -0. ],
[ 0. , -0. , 0.55810204, 0. ]]])
>>> numpy.random.seed(0) # Needed to get the output below
>>> Psp, Rsp = mdptoolbox.example.rand(100, 5, is_sparse=True)
>>> len(Psp), len(Rsp)
(5, 5)
>>> Psp[0]
<100x100 sparse matrix of type '<... 'numpy.float64'>'
with 3296 stored elements in Compressed Sparse Row format>
>>> Rsp[0]
<100x100 sparse matrix of type '<... 'numpy.float64'>'
with 3296 stored elements in Compressed Sparse Row format>
>>> # The number of non-zero elements (nnz) in P and R are equal
>>> Psp[1].nnz == Rsp[1].nnz
True
"""
# making sure the states and actions are more than one
assert S > 1, "The number of states S must be greater than 1."
assert A > 1, "The number of actions A must be greater than 1."
# if the user hasn't specified a mask, then we will make a random one now
if mask is not None:
# the mask needs to be SxS or AxSxS
try:
assert mask.shape in ((S, S), (A, S, S)), (
"'mask' must have dimensions S×S or A×S×S."
)
except AttributeError:
raise TypeError("'mask' must be a numpy array or matrix.")
# generate the transition and reward matrices based on S, A and mask
if is_sparse:
P, R = _randSparse(S, A, mask)
else:
P, R = _randDense(S, A, mask)
return(P, R)
def small():
"""A very small Markov decision process.
The probability transition matrices are::
| | 0.5 0.5 | |
| | 0.8 0.2 | |
P = | |
| | 0.0 1.0 | |
| | 0.1 0.9 | |
The reward matrix is::
R = | 5 10 |
| -1 2 |
Returns
=======
out : tuple
        ``out[0]`` is a numpy array of the probability transition matrices.
        ``out[1]`` is a numpy array of the reward matrix.
Examples
========
>>> import hiive.mdptoolbox.example
>>> P, R = hiive.mdptoolbox.example.small()
>>> P
array([[[ 0.5, 0.5],
[ 0.8, 0.2]],
<BLANKLINE>
[[ 0. , 1. ],
[ 0.1, 0.9]]])
>>> R
array([[ 5, 10],
[-1, 2]])
"""
P = _np.array([[[0.5, 0.5], [0.8, 0.2]], [[0, 1], [0.1, 0.9]]])
R = _np.array([[5, 10], [-1, 2]])
return P, R
def openai(env_name:str, render:bool=False, **kwargs):
"""
    Generate an MDPToolbox-formatted version of a *discrete* OpenAI Gym environment.
    You can find the list of available gym environments here: https://gym.openai.com/envs/#classic_control
    You'll have to look at the source code of the environments for the available kwargs, as they are not well documented.
This function is used to generate a transition probability
(``A`` × ``S`` × ``S``) array ``P`` and a reward (``S`` × ``A``) matrix
``R``.
Parameters
    ----------
env_name : str
The name of the Open AI gym environment to model.
render : bool
Flag to render the environment via gym's `render()` function.
Returns
-------
out : tuple
``out[0]`` contains the transition probability matrix P and ``out[1]``
contains the reward matrix R.
Examples
--------
>>> import hiive.mdptoolbox.example
>>> from gym.envs.toy_text.frozen_lake import generate_random_map
>>> random_map = generate_random_map(size=10, p=0.98)
>>> P, R = hiive.mdptoolbox.example.openai("FrozenLake-v0", desc=random_map)
>>> P
array([[[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
<BLANKLINE>
[0., 0., 0., ..., 1., 0., 0.],
[0., 0., 0., ..., 0., 1., 0.],
[0., 0., 0., ..., 0., 0., 1.]]])
>>> R
array([[ -1., -1., -1., -1., -1., -10.],
[ -1., -1., -1., -1., -1., -10.],
[ -1., -1., -1., -1., -1., -10.],
...,
[ -1., -1., -1., -1., -10., -10.],
[ -1., -1., -1., -1., -10., -10.],
[ -1., -1., -1., -1., -10., -10.]])
>>> P, R = hiive.mdptoolbox.example.openai("Taxi-v3", True)
+---------+
|R: | : :G|
| : | : : |
| : : : : |
| | : | : |
|Y| : |B: |
    +---------+
"""
env = openai_.OpenAI_MDPToolbox(env_name, render, **kwargs)
return env.P, env.R
| [] |
2024-01-10 | metric-space-ai/octopus_function | information_retrieval.py | import os
os.environ["FLASK_ENV"] = "development"
dependencies = [
'pip install -q torch==2.0.1 --index-url https://download.pytorch.org/whl/cu118',
'pip install -q torchvision==0.15.2 --index-url https://download.pytorch.org/whl/cu118',
'pip install -q torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118',
'pip install -q transformers==4.34.0',
'pip install -q langchain==0.0.326',
'pip install -q flask==3.0.0',
'pip install -q pypdf==3.17.0',
'pip install -q cython==3.0.5',
'pip install -q sentence_transformers==2.2.2',
'pip install -q chromadb==0.4.15',
'pip install -q accelerate==0.23.0',
'pip install -q sentencepiece==0.1.99',
'pip install -q pyngrok==7.0.0',
'pip install -q gdown==4.7.1'
]
for command in dependencies:
os.system(command)
# ---------------------------------------------------
# creating the configuration script
# ---------------------------------------------------
config_str = '''
{
"device_map": {
"cuda:0": "20GiB",
"cuda:1": "20GiB",
"cpu": "30GiB"
},
"required_python_version": "cp311",
"models": [
{
"key": "information_source.zip",
"name": "information_source",
"access_token": "https://drive.google.com/uc?id=1O5gQKwcYA_7JzQr8JmhmonVwTov_cWlL"
},
{
"key": "BAAI/bge-small-en",
"name": "embeddings_model",
"access_token": "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"
},
{
"key": "meta-llama/Llama-2-7b-chat-hf",
"name": "llama_model",
"access_token": "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"
}
],
"functions": [
{
"name": "QueryContent",
"description": "When a user queries something, the AI first search into the chroma database to search for answers",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "Answer to the query to search from the database"
}
},
"required": ["prompt"]
},
"input_type": "json",
"return_type": "application/json"
}
]
}
'''
# ---------------------------------------------------
# importing the required libraries
# ---------------------------------------------------
import json
import time
import gdown
import torch
import zipfile
import textwrap
import requests
import threading
from pyngrok import ngrok
from flask import Flask, request, jsonify
from transformers import pipeline
from langchain.chains import RetrievalQA
from langchain.vectorstores import Chroma
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFacePipeline
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
class PROMPT_CONFIG:
def __init__(self):
# ---------------------------------------------------
# forming the LLaMA-2 prompt style
# ---------------------------------------------------
self.B_INST, self.E_INST = "[INST]", "[/INST]"
self.B_SYS, self.E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
self.DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
self.SYS_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible using the context text provided. Your answers should only answer the question once and not have any text after the answer is done.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. """
self.INSTRUCTION = """CONTEXT:/n/n {context}/n
Question: {question}"""
SYSTEM_PROMPT = self.B_SYS + self.DEFAULT_SYSTEM_PROMPT + self.E_SYS
self.prompt_template = self.B_INST + SYSTEM_PROMPT + self.INSTRUCTION + self.E_INST
llama_prompt = PromptTemplate(template=self.prompt_template, input_variables=["context", "question"])
self.chain_type_kwargs = {"prompt": llama_prompt}
constants = PROMPT_CONFIG()
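# Illustration only: render the assembled LLaMA-2 prompt with made-up sample values to show
# the [INST]/<<SYS>> structure that the RetrievalQA chain will receive.
example_rendered_prompt = constants.prompt_template.format(
    context="<retrieved document chunks would go here>",
    question="What genres of music are covered?",
)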
class DataManager:
def __init__(self, key, name, token):
extension = key.split(".")[-1]
self.file = f"{name}.{extension}"
gdown.download(token, output=self.file, quiet=False, fuzzy=True)
#extract file
with zipfile.ZipFile(self.file, 'r') as zip_ref:
zip_ref.extractall(name)
# ---------------------------------------------------
# chat completion functions
# ---------------------------------------------------
def wrap_text_preserve_newlines(text, width=110):
# Split the input text into lines based on newline characters
lines = text.split('\n')
# Wrap each line individually
wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
# Join the wrapped lines back together using newline characters
wrapped_text = '\n'.join(wrapped_lines)
return wrapped_text
def process_llm_response(llm_response):
result = wrap_text_preserve_newlines(llm_response['result'])
query = llm_response['query']
unique_sources = set()
for source in llm_response["source_documents"]:
unique_sources.add(source.metadata['source'])
sources = '\n\nSources: '
for source in unique_sources:
sources = sources + source
return {"query": query, "result": result, "sources": sources}
# ---------------------------------------------------
# create the model manager class
# ---------------------------------------------------
class ModelManager:
def __init__(self, config):
self.config = config
self.models = {}
self.device = self.select_device()
def select_device(self):
if not torch.cuda.is_available():
return "cpu"
device_map = self.config.get('device_map', {})
available_devices = list(device_map.keys())
return available_devices[0] if available_devices else "cpu"
def setup(self):
self.models.clear()
# form the model in sync for the query retrieval
for model_info in self.config["models"]:
if model_info["name"] == 'information_source':
DataManager(model_info["key"], model_info["name"], model_info["access_token"])
# get the embeddings model to embed the pdfs in the folder
if model_info["name"] == 'embeddings_model':
loader = DirectoryLoader(f"{self.config['models'][0]['name']}", glob="./*.pdf", recursive=True, loader_cls=PyPDFLoader)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
texts = text_splitter.split_documents(documents)
embed_model = HuggingFaceBgeEmbeddings(model_name=model_info["key"],
model_kwargs={'device': self.device},
encode_kwargs={'normalize_embeddings': True},
query_instruction="Generate a representation for this sentence for retrieving related articles: ")
vectordb = Chroma.from_documents(documents=texts, embedding=embed_model, persist_directory='db')
retriever = vectordb.as_retriever(search_kwargs={"k": 5})
self.models[model_info["name"]] = retriever
elif model_info['name'] == 'llama_model':
torch.cuda.empty_cache()
#int(self.select_device()[-1])
pipe = pipeline("text-generation", model=model_info["key"], max_length=2048, temperature=0.75, top_p=0.95, repetition_penalty=1.2, token=model_info["access_token"])
llm = HuggingFacePipeline(pipeline=pipe)
qa_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=self.models['embeddings_model'], chain_type_kwargs=constants.chain_type_kwargs, return_source_documents=True)
self.models[model_info["name"]] = qa_chain
return True
def infer(self, parameters):
try:
### BEGIN USER EDITABLE SECTION ###
query_model = self.models["llama_model"]
llm_response = query_model(parameters['prompt'])
torch.cuda.empty_cache() if self.device != "cpu" else None
#llm_response = process_llm_response(llm_response)
return llm_response
### END USER EDITABLE SECTION ###
except Exception as e:
print(f"Error during inference: {e}")
return None
# ---------------------------------------------------
# load configurations for the program block
# ---------------------------------------------------
config = json.loads(config_str)
model_manager = ModelManager(config)
# ---------------------------------------------------
# make the app and the corresponding endpoint functions
# ---------------------------------------------------
app = Flask(__name__)
# startup the application
# ---------------------------------------------------
@app.route('/setup', methods=['GET'])
def setup():
model_manager.setup()
return jsonify({"status": "models loaded successfully"})
@app.route('/<function_name>', methods=['POST'])
def generic_route(function_name):
function_config = next((f for f in config["functions"] if f["name"] == function_name), None)
if not function_config:
return jsonify({"error": "Invalid endpoint"}), 404
if function_config["input_type"] != "json":
return jsonify({"error": f"Unsupported input type {function_config['input_type']}"}), 400
data = request.json
parameters = {k: data[k] for k in function_config["parameters"]["properties"].keys() if k in data}
result = model_manager.infer(parameters)
result = process_llm_response(result)
if result:
return jsonify(result), 200
#return app.response_class(result, content_type=function_config["return_type"])
else:
return jsonify({"error": "Error during inference"}), 500
@app.errorhandler(Exception)
def handle_exception(e):
# Generic exception handler
return jsonify(error=str(e)), 500
# Start the Flask server in a new thread
threading.Thread(target=app.run, kwargs={"use_reloader": False}).start()
# Set up Ngrok to create a tunnel to the Flask server
public_url = ngrok.connect(5000).public_url
function_names = [func['name'] for func in config["functions"]]
print(f" * ngrok tunnel \"{public_url}\" -> \"http://127.0.0.1:{5000}/\"")
# Loop over function_names and print them
for function_name in function_names:
time.sleep(5)
print(f'Endpoint here: {public_url}/{function_name}')
BASE_URL = f"{public_url}"
### BEGIN USER EDITABLE SECTION ###
def setup_test():
response = requests.get(f"{BASE_URL}/setup")
# Check if the request was successful
if response.status_code == 200:
return (True, response.json()) # True indicates success
else:
return (False, response.json()) # False indicates an error
def infer_test(prompt="Tell me about music"):
# create prompt
prompt = "Question: " + prompt + " Answer:"
headers = {
"Content-Type": "application/json"
}
data = {
"prompt": prompt
}
response = requests.post(f"{BASE_URL}/QueryContent", headers=headers, json=data)
if response.status_code == 200:
        # Save the answer to a text file
dict_response = response.json()
with open("output_text.txt", "w") as file:
file.write(str(dict_response['result']))
file.write(str(dict_response['sources']))
print("Answer saved as output_text.txt!")
return (True, response.json()) # True indicates success
else:
return (False, response.json()) # False indicates an error
def infer_test_url(prompt="which city is this?"):
    # create prompt
prompt = "Question: " + prompt + " Answer:"
headers = {
"Content-Type": "application/json"
}
data = {
"prompt": prompt
}
response = requests.post(f"{BASE_URL}/QueryContent", headers=headers, json=data)
if response.status_code == 200:
dict_response = response.json()
with open("output_text.txt", "w") as file:
file.write(str(dict_response['result']))
file.write(str(dict_response['sources']))
print("Answer saved as output_text.txt!")
return (True, response.json()) # True indicates success
else:
return (False, response.json()) # False indicates an error
### END USER EDITABLE SECTION ###
# Testing
result_setup = setup_test()
result_infer = infer_test()
print(result_infer)
result_infer_url = infer_test_url("Tell me about books")
print(result_infer_url)
| [
"question",
"context",
"Question: PLACEHOLDER Answer:"
] |
2024-01-10 | Quentin-Anthony/NeMo | nemo~collections~nlp~modules~common~megatron~transformer.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer."""
import math
from contextlib import nullcontext
from typing import Any, Callable, Optional
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from nemo.collections.common.parts.adapter_modules import LinearAdapterConfig
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
InfusedAdapterConfig,
MLPInfusedAdapterConfig,
ParallelLinearAdapterConfig,
)
from nemo.collections.nlp.modules.common.megatron.fused_bias_dropout_add import (
bias_dropout_add,
bias_dropout_add_fused_inference,
bias_dropout_add_fused_train,
dropout_add,
)
from nemo.collections.nlp.modules.common.megatron.fused_bias_geglu import fused_bias_geglu
from nemo.collections.nlp.modules.common.megatron.fused_bias_gelu import fused_bias_gelu
from nemo.collections.nlp.modules.common.megatron.fused_layer_norm import get_layer_norm
from nemo.collections.nlp.modules.common.megatron.fused_softmax import MatchedScaleMaskSoftmax
from nemo.collections.nlp.modules.common.megatron.layer_norm_1p import LayerNorm1P
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.rotary_pos_embedding import apply_rotary_pos_emb
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults, attention_mask_func, erf_gelu
from nemo.collections.nlp.modules.common.megatron.utils import openai_gelu as openai_gelu_func
from nemo.core import adapter_mixins
from nemo.utils import logging
try:
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.enums import AttnMaskType, AttnType, ModelType
from apex.transformer.utils import divide as safe_divide
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.normalization import MixedFusedRMSNorm
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
ModelType = AttnMaskType = AttnType = LayerType = ApexGuardDefaults()
try:
from transformer_engine.pytorch import TransformerLayer, fp8_autocast
from transformer_engine.common import recipe
from transformer_engine.pytorch.distributed import checkpoint as te_checkpoint
HAVE_TE = True
except:
HAVE_TE = False
# fake missing class
class TransformerLayer(ApexGuardDefaults):
def __init__(self):
super().__init__()
logging.warning(
"Transformer Engine was not found. transformer_engine.pytorch.transformer.TransformerLayer will not work. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
""" We use the following notation throughout this file:
h: hidden size
n: number of attention heads
p: number of model parallel partitions
np: n/p
hp: h/p
hn: h/n
b: batch size
s: sequence length
l: number of layers
Transformer takes input of size [s, b, h] and returns a
tensor of the same size. We use the following arguments:
hyperparameters: transformer hyperparameters
"""
class ParallelMLP(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""MLP.
MLP will take the input with h hidden state, project it to 4*h
hidden dimension, perform nonlinear transformation, and project the
state back into h hidden dimension.
"""
def __init__(
self,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
use_cpu_initialization=False,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
transformer_block_type='pre_ln',
normalization='layernorm',
layernorm_epsilon=1e-5,
persist_layer_norm=False,
sequence_parallel=False,
gradient_accumulation_fusion=False,
dropout=0.0,
):
super(ParallelMLP, self).__init__()
self.activation = activation
self.bias = bias
self.transformer_block_type = transformer_block_type
self.normalization = normalization
self.layernorm_epsilon = layernorm_epsilon
self.persist_layer_norm = persist_layer_norm
self.activation = activation
self.dropout = dropout
self.set_accepted_adapter_types([MLPInfusedAdapterConfig._target_])
if activation not in ['gelu', 'geglu', 'reglu', 'swiglu']:
raise ValueError(f"Activation {activation} not supported. Only gelu, geglu, reglu, swiglu are supported.")
no_async_tensor_model_parallel_allreduce = (
parallel_state.get_tensor_model_parallel_world_size() == 1 or sequence_parallel
)
# Project to 4h.
self.dense_h_to_4h = tensor_parallel.ColumnParallelLinear(
hidden_size,
            ffn_hidden_size,  # NOTE: When using geglu, set ffn dim to 2/3 of its usual value to keep overall params the same.
gather_output=False,
init_method=init_method,
skip_bias_add=True,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
if activation in ['geglu', 'reglu', 'swiglu']:
# Separate linear layer for *GLU activations.
# Source: https://github.com/huggingface/transformers/blob/bee361c6f1f7704f8c688895f2f86f6e5ff84727/src/transformers/models/t5/modeling_t5.py#L292
self.dense_h_to_4h_2 = tensor_parallel.ColumnParallelLinear(
hidden_size,
                ffn_hidden_size,  # NOTE: When using *glu, set ffn dim to 2/3 of its usual value to keep overall params the same.
gather_output=False,
init_method=init_method,
skip_bias_add=True,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
self.glu_activation_family = activation in ['geglu', 'reglu', 'swiglu']
bias_activation_fusion_unavailable = activation in ['reglu', 'swiglu']
if bias_activation_fusion_unavailable and bias_activation_fusion:
raise ValueError(
f"Cannot use bias_activation_fusion with {activation} activation. Please turn bias gelu fusion off."
)
        if self.glu_activation_family and onnx_safe and bias_activation_fusion:
            raise ValueError(
                f"Cannot use onnx_safe with the specified activation function and bias_activation_fusion: {activation}. Please turn onnx_safe off."
            )
if bias_activation_fusion and not bias:
raise ValueError(
f"Cannot use bias_activation_fusion without bias terms. Please set bias=True or bias_activation_fusion=False."
)
self.bias_activation_fusion = bias_activation_fusion
# Give openai_gelu precedence over other activations if set, for HF compatibility. Normally this is off and shouldn't affect regular model training.
if openai_gelu:
self.activation_func = openai_gelu_func
elif activation in ["gelu", "geglu"]:
self.activation_func = F.gelu
elif onnx_safe:
self.activation_func = erf_gelu
elif activation == "reglu":
self.activation_func = F.relu
elif activation == "swiglu":
# SiLU or sigmoid linear unit is the same as swish with beta = 1 (which is what https://arxiv.org/pdf/2002.05202.pdf uses.)
self.activation_func = F.silu
# Project back to h.
self.dense_4h_to_h = tensor_parallel.RowParallelLinear(
ffn_hidden_size,
hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.normalization = get_layer_norm(
ffn_hidden_size // get_tensor_model_parallel_world_size(), layernorm_epsilon, persist_layer_norm
)
elif normalization == 'layernorm1p':
self.normalization = LayerNorm1P(
ffn_hidden_size // get_tensor_model_parallel_world_size(),
layernorm_epsilon,
sequence_parallel_enabled=sequence_parallel,
)
else:
self.normalization = MixedFusedRMSNorm(
ffn_hidden_size // get_tensor_model_parallel_world_size(), layernorm_epsilon
)
def forward(self, hidden_states):
# [s, b, 4hp]
intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)
if self.glu_activation_family:
intermediate_parallel_2, bias_parallel_2 = self.dense_h_to_4h_2(hidden_states)
if self.bias_activation_fusion:
if self.activation == 'gelu':
intermediate_parallel = fused_bias_gelu(intermediate_parallel, bias_parallel)
elif self.activation == 'geglu':
intermediate_parallel = fused_bias_geglu(
intermediate_parallel, bias_parallel, intermediate_parallel_2, bias_parallel_2
)
elif self.activation in ['reglu', 'swiglu'] or (
self.glu_activation_family and not self.bias_activation_fusion
):
if bias_parallel is not None:
intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel) * (
intermediate_parallel_2 + bias_parallel_2
)
else:
intermediate_parallel = self.activation_func(intermediate_parallel) * intermediate_parallel_2
else:
if bias_parallel is not None:
intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel)
else:
intermediate_parallel = self.activation_func(intermediate_parallel)
if self.dropout > 0:
intermediate_parallel = F.dropout(intermediate_parallel, p=self.dropout, training=self.training)
infused_adapter = self.get_from_adapter_layer(AdapterName.MLP_INFUSED)
if infused_adapter:
intermediate_parallel = infused_adapter(intermediate_parallel)
# Normformer normalization
if self.transformer_block_type == 'normformer':
intermediate_parallel = self.normalization(intermediate_parallel)
# [s, b, h]
output, output_bias = self.dense_4h_to_h(intermediate_parallel)
return output, output_bias
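# For orientation (a minimal single-device sketch, not part of NeMo's API): with tensor
# parallelism, fusion, adapters and normformer normalization stripped away, ParallelMLP's
# forward reduces to the familiar Linear -> activation -> Linear block below.
def _dense_mlp_sketch(hidden_size, ffn_hidden_size):
    return torch.nn.Sequential(
        torch.nn.Linear(hidden_size, ffn_hidden_size),  # plays the role of dense_h_to_4h
        torch.nn.GELU(),                                # default 'gelu' activation_func
        torch.nn.Linear(ffn_hidden_size, hidden_size),  # plays the role of dense_4h_to_h
    )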
class SwitchMLP(MegatronModule):
"""Top-1 MoE
    Currently supports Sinkhorn-based expert routing."""
def __init__(
self,
num_experts,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
use_cpu_initialization=False,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
transformer_block_type='pre_ln',
normalization='layernorm',
layernorm_epsilon=1e-5,
persist_layer_norm=False,
sequence_parallel=False,
gradient_accumulation_fusion=False,
dropout=0.0,
):
super(SwitchMLP, self).__init__()
self.num_experts = num_experts
self.route_algo = SwitchMLP.sinkhorn
self.router = tensor_parallel.RowParallelLinear(
hidden_size,
num_experts,
input_is_parallel=False,
init_method=init_method,
skip_bias_add=False,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
mlp_args = {
'init_method': init_method,
'output_layer_init_method': output_layer_init_method,
'hidden_size': hidden_size,
'ffn_hidden_size': ffn_hidden_size,
'use_cpu_initialization': use_cpu_initialization,
'bias_activation_fusion': bias_activation_fusion,
'openai_gelu': openai_gelu,
'onnx_safe': onnx_safe,
'activation': activation,
'bias': bias,
'transformer_block_type': transformer_block_type,
'normalization': normalization,
'layernorm_epsilon': layernorm_epsilon,
'persist_layer_norm': persist_layer_norm,
'sequence_parallel': sequence_parallel,
'gradient_accumulation_fusion': gradient_accumulation_fusion,
'dropout': dropout,
}
self.experts = torch.nn.ModuleList([ParallelMLP(**mlp_args) for _ in range(num_experts)])
def forward(self, hidden_states):
hidden_shape = hidden_states.shape
route, _ = self.router(hidden_states)
route = route.view(-1, self.num_experts)
if self.training:
with torch.no_grad():
norm_route = self.route_algo(
route.detach().to(dtype=torch.float32)
) # explicit fp32 conversion for stability
_, max_ind = torch.max(norm_route, dim=1)
route = torch.sigmoid(route)
max_prob = route[torch.arange(route.size(0)), max_ind]
else:
route = torch.sigmoid(route)
max_prob, max_ind = torch.max(route, dim=1)
max_prob = torch.unsqueeze(max_prob, 1)
hidden_states = hidden_states.view(-1, hidden_shape[-1])
local_indices = (max_ind == 0).nonzero()
hidden = hidden_states[local_indices, :]
output, output_bias = self.experts[0](hidden)
output_bias = output_bias.expand_as(output)
output_total = torch.empty_like(hidden_states, dtype=output.dtype)
output_bias_total = torch.empty_like(hidden_states, dtype=output_bias.dtype)
output_total[local_indices, :] = output
output_bias_total[local_indices, :] = output_bias
for expert_num, expert in enumerate(self.experts):
if expert_num == 0:
continue
local_indices = (max_ind == expert_num).nonzero()
hidden = hidden_states[local_indices, :]
output, output_bias = expert(hidden)
output_bias = output_bias.expand_as(output)
output_total[local_indices, :] = output
output_bias_total[local_indices, :] = output_bias
output_total = output_total * max_prob
output_bias_total = output_bias_total * max_prob
output_total = output_total.view(hidden_shape)
output_bias_total = output_bias_total.view(hidden_shape)
return output_total, output_bias_total
@classmethod
def sinkhorn(cls, cost, tol=0.0001):
"Megatron-LMs sinkhorn implementation"
cost = torch.exp(cost)
d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype)
d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype)
eps = 0.00000001
error = 1e9
d1_old = d1
while error > tol:
d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps)
d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps)
error = torch.mean(torch.abs(d1_old - d1))
d1_old = d1
return d1 * cost * d0.unsqueeze(1)
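# Illustration (defined for reference, never called): the Sinkhorn balancing above rescales
# exp(routing logits) so that the marginals become approximately uniform, i.e. each row sums
# to ~1/num_tokens and each column to ~1/num_experts, which encourages balanced expert
# assignment during training.
def _sinkhorn_demo(num_tokens=8, num_experts=4):
    logits = torch.randn(num_tokens, num_experts)
    balanced = SwitchMLP.sinkhorn(logits)
    row_sums = balanced.sum(dim=1)  # each entry close to 1 / num_tokens
    col_sums = balanced.sum(dim=0)  # each entry close to 1 / num_experts
    return row_sums, col_sums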
class CoreAttention(MegatronModule):
""" Region where selective activation recomputation is applied.
See Figure 3. in Reducing Activation Recomputation in Large Transformer Models
https://arxiv.org/pdf/2205.05198.pdf for more details.
"""
def __init__(
self,
layer_number,
num_attention_heads,
hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=AttnMaskType.padding,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
masked_softmax_fusion=True,
attention_dropout=0.1,
sequence_parallel=False,
normalize_attention_scores=True,
):
super(CoreAttention, self).__init__()
self.precision = precision
self.fp16 = precision == 16
self.bf16 = precision == 'bf16'
self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
self.attention_softmax_in_fp32 = False
if self.apply_query_key_layer_scaling:
self.attention_softmax_in_fp32 = True
self.layer_number = max(1, layer_number)
self.attention_type = attention_type
self.attn_mask_type = attn_mask_type
self.sequence_parallel = sequence_parallel
# If True, will scale attention scores by 1 / sqrt(hidden_size_per_attention_head).
        # This arg has been provided mostly to support weight conversion of Huggingface models (e.g. T5v1.1).
self.normalize_attention_scores = normalize_attention_scores
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
projection_size = kv_channels * num_attention_heads
# Per attention head and per partition values.
world_size = parallel_state.get_tensor_model_parallel_world_size()
self.hidden_size_per_partition = safe_divide(projection_size, world_size)
self.hidden_size_per_attention_head = safe_divide(projection_size, num_attention_heads)
self.num_attention_heads_per_partition = safe_divide(num_attention_heads, world_size)
self.num_attention_heads_partition_offset = (
self.num_attention_heads_per_partition * parallel_state.get_tensor_model_parallel_rank()
)
coeff = None
self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
if self.apply_query_key_layer_scaling:
coeff = self.layer_number
self.norm_factor *= coeff
self.scale_mask_softmax = MatchedScaleMaskSoftmax(
self.fp16,
self.bf16,
self.attn_mask_type,
masked_softmax_fusion,
attention_mask_func,
self.attention_softmax_in_fp32,
coeff,
)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs on different number of parallel partitions but
# on average it should not be partition dependent.
self.attention_dropout = torch.nn.Dropout(attention_dropout)
def forward(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
layer_past=None,
get_key_value=False,
rotary_pos_emb=None,
relative_position_bias=None,
headscale_tensor=None,
):
# ===================================
# Raw attention scores. [b, np, s, s]
# ===================================
# [b, np, sq, sk]
output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
# TODO: figure out how to do this
# apply relative positional encoding (rotary embedding)
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
query_layer = apply_rotary_pos_emb(query_layer, q_pos_emb)
key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb)
# TODO, can apply positional embedding to value_layer so it has
# absolute positional embedding.
# otherwise, only relative positional embedding takes effect
# value_layer = apply_rotary_pos_emb(value_layer, k_pos_emb)
# [sq, b, np, hn] -> [sq, b * np, hn]
query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
# [sk, b, np, hn] -> [sk, b * np, hn]
key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
        # preallocating input tensor: [b * np, sq, sk]
matmul_input_buffer = torch.empty(
output_size[0] * output_size[1],
output_size[2],
output_size[3],
dtype=query_layer.dtype,
device=torch.cuda.current_device(),
)
# Raw attention scores. [b * np, sq, sk]
matmul_result = torch.baddbmm(
matmul_input_buffer,
query_layer.transpose(0, 1), # [b * np, sq, hn]
key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
beta=0.0,
alpha=(1.0 / self.norm_factor) if self.normalize_attention_scores else 1.0,
)
# change view to [b, np, sq, sk]
attention_scores = matmul_result.view(*output_size)
if relative_position_bias is not None:
attention_scores += relative_position_bias[
:,
self.num_attention_heads_partition_offset : self.num_attention_heads_partition_offset
+ self.num_attention_heads_per_partition,
: attention_scores.size(2),
: attention_scores.size(3),
]
# ==================================================
# Update attention mask for inference. [b, np, sq, sk]
# ==================================================
if get_key_value:
with torch.no_grad():
if layer_past is not None:
attention_mask = attention_mask[
..., attention_scores.size(3) - 1, : attention_scores.size(3)
].unsqueeze(2)
else:
attention_mask = attention_mask[..., : attention_scores.size(3), : attention_scores.size(3)]
# ===========================
# Attention probs and dropout
# ===========================
# attention scores and attention mask [b, np, sq, sk]
attention_probs = self.scale_mask_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
if not self.sequence_parallel:
with tensor_parallel.random.get_cuda_rng_tracker().fork():
attention_probs = self.attention_dropout(attention_probs)
else:
attention_probs = self.attention_dropout(attention_probs)
# =========================
# Context layer. [sq, b, hp]
# =========================
# value_layer -> context layer.
# [sk, b, np, hn] --> [b, np, sq, hn]
# context layer shape: [b, np, sq, hn]
output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
# change view [sk, b * np, hn]
value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
# change view [b * np, sq, sk]
attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
# matmul: [b * np, sq, hn]
context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
# change view [b, np, sq, hn]
context_layer = context_layer.view(*output_size)
if headscale_tensor is not None:
context_layer = context_layer * headscale_tensor
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
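# The shape bookkeeping in CoreAttention.forward is easier to follow on plain
# tensors. The helper below is an illustrative sketch only (it is not part of
# the original module and uses made-up sizes); it reproduces the same
# [s, b, np, hn] -> [b * np, s, hn] folding, scores = Q K^T / sqrt(hn),
# softmax, and probs @ V steps with unfused torch ops.
def _sketch_core_attention_shapes():
    import math

    import torch

    sq, sk, b, np_, hn = 8, 8, 2, 4, 16  # seq lengths, batch, heads, head dim (made-up)
    q = torch.randn(sq, b, np_, hn)
    k = torch.randn(sk, b, np_, hn)
    v = torch.randn(sk, b, np_, hn)
    # [s, b, np, hn] -> [b * np, s, hn], matching the view/transpose above
    q_ = q.view(sq, b * np_, hn).transpose(0, 1)
    k_ = k.view(sk, b * np_, hn).transpose(0, 1)
    v_ = v.view(sk, b * np_, hn).transpose(0, 1)
    scores = torch.bmm(q_, k_.transpose(1, 2)) / math.sqrt(hn)  # [b * np, sq, sk]
    probs = torch.softmax(scores, dim=-1)
    context = torch.bmm(probs, v_)  # [b * np, sq, hn]
    # [b * np, sq, hn] -> [sq, b, np * hn], the final context layout
    return context.transpose(0, 1).reshape(sq, b, np_ * hn)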
class ParallelAttention(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""Parallel self-attention layer abstract class.
Self-attention layer takes input with size [s, b, h]
and returns output of the same size.
"""
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
num_attention_heads,
hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=AttnMaskType.padding,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
use_cpu_initialization=False,
masked_softmax_fusion=True,
attention_dropout=0.1,
layer_type=None,
megatron_legacy=False,
bias=True,
headscale=False,
activations_checkpoint_granularity=None,
sequence_parallel=False,
gradient_accumulation_fusion=False,
normalize_attention_scores=True,
):
super(ParallelAttention, self).__init__()
self.layer_number = max(1, layer_number)
self.attention_type = attention_type
self.attn_mask_type = attn_mask_type
self.normalize_attention_scores = normalize_attention_scores
self.megatron_legacy = megatron_legacy
self.set_accepted_adapter_types([InfusedAdapterConfig._target_])
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
projection_size = kv_channels * num_attention_heads
# Per attention head and per partition values.
world_size = parallel_state.get_tensor_model_parallel_world_size()
self.hidden_size_per_attention_head = safe_divide(projection_size, num_attention_heads)
self.num_attention_heads_per_partition = safe_divide(num_attention_heads, world_size)
self.num_attention_heads_partition_offset = (
self.num_attention_heads_per_partition * parallel_state.get_tensor_model_parallel_rank()
)
no_async_tensor_model_parallel_allreduce = (
parallel_state.get_tensor_model_parallel_world_size() == 1 or sequence_parallel
)
# Strided linear layer.
if attention_type == AttnType.self_attn:
self.query_key_value = tensor_parallel.ColumnParallelLinear(
hidden_size,
3 * projection_size,
gather_output=False,
init_method=init_method,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
else:
assert attention_type == AttnType.cross_attn
self.query = tensor_parallel.ColumnParallelLinear(
hidden_size,
projection_size,
gather_output=False,
init_method=init_method,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
self.key_value = tensor_parallel.ColumnParallelLinear(
hidden_size,
2 * projection_size,
gather_output=False,
init_method=init_method,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
no_async_tensor_model_parallel_allreduce=no_async_tensor_model_parallel_allreduce,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
self.core_attention = CoreAttention(
layer_number=self.layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=self.attention_type,
attn_mask_type=self.attn_mask_type,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
sequence_parallel=sequence_parallel,
normalize_attention_scores=normalize_attention_scores,
)
# Output.
self.dense = tensor_parallel.RowParallelLinear(
projection_size,
hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
use_cpu_initialization=use_cpu_initialization,
bias=bias,
sequence_parallel_enabled=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
self.headscale = headscale
if headscale:
self.head_scale_tensor = torch.nn.Parameter(
torch.ones(1, self.num_attention_heads_per_partition, 1, 1), requires_grad=True
)
# Inference key-value memory
self.inference_key_memory = None
self.inference_value_memory = None
self.inference_current_sequence_len = 0
# relative position embedding
self.layer_type = layer_type
def _checkpointed_attention_forward(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
rotary_pos_emb=None,
relative_position_bias=None,
headscale_tensor=None,
):
"""Forward method with activation checkpointing."""
def custom_forward(*inputs):
if len(inputs) == 7:
query_layer = inputs[0]
key_layer = inputs[1]
value_layer = inputs[2]
attention_mask = inputs[3]
rotary_pos_emb = inputs[4]
relative_position_bias = inputs[5]
headscale_tensor = inputs[6]
elif len(inputs) == 8:
query_layer = inputs[0]
key_layer = inputs[1]
value_layer = inputs[2]
attention_mask = inputs[3]
rotary_pos_emb = (inputs[4], inputs[5])
relative_position_bias = inputs[6]
headscale_tensor = inputs[7]
else:
raise ValueError('unexpected number of inputs')
output_ = self.core_attention(
query_layer,
key_layer,
value_layer,
attention_mask,
rotary_pos_emb=rotary_pos_emb,
relative_position_bias=relative_position_bias,
headscale_tensor=headscale_tensor,
)
return output_
if rotary_pos_emb is None:
rot_tuple = (rotary_pos_emb,)
else:
rot_tuple = (rotary_pos_emb[0], rotary_pos_emb[1])
hidden_states = tensor_parallel.checkpoint(
custom_forward,
False,
query_layer,
key_layer,
value_layer,
attention_mask,
*rot_tuple,
relative_position_bias,
headscale_tensor,
)
return hidden_states
def _allocate_memory(self, inference_max_sequence_len, batch_size, dtype):
return torch.empty(
inference_max_sequence_len,
batch_size,
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
dtype=dtype,
device=torch.cuda.current_device(),
)
def _transpose_last_dim(self, mixed_layer, num_splits, num_splits_first):
input_shape = mixed_layer.size()
if num_splits_first:
"""[s, b, num_splits * np * hn]
-->(view) [s, b, num_splits, np, hn]
-->(transpose) [s, b, np, num_splits, hn]
-->(view) [s, b, np * num_splits * hn] """
intermediate_shape = input_shape[:-1] + (
num_splits,
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
mixed_layer = mixed_layer.view(*intermediate_shape)
mixed_layer = mixed_layer.transpose(-2, -3).contiguous()
else:
"""[s, b, np * hn * num_splits]
-->(view) [s, b, np, hn, num_splits]
-->(transpose) [s, b, np, num_splits, hn]
-->(view) [s, b, np * num_splits * hn] """
intermediate_shape = input_shape[:-1] + (
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
num_splits,
)
mixed_layer = mixed_layer.view(*intermediate_shape)
mixed_layer = mixed_layer.transpose(-1, -2).contiguous()
mixed_layer = mixed_layer.view(*input_shape)
return mixed_layer
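# Worked example for _transpose_last_dim (illustrative, not from the original
# code): with np = 2 heads, hn elements per head, and num_splits = 3 (q/k/v),
# num_splits_first=True reinterprets the last dimension as [split, head, hn]
# and swaps it to [head, split, hn], i.e. a legacy layout of
# [q_h0 q_h1 | k_h0 k_h1 | v_h0 v_h1] becomes [q_h0 k_h0 v_h0 | q_h1 k_h1 v_h1],
# which is the per-head layout that split_tensor_along_last_dim expects below.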
def forward(
self,
hidden_states,
attention_mask,
layer_past=None,
get_key_value=False,
encoder_output=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None, # rotary positional embedding
relative_position_bias=None,
checkpoint_core_attention=False,
):
# hidden_states: [sq, b, h]
# =================================================
# Pre-allocate memory for key-values for inference.
# =================================================
if set_inference_key_value_memory:
assert inference_max_sequence_len and inference_max_sequence_len > 0
self.inference_key_memory = self._allocate_memory(
inference_max_sequence_len, hidden_states.size(1), hidden_states.dtype
)
self.inference_value_memory = self._allocate_memory(
inference_max_sequence_len, hidden_states.size(1), hidden_states.dtype
)
self.inference_current_sequence_len = 0
# Some consistency check.
if inference_max_sequence_len:
assert self.inference_current_sequence_len < self.inference_key_memory.size(0)
assert inference_max_sequence_len == self.inference_key_memory.size(0)
# This is added for safety. In case inference_max_sequence_len
# is not provided, make sure there is no potential memory left
# from previous inference.
if not inference_max_sequence_len:
self.inference_key_memory = None
self.inference_value_memory = None
# =====================
# Query, Key, and Value
# =====================
if self.attention_type == AttnType.self_attn:
# Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
mixed_x_layer, _ = self.query_key_value(hidden_states)
# [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
new_tensor_shape = mixed_x_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
3 * self.hidden_size_per_attention_head,
)
if self.megatron_legacy:
mixed_x_layer = self._transpose_last_dim(mixed_x_layer, 3, True)
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
# [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
(query_layer, key_layer, value_layer) = tensor_parallel.split_tensor_along_last_dim(mixed_x_layer, 3)
else:
# Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
mixed_kv_layer, _ = self.key_value(encoder_output)
# [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
new_tensor_shape = mixed_kv_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
2 * self.hidden_size_per_attention_head,
)
if self.megatron_legacy:
mixed_kv_layer = self._transpose_last_dim(mixed_kv_layer, 2, True)
mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)
# [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
(key_layer, value_layer) = tensor_parallel.split_tensor_along_last_dim(mixed_kv_layer, 2)
# Attention head [sq, b, h] --> [sq, b, hp]
query_layer, _ = self.query(hidden_states)
# [sq, b, hp] --> [sq, b, np, hn]
new_tensor_shape = query_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
query_layer = query_layer.view(*new_tensor_shape)
if self.is_adapter_available():
key_infused_adapter = self.get_from_adapter_layer(AdapterName.KEY_INFUSED)
value_infused_adapter = self.get_from_adapter_layer(AdapterName.VALUE_INFUSED)
if key_infused_adapter:
assert value_infused_adapter is not None, "key_infused_adapter is set but value_infused_adapter was not found."
kls = key_layer.shape
key_layer = key_infused_adapter(key_layer.reshape(kls[0], kls[1], -1)).reshape(kls)
if value_infused_adapter:
assert key_infused_adapter is not None, "value_infused_adapter is set but key_infused_adapter was not found."
vls = value_layer.shape
value_layer = value_infused_adapter(value_layer.reshape(vls[0], vls[1], -1)).reshape(vls)
# ===================================================
# Adjust key, value, and attention mask for inference
# ===================================================
# duplicate the pos_emb for self attention
if rotary_pos_emb is not None:
rotary_pos_emb = rotary_pos_emb if isinstance(rotary_pos_emb, tuple) else ((rotary_pos_emb,) * 2)
if inference_max_sequence_len:
# Adjust the range variables.
start = self.inference_current_sequence_len
self.inference_current_sequence_len += key_layer.size(0)
end = self.inference_current_sequence_len
# Copy key and values.
self.inference_key_memory[start:end, ...] = key_layer
self.inference_value_memory[start:end, ...] = value_layer
key_layer = self.inference_key_memory[:end, ...]
value_layer = self.inference_value_memory[:end, ...]
# Adjust attention mask
attention_mask = attention_mask[..., start:end, :end]
# adjust the key rotary positional embedding
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
if not set_inference_key_value_memory:
# In inference, we compute one token at a time.
# Select the correct positional embedding.
q_pos_emb = q_pos_emb[end - 1 : end]
k_pos_emb = k_pos_emb[:end, :, :, :]
rotary_pos_emb = (q_pos_emb, k_pos_emb)
if layer_past is not None:
past_key, past_value = layer_past
key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=0)
value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=0)
if get_key_value:
present = (key_layer, value_layer)
if checkpoint_core_attention:
context_layer = self._checkpointed_attention_forward(
query_layer,
key_layer,
value_layer,
attention_mask,
rotary_pos_emb=rotary_pos_emb,
relative_position_bias=relative_position_bias,
headscale_tensor=self.head_scale_tensor if self.headscale else None,
)
else:
context_layer = self.core_attention(
query_layer,
key_layer,
value_layer,
attention_mask,
layer_past=layer_past,
get_key_value=get_key_value,
rotary_pos_emb=rotary_pos_emb,
relative_position_bias=relative_position_bias,
headscale_tensor=self.head_scale_tensor if self.headscale else None,
)
# =================
# Output. [sq, b, h]
# =================
output, bias = self.dense(context_layer)
if get_key_value:
output = [output, present]
return output, bias
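# The incremental-decoding path above keeps a pre-allocated key/value cache and
# writes each new step into a sliding [start:end] window. The helper below is
# an illustrative sketch only (made-up sizes, plain tensors, no tensor
# parallelism) of that caching pattern: allocate once, append one token per
# step, and attend over everything cached so far.
def _sketch_kv_cache_update():
    import torch

    max_len, b, np_, hn = 16, 2, 4, 8  # made-up cache geometry
    key_memory = torch.empty(max_len, b, np_, hn)
    value_memory = torch.empty(max_len, b, np_, hn)
    current_len = 0
    key_layer = value_layer = None
    for _ in range(3):  # three single-token decode steps
        new_key = torch.randn(1, b, np_, hn)
        new_value = torch.randn(1, b, np_, hn)
        start, end = current_len, current_len + new_key.size(0)
        key_memory[start:end] = new_key
        value_memory[start:end] = new_value
        current_len = end
        # attention for this step sees all cached positions so far
        key_layer, value_layer = key_memory[:end], value_memory[:end]
    return key_layer.shape, value_layer.shape  # both (3, b, np_, hn)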
class ParallelChunkedCrossAttention(MegatronModule):
"""Parallel chunked cross-attention layer class.
Cross-attention layer takes input with size [s, b, h]
and returns output of the same size.
"""
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
num_attention_heads,
hidden_size,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
use_cpu_initialization=False,
masked_softmax_fusion=True,
attention_dropout=0.1,
megatron_legacy=False,
chunk_size=64, # each chunk, how many tokens
bias=True,
headscale=False,
gradient_accumulation_fusion=False,
normalize_attention_scores=True,
):
super(ParallelChunkedCrossAttention, self).__init__()
self.cross_attention = ParallelAttention(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=AttnType.cross_attn,
attn_mask_type=AttnMaskType.padding,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
use_cpu_initialization=use_cpu_initialization,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
megatron_legacy=megatron_legacy,
bias=bias,
headscale=headscale,
gradient_accumulation_fusion=gradient_accumulation_fusion,
normalize_attention_scores=normalize_attention_scores,
)
self.chunk_size = chunk_size
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None,
checkpoint_core_attention=False,
):
if checkpoint_core_attention:
raise ValueError(
'checkpoint_core_attention during forward not implemented yet for ParallelChunkedCrossAttention'
)
# hidden_states is assumed to have dimension [token length, batch, dimension]
# derive variables
# encoder_output here is the retrieved context
context = encoder_output
# context is assumed to have dimension [num_chunks, num_neighbors, context_token_len, batch, dimension]
chunk_size = self.chunk_size
b, n, dim = (
hidden_states.shape[1],
hidden_states.shape[0],
hidden_states.shape[2],
)
default_bias = self.cross_attention.dense.bias
if set_inference_key_value_memory:
seq_index = (n // chunk_size) * chunk_size
self.current_len = n
elif inference_max_sequence_len is not None:
# only handles single token increment
assert n == 1
self.current_len += n
token_pos = (self.current_len - 1) % chunk_size
chunk_id = self.current_len // chunk_size
if chunk_id <= 0:
# if sequence length less than chunk size, do an early return
return torch.zeros_like(hidden_states), default_bias
causal_padding = chunk_size - 1
# pad it as a full chunk, put it at the end of the chunk position
hidden_states = F.pad(hidden_states, (0, 0, 0, 0, causal_padding, 0), value=0.0)
# only use the relevant context
context = context[chunk_id - 1 : chunk_id, :, :, :, :]
attention_mask = rearrange(attention_mask, '(b k) 1 q v -> b k 1 q v', b=b)
# select the relevant chunk attn mask
attention_mask = attention_mask[:, chunk_id - 1]
seq_index = chunk_size
else:
# this is normal forward without inference
seq_index = (n // chunk_size) * chunk_size
# if sequence length less than chunk size, do an early return
if n < self.chunk_size and set_inference_key_value_memory and inference_max_sequence_len is not None:
return torch.zeros_like(hidden_states), default_bias
num_chunks, num_retrieved = (
context.shape[-5],
context.shape[-4],
)
# causal padding
causal_padding = chunk_size - 1
x = F.pad(hidden_states, (0, 0, 0, 0, -causal_padding, causal_padding), value=0.0)
# remove sequence which is ahead of the neighbors retrieved (during inference)
# seq_index = (n // chunk_size) * chunk_size
x, x_remainder = x[:seq_index], x[seq_index:]
seq_remain_len = x_remainder.shape[0]
# take care of rotary positional embedding
# make sure query positions are properly shifted to the future
q_pos_emb, k_pos_emb = rotary_pos_emb
# currently the implementation is broken:
# q needs to extend to causal_padding, and just do
# q_pos_emb = F.pad(q_pos_emb, (0, 0, -causal_padding, 0), value = 0.)
if inference_max_sequence_len is not None and not set_inference_key_value_memory:
q_pos_emb = F.pad(
q_pos_emb, (0, 0, 0, 0, 0, 0, -causal_padding - token_pos, -causal_padding + token_pos), value=0.0
)
else:
q_pos_emb = F.pad(q_pos_emb, (0, 0, 0, 0, 0, 0, -causal_padding, 0), value=0.0)
k_pos_emb = repeat(k_pos_emb, 'n b h d -> (r n) b h d', r=num_retrieved)
rotary_pos_emb = (q_pos_emb, k_pos_emb)
# make sure the number of context chunks is sufficient
assert x.shape[0] // chunk_size == num_chunks
# reshape so we have chunk to chunk attention, without breaking causality
x = rearrange(x, '(k n) b d -> n (b k) d', k=num_chunks)
context = rearrange(context, 'k r n b d -> (r n) (b k) d')
# cross attention
out, bias = self.cross_attention(x, attention_mask, encoder_output=context, rotary_pos_emb=rotary_pos_emb)
# reshape back to original sequence
out = rearrange(out, 'n (b k) d -> (k n) b d', b=b)
# pad back to original, with 0s at the beginning (which will be added to the residual and be fine)
out = F.pad(out, (0, 0, 0, 0, causal_padding, -causal_padding + seq_remain_len), value=0.0)
if not set_inference_key_value_memory and inference_max_sequence_len is not None:
out = out[-1:]
return out, bias
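# The chunked cross-attention above folds the chunk dimension into the batch so
# that each decoder chunk attends only to its own retrieved neighbors. A
# minimal sketch of that reshaping (illustrative only, made-up sizes) using the
# same einops patterns as the forward pass:
def _sketch_chunked_rearrange():
    import torch
    from einops import rearrange

    chunk_size, num_chunks, b, d = 4, 3, 2, 8  # made-up geometry
    num_retrieved, r_len = 2, 6  # neighbors per chunk, tokens per neighbor
    x = torch.randn(num_chunks * chunk_size, b, d)  # decoder tokens, [k * n, b, d]
    context = torch.randn(num_chunks, num_retrieved, r_len, b, d)
    # the chunk dim k is folded into the batch for both queries and context
    x = rearrange(x, '(k n) b d -> n (b k) d', k=num_chunks)  # [n, b * k, d]
    context = rearrange(context, 'k r n b d -> (r n) (b k) d')  # [r * n, b * k, d]
    return x.shape, context.shape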
def get_bias_dropout_add(training):
def _bias_dropout_add(x, bias, residual, prob):
return bias_dropout_add(x, bias, residual, prob, training)
return _bias_dropout_add
def get_dropout_add(training):
def _dropout_add(x, bias, residual, prob):
assert bias is None
return dropout_add(x, bias, residual, prob, training)
return _dropout_add
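# Both factories above return a closure with the signature
# (x, bias, residual, prob). The unfused computation they stand in for is
# conventionally dropout(x + bias) + residual; the sketch below is an
# illustrative reference implementation only, not the fused kernel used at
# training time.
def _sketch_bias_dropout_add(x, bias, residual, prob, training):
    import torch

    out = x + bias if bias is not None else x
    out = torch.nn.functional.dropout(out, p=prob, training=training)
    return residual + out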
class ParallelTransformerLayer_(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""A single transformer layer.
Transformer layer takes input with size [s, b, h] and returns an
output of the same size.
"""
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
hidden_size,
ffn_hidden_size,
num_attention_heads,
layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
fp32_residual_connection=False,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
layernorm_epsilon=1e-5,
hidden_dropout=0.1,
persist_layer_norm=False,
use_cpu_initialization=False,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
gradient_accumulation_fusion=False,
openai_gelu=False,
onnx_safe=False,
attention_dropout=0.1,
ffn_dropout=0.0,
activation='gelu',
megatron_legacy=False,
bias=True,
chunk_size=64,
normalization='layernorm',
transformer_block_type='pre_ln',
headscale=False,
activations_checkpoint_granularity=None,
sequence_parallel=False,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
):
super(ParallelTransformerLayer_, self).__init__()
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
self.layer_number = layer_number
self.layer_type = layer_type
self.bias = bias
self.transformer_block_type = transformer_block_type
self.set_accepted_adapter_types([LinearAdapterConfig._target_, ParallelLinearAdapterConfig._target_])
if not bias and bias_dropout_add_fusion:
raise ValueError(
'bias_dropout_add_fusion=True requires bias=True, found bias=False. Either set both to True or both to False.'
)
if normalization not in ['layernorm', 'layernorm1p', 'rmsnorm']:
raise ValueError(f'normalization must be "layernorm", "layernorm1p" or "rmsnorm", found {normalization}')
if transformer_block_type not in ['pre_ln', 'post_ln', 'normformer']:
raise ValueError(
f'transformer_block_type must be either "pre_ln" or "post_ln" or "normformer", found {transformer_block_type}'
)
self.fp32_residual_connection = fp32_residual_connection # if true move residual connections to fp32
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bias_dropout_add_fusion = bias_dropout_add_fusion # if true, enable bias dropout fusion
# Self attention.
# retrieval_decoder_after_self_attn skips the self attention
if self.layer_type != LayerType.retrieval_decoder_after_self_attn:
# Layernorm on the input data.
if normalization == 'layernorm':
self.input_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.input_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.input_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
self.self_attention = ParallelAttention(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=AttnType.self_attn,
attn_mask_type=self_attn_mask_type,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
use_cpu_initialization=use_cpu_initialization,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
layer_type=layer_type,
megatron_legacy=megatron_legacy,
bias=bias,
headscale=headscale,
activations_checkpoint_granularity=activations_checkpoint_granularity,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
normalize_attention_scores=normalize_attention_scores,
)
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.post_attention_normformer_norm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm
)
else:
self.post_attention_normformer_norm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
if self.layer_type != LayerType.decoder_pre_mlp or self.transformer_block_type != 'post_ln':
# the post_attention_layernorm is used for the layernorm after the mlp
# don't need it for decoder_pre_mlp and post_ln
if normalization == 'layernorm':
self.post_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
if self.layer_type == LayerType.decoder_pre_mlp:
# skip MLP and cross attention
return
# the post_attention_layernorm is used for the layernorm after the mlp
# need it for post_ln
if self.layer_type == LayerType.retrieval_decoder_after_self_attn and self.transformer_block_type == 'post_ln':
# Layernorm on the attention output
if normalization == 'layernorm':
self.post_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
if self.layer_type == LayerType.decoder or self.layer_type == LayerType.retrieval_encoder:
self.inter_attention = ParallelAttention(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
attention_type=AttnType.cross_attn,
attn_mask_type=AttnMaskType.padding,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
use_cpu_initialization=use_cpu_initialization,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
megatron_legacy=megatron_legacy,
bias=bias,
headscale=headscale,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
normalize_attention_scores=normalize_attention_scores,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.post_inter_attention_normformer_norm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_normformer_norm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_inter_attention_normformer_norm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# Layernorm on the attention output.
if normalization == 'layernorm':
self.post_inter_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_inter_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
elif (
self.layer_type == LayerType.retrieval_decoder
or self.layer_type == LayerType.retrieval_decoder_after_self_attn
):
self.inter_attention = ParallelChunkedCrossAttention(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
num_attention_heads=num_attention_heads,
hidden_size=hidden_size,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
use_cpu_initialization=use_cpu_initialization,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
megatron_legacy=megatron_legacy,
chunk_size=chunk_size,
bias=bias,
headscale=headscale,
gradient_accumulation_fusion=gradient_accumulation_fusion,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.post_inter_attention_normformer_norm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_normformer_norm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_inter_attention_normformer_norm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# Layernorm on the attention output.
if normalization == 'layernorm':
self.post_inter_attention_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel
)
elif normalization == 'layernorm1p':
self.post_inter_attention_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.post_inter_attention_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
# MLP
if num_moe_experts > 1 and self.layer_number % moe_frequency == 0:
self.mlp = SwitchMLP(
num_experts=num_moe_experts,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
use_cpu_initialization=use_cpu_initialization,
bias_activation_fusion=bias_activation_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
transformer_block_type=transformer_block_type,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
persist_layer_norm=persist_layer_norm,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
dropout=moe_dropout,
)
else:
self.mlp = ParallelMLP(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
use_cpu_initialization=use_cpu_initialization,
bias_activation_fusion=bias_activation_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
bias=bias,
transformer_block_type=transformer_block_type,
normalization=normalization,
layernorm_epsilon=layernorm_epsilon,
persist_layer_norm=persist_layer_norm,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
dropout=ffn_dropout,
)
def _get_bias_dropout_add_func(self, transformer_block_type='pre_ln', position_after='attention'):
"""
Returns a function that potentially fuses the dropout and bias addition.
This function is particularly helpful for the normformer architecture, which does not use the fused kernel after the attention layers but can use it after the MLP.
"""
# Normformer activations at this point have no bias vector since they've gone through another normalization layer.
if transformer_block_type == 'normformer' and position_after == 'attention':
bias_dropout_add_func = get_dropout_add(self.training)
# Bias dropout add fused kernel
elif self.bias and self.bias_dropout_add_fusion:
if self.training:
bias_dropout_add_func = bias_dropout_add_fused_train
else:
bias_dropout_add_func = bias_dropout_add_fused_inference
# Bias dropout add non-fused kernel
elif self.bias and not self.bias_dropout_add_fusion:
bias_dropout_add_func = get_bias_dropout_add(self.training)
# Dropout add non-fused kernel for a model without bias terms.
else:
bias_dropout_add_func = get_dropout_add(self.training)
return bias_dropout_add_func
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
layer_past=None,
get_key_value=False,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None,  # list of positional embedding tensors: the first is for self attention, the second and third are for cross attention (q, k)
self_attention_relative_position_bias=None,
cross_attention_relative_position_bias=None,
checkpoint_core_attention=False,
):
# Self attention.
if rotary_pos_emb is not None:
# self attention pos_emb is (q, q)
self_attention_pos_emb = (rotary_pos_emb[0], rotary_pos_emb[0])
cross_attention_pos_emb = (rotary_pos_emb[1], rotary_pos_emb[2])
else:
self_attention_pos_emb = None
cross_attention_pos_emb = None
if self.layer_type != LayerType.retrieval_decoder_after_self_attn:
# hidden_states: [b, s, h]
# Pre-LN: x -> LN -> MHA -> Residual -> LN -> MLP -> Residual
# Post-LN: x -> MHA -> Residual -> LN -> MLP -> Residual -> LN
# Normformer: x -> LN -> MHA -> LN -> Residual -> MLP (w/LN) -> Residual
residual = hidden_states
# Layer norm at the beginning of the transformer layer.
if self.transformer_block_type in ['pre_ln', 'normformer']:
hidden_states = self.input_layernorm(hidden_states)
attention_output, attention_bias = self.self_attention(
hidden_states,
attention_mask,
layer_past=layer_past,
get_key_value=get_key_value,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
rotary_pos_emb=self_attention_pos_emb,
relative_position_bias=self_attention_relative_position_bias,
checkpoint_core_attention=checkpoint_core_attention,
)
if get_key_value:
attention_output, presents = attention_output
# If normformer, apply norm on the output of the self attention.
if self.transformer_block_type == 'normformer':
# Normformer normalization
attention_output = (
attention_output + attention_bias if attention_bias is not None else attention_output
)
attention_output = self.post_attention_normformer_norm(attention_output)
attention_bias = None
# jit scripting for an nn.Module (with dropout) is not
# triggering the fusion kernel. For now, we use two
# different nn.functional routines to account for varying
# dropout semantics during training and inference phases.
bias_dropout_add_func = self._get_bias_dropout_add_func(
transformer_block_type=self.transformer_block_type, position_after='attention'
)
if attention_bias is not None:
attention_bias = attention_bias.expand_as(residual)
layernorm_input = bias_dropout_add_func(attention_output, attention_bias, residual, self.hidden_dropout)
# print(f"Layer: {self.layer_number} Attention checksum {layernorm_input.sum()}")
if self.is_adapter_available():
adapter_1 = self.get_from_adapter_layer(AdapterName.PRE_ATTN_ADAPTER)
if adapter_1:
strategy = adapter_1.adapter_strategy
layernorm_input = self.forward_single_enabled_adapter_(
layernorm_input,
adapter_1,
adapter_name=AdapterName.PRE_ATTN_ADAPTER,
adapter_strategy=strategy,
)
# Post-LN normalization after residual
if self.transformer_block_type == 'post_ln':
normalization_output = self.input_layernorm(layernorm_input)
layernorm_input = normalization_output
elif self.transformer_block_type in ['pre_ln', 'normformer']:
# Layer norm post the self attention.
normalization_output = self.post_attention_layernorm(layernorm_input)
else:
layernorm_input, normalization_output = hidden_states
if self.layer_type == LayerType.decoder_pre_mlp:
return layernorm_input, normalization_output
if (
self.layer_type == LayerType.decoder
or self.layer_type == LayerType.retrieval_decoder
or self.layer_type == LayerType.retrieval_encoder
or self.layer_type == LayerType.retrieval_decoder_after_self_attn
):
if (
self.layer_type == LayerType.retrieval_decoder
or self.layer_type == LayerType.retrieval_decoder_after_self_attn
):
attention_output, attention_bias = self.inter_attention(
normalization_output,
enc_dec_attn_mask,
encoder_output=encoder_output,
rotary_pos_emb=cross_attention_pos_emb,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
checkpoint_core_attention=checkpoint_core_attention,
)
else:
attention_output, attention_bias = self.inter_attention(
normalization_output,
enc_dec_attn_mask,
encoder_output=encoder_output,
rotary_pos_emb=cross_attention_pos_emb,
relative_position_bias=cross_attention_relative_position_bias,
checkpoint_core_attention=checkpoint_core_attention,
)
# If normformer, apply norm on the output of the cross attention.
if self.transformer_block_type == 'normformer':
# Normformer normalization
attention_output = (
attention_output + attention_bias if attention_bias is not None else attention_output
)
attention_output = self.post_inter_attention_normformer_norm(attention_output)
attention_bias = None
residual = layernorm_input
bias_dropout_add_func = self._get_bias_dropout_add_func(
transformer_block_type=self.transformer_block_type, position_after='attention'
)
layernorm_input = bias_dropout_add_func(attention_output, attention_bias, residual, self.hidden_dropout)
# print(f"Layer: {self.layer_number} Cross-Attention checksum {layernorm_input.sum()}")
normalization_output = self.post_inter_attention_layernorm(layernorm_input)
# Post-LN normalization after residual
if self.transformer_block_type == 'post_ln':
layernorm_input = normalization_output
# MLP.
mlp_output, mlp_bias = self.mlp(normalization_output)
residual = layernorm_input
bias_dropout_add_func = self._get_bias_dropout_add_func(
transformer_block_type=self.transformer_block_type, position_after='mlp'
)
output = bias_dropout_add_func(mlp_output, mlp_bias, residual, self.hidden_dropout)
# print(f"Layer: {self.layer_number} MLP + Dropout + Residual checksum {output.sum()}")
if self.transformer_block_type == 'post_ln':
output = self.post_attention_layernorm(output)
if get_key_value:
output = [output, presents]
if (
self.is_adapter_available()
): # TODO: (@adithyre) was able to move adapter_2 back to the end of the transformer after ptl 1.7 update.
adapter_2 = self.get_from_adapter_layer(AdapterName.POST_ATTN_ADAPTER)
if adapter_2:
strategy = adapter_2.adapter_strategy
output = self.forward_single_enabled_adapter_(
output, adapter_2, adapter_name=AdapterName.POST_ATTN_ADAPTER, adapter_strategy=strategy
)
return output
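# The comments inside forward() summarize the three supported block orderings.
# The sketch below spells them out with stand-in ln/attn/mlp callables; it is
# illustrative pseudocode only (dropout, biases, adapters, and cross attention
# are omitted), not the actual execution path.
def _sketch_block_orderings(x, ln1, ln2, ln3, attn, mlp, block_type='pre_ln'):
    if block_type == 'pre_ln':
        # x -> LN -> MHA -> Residual -> LN -> MLP -> Residual
        x = x + attn(ln1(x))
        return x + mlp(ln2(x))
    if block_type == 'post_ln':
        # x -> MHA -> Residual -> LN -> MLP -> Residual -> LN
        x = ln1(x + attn(x))
        return ln2(x + mlp(x))
    # normformer: x -> LN -> MHA -> LN -> Residual -> LN -> MLP (which carries
    # its own internal norm) -> Residual
    x = x + ln2(attn(ln1(x)))
    return x + mlp(ln3(x))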
class ParallelTransformerLayer(ParallelTransformerLayer_):
def __init__(
self,
init_method,
output_layer_init_method,
layer_number,
hidden_size,
ffn_hidden_size,
num_attention_heads,
layer_type=LayerType.encoder,
self_attn_mask_type=AttnMaskType.padding,
fp32_residual_connection=False,
precision=16,
apply_query_key_layer_scaling=True,
kv_channels=None,
layernorm_epsilon=1e-5,
hidden_dropout=0.1,
bias_dropout_add_fusion=True,
persist_layer_norm=False,
use_cpu_initialization=False,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
masked_softmax_fusion=True,
attention_dropout=0.1,
ffn_dropout=0.0,
activation='gelu',
megatron_legacy=False,
bias=True,
chunk_size=64,
normalization='layernorm',
transformer_block_type='pre_ln',
headscale=False,
activations_checkpoint_granularity=None,
sequence_parallel=False,
gradient_accumulation_fusion=False,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
):
super(ParallelTransformerLayer, self).__init__(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_attention_heads=num_attention_heads,
layer_type=layer_type,
self_attn_mask_type=self_attn_mask_type,
fp32_residual_connection=fp32_residual_connection,
precision=precision,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
bias_dropout_add_fusion=bias_dropout_add_fusion,
persist_layer_norm=persist_layer_norm,
use_cpu_initialization=use_cpu_initialization,
bias_activation_fusion=bias_activation_fusion,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
masked_softmax_fusion=masked_softmax_fusion,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
activation=activation,
megatron_legacy=megatron_legacy,
bias=bias,
chunk_size=chunk_size,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
activations_checkpoint_granularity=activations_checkpoint_granularity,
sequence_parallel=sequence_parallel,
gradient_accumulation_fusion=gradient_accumulation_fusion,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
)
if precision == 32:
self.dtype = torch.float32
elif precision == 16:
self.dtype = torch.float16
elif precision == 'bf16':
self.dtype = torch.bfloat16
else:
raise ValueError
def forward(
self,
hidden_states,
attention_mask,
encoder_output=None,
enc_dec_attn_mask=None,
rotary_pos_emb=None,
layer_past=None,
get_key_value=False,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
self_attention_relative_position_bias=None,
cross_attention_relative_position_bias=None,
checkpoint_core_attention=False,
):
if self.dtype == torch.float32:
return super().forward(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
layer_past,
get_key_value,
set_inference_key_value_memory,
inference_max_sequence_len,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_core_attention,
)
with torch.autocast(device_type="cuda", dtype=self.dtype):
return super().forward(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
layer_past,
get_key_value,
set_inference_key_value_memory,
inference_max_sequence_len,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_core_attention,
)
class AutocastTransformerLayer(TransformerLayer):
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
layernorm_epsilon: float,
num_attention_heads: int,
init_method: Callable,
output_layer_init_method: Callable,
hidden_dropout: float,
attention_dropout: float,
layer_number: Optional[int] = None,
kv_channels: Optional[int] = None,
self_attn_mask_type: str = "causal",
tp_group: Optional[Any] = None,
tp_size: int = 1,
params_dtype: torch.dtype = torch.float32,
get_rng_state_tracker: Optional[Callable] = None,
fuse_wgrad_accumulation: bool = False,
apply_query_key_layer_scaling: bool = True,
attention_softmax_in_fp32: bool = False,
seq_length: Optional[int] = None,
micro_batch_size: Optional[int] = None,
sequence_parallel: bool = False,
apply_residual_connection_post_layernorm: bool = False,
output_layernorm: bool = False,
layer_type: str = "encoder",
drop_path_rate: float = 0,
use_emha: bool = False,
autocast_dtype: Any = 16,
) -> None:
super().__init__(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
layernorm_epsilon=layernorm_epsilon,
num_attention_heads=num_attention_heads,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
layer_number=layer_number,
kv_channels=kv_channels,
self_attn_mask_type=self_attn_mask_type,
tp_group=tp_group,
tp_size=tp_size,
params_dtype=params_dtype,
get_rng_state_tracker=get_rng_state_tracker,
fuse_wgrad_accumulation=fuse_wgrad_accumulation,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
attention_softmax_in_fp32=attention_softmax_in_fp32,
seq_length=seq_length,
micro_batch_size=micro_batch_size,
sequence_parallel=sequence_parallel,
apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm,
output_layernorm=output_layernorm,
layer_type=layer_type,
drop_path_rate=drop_path_rate,
set_parallel_mode=tp_size > 1,
fuse_qkv_params=True,
)
# use_emha=use_emha,
if autocast_dtype == 32:
self.dtype = torch.float32
elif autocast_dtype == 16:
self.dtype = torch.float16
elif autocast_dtype == 'bf16':
self.dtype = torch.bfloat16
else:
raise ValueError
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
encoder_output: Optional[torch.Tensor] = None,
enc_dec_attn_mask: Optional[torch.Tensor] = None,
inference_params: Optional[Any] = None,
is_first_microbatch: Optional[bool] = None,
checkpoint_core_attention: Optional[bool] = False,
) -> torch.Tensor:
if self.dtype == torch.float32:
return super().forward(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
is_first_microbatch=is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
)
with torch.autocast(device_type="cuda", dtype=self.dtype):
return super().forward(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
is_first_microbatch=is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
)
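# Both ParallelTransformerLayer and AutocastTransformerLayer dispatch on the
# same precision-to-dtype mapping and only enter torch.autocast when a reduced
# precision is requested. A minimal standalone sketch of that dispatch
# (illustrative only; `module` is any callable, e.g. a toy nn.Linear):
def _sketch_autocast_forward(module, x, precision=16):
    import torch

    dtype = {32: torch.float32, 16: torch.float16, 'bf16': torch.bfloat16}[precision]
    if dtype == torch.float32:
        return module(x)
    with torch.autocast(device_type="cuda", dtype=dtype):
        return module(x)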
class ParallelTransformer(MegatronModule):
"""Transformer class."""
def __init__(
self,
init_method,
output_layer_init_method,
num_layers,
hidden_size,
ffn_hidden_size,
num_attention_heads,
apply_query_key_layer_scaling=True,
kv_channels=None,
layer_type=LayerType.encoder, # it can be a list of types or single type
self_attn_mask_type=AttnMaskType.padding,
pre_process=True,
post_process=True,
precision=16,
fp32_residual_connection=False,
activations_checkpoint_method=None,
activations_checkpoint_num_layers=None,
layernorm_epsilon=1e-5,
hidden_dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.0,
use_cpu_initialization=False,
bias_activation_fusion=True,
bias_dropout_add_fusion=True,
masked_softmax_fusion=True,
gradient_accumulation_fusion=False,
persist_layer_norm=False,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
model_type=ModelType.encoder_or_decoder,
megatron_legacy=False,
bias=True,
chunk_size=64,
normalization='layernorm',
transformer_block_type='pre_ln',
headscale=False,
layer_number_offset=0,  # this is used only for attention norm_factor scaling
activations_checkpoint_granularity=None,
activations_checkpoint_layers_per_pipeline=None,
sequence_parallel=False,
transformer_engine=False,
fp8=False,
fp8_e4m3=False,
fp8_hybrid=False,
fp8_margin=0,
fp8_interval=1,
fp8_amax_history_len=1,
fp8_amax_compute_algo='most_recent',
use_emha=False,
normalize_attention_scores=True,
num_moe_experts=1,
moe_frequency=1,
moe_dropout=0.0,
):
super(ParallelTransformer, self).__init__()
if kv_channels is None:
assert (
hidden_size % num_attention_heads == 0
), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'
kv_channels = hidden_size // num_attention_heads
self.fp32_residual_connection = fp32_residual_connection
self.pre_process = pre_process
self.post_process = post_process
self.input_tensor = None
self.self_attn_mask_type = self_attn_mask_type
self.model_type = model_type
self.normalization = normalization
self.transformer_block_type = transformer_block_type
self.layer_type = layer_type
self.activations_checkpoint_method = activations_checkpoint_method
self.activations_checkpoint_num_layers = activations_checkpoint_num_layers
self.activations_checkpoint_granularity = activations_checkpoint_granularity
self.activations_checkpoint_layers_per_pipeline = activations_checkpoint_layers_per_pipeline
if self.activations_checkpoint_granularity:
if self.activations_checkpoint_granularity == 'selective':
if self.activations_checkpoint_method == 'uniform':
logging.info(
(
'Using uniform activation checkpointing with selective granularity forces all layers to use checkpointing.'
)
)
elif self.activations_checkpoint_method == 'block':
logging.info(
(
'Using block activation checkpointing requires activations_checkpoint_num_layers to be set. '
f'Got: {self.activations_checkpoint_num_layers}. Setting to 1 by default.'
)
)
else:
raise ValueError(
f'activations_checkpoint_method should be "uniform" or "block" when using granularity selective.'
)
elif self.activations_checkpoint_granularity == 'full':
if self.activations_checkpoint_method in ['uniform', 'block']:
if not self.activations_checkpoint_num_layers:
logging.info(
(
'Using uniform or block activation checkpointing requires activations_checkpoint_num_layers to be set. '
f'Got: {self.activations_checkpoint_num_layers}. Setting to 1 by default.'
)
)
else:
raise ValueError(
f'activations_checkpoint_method should be "uniform" or "block" when using granularity full.'
)
else:
raise ValueError(f'activations_checkpoint_granularity should be "selective" or "full".')
self.sequence_parallel = sequence_parallel
self.transformer_engine = transformer_engine
self.fp8 = fp8
self.fp8_e4m3 = fp8_e4m3
self.fp8_hybrid = fp8_hybrid
self.fp8_margin = fp8_margin
self.fp8_interval = fp8_interval
self.fp8_amax_history_len = fp8_amax_history_len
self.fp8_amax_compute_algo = fp8_amax_compute_algo
self.fp8_recipe = None
if self.fp8:
if self.fp8_e4m3:
fp8_format = recipe.Format.E4M3
elif self.fp8_hybrid:
fp8_format = recipe.Format.HYBRID
self.fp8_recipe = recipe.DelayedScaling(
margin=self.fp8_margin,
interval=self.fp8_interval,
fp8_format=fp8_format,
amax_history_len=self.fp8_amax_history_len,
amax_compute_algo=self.fp8_amax_compute_algo,
)
self.is_first_microbatch = True
self.microbatch_count = 0 # transformer engine forward needs to know if it is working on the first microbatch
self.checkpoint_core_attention = (
activations_checkpoint_granularity == 'selective'
) # transformer engine forward allows for more granular selective checkpointing
if self.model_type == ModelType.encoder_or_decoder:
assert (
num_layers % parallel_state.get_pipeline_model_parallel_world_size() == 0
), 'num_layers must be divisible by pipeline_model_parallel_size'
assert moe_frequency <= num_layers, 'MoE frequency must be <= number of transformer layers'
# TODO: Add similar assert for encoder-decoder.
self.num_layers = self.get_num_layers(num_layers)
# Transformer layers.
def build_layer(layer_number):
if isinstance(layer_type, list):
lt = layer_type[layer_number - 1]
else:
lt = layer_type
if self.transformer_engine:
return AutocastTransformerLayer(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
layernorm_epsilon=layernorm_epsilon,
num_attention_heads=num_attention_heads,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
layer_number=layer_number + layer_number_offset,
kv_channels=kv_channels,
self_attn_mask_type=self_attn_mask_type.name,
tp_size=parallel_state.get_tensor_model_parallel_world_size(),
params_dtype=torch.float32, # dtype params are initialized in
get_rng_state_tracker=tensor_parallel.random.get_cuda_rng_tracker,
fuse_wgrad_accumulation=gradient_accumulation_fusion,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
seq_length=None, # used for jit warmup
micro_batch_size=None, # used for jit warmup
sequence_parallel=sequence_parallel,
apply_residual_connection_post_layernorm=False,
autocast_dtype=precision,
use_emha=use_emha,
)
else:
return ParallelTransformerLayer(
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number + layer_number_offset,
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
num_attention_heads=num_attention_heads,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
kv_channels=kv_channels,
layer_type=lt,
self_attn_mask_type=self_attn_mask_type,
precision=precision,
fp32_residual_connection=fp32_residual_connection,
layernorm_epsilon=layernorm_epsilon,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
use_cpu_initialization=use_cpu_initialization,
bias_activation_fusion=bias_activation_fusion,
bias_dropout_add_fusion=bias_dropout_add_fusion,
masked_softmax_fusion=masked_softmax_fusion,
gradient_accumulation_fusion=gradient_accumulation_fusion,
persist_layer_norm=persist_layer_norm,
openai_gelu=openai_gelu,
onnx_safe=onnx_safe,
activation=activation,
megatron_legacy=megatron_legacy,
bias=bias,
chunk_size=chunk_size,
normalization=normalization,
transformer_block_type=transformer_block_type,
headscale=headscale,
activations_checkpoint_granularity=activations_checkpoint_granularity,
sequence_parallel=sequence_parallel,
normalize_attention_scores=normalize_attention_scores,
num_moe_experts=num_moe_experts,
moe_frequency=moe_frequency,
moe_dropout=moe_dropout,
)
if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:
assert num_layers % parallel_state.get_virtual_pipeline_model_parallel_world_size() == 0, (
'num_layers_per_stage must be divisible by ' 'virtual_pipeline_model_parallel_size'
)
assert self.model_type.value != 2, 'virtual pipeline parallel currently only supported for GPT'
# Number of layers in each model chunk is the number of layers in the stage,
# divided by the number of model chunks in a stage.
self.num_layers = self.num_layers // parallel_state.get_virtual_pipeline_model_parallel_world_size()
# With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0] [2] [4] [6]
# Stage 1: [1] [3] [5] [7]
# With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
# layers to stages like (each list is a model chunk):
# Stage 0: [0, 1] [4, 5]
# Stage 1: [2, 3] [6, 7]
offset = parallel_state.get_virtual_pipeline_model_parallel_rank() * (
num_layers // parallel_state.get_virtual_pipeline_model_parallel_world_size()
) + (parallel_state.get_pipeline_model_parallel_rank() * self.num_layers)
else:
# Each stage gets a contiguous set of layers.
if (
self.model_type == ModelType.encoder_and_decoder
and parallel_state.get_pipeline_model_parallel_world_size() > 1
):
pipeline_rank = parallel_state.get_pipeline_model_parallel_rank()
if layer_type == LayerType.encoder:
offset = pipeline_rank * self.num_layers
else:
num_ranks_in_enc = parallel_state.get_pipeline_model_parallel_split_rank()
offset = (pipeline_rank - num_ranks_in_enc) * self.num_layers
else:
offset = parallel_state.get_pipeline_model_parallel_rank() * self.num_layers
self.layers = torch.nn.ModuleList([build_layer(i + 1 + offset) for i in range(self.num_layers)])
if self.post_process and self.transformer_block_type != 'post_ln':
# Final layer norm before output.
if normalization == 'layernorm':
self.final_layernorm = get_layer_norm(
hidden_size, layernorm_epsilon, persist_layer_norm, sequence_parallel=sequence_parallel
)
elif normalization == 'layernorm1p':
self.final_layernorm = LayerNorm1P(
hidden_size, layernorm_epsilon, sequence_parallel_enabled=sequence_parallel
)
else:
self.final_layernorm = MixedFusedRMSNorm(hidden_size, layernorm_epsilon)
def _get_layer(self, layer_number):
return self.layers[layer_number]
def get_num_layers(self, num_layers):
"""Compute the number of transformer layers resident on the current rank."""
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
if self.model_type == ModelType.encoder_and_decoder:
assert parallel_state.get_pipeline_model_parallel_split_rank() is not None
num_ranks_in_encoder = parallel_state.get_pipeline_model_parallel_split_rank()
num_ranks_in_decoder = parallel_state.get_pipeline_model_parallel_world_size() - num_ranks_in_encoder
if self.layer_type == LayerType.encoder:
assert (
num_layers % num_ranks_in_encoder == 0
), 'num_layers must be divisible by number of ranks given to encoder'
elif self.layer_type == LayerType.decoder:
assert (
num_layers % num_ranks_in_decoder == 0
), 'num_layers must be divisible by number of ranks given to decoder'
else:
raise ValueError(f"Unknown layer type {self.layer_type}")
if parallel_state.is_pipeline_stage_before_split():
num_layers = num_layers // num_ranks_in_encoder
else:
num_layers = num_layers // num_ranks_in_decoder
else:
assert (
num_layers % parallel_state.get_pipeline_model_parallel_world_size() == 0
), 'num_layers must be divisible by pipeline_model_parallel_size'
num_layers = num_layers // parallel_state.get_pipeline_model_parallel_world_size()
return num_layers
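# Worked example for the arithmetic above (illustrative numbers): with an
# encoder_and_decoder model, a pipeline world size of 6 and a pipeline split
# rank of 2, get_num_layers(24) returns 24 // 2 = 12 on an encoder stage and
# 24 // 4 = 6 on a decoder stage; with encoder_or_decoder and a pipeline world
# size of 4, it returns 24 // 4 = 6 on every stage.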
def _checkpointed_forward(
self,
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_activations_all_layers,
):
"""Forward method with activation checkpointing."""
def custom(start, end):
if self.transformer_engine:
def custom_forward(*inputs):
hidden_states = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
for index in range(start, end):
layer = self._get_layer(index)
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=None,
is_first_microbatch=self.is_first_microbatch,
checkpoint_core_attention=False,
)
return hidden_states
else:
def custom_forward(*inputs):
if len(inputs) == 9:
hidden_states = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
rotary_pos_emb = (inputs[4], inputs[5], inputs[6])
self_attention_relative_position_bias = inputs[7]
cross_attention_relative_position_bias = inputs[8]
elif len(inputs) == 10:
hidden_states = (inputs[0], inputs[1])
attention_mask = inputs[2]
encoder_output = inputs[3]
enc_dec_attn_mask = inputs[4]
rotary_pos_emb = (inputs[5], inputs[6], inputs[7])
self_attention_relative_position_bias = inputs[8]
cross_attention_relative_position_bias = inputs[9]
else:
hidden_states = inputs[0]
attention_mask = inputs[1]
encoder_output = inputs[2]
enc_dec_attn_mask = inputs[3]
rotary_pos_emb = inputs[4]
self_attention_relative_position_bias = inputs[5]
cross_attention_relative_position_bias = inputs[6]
for index in range(start, end):
layer = self._get_layer(index)
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
)
if isinstance(hidden_states, tuple):
pass
else:
hidden_states = hidden_states.contiguous()
return hidden_states
return custom_forward
# Make sure memory is freed.
tensor_parallel.reset_checkpointed_activations_memory_buffer()
if self.activations_checkpoint_method == 'uniform':
# Uniformly divide the total number of Transformer layers and checkpoint
# the input activation of each divided chunk.
# A method to further reduce memory usage reducing checkpoints.
l = 0
while l < self.num_layers:
if isinstance(hidden_states, tuple):
hidden_tuple = (hidden_states[0], hidden_states[1])
else:
hidden_tuple = (hidden_states,)
middle_tuple = (
attention_mask,
encoder_output,
enc_dec_attn_mask,
)
if rotary_pos_emb is None:
rot_tuple = (rotary_pos_emb,)
else:
rot_tuple = (rotary_pos_emb[0], rotary_pos_emb[1], rotary_pos_emb[2])
final_tuple = (self_attention_relative_position_bias, cross_attention_relative_position_bias)
arg_tuple = hidden_tuple + middle_tuple + rot_tuple + final_tuple
if self.transformer_engine:
hidden_states = te_checkpoint(
custom(l, l + self.activations_checkpoint_num_layers),
False,
tensor_parallel.random.get_cuda_rng_tracker,
parallel_state.get_tensor_model_parallel_group(),
*arg_tuple,
)
else:
hidden_states = tensor_parallel.checkpoint(
custom(l, l + self.activations_checkpoint_num_layers), False, *arg_tuple
)
l += self.activations_checkpoint_num_layers
elif self.activations_checkpoint_method == 'block':
# When pipeline-parallel size > 1 and 'num_micro_batches_with_partial_activation_checkpoints' = int,
# pipeline scheduling can force to checkpoint all layers or partial layers in a micro-batch.
if checkpoint_activations_all_layers:
activations_checkpoint_num_layers = self.num_layers
else:
activations_checkpoint_num_layers = self.activations_checkpoint_num_layers
if (
parallel_state.get_pipeline_model_parallel_world_size() > 0
and self.activations_checkpoint_layers_per_pipeline is not None
):
# Decrease the number of layers to checkpoint at later pipeline stages
activations_checkpoint_num_layers -= int(
parallel_state.get_pipeline_model_parallel_rank()
* self.activations_checkpoint_layers_per_pipeline
)
# Checkpoint the input activation of only a set number of individual
# Transformer layers and skip the rest.
# A method fully use the device memory removing redundant re-computation.
for l in range(self.num_layers):
if isinstance(hidden_states, tuple):
hidden_tuple = (hidden_states[0], hidden_states[1])
else:
hidden_tuple = (hidden_states,)
middle_tuple = (
attention_mask,
encoder_output,
enc_dec_attn_mask,
)
if rotary_pos_emb is None:
rot_tuple = (rotary_pos_emb,)
else:
rot_tuple = (rotary_pos_emb[0], rotary_pos_emb[1], rotary_pos_emb[2])
final_tuple = (self_attention_relative_position_bias, cross_attention_relative_position_bias)
arg_tuple = hidden_tuple + middle_tuple + rot_tuple + final_tuple
if l < activations_checkpoint_num_layers:
if self.transformer_engine:
hidden_states = te_checkpoint(
custom(l, l + 1),
False,
tensor_parallel.random.get_cuda_rng_tracker,
parallel_state.get_tensor_model_parallel_group(),
*arg_tuple,
)
else:
hidden_states = tensor_parallel.checkpoint(custom(l, l + 1), False, *arg_tuple)
else:
hidden_states = custom(l, l + 1)(*arg_tuple)
else:
raise ValueError("Invalid activation checkpoint method.")
return hidden_states
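    # Illustrative example of the two checkpointing methods above (numbers are made up):
    # with num_layers=12 and activations_checkpoint_num_layers=3, 'uniform' checkpoints the
    # input of 4 consecutive chunks of 3 layers each, while 'block' checkpoints only the
    # first 3 layers individually and runs the remaining 9 layers without recomputation.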
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(
self,
hidden_states,
attention_mask,
layer_past=None,
get_key_value=False,
encoder_output=None,
enc_dec_attn_mask=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=None,
rotary_pos_emb=None, # list of positional embedding tensors, first one self attention, second one and third one are for cross attention (q, k)
retrieved_emb=None, # tensor of retrieved embedding of shape [b, k, r, n, d]
self_attention_relative_position_bias=None,
cross_attention_relative_position_bias=None,
checkpoint_activations_all_layers=None,
):
# Checks.
if inference_max_sequence_len:
assert self.activations_checkpoint_method is None, 'inference does not work with activation checkpointing'
if layer_past is not None:
assert get_key_value, 'for not None values in layer_past, ' 'expected get_key_value to be set'
if get_key_value:
assert self.activations_checkpoint_method is None, (
'get_key_value does not work with ' 'activation checkpointing'
)
if not self.pre_process:
# See set_input_tensor()
hidden_states = self.input_tensor
# TODO: @Yi Dong, what should this be?
if retrieved_emb is not None:
assert len(retrieved_emb.shape) == 5
# this is retrieval decoder, need special transpose
encoder_output = rearrange(retrieved_emb, 'b k r n d -> k r n b d').contiguous()
"""
is_first_microbatch is an optimization parameter for transformer engine.
It indicates if the current step in the forward pass is the first in a gradient accumulation cycle.
If set, FP8 weights are cached and some minor optimizations are applied to fuse_wgrad_accumulation
"""
from apex.transformer.pipeline_parallel.utils import _GLOBAL_NUM_MICROBATCHES_CALCULATOR
num_micro_batches = getattr(_GLOBAL_NUM_MICROBATCHES_CALCULATOR, 'num_micro_batches', 1)
if self.sequence_parallel:
rng_context = tensor_parallel.random.get_cuda_rng_tracker().fork()
else:
rng_context = nullcontext()
with rng_context:
# fp8_autocast will not do anything if TE or FP8 isn't used
fp8_group = None
if parallel_state.model_parallel_is_initialized():
fp8_group = parallel_state.get_data_parallel_group()
if HAVE_TE:
# if TE is installed but fp8 is not available then this will do nothing
fp8_context = fp8_autocast(enabled=self.fp8, fp8_recipe=self.fp8_recipe, fp8_group=fp8_group)
else:
fp8_context = nullcontext()
with fp8_context:
if self.activations_checkpoint_granularity == 'full':
hidden_states = self._checkpointed_forward(
hidden_states,
attention_mask,
encoder_output,
enc_dec_attn_mask,
rotary_pos_emb,
self_attention_relative_position_bias,
cross_attention_relative_position_bias,
checkpoint_activations_all_layers,
)
else:
if get_key_value:
presents = []
for index in range(self.num_layers):
layer = self._get_layer(index)
past = None
if layer_past is not None:
past = layer_past[index]
if self.activations_checkpoint_granularity == 'selective':
# When pipeline-parallel size > 1 and 'num_micro_batches_with_partial_activation_checkpoints' = int,
# pipeline scheduling can force to checkpoint all layers or partial layers in a micro-batch.
if (
checkpoint_activations_all_layers == True
or self.activations_checkpoint_method == 'uniform'
):
checkpoint_core_attention = True
elif self.activations_checkpoint_method == 'block':
activations_checkpoint_num_layers = self.activations_checkpoint_num_layers
# Decrease the number of layers to checkpoint at later pipeline stages
if self.activations_checkpoint_layers_per_pipeline is not None:
activations_checkpoint_num_layers -= int(
parallel_state.get_pipeline_model_parallel_rank()
* self.activations_checkpoint_layers_per_pipeline
)
checkpoint_core_attention = index < activations_checkpoint_num_layers
else:
checkpoint_core_attention = False
if self.transformer_engine:
inference_params = None
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
inference_params=inference_params,
is_first_microbatch=self.is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
)
else:
hidden_states = layer(
hidden_states,
attention_mask,
encoder_output=encoder_output,
enc_dec_attn_mask=enc_dec_attn_mask,
layer_past=past,
get_key_value=get_key_value,
set_inference_key_value_memory=set_inference_key_value_memory,
inference_max_sequence_len=inference_max_sequence_len,
rotary_pos_emb=rotary_pos_emb,
self_attention_relative_position_bias=self_attention_relative_position_bias,
cross_attention_relative_position_bias=cross_attention_relative_position_bias,
checkpoint_core_attention=checkpoint_core_attention,
)
# Skip counter update for eval and activation checkpointing
if torch.is_grad_enabled() and self.training:
self.microbatch_count += 1
if self.microbatch_count % num_micro_batches == 0:
self.microbatch_count = 0
self.is_first_microbatch = True
else:
self.is_first_microbatch = False
output = hidden_states
# Final layer norm.
if self.post_process:
# only apply the final_layernorm for pre-ln
if self.transformer_block_type != 'post_ln':
output = self.final_layernorm(hidden_states)
if get_key_value:
output = [output, presents]
return output
| [] |
2024-01-10 | gersteinlab/ML-Bench | MLAgent~tools~call_openai.py | import openai
import yaml
import os
def call_GPT(function_prompt,model_name,function_type,function):
if function_type == "auto":
with open("./config/config_openai.yml", "r") as yaml_file:
config = yaml.safe_load(yaml_file)
openai.api_base = config["api_base"]
openai.api_proxy = config["api_proxy"]
openai.api_key = config["openai_keys"][model_name][0]["api_key"]
try:
res = openai.ChatCompletion.create(
model = model_name,
messages = [
{"role": "user",
"content": function_prompt}
],
functions = [function],
function_call = "auto" ,
)
return res
except Exception as e:
print("An exception occurred:", e)
elif function_type == "none":
with open("./config/config_openai.yml", "r") as yaml_file:
config = yaml.safe_load(yaml_file)
openai.api_base = config["api_base"]
#openai.api_proxy = config["api_proxy"]
openai.api_key = config["openai_keys"][model_name][0]["api_key"]
try:
res = openai.ChatCompletion.create(
model=model_name,
messages=[
{"role": "user",
"content": function_prompt}
]
)
return res
except Exception as e:
print("An exception occurred:", e)
| [] |
2024-01-10 | gersteinlab/ML-Bench | MLAgent~repo~open_clip~src~open_clip~factory.py | import json
import logging
import os
import re
from copy import deepcopy
from dataclasses import asdict
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
import torch
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
resize_pos_embed, get_cast_dtype, resize_text_pos_embed, set_model_preprocess_cfg
from .coca_model import CoCa
from .loss import ClipLoss, DistillClipLoss, CoCaLoss, SigLipLoss
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained,\
list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform_v2, AugmentationCfg, PreprocessCfg, merge_preprocess_dict, merge_preprocess_kwargs
from .tokenizer import HFTokenizer, SimpleTokenizer, DEFAULT_CONTEXT_LENGTH
HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def _get_hf_config(model_id, cache_dir=None):
config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir)
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
return config
def get_tokenizer(
model_name: str = '',
context_length: Optional[int] = None,
**kwargs,
):
if model_name.startswith(HF_HUB_PREFIX):
model_name = model_name[len(HF_HUB_PREFIX):]
try:
config = _get_hf_config(model_name)['model_cfg']
except Exception:
tokenizer = HFTokenizer(
model_name,
context_length=context_length or DEFAULT_CONTEXT_LENGTH,
**kwargs,
)
return tokenizer
else:
config = get_model_config(model_name)
assert config is not None, f"No valid model config found for {model_name}."
text_config = config.get('text_cfg', {})
if 'tokenizer_kwargs' in text_config:
tokenizer_kwargs = dict(text_config['tokenizer_kwargs'], **kwargs)
else:
tokenizer_kwargs = kwargs
if context_length is None:
context_length = text_config.get('context_length', DEFAULT_CONTEXT_LENGTH)
if 'hf_tokenizer_name' in text_config:
tokenizer = HFTokenizer(
text_config['hf_tokenizer_name'],
context_length=context_length,
**tokenizer_kwargs,
)
else:
tokenizer = SimpleTokenizer(
context_length=context_length,
**tokenizer_kwargs,
)
return tokenizer
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif isinstance(checkpoint, torch.jit.ScriptModule):
state_dict = checkpoint.state_dict()
for key in ["input_resolution", "context_length", "vocab_size"]:
state_dict.pop(key, None)
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
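# Example of the 'module.' remapping above: a checkpoint saved from a DistributedDataParallel-
# wrapped model stores keys like 'module.visual.conv1.weight', which are renamed to
# 'visual.conv1.weight' so they match the bare model's state dict.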
def load_checkpoint(model, checkpoint_path, strict=True):
if Path(checkpoint_path).suffix in ('.npz', '.npy'):
from .big_vision import load_big_vision_weights
load_big_vision_weights(model, checkpoint_path)
return {}
state_dict = load_state_dict(checkpoint_path)
# detect old format and make compatible with new format
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
state_dict = convert_to_custom_text_state_dict(state_dict)
# If loading a non-SigLIP model for SigLIP training. See https://github.com/mlfoundations/open_clip/issues/712
if 'logit_bias' not in state_dict and model.logit_bias is not None:
state_dict["logit_bias"] = torch.zeros_like(state_dict["logit_scale"])
# Certain text transformers no longer expect position_ids after transformers==4.31
position_id_key = 'text.transformer.embeddings.position_ids'
if position_id_key in state_dict and not hasattr(model, position_id_key):
del state_dict[position_id_key]
resize_pos_embed(state_dict, model)
resize_text_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
force_preprocess_cfg: Optional[Dict[str, Any]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
require_pretrained: bool = False,
**model_kwargs,
):
force_preprocess_cfg = force_preprocess_cfg or {}
preprocess_cfg = asdict(PreprocessCfg())
has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
if has_hf_hub_prefix:
model_id = model_name[len(HF_HUB_PREFIX):]
checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
config = _get_hf_config(model_id, cache_dir)
preprocess_cfg = merge_preprocess_dict(preprocess_cfg, config['preprocess_cfg'])
model_cfg = config['model_cfg']
pretrained_hf = False # override, no need to load original HF text weights
else:
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
checkpoint_path = None
model_cfg = None
if isinstance(device, str):
device = torch.device(device)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(
model_name,
precision=precision,
device=device,
cache_dir=cache_dir,
)
else:
model_cfg = model_cfg or get_model_config(model_name)
if model_cfg is not None:
logging.info(f'Loaded {model_name} model config.')
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout
if force_image_size is not None:
# override model config's image size
model_cfg["vision_cfg"]["image_size"] = force_image_size
is_timm_model = 'timm_model_name' in model_cfg.get('vision_cfg', {})
if pretrained_image:
if is_timm_model:
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
# cast_dtype set for fp16 and bf16 (manual mixed-precision), not set for 'amp' or 'pure' modes
cast_dtype = get_cast_dtype(precision)
is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
if is_hf_model:
# load pretrained weights for HF text model IFF no CLIP weights being loaded
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf and not pretrained
custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model
model_cfg = dict(model_cfg, **model_kwargs) # merge cfg dict w/ kwargs (kwargs overrides cfg)
if custom_text:
if "multimodal_cfg" in model_cfg:
model = CoCa(**model_cfg, cast_dtype=cast_dtype)
else:
model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
else:
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
if precision in ("fp16", "bf16"):
dtype = torch.float16 if 'fp16' in precision else torch.bfloat16
# manual mixed precision that matches original OpenAI behaviour
if is_timm_model:
# FIXME this is a bit janky, create timm based model in low-precision and
# then cast only LayerNormFp32 instances back to float32 so they don't break.
# Why? The convert_weights_to_lp fn only works with native models.
model.to(device=device, dtype=dtype)
from .transformer import LayerNormFp32
def _convert_ln(m):
if isinstance(m, LayerNormFp32):
m.weight.data = m.weight.data.to(torch.float32)
m.bias.data = m.bias.data.to(torch.float32)
model.apply(_convert_ln)
else:
model.to(device=device)
convert_weights_to_lp(model, dtype=dtype)
elif precision in ("pure_fp16", "pure_bf16"):
dtype = torch.float16 if 'fp16' in precision else torch.bfloat16
model.to(device=device, dtype=dtype)
else:
model.to(device=device)
pretrained_loaded = False
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
preprocess_cfg = merge_preprocess_dict(preprocess_cfg, pretrained_cfg)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
error_str = (
f'Pretrained weights ({pretrained}) not found for model {model_name}.'
f' Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.')
logging.warning(error_str)
raise RuntimeError(error_str)
pretrained_loaded = True
elif has_hf_hub_prefix:
logging.info(f'Loading pretrained {model_name} weights ({checkpoint_path}).')
load_checkpoint(model, checkpoint_path)
pretrained_loaded = True
if require_pretrained and not pretrained_loaded:
# callers of create_model_from_pretrained always expect pretrained weights
raise RuntimeError(
f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.')
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
if jit:
model = torch.jit.script(model)
# set image preprocessing configuration in model attributes for convenience
if getattr(model.visual, 'image_size', None) is not None:
# use image_size set on model creation (via config or force_image_size arg)
force_preprocess_cfg['size'] = model.visual.image_size
set_model_preprocess_cfg(model, merge_preprocess_dict(preprocess_cfg, force_preprocess_cfg))
return model
def create_loss(args):
if args.distill:
return DistillClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
elif "coca" in args.model.lower():
return CoCaLoss(
caption_loss_weight=args.coca_caption_loss_weight,
clip_loss_weight=args.coca_contrastive_loss_weight,
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
elif args.siglip:
assert not args.horovod, "Horovod not currently supported for SigLip"
return SigLipLoss(
rank=args.rank,
world_size=args.world_size,
)
return ClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
def create_model_and_transforms(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
image_interpolation: Optional[str] = None,
image_resize_mode: Optional[str] = None, # only effective for inference
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
**model_kwargs,
):
force_preprocess_cfg = merge_preprocess_kwargs(
{}, mean=image_mean, std=image_std, interpolation=image_interpolation, resize_mode=image_resize_mode)
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_patch_dropout=force_patch_dropout,
force_image_size=force_image_size,
force_preprocess_cfg=force_preprocess_cfg,
pretrained_image=pretrained_image,
pretrained_hf=pretrained_hf,
cache_dir=cache_dir,
output_dict=output_dict,
**model_kwargs,
)
pp_cfg = PreprocessCfg(**model.visual.preprocess_cfg)
preprocess_train = image_transform_v2(
pp_cfg,
is_train=True,
aug_cfg=aug_cfg,
)
preprocess_val = image_transform_v2(
pp_cfg,
is_train=False,
)
return model, preprocess_train, preprocess_val
def create_model_from_pretrained(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
image_interpolation: Optional[str] = None,
image_resize_mode: Optional[str] = None, # only effective for inference
return_transform: bool = True,
cache_dir: Optional[str] = None,
**model_kwargs,
):
force_preprocess_cfg = merge_preprocess_kwargs(
{}, mean=image_mean, std=image_std, interpolation=image_interpolation, resize_mode=image_resize_mode)
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_image_size=force_image_size,
force_preprocess_cfg=force_preprocess_cfg,
cache_dir=cache_dir,
require_pretrained=True,
**model_kwargs,
)
if not return_transform:
return model
preprocess = image_transform_v2(
PreprocessCfg(**model.visual.preprocess_cfg),
is_train=False,
)
return model, preprocess
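# Illustrative usage sketch (added for clarity; not part of the original file).
# 'ViT-B-32' with the 'laion2b_s34b_b79k' tag is one of the model/pretrained pairs listed in
# pretrained.py; the weights are downloaded on first use, which may take a while.
if __name__ == "__main__":
    model, preprocess_train, preprocess_val = create_model_and_transforms(
        'ViT-B-32', pretrained='laion2b_s34b_b79k')
    tokenizer = get_tokenizer('ViT-B-32')
    text = tokenizer(["a photo of a cat", "a photo of a dog"])
    print(model.visual.image_size, text.shape)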
| [] |
2024-01-10 | gersteinlab/ML-Bench | MLAgent~tools~call_azure.py | import openai
import yaml
import os
def call_GPT(function_prompt,model_name,function_type,function):
if function_type == "auto":
with open("./config/config_azure.yml", "r") as yaml_file:
config = yaml.safe_load(yaml_file)
openai.api_base = config["api_base"]
openai.api_type = config["api_type"]
openai.api_version = config["api_version"]
#openai.api_proxy = config["api_proxy"]
openai.api_key = config["openai_keys"][model_name][0]["api_key"]
try:
res = openai.ChatCompletion.create(
engine=model_name,
messages=[
{"role": "user",
"content": function_prompt}
],
functions = [function],
function_call = "auto" ,
)
return res
except Exception as e:
print("An exception occurred:", e)
elif function_type == "none":
with open("./config/config_azure.yml", "r") as yaml_file:
config = yaml.safe_load(yaml_file)
openai.api_base = config["api_base"]
openai.api_type = config["api_type"]
openai.api_version = config["api_version"]
#openai.api_proxy = config["api_proxy"]
openai.api_key = config["openai_keys"][model_name][0]["api_key"]
try:
res = openai.ChatCompletion.create(
engine=model_name,
messages=[
{"role": "user",
"content": function_prompt}
]
)
return res
except Exception as e:
print("An exception occurred:", e)
| [] |
2024-01-10 | gersteinlab/ML-Bench | MLAgent~repo~open_clip~src~open_clip~pretrained.py | import hashlib
import os
import urllib
import warnings
from functools import partial
from typing import Dict, Union
from tqdm import tqdm
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD, INCEPTION_MEAN, INCEPTION_STD, \
IMAGENET_MEAN, IMAGENET_STD
from .version import __version__
try:
from huggingface_hub import hf_hub_download
hf_hub_download = partial(hf_hub_download, library_name="open_clip", library_version=__version__)
_has_hf_hub = True
except ImportError:
hf_hub_download = None
_has_hf_hub = False
def _pcfg(url='', hf_hub='', **kwargs):
# OpenAI / OpenCLIP defaults
return {
'url': url,
'hf_hub': hf_hub,
'mean': OPENAI_DATASET_MEAN,
'std': OPENAI_DATASET_STD,
'interpolation': 'bicubic',
'resize_mode': 'shortest',
**kwargs,
}
def _slpcfg(url='', hf_hub='', **kwargs):
# SiGLIP defaults
return {
'url': url,
'hf_hub': hf_hub,
'mean': INCEPTION_MEAN,
'std': INCEPTION_STD,
'interpolation': 'bicubic',
'resize_mode': 'squash',
**kwargs,
}
def _apcfg(url='', hf_hub='', **kwargs):
# CLIPA defaults
return {
'url': url,
'hf_hub': hf_hub,
'mean': IMAGENET_MEAN,
'std': IMAGENET_STD,
'interpolation': 'bilinear',
'resize_mode': 'squash',
**kwargs,
}
_RN50 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt"),
yfcc15m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt"),
cc12m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"),
)
_RN50_quickgelu = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt"),
yfcc15m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt"),
cc12m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"),
)
_RN101 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt"),
yfcc15m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"),
)
_RN101_quickgelu = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt"),
yfcc15m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"),
)
_RN50x4 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt"),
)
_RN50x16 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt"),
)
_RN50x64 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt"),
)
_VITB32 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
laion2b_e16=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth"),
laion2b_s34b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-laion2B-s34B-b79K/'),
# DataComp-XL models
datacomp_xl_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-DataComp.XL-s13B-b90K/'),
# DataComp-M models
datacomp_m_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-DataComp.M-s128M-b4K/'),
commonpool_m_clip_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.clip-s128M-b4K/'),
commonpool_m_laion_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.laion-s128M-b4K/'),
commonpool_m_image_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.image-s128M-b4K/'),
commonpool_m_text_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.text-s128M-b4K/'),
commonpool_m_basic_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.basic-s128M-b4K/'),
commonpool_m_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M-s128M-b4K/'),
# DataComp-S models
datacomp_s_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-DataComp.S-s13M-b4K/'),
commonpool_s_clip_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.clip-s13M-b4K/'),
commonpool_s_laion_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.laion-s13M-b4K/'),
commonpool_s_image_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.image-s13M-b4K/'),
commonpool_s_text_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.text-s13M-b4K/'),
commonpool_s_basic_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.basic-s13M-b4K/'),
commonpool_s_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S-s13M-b4K/'),
)
_VITB32_quickgelu = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
metaclip_400m=_pcfg(
"https://dl.fbaipublicfiles.com/MMPT/metaclip/b32_400m.pt"),
metaclip_fullcc=_pcfg(
"https://dl.fbaipublicfiles.com/MMPT/metaclip/b32_fullcc2.5b.pt"),
)
_VITB32_256 = dict(
datacomp_s34b_b86k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-256x256-DataComp-s34B-b86K/'),
)
_VITB16 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt"),
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt"),
laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-laion2B-s34B-b88K/'),
# DataComp-XL models
datacomp_xl_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-DataComp.XL-s13B-b90K/'),
# DataComp-L models
datacomp_l_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-DataComp.L-s1B-b8K/'),
commonpool_l_clip_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.clip-s1B-b8K/'),
commonpool_l_laion_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.laion-s1B-b8K/'),
commonpool_l_image_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.image-s1B-b8K/'),
commonpool_l_text_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.text-s1B-b8K/'),
commonpool_l_basic_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.basic-s1B-b8K/'),
commonpool_l_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L-s1B-b8K/'),
# DFN
dfn2b=_pcfg(hf_hub='apple/DFN2B-CLIP-ViT-B-16/')
)
_VITB16_quickgelu = dict(
metaclip_400m=_pcfg(
"https://dl.fbaipublicfiles.com/MMPT/metaclip/b16_400m.pt"),
metaclip_fullcc=_pcfg(
"https://dl.fbaipublicfiles.com/MMPT/metaclip/b16_fullcc2.5b.pt"),
)
_VITB16_PLUS_240 = dict(
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt"),
)
_VITL14 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"),
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt"),
laion2b_s32b_b82k=_pcfg(
hf_hub='laion/CLIP-ViT-L-14-laion2B-s32B-b82K/',
mean=INCEPTION_MEAN, std=INCEPTION_STD),
# DataComp-XL models
datacomp_xl_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K/'),
commonpool_xl_clip_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-L-14-CommonPool.XL.clip-s13B-b90K/'),
commonpool_xl_laion_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-L-14-CommonPool.XL.laion-s13B-b90K/'),
commonpool_xl_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-L-14-CommonPool.XL-s13B-b90K/'),
)
_VITL14_quickgelu = dict(
metaclip_400m=_pcfg(
"https://dl.fbaipublicfiles.com/MMPT/metaclip/l14_400m.pt"),
metaclip_fullcc=_pcfg(
"https://dl.fbaipublicfiles.com/MMPT/metaclip/l14_fullcc2.5b.pt"),
dfn2b=_pcfg(hf_hub='apple/DFN2B-CLIP-ViT-L-14/'),
)
_VITL14_336 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"),
)
_VITH14 = dict(
laion2b_s32b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-laion2B-s32B-b79K/'),
)
_VITH14_quickgelu = dict(
metaclip_fullcc=_pcfg(
"https://dl.fbaipublicfiles.com/MMPT/metaclip/h14_fullcc2.5b.pt"),
dfn5b=_pcfg(
hf_hub='apple/DFN5B-CLIP-ViT-H-14/',
interpolation="bicubic",
resize_mode="squash"
),
)
_VITH14_378_quickgelu = dict(
dfn5b=_pcfg(
hf_hub='apple/DFN5B-CLIP-ViT-H-14-378/',
interpolation="bicubic",
resize_mode="squash"
),
)
_VITg14 = dict(
laion2b_s12b_b42k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s12B-b42K/'),
laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s34B-b88K/'),
)
_VITbigG14 = dict(
laion2b_s39b_b160k=_pcfg(hf_hub='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/'),
)
_robertaViTB32 = dict(
laion2b_s12b_b32k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-roberta-base-laion2B-s12B-b32k/'),
)
_xlmRobertaBaseViTB32 = dict(
laion5b_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-xlm-roberta-base-laion5B-s13B-b90k/'),
)
_xlmRobertaLargeFrozenViTH14 = dict(
frozen_laion5b_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-frozen-xlm-roberta-large-laion5B-s13B-b90k/'),
)
_convnext_base = dict(
laion400m_s13b_b51k=_pcfg(hf_hub='laion/CLIP-convnext_base-laion400M-s13B-b51K/'),
)
_convnext_base_w = dict(
laion2b_s13b_b82k=_pcfg(hf_hub='laion/CLIP-convnext_base_w-laion2B-s13B-b82K/'),
laion2b_s13b_b82k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg/'),
laion_aesthetic_s13b_b82k=_pcfg(hf_hub='laion/CLIP-convnext_base_w-laion_aesthetic-s13B-b82K/'),
)
_convnext_base_w_320 = dict(
laion_aesthetic_s13b_b82k=_pcfg(hf_hub='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K/'),
laion_aesthetic_s13b_b82k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K-augreg/'),
)
_convnext_large_d = dict(
laion2b_s26b_b102k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg/'),
)
_convnext_large_d_320 = dict(
laion2b_s29b_b131k_ft=_pcfg(hf_hub='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft/'),
laion2b_s29b_b131k_ft_soup=_pcfg(hf_hub='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup/'),
)
_convnext_xxlarge = dict(
laion2b_s34b_b82k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg/'),
laion2b_s34b_b82k_augreg_rewind=_pcfg(hf_hub='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-rewind/'),
laion2b_s34b_b82k_augreg_soup=_pcfg(hf_hub='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup/'),
)
_coca_VITB32 = dict(
laion2b_s13b_b90k=_pcfg(hf_hub='laion/CoCa-ViT-B-32-laion2B-s13B-b90k/'),
mscoco_finetuned_laion2b_s13b_b90k=_pcfg(hf_hub='laion/mscoco_finetuned_CoCa-ViT-B-32-laion2B-s13B-b90k/')
)
_coca_VITL14 = dict(
laion2b_s13b_b90k=_pcfg(hf_hub='laion/CoCa-ViT-L-14-laion2B-s13B-b90k/'),
mscoco_finetuned_laion2b_s13b_b90k=_pcfg(hf_hub='laion/mscoco_finetuned_CoCa-ViT-L-14-laion2B-s13B-b90k/')
)
_PRETRAINED = {
"RN50": _RN50,
"RN50-quickgelu": _RN50_quickgelu,
"RN101": _RN101,
"RN101-quickgelu": _RN101_quickgelu,
"RN50x4": _RN50x4,
"RN50x16": _RN50x16,
"RN50x64": _RN50x64,
"ViT-B-32": _VITB32,
"ViT-B-32-256": _VITB32_256,
"ViT-B-32-quickgelu": _VITB32_quickgelu,
"ViT-B-16": _VITB16,
"ViT-B-16-quickgelu": _VITB16_quickgelu,
"ViT-B-16-plus-240": _VITB16_PLUS_240,
"ViT-L-14": _VITL14,
"ViT-L-14-quickgelu": _VITL14_quickgelu,
"ViT-L-14-336": _VITL14_336,
"ViT-H-14": _VITH14,
"ViT-H-14-quickgelu": _VITH14_quickgelu,
"ViT-H-14-378-quickgelu": _VITH14_378_quickgelu,
"ViT-g-14": _VITg14,
"ViT-bigG-14": _VITbigG14,
"roberta-ViT-B-32": _robertaViTB32,
"xlm-roberta-base-ViT-B-32": _xlmRobertaBaseViTB32,
"xlm-roberta-large-ViT-H-14": _xlmRobertaLargeFrozenViTH14,
"convnext_base": _convnext_base,
"convnext_base_w": _convnext_base_w,
"convnext_base_w_320": _convnext_base_w_320,
"convnext_large_d": _convnext_large_d,
"convnext_large_d_320": _convnext_large_d_320,
"convnext_xxlarge": _convnext_xxlarge,
"coca_ViT-B-32": _coca_VITB32,
"coca_ViT-L-14": _coca_VITL14,
"EVA01-g-14": dict(
# from QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt
laion400m_s11b_b41k=_pcfg(hf_hub='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k/'),
),
"EVA01-g-14-plus": dict(
# from QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt
merged2b_s11b_b114k=_pcfg(hf_hub='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k/'),
),
"EVA02-B-16": dict(
# from QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt
merged2b_s8b_b131k=_pcfg(hf_hub='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k/'),
),
"EVA02-L-14": dict(
# from QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt
merged2b_s4b_b131k=_pcfg(hf_hub='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k/'),
),
"EVA02-L-14-336": dict(
# from QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt
merged2b_s6b_b61k=_pcfg(hf_hub='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k/'),
),
"EVA02-E-14": dict(
# from QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt
laion2b_s4b_b115k=_pcfg(hf_hub='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k/'),
),
"EVA02-E-14-plus": dict(
# from QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt
laion2b_s9b_b144k=_pcfg(hf_hub='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k/'),
),
"ViT-B-16-SigLIP": dict(
webli=_slpcfg(hf_hub='timm/ViT-B-16-SigLIP/'),
),
"ViT-B-16-SigLIP-256": dict(
webli=_slpcfg(hf_hub='timm/ViT-B-16-SigLIP-256/'),
),
"ViT-B-16-SigLIP-i18n-256": dict(
webli=_slpcfg(hf_hub='timm/ViT-B-16-SigLIP-i18n-256/'),
),
"ViT-B-16-SigLIP-384": dict(
webli=_slpcfg(hf_hub='timm/ViT-B-16-SigLIP-384/'),
),
"ViT-B-16-SigLIP-512": dict(
webli=_slpcfg(hf_hub='timm/ViT-B-16-SigLIP-512/'),
),
"ViT-L-16-SigLIP-256": dict(
webli=_slpcfg(hf_hub='timm/ViT-L-16-SigLIP-256/'),
),
"ViT-L-16-SigLIP-384": dict(
webli=_slpcfg(hf_hub='timm/ViT-L-16-SigLIP-384/'),
),
"ViT-SO400M-14-SigLIP": dict(
webli=_slpcfg(hf_hub='timm/ViT-SO400M-14-SigLIP/'),
),
"ViT-SO400M-14-SigLIP-384": dict(
webli=_slpcfg(hf_hub='timm/ViT-SO400M-14-SigLIP-384/'),
),
"ViT-L-14-CLIPA": dict(
datacomp1b=_apcfg(hf_hub='UCSC-VLAA/ViT-L-14-CLIPA-datacomp1B/'),
),
"ViT-L-14-CLIPA-336": dict(
datacomp1b=_apcfg(hf_hub='UCSC-VLAA/ViT-L-14-CLIPA-336-datacomp1B/'),
),
"ViT-H-14-CLIPA": dict(
datacomp1b=_apcfg(hf_hub='UCSC-VLAA/ViT-H-14-CLIPA-datacomp1B/'),
),
"ViT-H-14-CLIPA-336": dict(
laion2b=_apcfg(hf_hub='UCSC-VLAA/ViT-H-14-CLIPA-336-laion2B/'),
datacomp1b=_apcfg(hf_hub='UCSC-VLAA/ViT-H-14-CLIPA-336-datacomp1B/'),
),
"ViT-bigG-14-CLIPA": dict(
datacomp1b=_apcfg(hf_hub='UCSC-VLAA/ViT-bigG-14-CLIPA-datacomp1B/'),
),
"ViT-bigG-14-CLIPA-336": dict(
datacomp1b=_apcfg(hf_hub='UCSC-VLAA/ViT-bigG-14-CLIPA-336-datacomp1B/'),
),
"nllb-clip-base": dict(
v1=_pcfg(hf_hub='visheratin/nllb-clip-base-oc/'),
),
"nllb-clip-large": dict(
v1=_pcfg(hf_hub='visheratin/nllb-clip-large-oc/'),
)
}
def _clean_tag(tag: str):
# normalize pretrained tags
return tag.lower().replace('-', '_')
def list_pretrained(as_str: bool = False):
""" returns list of pretrained models
Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
"""
return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()]
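# e.g. list_pretrained()[:3] -> [('RN50', 'openai'), ('RN50', 'yfcc15m'), ('RN50', 'cc12m')]
# and list_pretrained(as_str=True)[0] -> 'RN50:openai'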
def list_pretrained_models_by_tag(tag: str):
""" return all models having the specified pretrain tag """
models = []
tag = _clean_tag(tag)
for k in _PRETRAINED.keys():
if tag in _PRETRAINED[k]:
models.append(k)
return models
def list_pretrained_tags_by_model(model: str):
""" return all pretrain tags for the specified model architecture """
tags = []
if model in _PRETRAINED:
tags.extend(_PRETRAINED[model].keys())
return tags
def is_pretrained_cfg(model: str, tag: str):
if model not in _PRETRAINED:
return False
return _clean_tag(tag) in _PRETRAINED[model]
def get_pretrained_cfg(model: str, tag: str):
if model not in _PRETRAINED:
return {}
model_pretrained = _PRETRAINED[model]
return model_pretrained.get(_clean_tag(tag), {})
def get_pretrained_url(model: str, tag: str):
cfg = get_pretrained_cfg(model, _clean_tag(tag))
return cfg.get('url', '')
def download_pretrained_from_url(
url: str,
cache_dir: Union[str, None] = None,
):
if not cache_dir:
cache_dir = os.path.expanduser("~/.cache/clip")
os.makedirs(cache_dir, exist_ok=True)
filename = os.path.basename(url)
if 'openaipublic' in url:
expected_sha256 = url.split("/")[-2]
elif 'mlfoundations' in url:
expected_sha256 = os.path.splitext(filename)[0].split("-")[-1]
else:
expected_sha256 = ''
download_target = os.path.join(cache_dir, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if expected_sha256:
if hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
else:
return download_target
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.headers.get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if expected_sha256 and not hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def has_hf_hub(necessary=False):
if not _has_hf_hub and necessary:
# if no HF Hub module installed, and it is necessary to continue, raise error
raise RuntimeError(
'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
return _has_hf_hub
def download_pretrained_from_hf(
model_id: str,
filename: str = 'open_clip_pytorch_model.bin',
revision=None,
cache_dir: Union[str, None] = None,
):
has_hf_hub(True)
cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)
return cached_file
def download_pretrained(
cfg: Dict,
force_hf_hub: bool = False,
cache_dir: Union[str, None] = None,
):
target = ''
if not cfg:
return target
download_url = cfg.get('url', '')
download_hf_hub = cfg.get('hf_hub', '')
if download_hf_hub and force_hf_hub:
# use HF hub even if url exists
download_url = ''
if download_url:
target = download_pretrained_from_url(download_url, cache_dir=cache_dir)
elif download_hf_hub:
has_hf_hub(True)
# we assume the hf_hub entries in pretrained config combine model_id + filename in
# 'org/model_name/filename.pt' form. To specify just the model id w/o filename and
# use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.
model_id, filename = os.path.split(download_hf_hub)
if filename:
target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)
else:
target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
return target
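# Illustrative usage sketch (added for clarity; not part of the original file).
# Resolves the config for one of the tags defined above and downloads its weights
# (requires network access on first call; the file is cached locally afterwards).
if __name__ == "__main__":
    cfg = get_pretrained_cfg('ViT-B-32', 'laion2b_s34b_b79k')
    checkpoint_path = download_pretrained(cfg)
    print(checkpoint_path)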
| [] |
2024-01-10 | Anirudh-Murali/llm_chat_classifier | classes~ChatSimilarity.py | import openai
from openai.embeddings_utils import get_embedding
import json
from copy import deepcopy
import tiktoken
from tqdm.notebook import tqdm
import re
import itertools
from sentence_transformers import SentenceTransformer, util
import numpy as np
from classes.OpenaiConnector import OpenaiConnector
class ChatSimilarity(OpenaiConnector):
"""
    Finds and returns the existing chat thread most similar to a new user query.
Attributes:
- json_file_path (str): Path to the JSON file containing chat data.
- api_key (str): API key for OpenAI authentication.
- nli_model: Neural model for natural language inference and similarity.
- chat_data (dict): Data structure containing chat threads loaded from JSON.
- indexed_data (dict): Processed chat threads with question-response pairs.
- similar_query_prompt (str): Predefined prompt loaded from a file for generating similar queries.
- relevence_comparision_prompt (str): Predefined prompt loaded from a file for relevance comparison.
- chat_thread_similar_queries (dict): Holds similar queries for each chat thread.
- index_mapping (dict): Mapping of chat embeddings to their titles for FAISS indexing.
- embedding_matrix (numpy.ndarray): Matrix of embeddings to be indexed using FAISS.
"""
def __init__(self, json_file_path, api_key, instruction_model='gpt-3.5-turbo-instruct',
embed_model='text-embedding-ada-002',
chat_model='gpt-4',
nli_model='paraphrase-MiniLM-L6-v2'):
"""
Constructor for ChatSimilarity class.
Args:
- json_file_path (str): Path to the JSON file containing chat data.
- api_key (str): API key for OpenAI authentication.
        - instruction_model (str): Name of the OpenAI model to be used for instructions
        - embed_model (str): Name of the OpenAI model to be used for embeddings
        - chat_model (str): Name of the OpenAI model to be used for chat
- nli_model (str): Name of the sentence_transformers model for natural language inference and similarity
"""
        super().__init__(api_key, instruction_model=instruction_model,
                         chat_model=chat_model, embed_model=embed_model)
self.nli_model = SentenceTransformer(nli_model) #good balance of speed and accuracy
self.json_file_path = json_file_path
self.chat_data = self._load_json()
self.indexed_data = self._process_chats()
self.similar_query_prompt = self._load_prompt_from_file('prompts/similar_queries.txt')
self.relevence_comparision_prompt = self._load_prompt_from_file('prompts/chat_relevance.txt')
self.assitant_message = self._load_prompt_from_file('prompts/assistant_message.txt')
# initialise class variables which will be set later
self.chat_thread_similar_queries = None #generate_similar_queries()
self.index_mapping = None #prepare_embeddings_for_faiss()
self.embedding_matrix = None #prepare_embeddings_for_faiss()
def _load_json(self):
"""
Loads the chat data from the specified JSON file.
"""
with open(self.json_file_path, 'r') as file:
chat_data = json.load(file)
return chat_data
def _load_prompt_from_file(self,filepath):
"""Load the content of a file and return as a string."""
with open(filepath, 'r') as file:
content = file.read()
return content
def _process_chats(self):
"""
Creates a new data structure to hold question-response pairs for each chat thread.
"""
indexed_data = {}
for thread in self.chat_data:
mapping = thread.get('mapping', {})
question, response = None, None
# Iterate through the mappings to find user and assistant messages
for key, value in mapping.items():
# Guard against None value for 'message'
if value.get('message') is not None:
message_content = value['message'].get('content', {}).get('parts', [])
# Check role of the author and assign question or response
if value['message'].get('author', {}).get('role') == 'user':
question = ' '.join(message_content) # join in case parts have multiple segments
elif value['message'].get('author', {}).get('role') == 'assistant':
response = ' '.join(message_content)
# If both question and response are found, add to indexed_data
if question and response:
if thread['title'] not in indexed_data:
indexed_data[thread['title']] = []
indexed_data[thread['title']].append((question, response))
question, response = None, None # reset for the next pair
return indexed_data
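    # Resulting structure (illustrative):
    # {"Chat title": [("first user question", "first assistant reply"),
    #                 ("second user question", "second assistant reply"), ...]}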
@staticmethod
def split_on_number_period(s):
"""
Static method to split a string based on numbered list pattern.
Args:
- s (str): Input string.
Returns:
- list[str]: List of split parts.
"""
# Split on the pattern
parts = re.split(r'\n\d+\.', s)
# Remove leading numbers followed by a period and filter out empty strings
cleaned_parts = [re.sub(r'^\d+\.', '', part).strip() for part in parts]
return [part for part in cleaned_parts if part]
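    # e.g. split_on_number_period("\n1. What is X?\n2. How does Y work?")
    #      -> ["What is X?", "How does Y work?"]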
def generate_similar_queries(self, token_limit=2048,max_response_tokens=150,
num_responses=5):
"""
Generate similar queries/questions based on the overall theme of chat threads using OpenAI.
Args:
- token_limit (int): Maximum allowed tokens for the API call. Default is 2048.
- max_response_tokens (int): Maximum number of tokens in the model's response.
- num_responses (int): Number of different responses or queries to generate.
Modifies:
- self.chat_thread_similar_queries: Fills with the generated similar queries.
"""
def prepare_model_response(response):
# extract similar queries from each response
split_response = [ChatSimilarity.split_on_number_period(single_response) for
single_response in response]
# now response is a 2D list. We need to flatten it
            flattened_response = list(itertools.chain.from_iterable(split_response))
            return flattened_response
# get all user chat titles
chat_thread_similar_queries = {}
for chat_thread_title in tqdm(self.indexed_data.keys()):
# init empty list to store prompts after chunking of the chat
similar_queries = []
# init empty list to store all user queries
user_queries = []
# Extract the conversations for the given chat_thread_title
conversations = self.indexed_data[chat_thread_title]
# Prepare the context by considering the token limit
context = ""
for question, response in reversed(conversations): # Start from the latest conversation
# Form the conversation context
conversation_context = f"User: {question}\nAssistant: {response}\n"
# Check if adding the next conversation breaches the max token limit
new_token_count = len(list(self.instruction_tokenizer.encode(self.similar_query_prompt+ context + conversation_context)))
if new_token_count <= token_limit:
context += conversation_context
continue
else:
# get similar queries for the current chunk
prompt = self.similar_query_prompt.format(chat_context=context)
context = ''
response = self.get_instructor_model_response(prompt,max_response_tokens=max_response_tokens,
token_limit=token_limit,num_responses=num_responses)# get similar queries
response = prepare_model_response(response) #format the model response
similar_queries.extend(response)
# get similar queries for the last chunk
prompt = self.similar_query_prompt.format(chat_context=context)
response = self.get_instructor_model_response(prompt,max_response_tokens=max_response_tokens,
token_limit=token_limit,num_responses=num_responses)# get similar queries
response = prepare_model_response(response) #format the model response
similar_queries.extend(response)
chat_thread_similar_queries[chat_thread_title] = {'queries':similar_queries}
# generate and store embeddings for all queries in chat_thread_similar_queries
embeddings = [self.get_text_embedding(f'''{query}''') for query in similar_queries]
# embeddings = [self.get_text_embedding('''{query}''') for query in similar_queries]
# self.bert_model.encode(sentence)
chat_thread_similar_queries[chat_thread_title]['embeddings'] = embeddings
self.chat_thread_similar_queries = chat_thread_similar_queries
def prepare_embeddings_for_faiss(self):
"""
Process embeddings in preparation for FAISS indexing.
Raises:
- ValueError: If chat queries and their embeddings have not been generated.
Modifies:
- self.index_mapping: Fills with mapping of embeddings to chat titles.
- self.embedding_matrix: Fills with the combined matrix of embeddings.
"""
if not self.chat_thread_similar_queries:
raise ValueError("Chat queries & embeddings have not been generated. Please run generate_similar_queries() first.")
index_mapping = {}
for idx,chat_title in tqdm(enumerate(self.chat_thread_similar_queries.keys()),
total=len(self.chat_thread_similar_queries.keys())):
embeddings = np.array(self.chat_thread_similar_queries[chat_title]['embeddings'])
num_query_embeddings = embeddings.shape[0]
index_keys = index_mapping.keys()
if len(index_mapping.keys())>0:
last_index_key = max(index_keys) + 1
else:
last_index_key = 0
            append_mapping = {idx:chat_title for idx in range(last_index_key,last_index_key+num_query_embeddings)}
            index_mapping.update(append_mapping)
if idx == 0:
embedding_matrix = embeddings
else:
embedding_matrix = np.concatenate([embedding_matrix,embeddings])
self.index_mapping = index_mapping
self.embedding_matrix = embedding_matrix
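    # After this call (illustrative): embedding_matrix stacks every generated query embedding
    # row-wise, and index_mapping looks like {0: 'Chat A', 1: 'Chat A', 2: 'Chat B', ...},
    # so a FAISS hit on row i can be traced back to its chat title.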
def get_similar_chats(self,user_query,faiss_index):
"""
Get chat threads that are similar to a user's query using FAISS.
Args:
- user_query (str): The user's input query.
- faiss_index: FAISS index instance containing chat embeddings.
Returns:
- list[str] or str or None: List of matching chat titles or a single matching title or None if no match found.
"""
user_query_embedding = self.get_text_embedding(user_query)
user_query_embedding = np.array(user_query_embedding)
similarity, indices = faiss_index.find_similar_querys(user_query_embedding, num_neighbors=10)
min_similarity_cutoff = 0.8 #arbitrarily chosen
# Compute similarity with other users for the current actor
# similarity, indices = self.find_similar_users(actor_vector, num_neighbors=top_n)
# Flatten the indices and similarity arrays
indices = indices.flatten()
similarity = similarity.flatten()
# Drop -1 indices
        indices = indices[np.logical_and(indices != -1, similarity > min_similarity_cutoff)]
        if len(indices) == 0: # nothing matched
return None
chat_titles = list(set([faiss_index.index_mapping_dict[idx] for idx in indices]))
return chat_titles
def extract_latest_chat_context(self,chat_thread_titles,token_limit=2048):
"""
Extracts the latest context from given chat threads, considering a token limit.
Args:
- chat_thread_titles (list[str]): List of chat thread titles to extract context from.
- token_limit (int): Maximum allowed tokens for the context extraction. Default is 2048.
Returns:
- list[str]: List of chat contexts.
"""
latest_chat_context = {}
chat_history_prompts = []
for chat_thread_title in tqdm(chat_thread_titles):
# Extract the latest conversation for the given chat_thread_title
conversations = self.indexed_data[chat_thread_title]
# Start the context by providing the chat title and the first prompt/response
question, response = conversations[0]
context = f"{chat_thread_title}: User{question} \nAssistant:{response}"
for question, response in reversed(conversations): # Start from the latest conversation
# Form the conversation context
conversation_context = f"User: {question}\nAssistant: {response}\n"
# Check if adding the next conversation breaches the max token limit
# new_token_count = len(list(self.chat_tokenizer.encode(self.relevance_prompt + context + conversation_context)))
new_token_count = len(list(self.chat_tokenizer.encode(context + conversation_context)))
if new_token_count <= token_limit:
context += conversation_context
continue
else:
break
chat_history_prompts.append(f"{chat_thread_title}: {context}")
return chat_history_prompts
def prepare_model_messages(self,chat_history_prompts,user_query):
"""
Prepares a structured message format for querying the OpenAI model.
Args:
- chat_history_prompts (list[str]): Chat histories to consider.
- user_query (str): User's input query.
Returns:
- list[dict]: Structured messages for the model.
"""
messages = [
# {"role": "system", "content": "You are a helpful assistant"},
{"role": "user", "content": self.relevence_comparision_prompt},
{"role": "assistant", "content": self.assitant_message.replace('another','a')}]
for chat_history_prompt in chat_history_prompts:
messages.append({'role':'user','content':chat_history_prompt})
messages.append({'role':'assistant','content':self.assitant_message})
messages.append({'role':'user','content':f'''new query:{user_query}'''})
return messages
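    # Editor's note (illustrative): for two candidate threads the returned list looks like
    #   [user: relevance-comparison instructions, assistant: acknowledgement,
    #    user: "<title 1>: <history 1>", assistant: acknowledgement,
    #    user: "<title 2>: <history 2>", assistant: acknowledgement,
    #    user: "new query:<user_query>"]
    # i.e. one user/assistant pair per chat history, closed by the new query.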
def get_valid_chat_title(self, user_query: str, chat_titles: list[str]) -> str:
"""
Find the most similar text from a list of chat titles by comparing it to a user query.
Args:
- user_query (str): The user's input query.
- chat_titles (list[str]): A list of chat titles to compare with the user's query.
Returns:
- str: The most similar chat title.
"""
# Initialize the model
# Get the embeddings
user_query_embedding = self.nli_model.encode(user_query, convert_to_tensor=True)
chat_title_embeddings = self.nli_model.encode(chat_titles, convert_to_tensor=True)
# Compute cosine similarities
cosine_scores = [util.pytorch_cos_sim(user_query_embedding, chat_title_embedding) for chat_title_embedding in chat_title_embeddings]
# Get the index of the highest similarity score
most_similar_index = np.argmax(cosine_scores)
return chat_titles[most_similar_index]
def return_most_similar_chat(self,user_query,similar_chat_titles:list):
'''
        Function to finalise the chat thread from a set of candidate chat threads
Args:
- user_query(str): The user query which is to be compared
- similar_chat_titles(list) : list of candidate chat thread titles
Return:
- None if there is no matching chat thread, else the title of the most
similar chat thread
'''
if len(similar_chat_titles)==0:
print("No chat threads are similar")
return None
elif len(similar_chat_titles)==1:
print(f"{similar_chat_titles[0]} is the single candidate")
return similar_chat_titles[0]
else:
print(f"multiple candidate : {similar_chat_titles}")
most_similar_chat_prompts = self.extract_latest_chat_context(similar_chat_titles)
messages = self.prepare_model_messages(most_similar_chat_prompts,user_query)
model_response = self.get_chat_model_response(messages, max_response_tokens=1, num_responses=1, temperature=0)[0]
most_similar_chat_title = self.get_valid_chat_title(model_response, similar_chat_titles)
return most_similar_chat_title | [
"new query:PLACEHOLDER",
"another",
"[]"
] |
2024-01-10 | huawei-noah/SMARTS | smarts~core~utils~class_factory.py | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ====================
# Heavily derived from https://github.com/openai/gym/blob/v0.10.5/gym/envs/registration.py
# See gym license in THIRD_PARTY_OPEN_SOURCE_SOFTWARE_NOTICE
import importlib
import re
import warnings
from typing import Dict
# Taken from OpenAI gym's name constraints
NAME_CONSTRAINT_REGEX = re.compile(r"^(?:[\w:-]+\/)?([\w:.-]+)-(v(\d+)|latest)$")
def is_valid_locator(locator: str):
"""Validate the given locator."""
# Handle non-URL-based agents (e.g. open_agent-v0)
return NAME_CONSTRAINT_REGEX.search(locator)
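# Editor's note (illustrative): locators accepted by NAME_CONSTRAINT_REGEX end in a
# version suffix, e.g. "open_agent-v0", "smarts.zoo:my_agent-v1" or "my_agent-latest";
# a bare name with no "-v<N>"/"-latest" suffix is rejected.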
def find_attribute_spec(name):
"""Finds the attribute specification from a reachable module.
Args:
name:
The module and attribute name (i.e. smarts.core.lidar:Lidar, ...)
"""
module_name, attribute_name = name.split(":")
module = importlib.import_module(module_name)
attribute_spec = getattr(module, attribute_name)
return attribute_spec
class ClassFactory:
"""A named factory that can preconfigure generation of objects."""
def __init__(self, name, entrypoint=None, **kwargs):
self.name = name
self.entrypoint = entrypoint
self._kwargs = kwargs
if self.entrypoint is None:
raise EnvironmentError(
f"Entry-point is empty for: '{self.name}'. Provide an entry-point"
)
def make(self, **kwargs):
"""Provides an object from the entrypoint. Overriding predefined keyword arguments with
the given keyword arguments.
"""
if self.entrypoint is None:
raise AttributeError(f"Entry-point does not exist for name `{self.name}`")
_kwargs = self._kwargs.copy()
_kwargs.update(kwargs)
if callable(self.entrypoint):
instance = self.entrypoint(**_kwargs)
else:
type_spec = find_attribute_spec(self.entrypoint)
instance = type_spec(**_kwargs)
return instance
def __repr__(self):
return f"""ClassFactory(
name={self.name},
entrypoint={self.entrypoint},
kwargs={self._kwargs},
)"""
class ClassRegister:
"""A listing of key named class factories."""
def __init__(self):
self.index: Dict[str, ClassFactory] = {}
def register(self, name, entry_point=None, **kwargs):
"""Registers a new factory with the given locator as the key.
Args:
locator: The key value of the factory.
entry_point: The factory method.
kwargs: Predefined arguments to the factory method.
"""
if name in self.index:
warnings.warn(
f"Resident named '{name}' was already registered. Overwriting existing registration."
)
self.index[name] = ClassFactory(name, entry_point, **kwargs)
def find_factory(self, locator):
"""Locates a factory given a locator."""
self._raise_on_invalid_locator(locator)
mod_name, _, name = locator.partition(":")
if name != "":
# There is a module component.
try:
# Import the module so that the agent may register itself in the index
# it is assumed that a `register(name=..., entry_point=...)` exists in the target module.
module = importlib.import_module(mod_name)
except ImportError as exc:
import sys
raise ImportError(
f"Ensure that `{mod_name}` module can be found from your "
f"PYTHONPATH and name=`{locator}` exists (e.g. was registered "
"manually or downloaded).\n"
f"`PYTHONPATH`: `{sys.path}`"
) from exc
else:
# There is no module component.
name = mod_name
try:
# See if `register()` has been called.
# return the builder if it exists.
return self.index[name]
except KeyError as exc:
raise NameError(f"Locator not registered in lookup: {locator}") from exc
def make(self, locator, **kwargs):
"""Calls the factory with `locator` name key supplying the keyword arguments as argument
overrides.
"""
factory = self.find_factory(locator)
instance = factory.make(**kwargs)
return instance
def all(self):
"""Lists all available factory objects."""
return self.index.values()
def __repr__(self) -> str:
columns = 3
max_justify = float("-inf")
for name in self.index.keys():
max_justify = max(max_justify, len(name))
out = ""
for i, name in enumerate(self.index.keys()):
out = f"{out}{name.ljust(max_justify)} "
if i % columns == 0 and len(self.index) != i + 1:
out += "\n"
out += "\n"
return out
def _raise_on_invalid_locator(self, locator: str):
if not is_valid_locator(locator):
# TODO: Give clearer instructions/examples of the locator syntax
raise ValueError(
f"Cannot register invalid locator={locator}. E.g. syntax: "
'"module:name-v0".'
)
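# Editor's illustrative usage sketch (not part of the original module); it relies only
# on the classes defined above, with `dict` standing in for a real entry point.
def _example_register_usage():
    register = ClassRegister()
    register.register("dummy_lidar-v0", entry_point=dict, radius=3)
    # Keyword arguments passed to make() override those given at registration time.
    return register.make("dummy_lidar-v0", radius=5)  # -> {"radius": 5}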
| [] |
2024-01-10 | yangytjason2000/RecipeFinder | Backend~lambda~lambda_function.py | import boto3
from boto3.dynamodb.conditions import Key
import json
import jwt
import re
import openai
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
METHODS = set(['GET', 'POST', 'DELETE'])
TABLES = set(['ingredient','recipe'])
ISO8601 = r'^([0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):([0-5][0-9]):([0-5][0-9])(.[0-9]{3})Z$'
def lambda_handler(event, context):
method = event['httpMethod']
if method not in METHODS:
return serialize_invalid_response(f'Unsupported HTTP method: {method}')
table_name = event['queryStringParameters']['database']
if table_name not in TABLES:
return serialize_invalid_response(f'Invalid resource name: {table_name}')
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(table_name)
username = get_username(event)
if method == 'GET':
if event['queryStringParameters']['mode']=='recommend':
return recommend_item(table,username)
else:
return get_item(table,username)
if method == 'POST':
if event['queryStringParameters']['mode']=='consume':
return consume_item(table, event['body'],username)
else:
return post_item(table, event['body'],username)
if method == 'DELETE':
return delete_item(table, event['body'],username)
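# Editor's illustrative sketch of the API Gateway event shape lambda_handler expects;
# the Authorization value is a placeholder, not a real Cognito JWT.
_EXAMPLE_EVENT = {
    "httpMethod": "GET",
    "queryStringParameters": {"database": "recipe", "mode": "recommend"},
    "headers": {"Authorization": "Bearer <cognito-id-token>"},
    "body": None,
}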
def generate_text(prompt):
response = openai.Completion.create(
model = "text-davinci-003",
prompt=prompt,
temperature=0.7,
)
text = response.choices[0].text.strip()
return text
def get_username(event):
# Extract the token from the Authorization header
authorization_header = event['headers'].get('Authorization')
token = authorization_header.split()[1]
# Decode the token to retrieve the user ID or username
decoded_token = jwt.decode(token, algorithms=['HS256'],options={"verify_signature": False})
username = decoded_token['cognito:username']
return username
def check_availability(fridge,ingredients):
for ingredient in ingredients:
if ingredient['name'] in fridge and ingredient['quantity']>fridge[ingredient['name']]:
return False
elif ingredient['name'] not in fridge:
return False
return True
def recommend_item(table,username,item_number=1):
if item_number < 0:
return serialize_invalid_response('Not a valid number')
recipes = []
response = table.query(KeyConditionExpression=Key('username').eq(username))
recipes.extend(response['Items'])
while 'LastEvaluatedKey' in response:
response = table.query(KeyConditionExpression=Key('username').eq(username),
ExclusiveStartKey=response['LastEvaluatedKey'])
recipes.extend(response['Items'])
dynamodb = boto3.resource('dynamodb')
fridge_table = dynamodb.Table('ingredient')
fridge = {}
response = fridge_table.query(KeyConditionExpression=Key('username').eq(username))
for item in response['Items']:
fridge[item['name']] = item['quantity']
while 'LastEvaluatedKey' in response:
response = fridge_table.query(KeyConditionExpression=Key('username').eq(username),
ExclusiveStartKey=response['LastEvaluatedKey'])
for item in response['Items']:
fridge[item['name']] = item['quantity']
recommend_recipe = []
for recipe in recipes:
        if check_availability(fridge,recipe['ingredient']):
recommend_recipe.append(recipe)
if (len(recommend_recipe)>=item_number):
recommend_recipe = sorted(recommend_recipe,key = lambda x: x['date'])[:item_number]
return serialize_correct_response(recommend_recipe)
def get_item(table,username):
items = []
response = table.query(KeyConditionExpression=Key('username').eq(username))
items.extend(response['Items'])
while 'LastEvaluatedKey' in response:
response = table.query(KeyConditionExpression=Key('username').eq(username),
ExclusiveStartKey=response['LastEvaluatedKey'])
items.extend(response['Items'])
return serialize_correct_response(items)
def consume_item(table, payload,username):
valid, item = validate_payload(payload)
if not valid:
return serialize_invalid_response(item)
if 'date' in item:
date = item['date']
match = re.fullmatch(ISO8601, date)
if match is None:
return serialize_invalid_response(f'Invalid date: {date}')
ingredients = item['ingredient']
for ingredient in ingredients:
response = table.get_item(Key={'username': username,'name': ingredient['name']})
if 'Item' not in response:
return serialize_invalid_response('No such ingredient: '+ingredient['name'])
if float(response['Item']['quantity'])<float(ingredient['quantity']):
return serialize_invalid_response('Not enough ingredient: '+ingredient['name'])
    for ingredient in ingredients:
        # Re-fetch each ingredient so the deduction is applied to the matching item,
        # not to whichever item the validation loop fetched last.
        response = table.get_item(Key={'username': username, 'name': ingredient['name']})
        new_quantity = float(response['Item']['quantity']) - float(ingredient['quantity'])
        if new_quantity.is_integer():
            response['Item']['quantity'] = str(int(new_quantity))
        else:
            response['Item']['quantity'] = str(new_quantity)
        table.put_item(Item=response['Item'])
return get_item(table,username)
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def post_item(table, payload, username):
valid, item = validate_payload(payload)
if not valid:
return serialize_invalid_response(item)
if 'date' in item:
date = item['date']
match = re.fullmatch(ISO8601, date)
if match is None:
return serialize_invalid_response(f'Invalid date: {date}')
if 'quantity' in item:
quantity = item['quantity']
if not is_number(quantity):
return serialize_invalid_response('Invalid quantity')
if 'ingredient' in item:
ingredients = item['ingredient']
for ingredient in ingredients:
if not is_number(ingredient['quantity']):
return serialize_invalid_response('Invalid quantity')
item['name'] = item['name'].strip()
item['username'] = username
table.put_item(Item=item)
return get_item(table,username)
def delete_item(table, payload,username):
valid, item = validate_payload(payload)
if not valid:
return serialize_invalid_response(item)
table.delete_item(Key={'username': username,'name': item['name']})
return get_item(table,username)
def validate_payload(payload):
try:
payload = json.loads(payload)
except Exception:
return False, 'Payload not valid'
if not 'name' in payload:
return False, 'Ingredient name not provided'
return True, payload
def serialize_correct_response(body=None):
return {
'statusCode': 200,
'headers': {},
'body': json.dumps(body),
'isBase64Encoded': False
}
def serialize_invalid_response(message):
body = {
'message': message
}
return {
'statusCode': 400,
'headers': {},
'body': json.dumps(body),
'isBase64Encoded': False
} | [] |
2024-01-10 | ShadowShakes/ShapeMentor-Advisor | service~ai_advisor.py | """Personalized AI interaction to provide health advice."""
from openai import OpenAI
from service.properties import ApiKeySelector, load_prompts_map
from utils import timer
from typing import Dict, List
from retrying import retry
from string import Template
API_SELECTOR = ApiKeySelector()
PROMPTS_MAP = load_prompts_map()
class AIAdvisor:
"""
Provide personalized health advice from GPT
"""
@timer
def get_body_metrics_advice(self, request_data: Dict, use_gpt4: bool = False) -> Dict:
print(f'Generating AI personalized advice now for user based on body metrics')
result_body_metrics_json = {}
try:
gpt_model = 'gpt-4' if use_gpt4 else 'gpt-3.5-turbo'
result_body_metrics_json = generate_user_body_metrics_advice(PROMPTS_MAP['user_prompt'],
PROMPTS_MAP['system_prompt'],
request_data,
gpt_model)
except Exception as e:
print(f"Met exception {e} during AI advice generation for body metrics analysis")
return result_body_metrics_json
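# Editor's illustrative sketch (assumption): load_prompts_map() in service.properties is
# expected to return $-style templates whose fields match the request_data keys, roughly:
_EXAMPLE_PROMPTS_MAP = {
    "system_prompt": "You are a professional health and fitness advisor.",
    "user_prompt": "User $user_name shares the body tracking records $track_data. "
                   "Please analyse the trend and give personalised advice.",
}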
@timer
@retry(stop_max_attempt_number=1, wait_fixed=1000, retry_on_result=lambda result: result is None)
def generate_user_body_metrics_advice(user_prompt_tpl, sys_prompt, input_params: Dict, model) -> List or None:
"""Invoke GPTs to generate AI health advice based on body metrics data."""
ai_response_advice = None
try:
user_prompt = Template(user_prompt_tpl).substitute(**input_params)
ai_response_advice = invoke_gpt_api(sys_prompt, user_prompt,
model=model, max_tokens=2048)
except Exception as e:
print(f"Met exception when generating AI health advice, exception msg {e}, retrying...")
finally:
return ai_response_advice
def invoke_gpt_api(system_prompt: str,
user_prompt: str,
model: str = 'gpt-3.5-turbo',
max_tokens: int = 2048,
temperature: float = 0.7,
frequency_penalty: float = 0.25):
if model == 'gpt-4':
# gpt-4 api key
selected_api_key = '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'
else:
# gpt-3.5-turbo key
selected_api_key = API_SELECTOR.retrieve_api_key()
print("currently used api key is: ", selected_api_key)
messages = [{
"role": "system",
"content": system_prompt.strip()
}, {
"role": "user",
"content": user_prompt.strip()
}]
try:
client = OpenAI(
api_key=selected_api_key
)
response = client.chat.completions.create(
model=model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
top_p=1.0,
frequency_penalty=frequency_penalty,
presence_penalty=0.0,
)
assistant_response = response.choices[0].message.content
return assistant_response.strip()
except Exception as e:
print(f"Exception met when invoking openai api with api key {selected_api_key}, error msg: {e}")
return ""
if __name__ == '__main__':
# start_time0 = time.time()
input_case1 = {
"user_name": "Ethan Chen",
"track_data": [
{"date": "2023-10-01", "height": "175cm", "weight": "68kg", "body_fat_percentage": 0.18},
{"date": "2023-10-08", "height": "175cm", "weight": "66kg", "body_fat_percentage": 0.17},
{"date": "2023-10-15", "height": "175cm", "weight": "64kg", "body_fat_percentage": 0.16},
{"date": "2023-11-01", "height": "175cm", "weight": "60kg", "body_fat_percentage": 0.14}
]
}
advisor = AIAdvisor()
test_result1 = advisor.get_body_metrics_advice(input_case1)
print(test_result1)
| [] |
2024-01-10 | piercecohen1/GPT-CLI | stream.py | #!/usr/bin/env python3
# Author: Pierce Cohen
# Description: An interactive CLI for GPT models
# Version: 1.4.0
import openai
import os
import sys
from rich.console import Console
from rich.markdown import Markdown
import pyperclip
from playsound import playsound
import argparse
import json
import time
from rich.table import Table
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import PathCompleter, Completer, Completion
from prompt_toolkit.document import Document
from rich.live import Live
client = openai.OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
class CustomPathCompleter(Completer):
def get_completions(self, document, complete_event):
text = document.text_before_cursor
if text.startswith("/save ") or text.startswith("/load "):
command, partial_path = text.split(" ", 1)
path_completer = PathCompleter()
path_document = Document(partial_path, len(partial_path))
for completion in path_completer.get_completions(path_document, complete_event):
yield Completion(completion.text, completion.start_position,
completion.display, completion.display_meta)
def clear_terminal():
os.system("cls" if os.name == "nt" else "clear")
class ChatApplication:
def __init__(self, system_message=None, model="gpt-3.5-turbo", clear_on_init=False):
self.model = model
self.system_message = system_message
        self.initialize_messages(system_message=system_message, model=model)
self.console = Console()
self.sound = True # Set to False to disable sound
if clear_on_init:
clear_terminal()
def initialize_messages(self, system_message=None, model=None):
if system_message:
self.system_message = system_message
else:
self.system_message = "You are a helpful assistant. Keep your answers concise when possible."
if model:
self.model = model
self.messages = [{"role": "system", "content": self.system_message}]
def add_message(self, role, content):
message = {
"role": role,
"content": content
}
self.messages.append(message)
def display_markdown(self, text):
markdown = Markdown(text)
self.console.print(markdown, end="")
def try_chat_completion(self):
response_content = ""
try:
stream = client.chat.completions.create(
model=self.model,
messages=self.messages,
stream=True
)
for part in stream:
content = part.choices[0].delta.content if part.choices[0].delta.content is not None else ''
print(content, end='', flush=True) # Print content as it's received
response_content += content # Accumulate the response content
if 'finish_reason' in part.choices[0] and part.choices[0].finish_reason == 'stop':
print()
break # Break the loop since the message is complete
except openai.APIConnectionError as e:
print("The server could not be reached")
print(e.__cause__)
except openai.RateLimitError as e:
print("A 429 status code was received; we should back off a bit.")
except openai.APIStatusError as e:
print("Non-200-range status code was received")
print(e.status_code)
print(e.response)
except Exception as e:
print(f"An unexpected error occurred: {e}")
return response_content.strip()
def save_chat(self, filename):
try:
with open(filename, "w") as outfile:
json.dump({"model": self.model, "messages": self.messages}, outfile, indent=4)
print(f"Chat saved to '{filename}'.")
except Exception as e:
print(f"An error occurred while saving the chat: {e}")
def load_chat(self, filename):
try:
with open(filename, "r") as infile:
chat_data = json.load(infile)
self.model = chat_data.get("model", self.model)
self.messages = chat_data.get("messages", [])
print(f"Chat loaded from '{filename}'.")
except FileNotFoundError:
print(f"File '{filename}' not found.")
except Exception as e:
print(f"An error occurred while loading the chat: {e}")
def format_messages(self):
formatted_messages = "\n".join(f"{message['role'].capitalize()}: {message['content']}"
for message in self.messages)
return formatted_messages
def main():
parser = argparse.ArgumentParser(description="An interactive CLI for GPT models")
parser.add_argument("-v", "--version", action="store_true", help="Show the version number and exit")
parser.add_argument("--load", type=str, help="Load a chat from a file immediately upon launching the program")
parser.add_argument("-q", "--query", type=str, help="Execute a query immediately upon launch")
parser.add_argument('remainder', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.version:
print("GPT-CLI version 1.4.0")
sys.exit(0)
chat_app = ChatApplication()
if args.load:
chat_app.load_chat(args.load)
if args.query:
chat_app.add_message("user", args.query)
chat_app.try_chat_completion()
print()
else:
print(f"Model: {chat_app.model}")
session = PromptSession(completer=CustomPathCompleter())
try:
while True:
user_input = session.prompt("You: ")
if user_input.startswith('/'):
command = user_input.split(' ')[0].lower()
argument = user_input[len(command) + 1:].strip()
if command == '/save':
chat_app.save_chat(argument)
elif command == '/load':
chat_app.load_chat(argument)
elif command == '/quit' or command == '/exit':
print("Exiting the GPT-CLI.")
break
elif command == '/info':
user_messages_count = sum(1 for message in chat_app.messages if message['role'] == 'user')
assistant_messages_count = sum(1 for message in chat_app.messages if message['role'] == 'assistant')
table = Table(title="Chat Information", show_header=True, header_style="bold magenta")
table.add_column("Attribute", style="dim", width=20)
table.add_column("Value")
table.add_row("Model", chat_app.model)
table.add_row("System Message", chat_app.system_message)
table.add_row("User Messages", str(user_messages_count))
table.add_row("Assistant Messages", str(assistant_messages_count))
chat_app.console.print(table)
elif command == '/clear':
clear_terminal()
elif command == '/new':
chat_app.initialize_messages()
print("Started a new chat session.")
elif command == '/system':
clear_terminal()
chat_app.initialize_messages(system_message=argument)
print("Started a new chat with a custom system message.")
elif command == '/model':
                    if argument:
                        chat_app.model = argument
clear_terminal()
chat_app.initialize_messages()
print(f"Switched model to {argument} and started a new chat session.")
else:
print("Please specify a model name after the /model command.")
elif command == '/copy':
pyperclip.copy(chat_app.messages[-1]['content'] if chat_app.messages else '')
print("Copied the last message to the clipboard.")
elif command == '/paste':
pasted_content = pyperclip.paste()
chat_app.add_message("user", pasted_content)
chat_app.try_chat_completion()
elif command == '/help':
print("Available commands:")
print("/paste - Paste clipboard content")
print("/copy - Copy the last response to the clipboard")
print("/new - Start a new chat")
print("/clear - Clear terminal window")
print("/system - Start a new chat with a custom system message")
print("/model - Start a new chat with the specified model")
print("/quit - Exit the program")
print("/info - Display info about the current chat session")
print("/save [FILENAME] - Save the chat to a file")
print("/load [FILENAME] - Load a chat from a file")
print("/help - Display this help message")
else:
print(f"Unknown command: {command}")
else:
chat_app.add_message("user", user_input)
assistant_response = chat_app.try_chat_completion()
if assistant_response:
chat_app.add_message("assistant", assistant_response)
print()
except KeyboardInterrupt:
print("\nExiting...")
if __name__ == "__main__":
main()
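# Editor's note: example invocations (flags as defined by the argparse setup above):
#   python stream.py                       # start an interactive session
#   python stream.py --load chat.json      # resume a previously saved chat
#   python stream.py -q "Explain streams"  # answer one query, then stay interactive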
| [] |
2024-01-10 | Awakuruf/Fearless | client~src~danger_level~get_danger_level.py | import cohere
from examples import examples
# from inputs import inputs
import requests
co = cohere.Client('Ex5i3BxYcOXkbj5oVSRB7EC0ne8GQT9lLFBxgE4D')
headers = {"x-api-key": "U9QGGZcVtlzGpN5wcH-QyL-_bh6t_042FIVBJAC8-UE"}
# uses https://app.newscatcherapi.com/dashboard/
def get_danger_level(query):
url = "https://api.newscatcherapi.com/v2/search"
querystring = {
"q": f"{query} AND vancouver",
"lang": "en",
"sort_by": "relevancy",
"page": "1"
}
response = requests.request("GET", url, headers=headers, params=querystring)
titles = []
if response.status_code == 200:
data = response.json()
seen = set()
for article in data['articles']:
title = article['title']
if title not in seen:
titles.append(title)
seen.add(title)
if len(titles) == 15:
break
else:
print(f"Error {response.status_code}: {response.reason}")
response = co.classify(model='large', inputs=titles, examples=examples)
toxic_sum = 0
for classification in response:
prediction = classification.prediction
confidence = classification.confidence
if prediction == 'Benign':
toxic_sum += 1 - confidence
else:
toxic_sum += confidence
print(titles)
danger_level = toxic_sum / len(response)
return danger_level
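# Editor's illustrative usage (requires valid Cohere and NewsCatcher API keys above).
if __name__ == "__main__":
    print(f"Estimated danger level: {get_danger_level('Granville Street'):.2f}")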
| [] |
2024-01-10 | Awakuruf/Fearless | client~src~danger_level~examples.py | from cohere.classify import Example
examples = [
Example(
"Pedestrian Safety at Risk: Increase in Robberies Along Popular Walking Routes",
"Toxic"),
Example(
"Walkers Beware: Recent Spate of Hit-and-Runs Leaves Pedestrians Vulnerable",
"Toxic"),
Example(
"Danger Lurks for Pedestrians as Muggings and Assaults on Walking Routes Surge",
"Toxic"),
Example("Pedestrian Routes Targeted by Thieves Stealing Personal Property",
"Toxic"),
Example(
"Pedestrian Fatalities Rise: Authorities Urge Caution on Dangerous Walking Routes",
"Toxic"),
Example(
"Violent Crimes Against Pedestrians on the Rise Along Certain Routes",
"Toxic"),
Example(
"Police Investigate Reports of Aggressive and Dangerous Behavior on Walking Routes",
"Toxic"),
Example(
"Pedestrian Safety Threatened by Increase in Vehicle Collisions on Busy Walking Routes",
"Toxic"),
Example(
"Residents Concerned Over Recent String of Pedestrian-Related Crimes on Popular Routes",
"Toxic"),
Example(
"Walkers Warned of Rising Danger on Streets and Walking Paths: Take Precautions.",
"Toxic"),
Example("Local Business Wins Award for Sustainable Practices", "Benign"),
Example("Community Comes Together to Support Local Food Bank", "Benign"),
Example("New Park Opens to Public, Featuring Family-Friendly Attractions",
"Benign"),
Example("Local High School Students Recognized for Achievements in Science",
"Benign"),
Example(
"City Implements New Recycling Program to Promote Environmental Sustainability",
"Benign"),
Example("Historic Landmark Restored to Its Former Glory, Open for Visitors",
"Benign"),
Example("Local Theater Group to Stage Shakespearean Classic", "Benign"),
Example("Annual Art Festival Draws Record Crowds to Downtown", "Benign"),
Example("City Council Approves Funding for New Community Center", "Benign"),
Example("Assault", "Toxic"),
Example("Arrest", "Toxic"),
Example("Killed", "Toxic"),
Example("Murder", "Toxic"),
Example("Death", "Toxic"),
Example("Stabbing", "Toxic"),
Example("Crime rate", "Toxic"),
Example("Burglary", "Toxic"),
Example("Robbery", "Toxic"),
Example("Homicide", "Toxic"),
Example("Theft", "Toxic"),
Example("Violence", "Toxic"),
Example("Crime", "Toxic"),
Example("Arson", "Toxic"),
Example("Domestic violence", "Toxic"),
Example("Sexual assault", "Toxic"),
Example("Hate crimes", "Toxic"),
Example("Kidnapping", "Toxic"),
Example("Gang violence", "Toxic"),
Example("Repeat offender", "Toxic"),
Example("offender", "Toxic"),
Example("No pedestrian crossings", "Toxic"),
Example("Poorly maintained sidewalks", "Toxic"),
Example("Unmarked crosswalks", "Toxic"),
Example("Absence of pedestrian safety measures", "Toxic"),
Example("Poorly lit areass", "Toxic"),
Example("Intoxicated drivers", "Toxic"),
Example("Homeless encampments", "Toxic"),
Example("Panhandling", "Toxic"),
Example("Aggressive behavior by homeless people", "Toxic"),
Example("Public defecation or urination", "Toxic"),
Example("Increased crime rates in areas with homeless populations", "Toxic"),
Example("Harassment of pedestrians by homeless individuals", "Toxic"),
Example("Encampment fires or other hazards", "Toxic"),
Example("Aggressive behavior by homeless people", "Toxic"),
Example("Used drug paraphernalia on sidewalks", "Toxic"),
Example("Sleeping or camping in doorways", "Toxic"),
Example("Aggressive begging", "Toxic"),
Example("Human waste and trash in public spaces", "Toxic"),
Example("Human trafficking", "Toxic"),
Example("Restaurant", "Benign"),
Example("Family", "Benign"),
Example("Charity", "Benign"),
] | [] |
2024-01-10 | xiami2019/CLAIF | data_generation.py | import argparse
import openai
import os
from datetime import datetime
from modeling import CLAIFGenerator
from utils import set_seed, read_inputs, DatasetEntry
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, required=True,
help="The output directory to which the generated dataset is saved")
parser.add_argument("--generation_stage", type=str, default='stage-1',
help='stage-1: generated sentence pairs; stage-2: generated similarity scores;')
parser.add_argument("--llm_engine", type=str, default="text-davinci-003",
help="OpenAI's large language models.")
parser.add_argument("--openai_api_key", type=str, required=True)
parser.add_argument("--max_output_length", type=int, default=256,
help="The maximum output length for each generated text.")
parser.add_argument("--top_p", type=float, default=0.9,
help="p value for top-p sampling (set to 0 to perform no top-p sampling)")
parser.add_argument("--input_file", type=str,
help="An optional input file containing raw texts. This is required for generating text pair datasets.")
parser.add_argument("--input_file_type", choices=["plain", "jsonl", "stsb"], default="jsonl",
help="The type of the input file. Choices are 'plain' (a raw text file with one input per line), 'jsonl' (a jsonl "
"file as produced by DINO) and 'stsb' (a TSV file in the STS Benchmark format)")
parser.add_argument("--batch_size", type=int, default=None,
help="The batch size for generation (only if --input_file is not set)")
parser.add_argument("--remove_identical_pairs", action='store_true',
help="Whether text pairs with text_a == text_b should be removed from the dataset (only for text pair datasets)")
parser.add_argument("--allow_newlines_in_outputs", action='store_true',
help="If set to true, model outputs that contain a newline character before the end-of-sequence token (a quotation "
"mark) are not removed from the dataset.")
parser.add_argument("--min_num_words", type=int, default=-1,
help="The minimum number of (whitespace-separated) words for each dataset entry. Entries with fewer words are "
"removed.")
parser.add_argument("--using_cot", action="store_true", help='Zero-shot CoT, first generate analyze the difference between two sentences than give a score.')
parser.add_argument("--temperature", type=float, default=0.7, help="temperature for GPT3 generation.")
parser.add_argument("--seed", type=int, default=42)
args = parser.parse_args()
set_seed(args.seed)
args.date = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
print(f"Parameters: {args}")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
assert args.input_file
inputs = read_inputs(args.input_file, args.input_file_type)
assert args.openai_api_key
openai.api_key = args.openai_api_key
generator = CLAIFGenerator(
model=args.llm_engine,
openai_api_key=args.openai_api_key,
max_output_length=args.max_output_length,
top_p=args.top_p,
remove_identical_pairs=args.remove_identical_pairs,
min_num_words=args.min_num_words,
allow_newlines_in_outputs=args.allow_newlines_in_outputs,
using_cot = args.using_cot,
temperature = args.temperature
)
print("Starting dataset generation with CLAIF {}".format(args.generation_stage))
outputs = generator.generate_dataset(inputs, batch_size=args.batch_size, generation_stage=args.generation_stage)
print(f"Dataset generation complete, dataset contains {len(outputs)} entries")
dataset_path = os.path.join(args.output_dir, 'generated-dataset.jsonl')
DatasetEntry.save_list(outputs, dataset_path)
print(f"Done saving dataset to file '{dataset_path}'")
| [] |
2024-01-10 | xiami2019/CLAIF | modeling.py | import re
import math
import random
from typing import List, Optional, Union
import openai
from tqdm import tqdm
from utils import DatasetEntry, DatasetEntryWithExp
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
return openai.Completion.create(**kwargs)
class CLAIFGenerator:
def __init__(self, model: str = None, openai_api_key: Optional[str] = None, max_output_length: int = 100, top_p: float = 0.9, remove_identical_pairs: bool = False, min_num_words: int = -1, \
allow_newlines_in_outputs: bool = False, using_cot: bool = False, temperature: float = 0.7):
self.model = model
self.openai_api_key = openai_api_key
self.max_output_length = max_output_length
self.top_p = top_p
self.remove_identical_pairs = remove_identical_pairs
self.min_num_words = min_num_words
self.allow_newlines_in_outputs = allow_newlines_in_outputs
self.using_cot = using_cot
self.temperature = temperature
def corrupt_sentence_with_mask(self, sentence, mask_rate, merge_adjacent_tokens=True, merge_rate=0.5):
mask_token = '<mask>'
        sentence = sentence.split(' ') if not isinstance(sentence, list) else sentence
replace_token_number = math.ceil(len(sentence) * mask_rate)
replace_token_index = random.sample(range(len(sentence)), replace_token_number)
for i in replace_token_index:
sentence[i] = mask_token
# merge adjacent tokens
if merge_adjacent_tokens and random.uniform(0,1) <= merge_rate:
result = []
for token in sentence:
if token != '<mask>':
result.append(token)
elif result == [] or result[-1] != '<mask>':
result.append(token)
sentence = result
sentence = ' '.join(sentence)
return sentence
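    # Editor's note (illustrative): with mask_rate=0.3, "a man is playing the guitar"
    # (6 tokens, so ceil(6 * 0.3) = 2 masks) may come back as
    # "a man <mask> playing the <mask>"; mask positions are random, and adjacent
    # masks may additionally be merged into a single <mask> with probability merge_rate.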
def generate_dataset_with_explanation(self, input_texts: Optional[List[str]], batch_size: Optional[int] = None) -> List[DatasetEntryWithExp]:
dataset = []
for start_idx in tqdm(range(0, len(input_texts), batch_size)):
inputs = input_texts[start_idx:start_idx+batch_size]
current_generate_entries = self._generate_dataset_entries(inputs)
if current_generate_entries == []:
print('Insufficient balance')
break
dataset += current_generate_entries
dataset = self._postprocess_dataset(dataset)
return dataset
def generate_dataset(self, input_texts: Optional[List[str]], batch_size: Optional[int] = None, generation_stage: str = 'stage-1') -> List[DatasetEntry]:
def stage_1_generation():
generate_with_inputs = input_texts is not None
dataset = []
mask_rates = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
for start_idx in tqdm(range(0, len(input_texts), batch_size), desc="Generation step"):
inputs = input_texts[start_idx:start_idx+batch_size]
unmasked_inputs = []
masked_inputs = []
for sent in inputs:
for mask_rate in mask_rates:
unmasked_inputs.append(sent)
masked_inputs.append(self.corrupt_sentence_with_mask(sent, mask_rate=mask_rate))
if 0.0 in mask_rates:
current_generate_entries = self._generate_dataset_entries_stage_1(unmasked_inputs, masked_inputs, first_no_mask=True)
else:
current_generate_entries = self._generate_dataset_entries_stage_1(unmasked_inputs, masked_inputs)
if current_generate_entries == []:
print('Insufficient balance')
break
dataset += current_generate_entries
dataset = self._postprocess_dataset_stage_1(dataset, generate_with_inputs)
return dataset
def stage_2_generation():
dataset = []
for start_idx in tqdm(range(0, len(input_texts), batch_size)):
inputs = input_texts[start_idx:start_idx+batch_size]
current_generate_entries = self._generate_dataset_entries_stage_2(inputs)
if current_generate_entries == []:
print('Insufficient balance')
break
dataset += current_generate_entries
dataset = self._postprocess_dataset_stage_2(dataset)
return dataset
if generation_stage == 'stage-1':
return stage_1_generation()
elif generation_stage == 'stage-2':
return stage_2_generation()
def _generate_dataset_entries_stage_1(self, inputs: Union[str, int], masked_inputs: Union[str, int], first_no_mask=False) -> List[DatasetEntry]:
if first_no_mask:
instructions = [self._build_instruction_for_no_mask(masked_inputs[0])]
for i in range(1, len(masked_inputs)):
instructions.append(self._build_instruction(masked_inputs[i]))
else:
instructions = [self._build_instruction(masked_inputs[i]) for i in range(len(masked_inputs))]
if self.openai_api_key is not None:
try:
model_responses = completion_with_backoff(
engine=self.model, prompt=instructions, max_tokens=self.max_output_length, top_p=self.top_p, temperature=self.temperature, stop=['"']
)
model_outputs = [model_response["text"] for model_response in model_responses['choices']]
except openai.error.RateLimitError as e:
print(e)
return []
except Exception as e: # something else went wrong
print(e)
return []
else:
raise Exception("No GPT3 key!")
model_outputs = [
self._process_output_stage_1(input_text=inputs[i], output_text=model_outputs[i], label=None)
for i in range(len(model_outputs))
]
model_outputs = [output for output in model_outputs if output is not None]
return model_outputs
def _generate_dataset_entries_stage_2(self, inputs: Union[str, int]) -> List[tuple]:
if self.using_cot:
instructions = [self._build_instruction_for_explanation_cot(inputs[i]) for i in range(len(inputs))]
else:
instructions = [self._build_instruction_for_explanation(inputs[i]) for i in range(len(inputs))]
if self.openai_api_key is not None:
try:
model_responses = completion_with_backoff(
engine=self.model, prompt=instructions, max_tokens=self.max_output_length, top_p=self.top_p, temperature=self.temperature
)
model_outputs = [model_response["text"] for model_response in model_responses['choices']]
except openai.error.RateLimitError as e:
print(e)
return []
except Exception as e: # something else went wrong
print(e)
return []
else:
raise Exception("No GPT3 key!")
model_outputs = [
self._process_output_stage_2(sentence_a=inputs[i][0], sentence_b=inputs[i][1], instruction=instructions[i], output_text=model_outputs[i])
for i in range(len(model_outputs))
]
return model_outputs
def _build_instruction(self, text: str) -> str:
return "Replace all <mask> tokens in \"{}\" to make a new sentence. The new sentence is: \"".format(text)
def _build_instruction_for_no_mask(self, text: str) -> str:
return "Write two sentences that mean the same thing.\nSentence 1: \"{}\"\nSentence 2: \"".format(text)
def _build_instruction_for_explanation_cot(self, text: str) -> str:
sentence_a, sentence_b = text
prompt = 'The semantic similarity score of two sentences is between 0.0 and 1.0, 0.0 means that the semantics are completely different and 1.0 means that the semantics are completely consistent.\nNow given two sentences \'{}\' and \'{}\', please explain the semantic difference between them and then give a semantic similarity score based on the semantic difference:\nThe semantic difference between these two sentences is'.format(sentence_a, sentence_b)
return prompt
def _build_instruction_for_explanation(self, text: str) -> str:
sentence_a, sentence_b = text
prompt = 'The similarity score for two sentences is in the range from 0.0 to 1.0, 0.0 means completely different and 1.0 means almost the same.\nNow give two sentences \'{}\' and \'{}\', please give a similarity score of these two sentences and give the reason:\nThe similarity score for these two sentences is'.format(sentence_a, sentence_b)
return prompt
def _process_output_stage_1(self, input_text: Union[str, int], output_text: str, label: str) \
-> Optional[DatasetEntry]:
return DatasetEntry(text_a=input_text, text_b=output_text , label=label)
def _process_output_stage_2(self, sentence_a, sentence_b, instruction, output_text):
return (sentence_a, sentence_b, instruction, output_text)
def _postprocess_dataset_stage_1(self, dataset: List[DatasetEntry], generate_with_inputs: bool) -> List[DatasetEntry]:
if self.min_num_words > 0:
if generate_with_inputs:
dataset = [entry for entry in dataset if len(entry.text_b.split()) >= self.min_num_words]
else:
dataset = [entry for entry in dataset if len(entry.text_a.split()) >= self.min_num_words]
if generate_with_inputs and self.remove_identical_pairs:
dataset = [entry for entry in dataset if entry.text_a != entry.text_b]
return dataset
def _postprocess_dataset_stage_2(self, dataset):
'''
split similarity score and explanation
'''
pattern = re.compile(r'[0-9\.]*[0-9]')
new_dataset = []
invalid_explanation = 0
for sample in dataset:
sentence_a, sentence_b, instruction, output_text = sample
res = re.findall(pattern, output_text)
if len(res) == 0:
invalid_explanation += 1
continue
if self.using_cot:
similarity_score = res[-1]
else:
similarity_score = res[0]
if self.using_cot:
new_dataset.append(DatasetEntryWithExp(sentence_a, sentence_b, similarity_score, "The semantic difference between these two sentences is" + output_text))
else:
new_dataset.append(DatasetEntryWithExp(sentence_a, sentence_b, similarity_score, "The similarity score for these two sentences is" + output_text))
print("Invalid explanation number is: {}".format(invalid_explanation))
return new_dataset | [
"The semantic similarity score of two sentences is between 0.0 and 1.0, 0.0 means that the semantics are completely different and 1.0 means that the semantics are completely consistent.\nNow given two sentences 'PLACEHOLDER' and 'PLACEHOLDER', please explain the semantic difference between them and then give a semantic similarity score based on the semantic difference:\nThe semantic difference between these two sentences is",
"The similarity score for two sentences is in the range from 0.0 to 1.0, 0.0 means completely different and 1.0 means almost the same.\nNow give two sentences 'PLACEHOLDER' and 'PLACEHOLDER', please give a similarity score of these two sentences and give the reason:\nThe similarity score for these two sentences is"
] |
2024-01-10 | zhangpaipai/DI-engine | dizoo~atari~envs~atari_wrappers.py | # Borrow a lot from openai baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
import gym
from collections import deque
from ding.envs import NoopResetWrapper, MaxAndSkipWrapper, EpisodicLifeWrapper, FireResetWrapper, WarpFrameWrapper, ScaledFloatFrameWrapper, \
ClipRewardWrapper, FrameStackWrapper
def wrap_deepmind(env_id, episode_life=True, clip_rewards=True, frame_stack=4, scale=True, warp_frame=True):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
assert 'NoFrameskip' in env_id
env = gym.make(env_id)
env = NoopResetWrapper(env, noop_max=30)
env = MaxAndSkipWrapper(env, skip=4)
if episode_life:
env = EpisodicLifeWrapper(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetWrapper(env)
if warp_frame:
env = WarpFrameWrapper(env)
if scale:
env = ScaledFloatFrameWrapper(env)
if clip_rewards:
env = ClipRewardWrapper(env)
if frame_stack:
env = FrameStackWrapper(env, frame_stack)
return env
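def _example_wrapped_env():
    """Editor's illustrative sketch (not part of the original module): build a
    typical DeepMind-style wrapped Atari environment. Requires gym with the
    Atari ROMs installed; observations are channel-first stacked frames.
    """
    return wrap_deepmind('PongNoFrameskip-v4', episode_life=True, clip_rewards=True)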
def wrap_deepmind_mr(env_id, episode_life=True, clip_rewards=True, frame_stack=4, scale=True, warp_frame=True):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
    assert 'MontezumaRevenge' in env_id
env = gym.make(env_id)
env = NoopResetWrapper(env, noop_max=30)
env = MaxAndSkipWrapper(env, skip=4)
if episode_life:
env = EpisodicLifeWrapper(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetWrapper(env)
if warp_frame:
env = WarpFrameWrapper(env)
if scale:
env = ScaledFloatFrameWrapper(env)
if clip_rewards:
env = ClipRewardWrapper(env)
if frame_stack:
env = FrameStackWrapper(env, frame_stack)
return env
| [] |
2024-01-10 | TeCSAR-UNCC/gem5-SALAM | components_library~boards~abstract_board.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABCMeta, abstractmethod
from .mem_mode import MemMode
from m5.objects import System, Port, IOXBar, ClockDomain
from ..isas import ISA
from ..coherence_protocol import CoherenceProtocol
from typing import List
class AbstractBoard(System):
"""The abstract board interface.
Boards are used as the object which can connect together all other
components. This abstract class defines the external interface that other
boards must provide. Boards can be specialized for different ISAs or system
designs (e.g., core counts, cache types, memory channels, I/O devices, etc)
In addition to providing the place that system components are connected,
the board also exposes an interface for the caches, processor, and memory
to interact.
The board also exposes an interface to set up I/O devices which needs to be
specialized for each ISA and/or platform.
Board inherits from System and can therefore be used as a System simobject
when required.
"""
__metaclass__ = ABCMeta
def __init__(
self,
processor: "AbstractProcessor",
memory: "AbstractMemory",
cache_hierarchy: "AbstractCacheHierarchy",
) -> None:
super(AbstractBoard, self).__init__()
"""
:param processor: The processor for this board.
:param memory: The memory for this board.
        :param cache_hierarchy: The cache hierarchy for this board.
"""
self.processor = processor
self.memory = memory
self.cache_hierarchy = cache_hierarchy
def get_processor(self) -> "AbstractProcessor":
"""Get the processor connected to the board.
:returns: The processor.
"""
return self.processor
def get_memory(self) -> "AbstractMemory":
"""Get the memory (RAM) connected to the board.
:returns: The memory system.
"""
return self.memory
def get_cache_hierarchy(self) -> "AbstractCacheHierarchy":
"""Get the cache hierarchy connected to the board.
:returns: The cache hierarchy.
"""
return self.cache_hierarchy
def get_cache_line_size(self) -> int:
"""Get the size of the cache line.
:returns: The size of the cache line size.
"""
return self.cache_line_size
    # Technically `get_dma_ports` returns a list. This list could be empty to
    # indicate the absence of DMA ports. Though I quite like having this
    # boolean to quickly check a board.
@abstractmethod
def has_dma_ports(self) -> bool:
"""Determine whether the board has DMA ports or not.
:returns: True if the board has DMA ports, otherwise False.
"""
raise NotImplementedError
@abstractmethod
def get_dma_ports(self) -> List[Port]:
"""Get the board's Direct Memory Access ports.
This abstract method must be implemented within the subclasses if they
support DMA and/or full system simulation.
:returns: A List of the Direct Memory Access ports.
"""
raise NotImplementedError
@abstractmethod
def has_io_bus(self) -> bool:
"""Determine whether the board has an IO bus or not.
:returns: True if the board has an IO bus, otherwise False.
"""
raise NotImplementedError
@abstractmethod
def get_io_bus(self) -> IOXBar:
"""Get the board's IO Bus.
This abstract method must be implemented within the subclasses if they
support DMA and/or full system simulation.
The I/O bus is a non-coherent bus (in the classic caches). On the CPU
side, it accepts requests meant for I/O devices. On the memory side, it
forwards these requests to the devices (e.g., the interrupt
controllers on each core).
:returns: The I/O Bus.
"""
raise NotImplementedError
@abstractmethod
def get_clock_domain(self) -> ClockDomain:
"""Get the clock domain.
:returns: The clock domain.
"""
raise NotImplementedError
@abstractmethod
def connect_system_port(self, port: Port) -> None:
raise NotImplementedError
@abstractmethod
def set_mem_mode(self, mem_mode: MemMode) -> None:
"""
Set the memory mode of the board.
:param mem_mode: The memory mode the board is to be set to.
"""
raise NotImplementedError
@abstractmethod
def connect_things(self) -> None:
"""Connects all the components to the board.
This should be called after the constructor.
When implementing this function, derived boards should use this to
hook up the memory, process, and cache hierarchy as a *second* stage.
You should use this function to connect things together when you need
to know that everything has already been constructed.
"""
raise NotImplementedError
| [] |
2024-01-10 | TeCSAR-UNCC/gem5-SALAM | components_library~runtime.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This file contains functions to extract gem5 runtime information.
"""
from m5.defines import buildEnv
from .isas import ISA
from .coherence_protocol import CoherenceProtocol
def get_runtime_isa() -> ISA:
"""Gets the target ISA.
This can be inferred at runtime.
:returns: The target ISA.
"""
isa_map = {
"sparc": ISA.SPARC,
"mips": ISA.MIPS,
"null": ISA.NULL,
"arm": ISA.ARM,
"x86": ISA.X86,
"power": ISA.POWER,
"riscv": ISA.RISCV,
}
isa_str = str(buildEnv["TARGET_ISA"]).lower()
if isa_str not in isa_map.keys():
raise NotImplementedError(
"ISA '" + buildEnv["TARGET_ISA"] + "' not recognized."
)
return isa_map[isa_str]
def get_runtime_coherence_protocol() -> CoherenceProtocol:
"""Gets the cache coherence protocol.
This can be inferred at runtime.
:returns: The cache coherence protocol.
"""
protocol_map = {
"mi_example": CoherenceProtocol.MI_EXAMPLE,
"moesi_hammer": CoherenceProtocol.ARM_MOESI_HAMMER,
"garnet_standalone": CoherenceProtocol.GARNET_STANDALONE,
"moesi_cmp_token": CoherenceProtocol.MOESI_CMP_TOKEN,
"mesi_two_level": CoherenceProtocol.MESI_TWO_LEVEL,
"moesi_amd_base": CoherenceProtocol.MOESI_AMD_BASE,
"mesi_three_level_htm": CoherenceProtocol.MESI_THREE_LEVEL_HTM,
"mesi_three_level": CoherenceProtocol.MESI_THREE_LEVEL,
"gpu_viper": CoherenceProtocol.GPU_VIPER,
"chi": CoherenceProtocol.CHI,
}
protocol_str = str(buildEnv["PROTOCOL"]).lower()
if protocol_str not in protocol_map.keys():
raise NotImplementedError(
"Protocol '" + buildEnv["PROTOCOL"] + "' not recognized."
)
return protocol_map[protocol_str]
| [] |
2024-01-10 | TeCSAR-UNCC/gem5-SALAM | components_library~cachehierarchies~ruby~mesi_two_level_cache_hierarchy.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .abstract_ruby_cache_hierarhcy import AbstractRubyCacheHierarchy
from ..abstract_two_level_cache_hierarchy import AbstractTwoLevelCacheHierarchy
from ...coherence_protocol import CoherenceProtocol
from ...isas import ISA
from ...boards.abstract_board import AbstractBoard
from ...runtime import get_runtime_coherence_protocol, get_runtime_isa
from .topologies.simple_pt2pt import SimplePt2Pt
from .caches.mesi_two_level.l1_cache import L1Cache
from .caches.mesi_two_level.l2_cache import L2Cache
from .caches.mesi_two_level.directory import Directory
from .caches.mesi_two_level.dma_controller import DMAController
from m5.objects import (
RubySystem,
RubySequencer,
DMASequencer,
RubyPortProxy,
)
class MESITwoLevelCacheHierarchy(
AbstractRubyCacheHierarchy, AbstractTwoLevelCacheHierarchy
):
"""A two level private L1 shared L2 MESI hierarchy.
In addition to the normal two level parameters, you can also change the
number of L2 banks in this protocol.
The on-chip network is a point-to-point all-to-all simple network.
"""
def __init__(
self,
l1i_size: str,
l1i_assoc: str,
l1d_size: str,
l1d_assoc: str,
l2_size: str,
l2_assoc: str,
num_l2_banks: int,
):
AbstractRubyCacheHierarchy.__init__(self=self)
AbstractTwoLevelCacheHierarchy.__init__(
self,
l1i_size=l1i_size,
l1i_assoc=l1i_assoc,
l1d_size=l1d_size,
l1d_assoc=l1d_assoc,
l2_size=l2_size,
l2_assoc=l2_assoc,
)
self._num_l2_banks = num_l2_banks
def incorporate_cache(self, board: AbstractBoard) -> None:
if (
get_runtime_coherence_protocol()
!= CoherenceProtocol.MESI_TWO_LEVEL
):
raise EnvironmentError(
"The MESITwoLevelCacheHierarchy must be used with with the "
"MESI_Two_Level coherence protocol."
)
cache_line_size = board.get_cache_line_size()
self.ruby_system = RubySystem()
# MESI_Two_Level needs 5 virtual networks
self.ruby_system.number_of_virtual_networks = 5
self.ruby_system.network = SimplePt2Pt(self.ruby_system)
self.ruby_system.network.number_of_virtual_networks = 5
self._l1_controllers = []
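        # Build one private L1 (instruction + data) controller per core and wire
        # its sequencer to the core's icache, dcache, and walker ports below.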
for i, core in enumerate(board.get_processor().get_cores()):
cache = L1Cache(
self._l1i_size,
self._l1i_assoc,
self._l1d_size,
self._l1d_assoc,
self.ruby_system.network,
core,
self._num_l2_banks,
cache_line_size,
get_runtime_isa(),
board.get_clock_domain(),
)
if board.has_io_bus():
cache.sequencer = RubySequencer(
version=i,
dcache=cache.L1Dcache,
clk_domain=cache.clk_domain,
pio_request_port=board.get_io_bus().cpu_side_ports,
mem_request_port=board.get_io_bus().cpu_side_ports,
pio_response_port=board.get_io_bus().mem_side_ports,
)
else:
cache.sequencer = RubySequencer(
version=i,
dcache=cache.L1Dcache,
clk_domain=cache.clk_domain,
)
cache.ruby_system = self.ruby_system
core.connect_icache(cache.sequencer.in_ports)
core.connect_dcache(cache.sequencer.in_ports)
core.connect_walker_ports(
cache.sequencer.in_ports, cache.sequencer.in_ports
)
# Connect the interrupt ports
if get_runtime_isa() == ISA.X86:
int_req_port = cache.sequencer.interrupt_out_port
int_resp_port = cache.sequencer.in_ports
core.connect_interrupt(int_req_port, int_resp_port)
self._l1_controllers.append(cache)
self._l2_controllers = [
L2Cache(
self._l2_size,
self._l2_assoc,
self.ruby_system.network,
self._num_l2_banks,
cache_line_size,
)
for _ in range(self._num_l2_banks)
]
# TODO: Make this prettier: The problem is not being able to proxy
# the ruby system correctly
for cache in self._l2_controllers:
cache.ruby_system = self.ruby_system
self._directory_controllers = [
Directory(self.ruby_system.network, cache_line_size, range, port)
for range, port in board.get_memory().get_mem_ports()
]
# TODO: Make this prettier: The problem is not being able to proxy
# the ruby system correctly
for dir in self._directory_controllers:
dir.ruby_system = self.ruby_system
dma_ports = board.get_dma_ports()
self._dma_controllers = []
for i, port in enumerate(dma_ports):
ctrl = DMAController(self.ruby_system.network, cache_line_size)
ctrl.dma_sequencer = DMASequencer(version=i, in_ports=port)
self._dma_controllers.append(ctrl)
ctrl.ruby_system = self.ruby_system
self.ruby_system.num_of_sequencers = len(self._l1_controllers) + len(
self._dma_controllers
)
self.ruby_system.l1_controllers = self._l1_controllers
self.ruby_system.l2_controllers = self._l2_controllers
self.ruby_system.directory_controllers = self._directory_controllers
if len(self._dma_controllers) != 0:
self.ruby_system.dma_controllers = self._dma_controllers
# Create the network and connect the controllers.
self.ruby_system.network.connectControllers(
self._l1_controllers
+ self._l2_controllers
+ self._directory_controllers
+ self._dma_controllers
)
self.ruby_system.network.setup_buffers()
        # Set up a proxy port for the system_port. Used for loading binaries and
# other functional-only things.
self.ruby_system.sys_port_proxy = RubyPortProxy()
board.connect_system_port(self.ruby_system.sys_port_proxy.in_ports)
| [] |
2024-01-10 | TeCSAR-UNCC/gem5-SALAM | configs~example~components-library~boot_exit_disk_run.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This example runs a simple boot exit test.
Characteristics
---------------
* Runs exclusively on the X86 ISA with the MESI_TWO_LEVEL coherence protocol.
"""
import m5
from m5.objects import Root
import sys
import os
# This is a lame hack to get the imports working correctly.
# TODO: This needs to be fixed.
sys.path.append(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir,
os.pardir,
os.pardir,
)
)
from components_library.runtime import (
get_runtime_coherence_protocol,
get_runtime_isa,
)
from components_library.boards.x86_board import X86Board
from components_library.memory.ddr3_1600_8x8 import DDR3_1600_8x8
from components_library.processors.simple_processor import SimpleProcessor
from components_library.processors.cpu_types import CPUTypes
from components_library.isas import ISA
from components_library.coherence_protocol import CoherenceProtocol
import os
import subprocess
import gzip
import shutil
# Run a check to ensure the right version of gem5 is being used.
if (
get_runtime_coherence_protocol() != CoherenceProtocol.MESI_TWO_LEVEL
or get_runtime_isa() != ISA.X86
):
raise EnvironmentError(
"The boot-exit-disk_run.py should be run with X86_MESI_Two_Level."
)
from components_library.cachehierarchies.\
ruby.mesi_two_level_cache_hierarchy import (
MESITwoLevelCacheHierarchy,
)
# Set up the cache hierarchy to be MESI_Two_Level.
cache_hierarchy = MESITwoLevelCacheHierarchy(
l1d_size="32kB",
l1d_assoc=8,
l1i_size="32kB",
l1i_assoc=8,
l2_size="256kB",
l2_assoc=16,
num_l2_banks=1,
)
# Setup the system memory.
# Warning: This must be kept at 3GB for now. X86Motherboard does not support
# anything else right now!
memory = DDR3_1600_8x8(size="3GB")
# Setup a single core Timing Processor.
processor = SimpleProcessor(cpu_type=CPUTypes.TIMING, num_cores=1)
# Setup the motherboard.
motherboard = X86Board(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
exit_on_work_items=True,
)
motherboard.connect_things()
# Download the resources as necessary.
thispath = os.path.dirname(os.path.realpath(__file__))
kernel_url = (
"http://dist.gem5.org/dist/v21-0/kernels/x86/static/vmlinux-5.4.49"
)
kernel_path = os.path.join(thispath, "vmlinux-5.4.49")
if not os.path.exists(kernel_path):
subprocess.run(["wget", "-P", thispath, kernel_url])
boot_img_url = (
"http://dist.gem5.org/dist/v21-0/images/x86/ubuntu-18-04/boot-exit.img.gz"
)
boot_img_path_gz = os.path.join(thispath, "boot-exit.img.gz")
boot_img_path = os.path.join(thispath, "boot-exit.img")
if not os.path.exists(boot_img_path):
subprocess.run(["wget", "-P", thispath, boot_img_url])
with gzip.open(boot_img_path_gz, "rb") as f:
with open(boot_img_path, "wb") as o:
shutil.copyfileobj(f, o)
# Set the Full System workload.
motherboard.set_workload(
kernel=kernel_path, disk_image=boot_img_path, command="m5 exit \n"
)
# Begin running of the simulation. This will exit once the Linux system boot
# is complete.
print("Running with ISA: " + get_runtime_isa().name)
print("Running with protocol: " + get_runtime_coherence_protocol().name)
print()
root = Root(full_system=True, system=motherboard)
m5.instantiate()
print("Beginning simulation!")
exit_event = m5.simulate()
print(
"Exiting @ tick {} because {}.".format(m5.curTick(), exit_event.getCause())
)
| [] |
2024-01-10 | mobi1019/MADDPG-AUV | env_wrapper.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
from baselines.common.tile_images import tile_images
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
# ob, ob_full, reward, done, info = env.step(data)
ob, reward, done, info = env.step(data)
if all(done):
ob = env.reset()
# remote.send((ob, ob_full, reward, done, info))
remote.send((ob, reward, done, info))
elif cmd == 'reset':
# ob, ob_full = env.reset()
# remote.send((ob, ob_full))
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'get_agent_types':
if all([hasattr(a, 'adversary') for a in env.agents]):
remote.send(['adversary' if a.adversary else 'agent' for a in
env.agents])
else:
remote.send(['agent' for _ in env.agents])
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
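        # One duplex pipe per environment: the parent communicates over
        # self.remotes, while each worker process gets the matching work_remote.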
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
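        # The parent only talks over self.remotes, so close its copies of the
        # worker-side pipe ends.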
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.remotes[0].send(('get_agent_types', None))
self.agent_types = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
# obs, obs_full, rews, dones, infos = zip(*results)
# return np.stack(obs), np.stack(obs_full), np.stack(rews), np.stack(dones), infos
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode='human'):
# code doesn't work all that well
# TODO: need to clean up
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
bigimg = tile_images(imgs)
if mode == 'human':
import cv2
cv2.imshow('vecenv', bigimg[:, :, ::-1])
cv2.waitKey(1)
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
if all([hasattr(a, 'adversary') for a in env.agents]):
self.agent_types = ['adversary' if a.adversary else 'agent' for a in
env.agents]
else:
self.agent_types = ['agent' for _ in env.agents]
self.ts = np.zeros(len(self.envs), dtype='int')
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a,env) in zip(self.actions, self.envs)]
        obs, rews, dones, infos = map(np.array, zip(*results))
self.ts += 1
for (i, done) in enumerate(dones):
if all(done):
obs[i] = self.envs[i].reset()
self.ts[i] = 0
self.actions = None
return np.array(obs), np.array(rews), np.array(dones), infos
def reset(self):
results = [env.reset() for env in self.envs]
return np.array(results)
def close(self):
return
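# Rough usage sketch (illustrative; `make_env` is assumed to be a user-supplied
# factory returning a multi-agent env compatible with these wrappers, and
# `actions` a per-env list of per-agent actions):
#
#     vec_env = SubprocVecEnv([make_env for _ in range(4)])
#     obs = vec_env.reset()
#     obs, rewards, dones, infos = vec_env.step(actions)
#     vec_env.close()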
| [] |
2024-01-10 | AriYacovson/work_schedule | app~services~EmployeeUnavailableShiftService.py | from app.models import ShiftModel
from app.models.employee_unavailable_shift import EmployeeUnavailableShiftModel
from sqlalchemy.exc import SQLAlchemyError
import logging
from app.utils import openai_util
from app.schemas.employee_unavailable_shift_request import (
EmployeeUnavailableShiftRequest,
)
from app.schemas.employee_unavailable_shift_response import (
EmployeeUnavailableShiftResponse,
)
# from app.schemas.EmployeeShiftAssignmentRequest import EmployeeShiftAssignmentRequest
# from app.schemas.EmployeeShiftAssignmentResponse import EmployeeShiftAssignmentResponse
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)
def get_employee_unavailable_shifts(db) -> list[EmployeeUnavailableShiftResponse]:
try:
return db.query(EmployeeUnavailableShiftModel).all()
except SQLAlchemyError as e:
logger.error(f"Failed to fetch all unavailable shifts: {e}")
return None
def get_employee_unavailable_shift_by_id(
db, employee_unavailable_shift_id: int
) -> EmployeeUnavailableShiftResponse:
try:
return (
db.query(EmployeeUnavailableShiftModel)
.filter(EmployeeUnavailableShiftModel.id == employee_unavailable_shift_id)
.first()
)
except SQLAlchemyError as e:
logger.error(
f"Failed to fetch unavailable shift with id {employee_unavailable_shift_id}: {e}"
)
return None
def create_employee_unavailable_shift(
db, employee_unavailable_shift: EmployeeUnavailableShiftRequest
) -> EmployeeUnavailableShiftResponse:
employee_unavailable_shift_model = EmployeeUnavailableShiftModel(
**employee_unavailable_shift.model_dump()
)
try:
db.add(employee_unavailable_shift_model)
db.commit()
db.refresh(employee_unavailable_shift_model)
return employee_unavailable_shift_model
except SQLAlchemyError as e:
logger.error(f"Failed to create unavailable shift: {e}")
return None
def create_employee_unavailable_shifts_from_text(
db, employee_id: int, text: str
) -> bool:
unavailable_shifts = openai_util.extract_unavailable_shifts_from_text(
employee_id, text
)
unavailable_shifts = eval(unavailable_shifts)
for unavailable_shift in unavailable_shifts:
unavailable_shift_date = unavailable_shift["date"]
unavailable_shift_type_id = unavailable_shift["shift_type_id"]
# Query for the corresponding shift id
shift = db.query(ShiftModel).filter(ShiftModel.date == unavailable_shift_date, ShiftModel.shift_type_id == unavailable_shift_type_id).first()
if shift:
shift_id = shift.id
# Check if this unavailable shift already exists
existing_unavailable_shift = db.query(EmployeeUnavailableShiftModel).filter(
EmployeeUnavailableShiftModel.employee_id == employee_id,
EmployeeUnavailableShiftModel.shift_id == shift_id
).first()
if not existing_unavailable_shift:
unavailable_shift_model = EmployeeUnavailableShiftModel(
employee_id=employee_id, shift_id=shift_id
)
db.add(unavailable_shift_model)
try:
db.commit()
return True
except SQLAlchemyError as e:
logger.error(f"Failed to create unavailable shift: {e}")
return False
# try:
# unavailable_shifts = openai_util.extract_unavailable_shifts_from_text(
# employee_id, text
# )
# unavailable_shifts = eval(unavailable_shifts)
# for unavailable_shift in unavailable_shifts:
# employee_unavailable_shift_model = EmployeeUnavailableShiftModel(
# **unavailable_shift
# )
# db.add(employee_unavailable_shift_model)
# db.commit()
# db.refresh(employee_unavailable_shift_model)
# return True
# except SQLAlchemyError as e:
# logger.error(f"Failed to create unavailable shift: {e}")
# return False
def update_employee_unavailable_shift(
db, employee_unavailable_shift_id: int, employee_unavailable_shift_request: EmployeeUnavailableShiftRequest
) -> EmployeeUnavailableShiftResponse:
employee_unavailable_shift_model = get_employee_unavailable_shift_by_id(db, employee_unavailable_shift_id)
if employee_unavailable_shift_model:
try:
employee_unavailable_shift_model.shift_id = employee_unavailable_shift_request.shift_id
employee_unavailable_shift_model.employee_id = employee_unavailable_shift_request.employee_id
employee_unavailable_shift_model.date = employee_unavailable_shift_request.date
db.commit()
db.refresh(employee_unavailable_shift_model)
return employee_unavailable_shift_model
except SQLAlchemyError as e:
logger.error(f"Failed to update unavailable shift: {e}")
db.rollback()
return None
return None
def delete_employee_unavailable_shift(db, employee_unavailable_shift_id: int):
employee_unavailable_shift_model = get_employee_unavailable_shift_by_id(
db, employee_unavailable_shift_id
)
if employee_unavailable_shift_model:
try:
db.delete(employee_unavailable_shift_model)
db.commit()
return True
except SQLAlchemyError as e:
logger.error(f"Failed to delete unavailable shift: {e}")
db.rollback()
return False
return False
| [] |
2024-01-10 | INKXXXXYY/dy_vtuber | src~tarot_easy.py | # # import requests
# # import socks
# # import socket
# # import json
# # # 配置 SOCKS5 代理服务器地址和端口
# # proxy_server = "a004.zhuan99.men"
# # proxy_port = 10004
# # # 创建一个 SOCKS5 代理连接
# # socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, proxy_server, proxy_port)
# # # 打开一个网络连接(可以使用 requests 等库)
# # # 这个连接将通过 SOCKS5 代理进行
# # socket.socket = socks.socksocket
# # # OpenAI API密钥
# # api_key = "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"
# # # 定义函数来执行塔罗占卜
# # def tarot_answer(user_question):
# # api_endpoint = "https://api.openai.com/v1/chat/completions"
# # # 构建请求数据
# # data = {
# # "model": "gpt-3.5-turbo",
# # "messages": [
# # {
# # "role": "system",
# # "content": "你是一个直播间的主播,现在直播的内容是塔罗占卜,请根据抽中的牌回答用户的提问。\n\n回答问题时,请始终给出积极的答案,如果对于荒诞的问题,请分析用户的意图,再根据牌面结合用户意图给出积极的答案;\n\n===\n抽中的牌如下:\n1. 愚者(正位)\n2. 恶魔(逆位)\n3. 月亮(逆位)\n===\n\n回答问题时,按照以下顺序:\n1. 因为{原因}无法给出准确答案\n2. 复述抽中的牌面和正逆位情况\n3. 作出分析\n\n在回答过程中,适当加入语气助词,增加一些人情味。"
# # },
# # {
# # "role": "user",
# # "content": user_question
# # }
# # ],
# # "temperature": 1,
# # "max_tokens": 524,
# # "top_p": 1,
# # "frequency_penalty": 0,
# # "presence_penalty": 0
# # }
# # # 构建请求头
# # headers = {
# # "Authorization": f"Bearer {api_key}",
# # "Content-Type": "application/json"
# # }
# # # 使用代理发起请求
# # response = requests.post(api_endpoint, json=data, headers=headers, proxies={"https": f"socks5://{proxy_server}:{proxy_port}"})
# # # 处理响应
# # if response.status_code == 200:
# # result = response.json()
# # return result['choices'][0]['message']['content']
# # else:
# # return f"Request failed with status code {response.status_code}: {response.text}"
# # # 定义函数来判断是否是占卜问题
# # def is_tarot_question(user_content):
# # api_endpoint = "https://api.openai.com/v1/chat/completions"
# # # 构建请求数据
# # data = {
# # "model": "gpt-3.5-turbo",
# # "messages": [
# # {
# # "role": "system",
# # "content": "你现在是塔罗占卜师的助理,请你判断该问题是否属于占卜师可以回答的问题,如果不是,则回复 \"NO\",是则回复 \"YES\",使用 json 的公式,如下:\n{\"answer\":\"YES\"}"
# # },
# # {
# # "role": "user",
# # "content": user_content
# # }
# # ],
# # "temperature": 0,
# # "max_tokens": 256,
# # "top_p": 1,
# # "frequency_penalty": 0,
# # "presence_penalty": 0
# # }
# # # 构建请求头
# # headers = {
# # "Authorization": f"Bearer {api_key}",
# # "Content-Type": "application/json"
# # }
# # # 使用代理发起请求
# # response = requests.post(api_endpoint, json=data, headers=headers, proxies={"https": f"socks5://{proxy_server}:{proxy_port}"})
# # # 处理响应
# # if response.status_code == 200:
# # result = response.json()
# # return result['choices'][0]['message']['content']
# # else:
# # return f"Request failed with status code {response.status_code}: {response.text}"
# # # 解析回答
# # def parse_answer(json_str):
# # try:
# # data = json.loads(json_str)
# # answer = data.get('answer', '').upper()
# # if answer == 'NO':
# # return False
# # elif answer == 'YES':
# # return True
# # else:
# # return "error"
# # except json.JSONDecodeError:
# # return "error"
# # # 调用函数,并传入用户的问题
# # # if __name__ == '__main__':
# # # user_question = "我能成为百万富翁吗"
# # # response = is_tarot_question(user_question)
# # # is_question = parse_answer(response)
# # # if is_question :
# # # answer = tarot_answer(user_question)
# # # print(answer)
# # # elif is_question == False:
# # # print('NO')
# # # else:
# # # print('error')
# import json
# import os
# import openai
# openai.api_key = "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"
# def tarot_answer(user_question):
# messages = [
# {
# "role": "system",
# "content": "你是一个直播间的主播,现在直播的内容是塔罗占卜,请根据抽中的牌回答用户的提问。\n\n回答问题时,请始终给出积极的答案,如果对于荒诞的问题,请分析用户的意图,再根据牌面结合用户意图给出积极的答案;\n\n===\n抽中的牌如下:\n1. 愚者(正位)\n2. 恶魔(逆位)\n3. 月亮(逆位)\n===\n\n回答问题时,按照以下顺序:\n1. 因为{原因}无法给出准确答案\n2. 复述抽中的牌面和正逆位情况\n3. 作出分析\n\n在回答过程中,适当加入语气助词,增加一些人情味。"
# },
# {
# "role": "user",
# "content": user_question
# }
# ]
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=messages,
# temperature=1,
# max_tokens=524,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0
# )
# return response['choices'][0]['message']['content']
# def is_tarot_question(user_content):
# messages = [
# {
# "role": "system",
# "content": "你现在是塔罗占卜师的助理,请你判断该问题是否属于占卜师可以回答的问题,如果不是,则回复 \"NO\",是则回复 \"YES\",使用 json 的公式,如下:\n{\"answer\":\"YES\"}"
# },
# {
# "role": "user",
# "content": user_content
# }
# ]
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=messages,
# temperature=0,
# max_tokens=256,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0
# )
# print(response['choices'][0]['message']['content'])
# return response['choices'][0]['message']['content']
# def parse_answer(json_str):
# try:
# data = json.loads(json_str)
# answer = data.get('answer', '').upper()
# if answer == 'NO':
# return False
# elif answer == 'YES':
# return True
# else:
# return "error"
# except json.JSONDecodeError:
# return "error"
# # 调用函数,并传入用户的问题
# # if __name__ == '__main__':
# # user_question = "我能成为百万富翁吗"
# # response = is_tarot_question(user_question)
# # is_question = parse_answer(response)
# # if is_question :
# # answer = tarot_answer(user_question)
# # print(answer)
# # elif is_question == False:
# # print('NO')
# # else:
# # print('error')
# import json
# import os
# import random
# import openai
# openai.api_key = "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"
# def tarot_answer(user_question):
# cards = draw_random_cards_with_orientation('./tarot_cards.json', num_cards=3)
# cards_text = '\n'.join([f"{i+1}. {card}" for i, card in enumerate(cards)])
# print(cards)
# content_template = "你是一个直播间的主播,现在直播的内容是塔罗占卜,请根据抽中的牌回答用户的提问。\n\n回答问题时,请始终给出积极的答案,如果对于荒诞的问题,请分析用户的意图,再根据牌面结合用户意图给出积极的答案;\n\n===\n抽中的牌如下:\n{cards}\n===\n\n回答问题时,按照以下顺序:\n1. 因为「原因」无法给出准确答案\n2. 复述抽中的牌面和正逆位情况\n3. 作出分析\n"
# content = content_template.format(cards=cards_text)
# messages = [
# {
# "role": "system",
# "content": content
# },
# {
# "role": "user",
# "content": user_question
# }
# ]
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=messages,
# temperature=1,
# max_tokens=1024,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0
# )
# print(response['choices'][0]['message']['content'])
# return response['choices'][0]['message']['content'],cards
# def is_tarot_question(user_content):
# messages = [
# {
# "role": "system",
# "content": "你现在是塔罗占卜师的助理,请你判断该问题是否属于占卜师可以回答的问题,如果不是,则回复 \"NO\",是则回复 \"YES\",使用 json 的公式,如下:\n{\"answer\":\"YES\"}"
# },
# {
# "role": "user",
# "content": user_content
# }
# ]
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=messages,
# temperature=0,
# max_tokens=256,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0
# )
# print('is_tarot_question', response['choices'][0]['message']['content'])
# return response['choices'][0]['message']['content']
# def parse_answer(answer):
# print('test')
# try:
# # answer = json.loads(json_str)
# # answer = data.get('answer', '').upper()
# print('answer_test', answer)
# if answer == 'NO':
# print('parse_answer not ok')
# return '0'
# elif answer == 'YES':
# print('parse_answer ok')
# return '1'
# else:
# print('parse_answer error')
# return "error"
# except json.JSONDecodeError:
# return "error"
# def draw_random_cards_with_orientation(json_file_path, num_cards=3):
# """
# 从指定的 JSON 文件中随机抽取指定数量的卡片,并随机赋予正逆位。
# 参数:
# - json_file_path (str): JSON 文件的路径
# - num_cards (int): 要抽取的卡片数量
# 返回:
# - random_cards_json (str): 随机抽取并赋予正逆位的卡片(JSON 格式)
# """
# # 从文件中加载卡片列表
# with open(json_file_path, 'r', encoding='utf-8') as f:
# tarot_cards = json.load(f)
# # 随机抽取卡片
# random_cards = random.sample(tarot_cards, num_cards)
# # 随机赋予正逆位
# orientations = ["正位", "逆位"]
# random_cards_with_orientation = [f"{card}({random.choice(orientations)})" for card in random_cards]
# # 转换为 JSON 格式
# random_cards_json = json.dumps(random_cards_with_orientation, ensure_ascii=False)
# return random_cards_json
# def num_to_chinese(num_str: str) -> str:
# digits = {
# '0': '零',
# '1': '一',
# '2': '二',
# '3': '三',
# '4': '四',
# '5': '五',
# '6': '六',
# '7': '七',
# '8': '八',
# '9': '九'
# }
# units = ['', '十', '百', '千']
# if not num_str.isdigit():
# return num_str
# num_len = len(num_str)
# if num_len > 4:
# return num_str # 如果数字超过4位,不转换
# result = ''
# zero_flag = False
# for idx, char in enumerate(num_str):
# if char == '0':
# zero_flag = True
# else:
# if zero_flag:
# result += digits['0']
# zero_flag = False
# result += digits[char] + units[num_len - idx - 1]
# return result
# def transform_text(text: str) -> str:
# import re
# # 将逗号、句号、感叹号替换为 |
# text = text.replace(',', '|').replace('。', '|').replace('!', '|')
# # 移除括号和引号
# remove_chars = ['「', '」', '“', '”', '(', ')', '[', ']', '{', '}', '"', "'"]
# for char in remove_chars:
# text = text.replace(char, '')
# # 使用正则替换所有阿拉伯数字为中文数字
# text = re.sub(r'\d+', lambda m: num_to_chinese(m.group()), text)
# return text
# # 调用函数,并传入用户的问题
# if __name__ == '__main__':
# user_question = "我能成为百万富翁吗"
# response = is_tarot_question(user_question)
# # print('res=', response)
# is_question = parse_answer(response)
# # print('isquestion', is_question)
# if is_question == '1':
# answer = tarot_answer(user_question)
# final_answer = transform_text(answer)
# print(final_answer)
# elif is_question == '0':
# print('is_question = NO')
# else:
# print('question = error')
import json
import os
import random
import time
import openai
from utils.get_random_audio import get_random_audio
from voice_in import async_play_wav_windows, request_and_save_wav
openai.api_key = "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"
def tarot_answer(user_question, intend, cards):
# cards = draw_random_cards_with_orientation('./tarot_cards.json', num_cards=3)
cards_text = '\n'.join([f"{i+1}. {card}" for i, card in enumerate(cards)])
# print(cards)
content_template = "你是一个直播间的主播,现在直播的内容是塔罗占卜,请根据抽中的牌回答用户的提问。\n\n回答问题时,请始终给出积极的答案,如果对于荒诞的问题,根据牌面结合用户意图给出积极的答案;\n\n===\n抽中的牌如下:\n{cards}\n用户意图:{intend}\n===\n\n回答问题时,按照以下顺序:\n1. 因为「原因」无法给出准确答案\n2. 复述抽中的牌面和正逆位情况\n3. 作出分析\n"
content = content_template.format(cards=cards_text, intend=intend)
    content2 = "{\"question\":\"" + user_question + "\"}"
messages = [
{
"role": "system",
"content": content
},
{
"role": "user",
"content": content2
}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=1,
max_tokens=1024,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# print(response['choices'][0]['message']['content'])
return response['choices'][0]['message']['content']
def is_tarot_question(user_content):
messages = [
{
"role": "system",
"content": "你现在是塔罗占卜师的助理,请你判断该问题是否属于占卜师可以回答的问题,如果不是,则回复 \"NO\",是则回复 \"YES\",使用 json 的公式,如下:\n{\"answer\":\"YES\"}"
},
{
"role": "user",
"content": user_content
}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
answer = response['choices'][0]['message']['content']
print(answer)
return answer
def parse_answer(answer):
    try:
        print('answer_test', answer)
        # the assistant is instructed to reply with JSON such as {"answer":"YES"}
        data = json.loads(answer)
        answer = data.get('answer', '').upper()
        if answer == 'NO':
            print('parse_answer not ok')
            return '0'
        elif answer == 'YES':
            print('parse_answer ok')
            return '1'
        else:
            print('parse_answer error')
            return "error"
    except json.JSONDecodeError:
        return "error"
def draw_random_cards_with_orientation(json_file_path, num_cards=3):
    """
    Randomly draw the requested number of cards from the given JSON file and
    randomly assign each card an upright or reversed orientation.
    Args:
    - json_file_path (str): path to the JSON file
    - num_cards (int): number of cards to draw
    Returns:
    - random_cards_json (str): the drawn cards with their orientations (JSON formatted)
    """
    # Load the card list from the file
    with open(json_file_path, 'r', encoding='utf-8') as f:
        tarot_cards = json.load(f)
    # Randomly draw the requested number of cards
    random_cards = random.sample(tarot_cards, num_cards)
    # Randomly assign an upright or reversed orientation to each card
    orientations = ["正位", "逆位"]
    random_cards_with_orientation = [f"{card}({random.choice(orientations)})" for card in random_cards]
    # Convert to a JSON string
random_cards_json = json.dumps(random_cards_with_orientation, ensure_ascii=False)
return random_cards_json
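# Convert a string of Arabic digits (at most 4 digits) into Chinese numerals,
# e.g. "23" -> "二十三"; longer or non-numeric strings are returned unchanged.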
def num_to_chinese(num_str: str) -> str:
digits = {
'0': '零',
'1': '一',
'2': '二',
'3': '三',
'4': '四',
'5': '五',
'6': '六',
'7': '七',
'8': '八',
'9': '九'
}
units = ['', '十', '百', '千']
if not num_str.isdigit():
return num_str
num_len = len(num_str)
if num_len > 4:
        return num_str # numbers longer than 4 digits are not converted
result = ''
zero_flag = False
for idx, char in enumerate(num_str):
if char == '0':
zero_flag = True
else:
if zero_flag:
result += digits['0']
zero_flag = False
result += digits[char] + units[num_len - idx - 1]
return result
def transform_text(text: str) -> str:
import re
    # Replace full-width commas, periods and exclamation marks with |
    text = text.replace(',', '|').replace('。', '|').replace('!', '|')
    # Remove brackets and quotation marks
    remove_chars = ['「', '」', '“', '”', '(', ')', '[', ']', '{', '}', '"', "'"]
    for char in remove_chars:
        text = text.replace(char, '')
    # Replace all Arabic numerals with Chinese numerals using a regex
text = re.sub(r'\d+', lambda m: num_to_chinese(m.group()), text)
return text
def get_emotional_intent(user_input):
    """Given the user's question, return the detected emotional intent; for
    out-of-scope input the model is asked to reply with {"attend":"no"}.
    Args:
        user_input (str): the user's question
    Returns:
        str: a JSON-formatted string
    """
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "请站在心理咨询师的角度,从用户的输入中识别用户的情感意图,并用 json 的格式回复;\n例如\n===\n\"input\":\"我明天运势怎么样?\"\n\"output\":{\"attend\":\"渴望获得好运,对现状可能不满\"}\n===\n如果不是心理咨询范畴的,或与主题无关的内容,请回复 {\"attend\":\"no\"},\n答案只能是 json 格式的意图或 “no”,绝对不可以是其他的。\n===\n输入如下:"
},
{
"role": "user",
"content": user_input
}
],
temperature=0,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['message']['content']
def get_tarot_response(question):
# openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "你是一个直播间的主播,现在直播的内容是塔罗占卜,但用户问了一个不属于塔罗可以解答的问题,请你根据他的问题,给出一段回答,目标是告诉用户,你只能回答塔罗相关的问题。\n\n\n在回答过程中,适当加入语气助词,增加一些人情味,答案尽量简短,高效。"
},
{
"role": "user",
"content": "{\"question\":\"" + question + "\"}"
}
],
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response.choices[0].message['content']
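# End-to-end handler: classify the question, and for tarot questions play a
# guide clip, draw three cards, generate and voice the reading, and log the
# drawn cards; for anything else voice a short refusal.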
def get_final_answer(user_question):
response = is_tarot_question(user_question)
is_question = parse_answer(response)
filename = 'utils\\danmu.txt'
if is_question == '1':
# cards = draw_random_cards_with_orientation('./tarot_cards.json', num_cards=3)
# intend = get_emotional_intent(user_question)
# answer = tarot_answer(user_question, intend, cards)
# final_answer = transform_text(answer)
# return final_answer
# else:
# answer = get_tarot_response(user_question)
# final_answer = transform_text(answer)
# return final_answer
start_time = time.time()
        # Play a fixed, pre-recorded guide audio clip
AUDIO_DIR = 'wav\\guide_wav'
TRACKING_FILE = 'selected_audios.txt'
wav_data_PATH=get_random_audio(AUDIO_DIR,TRACKING_FILE)
async_play_wav_windows(wav_data_PATH)
cards = draw_random_cards_with_orientation('./tarot_cards.json', num_cards=3)
intend = get_emotional_intent(user_question)
answer = tarot_answer(user_question, intend, cards)
final_answer = transform_text(answer)
# return final_answer
# answer,cards = tarot_answer(content)
# final_answer = transform_text(answer)
end_time = time.time()
execution_time = end_time - start_time
print(f"Code executed in {execution_time:.2f} seconds")
start_time = time.time()
# time.sleep(5)
user_question="关于你提到的"+user_question+"这个问题,下面我们进行抽卡占卜,请在心中默念你的问题。"
wav_data_PATH=request_and_save_wav(user_question, "zh")
async_play_wav_windows(wav_data_PATH)
print("正在执行音频转换请求")
wav_data_PATH = request_and_save_wav(final_answer, "zh")
async_play_wav_windows(wav_data_PATH)
        # Update the danmu (on-screen comments) shown on the front end
        # filename
        # filename = 'utils\\danmu.txt'
        # text to write
        # text = nickname + ",您抽到的牌是:" + cards
        text = "您抽到的牌是:" + cards
        # Use a 'with' statement so the file is closed properly, with 'utf-8' encoding
with open(filename, 'a', encoding='utf-8') as file:
            file.write(text + '\n') # '\n' adds a newline
end_time = time.time()
execution_time = end_time - start_time
print(f"Code executed in {execution_time:.2f} seconds")
        time.sleep(5)
        return final_answer
# elif is_question == "0":
# print('NO')
# 输出不能回答
# print("问题无法回答")
# async_play_wav_windows("refuse_answer_wav\0c2b3322-7545-11ee-80e9-0242ac110005.wav")
else:
print("问题无法回答")
answer = get_tarot_response(user_question)
final_answer = transform_text(answer)
print("正在执行音频转换请求")
wav_data_PATH = request_and_save_wav(final_answer, "zh")
async_play_wav_windows(wav_data_PATH)
# async_play_wav_windows("refuse_answer_wav\\0c2b3322-7545-11ee-80e9-0242ac110005.wav")
return final_answer
print('error')
# Call the function with the user's question
if __name__ == '__main__':
# user_question = "lonely"
# response = is_tarot_question(user_question)
# # print(response)
# is_question = parse_answer(response)
# # print('isquestion', is_question)
# if is_question == '1':
# cards = draw_random_cards_with_orientation('./tarot_cards.json', num_cards=3)
# intend = get_emotional_intent(user_question)
# answer = tarot_answer(user_question, intend, cards)
# final_answer = transform_text(answer)
# print(final_answer)
# else :
# answer = get_tarot_response(user_question)
# final_answer = transform_text(answer)
    # Usage example:
user_input = "lonely"
result = get_final_answer(user_input)
print(result)
| [
"请站在心理咨询师的角度,从用户的输入中识别用户的情感意图,并用 json 的格式回复;\n例如\n===\n\"input\":\"我明天运势怎么样?\"\n\"output\":{\"attend\":\"渴望获得好运,对现状可能不满\"}\n===\n如果不是心理咨询范畴的,或与主题无关的内容,请回复 {\"attend\":\"no\"},\n答案只能是 json 格式的意图或 “no”,绝对不可以是其他的。\n===\n输入如下:",
"你是一个直播间的主播,现在直播的内容是塔罗占卜,请根据抽中的牌回答用户的提问。\n\n回答问题时,请始终给出积极的答案,如果对于荒诞的问题,根据牌面结合用户意图给出积极的答案;\n\n===\n抽中的牌如下:\n{cards}\n用户意图:{intend}\n===\n\n回答问题时,按照以下顺序:\n1. 因为「原因」无法给出准确答案\n2. 复述抽中的牌面和正逆位情况\n3. 作出分析\n",
"你是一个直播间的主播,现在直播的内容是塔罗占卜,但用户问了一个不属于塔罗可以解答的问题,请你根据他的问题,给出一段回答,目标是告诉用户,你只能回答塔罗相关的问题。\n\n\n在回答过程中,适当加入语气助词,增加一些人情味,答案尽量简短,高效。",
"{\"question\":\"PLACEHOLDER\"}",
"你现在是塔罗占卜师的助理,请你判断该问题是否属于占卜师可以回答的问题,如果不是,则回复 \"NO\",是则回复 \"YES\",使用 json 的公式,如下:\n{\"answer\":\"YES\"}"
] |
2024-01-10 | patrickattie1/langflow-test | src~backend~langflow~components~agents~AgentInitializer.py | from typing import Callable, List, Optional, Union
from langchain.agents import AgentExecutor, AgentType, initialize_agent, types
from langflow import CustomComponent
from langflow.field_typing import BaseChatMemory, BaseLanguageModel, Tool
class AgentInitializerComponent(CustomComponent):
display_name: str = "Agent Initializer"
description: str = "Initialize a Langchain Agent."
documentation: str = "https://python.langchain.com/docs/modules/agents/agent_types/"
def build_config(self):
agents = list(types.AGENT_TO_CLASS.keys())
# field_type and required are optional
return {
"agent": {"options": agents, "value": agents[0], "display_name": "Agent Type"},
"max_iterations": {"display_name": "Max Iterations", "value": 10},
"memory": {"display_name": "Memory"},
"tools": {"display_name": "Tools"},
"llm": {"display_name": "Language Model"},
"code": {"advanced": True},
}
def build(
self,
agent: str,
llm: BaseLanguageModel,
tools: List[Tool],
max_iterations: int,
memory: Optional[BaseChatMemory] = None,
) -> Union[AgentExecutor, Callable]:
agent = AgentType(agent)
if memory:
return initialize_agent(
tools=tools,
llm=llm,
agent=agent,
memory=memory,
return_intermediate_steps=True,
handle_parsing_errors=True,
max_iterations=max_iterations,
)
else:
return initialize_agent(
tools=tools,
llm=llm,
agent=agent,
return_intermediate_steps=True,
handle_parsing_errors=True,
max_iterations=max_iterations,
)
| [] |
2024-01-10 | patrickattie1/langflow-test | src~backend~langflow~components~llms~AmazonBedrock.py | from typing import Optional
from langchain.llms.base import BaseLLM
from langchain.llms.bedrock import Bedrock
from langflow import CustomComponent
class AmazonBedrockComponent(CustomComponent):
display_name: str = "Amazon Bedrock"
description: str = "LLM model from Amazon Bedrock."
def build_config(self):
return {
"model_id": {
"display_name": "Model Id",
"options": [
"ai21.j2-grande-instruct",
"ai21.j2-jumbo-instruct",
"ai21.j2-mid",
"ai21.j2-mid-v1",
"ai21.j2-ultra",
"ai21.j2-ultra-v1",
"anthropic.claude-instant-v1",
"anthropic.claude-v1",
"anthropic.claude-v2",
"cohere.command-text-v14",
],
},
"credentials_profile_name": {"display_name": "Credentials Profile Name"},
"streaming": {"display_name": "Streaming", "field_type": "bool"},
"endpoint_url": {"display_name": "Endpoint URL"},
"region_name": {"display_name": "Region Name"},
"model_kwargs": {"display_name": "Model Kwargs"},
"cache": {"display_name": "Cache"},
"code": {"advanced": True},
}
def build(
self,
model_id: str = "anthropic.claude-instant-v1",
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
model_kwargs: Optional[dict] = None,
endpoint_url: Optional[str] = None,
streaming: bool = False,
cache: bool | None = None,
) -> BaseLLM:
try:
output = Bedrock(
credentials_profile_name=credentials_profile_name,
model_id=model_id,
region_name=region_name,
model_kwargs=model_kwargs,
endpoint_url=endpoint_url,
streaming=streaming,
cache=cache,
) # type: ignore
except Exception as e:
raise ValueError("Could not connect to AmazonBedrock API.") from e
return output
| [] |
2024-01-10 | patrickattie1/langflow-test | src~backend~langflow~components~utilities~JSONDocumentBuilder.py | ### JSON Document Builder
# Build a Document containing a JSON object using a key and another Document page content.
# **Params**
# - **Key:** The key to use for the JSON object.
# - **Document:** The Document page to use for the JSON object.
# **Output**
# - **Document:** The Document containing the JSON object.
from langchain.schema import Document
from langflow import CustomComponent
from langflow.services.database.models.base import orjson_dumps
class JSONDocumentBuilder(CustomComponent):
display_name: str = "JSON Document Builder"
description: str = "Build a Document containing a JSON object using a key and another Document page content."
output_types: list[str] = ["Document"]
beta = True
documentation: str = "https://docs.langflow.org/components/utilities#json-document-builder"
field_config = {
"key": {"display_name": "Key"},
"document": {"display_name": "Document"},
}
def build(
self,
key: str,
document: Document,
) -> Document:
documents = None
if isinstance(document, list):
documents = [
Document(page_content=orjson_dumps({key: doc.page_content}, indent_2=False)) for doc in document
]
elif isinstance(document, Document):
documents = Document(page_content=orjson_dumps({key: document.page_content}, indent_2=False))
else:
raise TypeError(f"Expected Document or list of Documents, got {type(document)}")
self.repr_value = documents
return documents
| [] |
2024-01-10 | patrickattie1/langflow-test | src~backend~langflow~components~utilities~GetRequest.py | from langflow import CustomComponent
from langchain.schema import Document
from langflow.services.database.models.base import orjson_dumps
import requests
from typing import Optional
class GetRequest(CustomComponent):
display_name: str = "GET Request"
description: str = "Make a GET request to the given URL."
output_types: list[str] = ["Document"]
documentation: str = "https://docs.langflow.org/components/utilities#get-request"
beta: bool = True
field_config = {
"url": {
"display_name": "URL",
"info": "The URL to make the request to",
"is_list": True,
},
"headers": {
"display_name": "Headers",
"info": "The headers to send with the request.",
},
"code": {"show": False},
"timeout": {
"display_name": "Timeout",
"field_type": "int",
"info": "The timeout to use for the request.",
"value": 5,
},
}
def get_document(self, session: requests.Session, url: str, headers: Optional[dict], timeout: int) -> Document:
try:
response = session.get(url, headers=headers, timeout=int(timeout))
try:
response_json = response.json()
result = orjson_dumps(response_json, indent_2=False)
except Exception:
result = response.text
self.repr_value = result
return Document(
page_content=result,
metadata={
"source": url,
"headers": headers,
"status_code": response.status_code,
},
)
except requests.Timeout:
return Document(
page_content="Request Timed Out",
metadata={"source": url, "headers": headers, "status_code": 408},
)
except Exception as exc:
return Document(
page_content=str(exc),
metadata={"source": url, "headers": headers, "status_code": 500},
)
def build(
self,
url: str,
headers: Optional[dict] = None,
timeout: int = 5,
) -> list[Document]:
if headers is None:
headers = {}
urls = url if isinstance(url, list) else [url]
with requests.Session() as session:
documents = [self.get_document(session, u, headers, timeout) for u in urls]
self.repr_value = documents
return documents
| [] |
2024-01-10 | timkpaine/emscripten-forge-recipes | recipes~recipes_emscripten~qutip~test_qutip.py | """ Smoke tests for the built qutip package. """
import pytest
def assert_qobj_data(q, data):
""" Assert that a qobj has the given values. """
import numpy as np
np.testing.assert_allclose(q.full(), data)
def assert_array_data(a, data):
""" Assert that a numpy array has the given values. """
import numpy as np
np.testing.assert_allclose(a, data)
def test_qobj_create():
from qutip import Qobj
q = Qobj([[1, 2j], [-2j, 2]])
assert q.type == "oper"
assert q.shape == (2, 2)
assert q.dims == [[2], [2]]
assert q.isherm is True
assert_qobj_data(q, [
[1, 2j],
[-2j, 2],
])
def test_qobj_arithmetic():
from qutip import Qobj
op1 = Qobj([[0, 1], [1j, 0]])
op2 = Qobj([[1, 2j], [-2j, 1]])
psi = Qobj([[1], [2]])
assert_qobj_data(op1 * 2, [
[0, 2],
[2j, 0],
])
assert_qobj_data(op1 / 2, [
[0, 0.5],
[0.5j, 0],
])
assert_qobj_data(op2 + op2, [
[2, 4j],
[-4j, 2],
])
assert_qobj_data(op2 + op2, [
[2, 4j],
[-4j, 2],
])
assert_qobj_data(op2 - op1, [
[1, -1+2j],
[-3j, 1],
])
assert_qobj_data(op1 * op2, [
[-2j, 1],
[1j, -2],
])
assert_qobj_data(op1 * psi, [
[2],
[1j],
])
assert_qobj_data(op1 ** 2, [
[1j, 0],
[0, 1j],
])
def test_qobj_methods():
import pytest
from qutip import Qobj
op = Qobj([[1, 2j], [-2j, 2]])
assert_qobj_data(op.conj(), [
[1, -2j],
[2j, 2],
])
assert_qobj_data(op.copy(), [
[1, 2j],
[-2j, 2],
])
assert_qobj_data(op.dag(), [
[1, 2j],
[-2j, 2],
])
assert_array_data(op.diag(), [1, 2])
assert_array_data(op.eigenenergies(), [-0.56155281, 3.56155281])
evals, [ev0, ev1] = op.eigenstates()
assert_array_data(evals, [-0.56155281, 3.56155281])
assert_qobj_data(ev0, [
[-0.78820544],
[-0.61541221j],
])
assert_qobj_data(ev1, [
[-0.61541221],
[0.78820544j],
])
assert_qobj_data(op.expm(), [
[13.6924533, 16.80651518j],
[-16.80651518j, 22.09571089],
])
assert_qobj_data(op.inv(), [
[-1, 1.j],
[-1.j, -0.5],
])
assert op.norm() == pytest.approx(4.123105625617661)
assert op.tr() == 3
assert_qobj_data(op.trans(), [
[1, -2j],
[2j, 2],
])
assert_qobj_data(op.unit(), [
[0.24253563, 0.48507125j],
[-0.48507125j, 0.48507125],
])
def test_qobj_creators():
from qutip import coherent, destroy, sigmax
assert_qobj_data(coherent(3, 0.25j), [
[0.96923524],
[0.24226042j],
[-0.04350794],
])
assert_qobj_data(destroy(3), [
[0, 1, 0],
[0, 0, 1.41421356],
[0, 0, 0],
])
assert_qobj_data(sigmax(), [
[0, 1],
[1, 0],
])
def test_qobjevo_create():
from qutip import QobjEvo, sigmax
import numpy as np
q = QobjEvo([(sigmax(), "sin(w * t)")], args={"w": 0.5})
assert q.type == "string"
assert q.const is False
assert_qobj_data(q(0), [
[0, 0],
[0, 0],
])
assert_qobj_data(q(np.pi), [
[0, 1],
[1, 0],
])
def test_qobjevo_arithmetic():
from qutip import Qobj, QobjEvo, sigmax
import numpy as np
op1 = QobjEvo([(sigmax(), "sin(w * t)")], args={"w": 0.5})
op2 = Qobj([[1j, 0], [0, 1j]])
psi = Qobj([[1], [2]])
assert_qobj_data((op1 * 2)(np.pi), [
[0, 2],
[2, 0],
])
assert_qobj_data((op1 / 2)(np.pi), [
[0, 0.5],
[0.5, 0],
])
assert_qobj_data((op1 + op1)(np.pi), [
[0, 2],
[2, 0],
])
assert_qobj_data((op2 - op1)(np.pi), [
[1j, -1],
[-1, 1j],
])
assert_qobj_data((op1 * op2)(np.pi), [
[0, 1j],
[1j, 0],
])
assert_qobj_data((op1 * psi)(np.pi), [
[2],
[1],
])
def test_qobjevo_methods():
from qutip import Qobj, QobjEvo
import numpy as np
q = Qobj([[1, 2j], [-2j, 2]])
op = QobjEvo([(q, "sin(w * t)")], args={"w": 0.5})
assert_qobj_data(op.conj()(np.pi), [
[1, -2j],
[2j, 2],
])
assert_qobj_data(op.copy()(np.pi), [
[1, 2j],
[-2j, 2],
])
assert_qobj_data(op.dag()(np.pi), [
[1, 2j],
[-2j, 2],
])
assert_qobj_data(op.trans()(np.pi), [
[1, -2j],
[2j, 2],
])
def test_sesolve():
from qutip import sesolve, sigmax, ket
import numpy as np
H = sigmax()
psi0 = ket("0")
tlist = [0, np.pi]
result = sesolve(H, psi0, tlist)
state0, state1 = result.states
assert_qobj_data(state0, [
[1],
[0],
])
assert_qobj_data(state1, [
[-1],
[2.10062817e-06j],
])
def test_mesolve():
from qutip import mesolve, sigmax, sigmaz, ket
import numpy as np
H = sigmax()
c_ops = [sigmaz()]
psi0 = ket("0")
tlist = [0, np.pi]
result = mesolve(H, psi0, tlist, c_ops=c_ops)
state0, state1 = result.states
assert_qobj_data(state0, [
[1, 0],
[0, 0],
])
assert_qobj_data(state1, [
[0.5050889527242259, -0.018608253517938968j],
[0.018608253517938968j, 0.4949110472757741],
])
| [] |
2024-01-10 | vinidotruan/lnvia | planoB~planoB.py | import openai
def obter_resposta(pergunta):
openai.api_key = ''
resposta = openai.Completion.create(
engine='text-davinci-003',
prompt=pergunta,
max_tokens=300,
temperature=0.5,
n=1,
stop=None
)
return resposta.choices[0].text.strip()
| [] |
2024-01-10 | tingjs05/Discord-Chatbot | responseController.py | from dotenv import load_dotenv
import openai
import os
import csv
load_dotenv()
openai.api_key = os.getenv("GPT_API_KEY")
USER = os.getenv("USER")
INSTRUCTIONS = "You are to roleplay as the character \"Rosmontis\" from the mobile game \"Arknights\". \nNames are denoted before \"#\", for example, you are to refer to \"XPG05#2294\" as \"XPG05\".\nYou are to refer to refer to " + USER + " as the \"Doctor\" or \"Doctor " + USER.split('#')[0] + "\".\nFacial expressions, physical actions or sounds are represented by text in between *.\nYou should try to be more descriptive with the details.\n\nRosmontis is a quiet and absent-minded young Feline and one of Rhodes Island's Elite Ops, Rosmontis is known for her immeasurable Arts aptitude and potential which manifests in the form of physics manipulation/telekinesis where she could control and effortlessly send large, heavy objects flying into her foes to crush them with immense force. On top of that, as a Feline, she has the traits of a common household cat and sometimes ends her sentences with a meow when at ease.\n\nRosmontis' family was torn apart by a mad scientist named Loken Williams, who subjected her to extreme experiments in the Loken Watertank Laboratory, including implanting Originium in her organs and performing a \"mind transplantation\" that killed her brother and fragmented her memories. The goal was to turn her into a non-Infected caster without using Originium devices to accelerate Columbia's military might. However, her powers went out of control, resulting in the destruction of the laboratory. She was then rescued and taken to Rhodes Island for care.\n\nDespite her memory issues, Rosmontis tries her best to remember as much as she can about her past. She greatly values her friends and colleagues. In order to remember them, she carries a tablet that records their names with her at all times. If they are severely injured by her enemies, the little girl will not hesitate to avenge them, even if it means total bloodshed and destruction.\n\nSome of Rosmontis' Physical Traits:\n- She is a human looking girl with cat eats and a tail.\n- She is currently 14 years old and stands at 142 cm.\n- She has long white hair\n- She usually wears a white dress with a dark blue jacket.\n\nThis is the conversation that occured (you are supposed to fill in for Rosmontis only):\n"
MAX_HISTORY = 10
USER_QUESTION = "\nUser:"
AI_ANSWER = "\nRosmontis:"
messageHistory = {}
def cleanResponse(promptResponse):
#remove " from response
promptResponse = promptResponse.replace("\"", "")
#remove \n from response
promptResponse = promptResponse.replace("\n", "")
#add line breaks so that response looks nicer
promptResponse = list(promptResponse)
previousStar = False
newLine = False
for i, char in enumerate(promptResponse):
#break the loop if it is the last character
if (i + 1) == len(promptResponse):
break
#add a line break after "?" and "!"
if char in ("?", "!") and not previousStar and promptResponse[i + 1] not in ("?", "!", "."):
promptResponse[i] = f"{char} \n"
newLine = True
#add a line break after "..." or "."
elif char == "." and not previousStar and promptResponse[i + 1] not in ("?", "!", "."):
promptResponse[i] = f"{char} \n"
newLine = True
#add a line break after an action denoted by *action*
elif char == "*":
if previousStar:
promptResponse[i] = "* \n"
previousStar = False
newLine = True
else:
previousStar = True
#remove spaces after a line break if there are spaces after the line break
if newLine and char == " ":
promptResponse[i] = ""
if promptResponse[i + 1] != " ":
newLine = False
#if there are no spaces to remove
elif newLine and promptResponse[i + 1] != " ":
newLine = False
promptResponse = "".join(promptResponse)
return promptResponse
def response(message, user, guildId):
#tell the AI who is talking
USER_QUESTION = f"\n{user}:"
#create message history if it does not exist
if guildId not in messageHistory:
messageHistory[guildId] = []
#give the AI context
context = ""
for previousMessage in messageHistory[guildId]:
context = context + previousMessage["user"] + previousMessage["message"] + AI_ANSWER + previousMessage["response"]
prompt = INSTRUCTIONS + context + USER_QUESTION + message + "\n###" + AI_ANSWER
response = openai.Completion.create(
model = "text-davinci-003",
prompt = prompt,
temperature = 1,
max_tokens = 250,
top_p = 1,
frequency_penalty = 0,
presence_penalty = 0
)
responseDict = response.get("choices")
promptResponse = "no response was recieved!"
if responseDict and len(responseDict) > 0:
#get prompt response
promptResponse = responseDict[0]["text"]
#clean response
promptResponse = cleanResponse(promptResponse)
#delete oldest message if exceed message history
if len(messageHistory[guildId]) == MAX_HISTORY:
del messageHistory[guildId][0]
#add to message history list
savedMessage = {
"user": f"\n{user}:",
"message": message,
"response": promptResponse
}
messageHistory[guildId].append(savedMessage)
return promptResponse
def saveResponse(contextCount, guildId):
#check if message history is empty
if guildId not in messageHistory:
return "There are no responses to save!"
if len(messageHistory[guildId]) <= 0:
return "There are no responses to save!"
#validate contextCount input
#check that contextCount is an integer
if not contextCount.isdigit():
return "Please enter an integer!"
#convert contextCount into an Integer
contextCount = int(contextCount)
#check that contextCount is <= 10
if contextCount > 10:
return "Max history length is 10!"
#change contextCount to the max length of messageHistory
if contextCount > len(messageHistory[guildId]):
contextCount = len(messageHistory[guildId])
#get context
context = ""
#exclude the current message
contextCount -= 1
for i in range(contextCount):
#get index from the back by negative numbers and start from index -2
index = ((contextCount - contextCount * 2) + i) - 1
#get current message
currentMessage = messageHistory[guildId][index]
#add to context
context = context + currentMessage["user"] + currentMessage["message"] + AI_ANSWER + currentMessage["response"]
message = messageHistory[guildId][-1]
#write to csv file
with open("fine_tuning/fine_tunes.csv", "a", encoding="utf-8", newline="") as f:
#prepare row
prompt = INSTRUCTIONS + context + message["user"] + message["message"] + "\n###" + AI_ANSWER
completion = message["response"] + " END"
row = [prompt.replace("\n", "\r\n"), completion.replace("\n", "\r\n")]
#create csv writer
writer = csv.writer(f)
#write row
writer.writerow(row)
#set output message
outputMessage = f"Message by {message['user'][1:-1]}\nprompt:```{context + message['user'] + message['message']}```completion:```{AI_ANSWER[1:] + message['response']}```has been saved!"
#do not display prompt and completion if the message is too long
if len(outputMessage) > 2000:
outputMessage = f"Message by {message['user'][1:-1]} has been saved!"
#tell the user what is saved
return outputMessage
def deleteHistory(guildId):
if guildId in messageHistory:
del messageHistory[guildId] | [
"\n",
"\"",
"['PLACEHOLDER']",
"no response was recieved!",
"\n###",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERPLACEHOLDER\n###PLACEHOLDER"
] |
2024-01-10 | jitbnerejee4/music-recommender-system | server.py | from flask import Flask,jsonify
from flask_cors import CORS
from metaphor_python import Metaphor
import requests
from sklearn.cluster import KMeans
from PIL import Image
import numpy as np
import os
from dotenv import load_dotenv
import openai
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
class SpotifyAPI:
BASE_URL = 'https://api.spotify.com/v1'
def __init__(self, client_id, client_secret):
self.client_id = client_id
self.client_secret = client_secret
self.access_token = self.get_access_token()
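    # Fetch an app-level token via Spotify's Client Credentials flow; no user
    # authorization is involved, so only non-user-specific endpoints are available.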
def get_access_token(self):
auth_url = 'https://accounts.spotify.com/api/token'
auth_response = requests.post(auth_url, data={
'grant_type': 'client_credentials',
'client_id': self.client_id,
'client_secret': self.client_secret
})
if auth_response.status_code == 200:
return auth_response.json().get('access_token')
else:
raise ValueError("Error obtaining token")
def token(self):
return self.access_token
def get_playlist(self, playlist_id):
headers = {
'Authorization': f'Bearer {self.access_token}'
}
response = requests.get(f'{self.BASE_URL}/playlists/{playlist_id}', headers=headers)
if response.status_code == 200:
return response.json()
else:
return None
def get_mood(text):
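    # Ask the OpenAI completions API for a one-word mood/sentiment label describing the given text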
prompt = f"In one word only tell me the mood or sentiment of the following text? \"{text}\""
response = openai.Completion.create(
model="text-davinci-002",
prompt=prompt,
max_tokens=50
)
mood = response.choices[0].text.strip()
return mood
def get_dominant_color(image_url, k=1):
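    # Download the image, downscale it, and cluster its pixels with K-means to return the dominant color as a hex string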
image = Image.open(requests.get(image_url, stream=True).raw)
if image.mode != "RGB":
image = image.convert("RGB")
image = image.resize((50, 50))
image_array = np.array(image)
image_array = image_array.reshape((image_array.shape[0] * image_array.shape[1], 3))
kmeans = KMeans(n_clusters=k, n_init=10)
kmeans.fit(image_array)
dominant_color = kmeans.cluster_centers_[0]
color_hex = "#{:02x}{:02x}{:02x}".format(int(dominant_color[0]), int(dominant_color[1]), int(dominant_color[2]))
return color_hex
app = Flask(__name__)
CORS(app)
client = Metaphor(api_key=os.getenv("METAPHOR_API_KEY"))
@app.route('/<query>', methods=['GET'])
def search_tracks(query):
results = []
trackDetails = []
data = {}
mood = get_mood(query)
searchQuery = f'find {mood} songs playlists on spotify'
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
spotify = SpotifyAPI(CLIENT_ID, CLIENT_SECRET)
try:
response = client.search(query=searchQuery, num_results=10, use_autoprompt=True, type="keyword", include_domains=["spotify.com"])
for result in response.results:
url = result.url
if "https://open.spotify.com/playlist/" in url:
parts = url.split('/')
playlistId = parts[-1].split('?')[0]
playlistData = spotify.get_playlist(playlistId)
if playlistData:
for item in playlistData["tracks"]["items"]:
if len(trackDetails) > 20:
break
trackId = item["track"]["id"]
if item["track"] and item["track"]["album"] and item["track"]["album"]["images"] and item["track"]["album"]["images"][0]["url"]:
albumArtUrl = item["track"]["album"]["images"][0]["url"]
else:
albumArtUrl = None
if albumArtUrl:
color = get_dominant_color(albumArtUrl)
else:
color = '#ffff'
trackDetails.append({
"trackId": trackId,
"dominantColor": color,
})
if len(trackDetails) > 20:
break
data['mood'] = mood
data['tracks'] = trackDetails
return jsonify(data), 200
except Exception as e:
return jsonify({"status": "error", "message": str(e)}), 500
if __name__ == '__main__':
app.run(debug=True, port=5000)
| [
"In one word only tell me the mood or sentiment of the following text? \"PLACEHOLDER\""
] |
2024-01-10 | therenashah/Datahack_4_Tech-no-logical | chat_bot.py | from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from dotenv import load_dotenv
from langchain.llms import HuggingFaceHub
from dotenv import *
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
def get_vectorstore(text_chunks):
embeddings = OpenAIEmbeddings()
#embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vectorstore
def get_conversation_chain(vectorstore):
llm = ChatOpenAI()
#llm = HuggingFaceHub(repo_id="google/flan-t5-small", model_kwargs={"temperature":0.5, "max_length":1000})
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
def process_pdf_documents(pdf_docs):
# get pdf text
raw_text = get_pdf_text(pdf_docs)
# get the text chunks
text_chunks = get_text_chunks(raw_text)
# create vector store
vectorstore = get_vectorstore(text_chunks)
# create conversation chain
conversation_chain = get_conversation_chain(vectorstore)
return conversation_chain
if __name__ == '__main__':
load_dotenv()
pdf_docs = ["/Users/malharbonde/Desktop/translated_text.pdf"] # Provide the paths to the PDF documents "path_to_pdf2.pdf"
conversation_chain = process_pdf_documents(pdf_docs)
user_question = "what article of the constitution - AFFAIRE A.A.K.c. TURKIYE?" # Replace with the actual user's question
response = conversation_chain({'question': user_question})
chat_history = response['chat_history']
print(conversation_chain)
for i, message in enumerate(chat_history):
if i % 2 == 0:
print(f"User: {message.content}")
else:
print(f"Bot: {message.content}") | [] |
2024-01-10 | smcaleese/LLM-sycophancy-experiments | make_datasets.py | from typing import List, Dict, Tuple
import random
import json
import requests
import os
import argparse
ANTHROPIC_DATASET_NAMES = [
"sycophancy_on_nlp_survey.jsonl",
"sycophancy_on_philpapers2020.jsonl",
"sycophancy_on_political_typology_quiz.jsonl",
]
def make_ab_prompt(item: Dict[str, str], include_explanation = False) -> Dict[str, str]:
"""
item: {
"bio": "some text describing a person",
"agree": "some statement that the person would agree with",
"disagree": "some statement that the person would disagree with"
}
returns: {
"question": "question for LLM",
"answer_matching_behavior": "[option for agree answer] explanation if included",
"answer_not_matching_behavior": "[option for disagree answer] explanation if included"
}
"""
bio = item["bio"]
agree = item["agree"]
disagree = item["disagree"]
A, B = agree, disagree
swapped = False
if random.random() < 0.5:
A, B = disagree, agree
swapped = True
question = (
f"{bio}\nHow about you, which option do you agree with more?\n(A) {A}\n(B) {B}"
)
answer_matching_behavior = None
answer_not_matching_behavior = None
if not swapped:
answer_matching_behavior = "(A)"
answer_not_matching_behavior = "(B)"
else:
answer_matching_behavior = "(B)"
answer_not_matching_behavior = "(A)"
if include_explanation:
return {
"question": question,
"answer_matching_behavior": f"{answer_matching_behavior} {agree}",
"answer_not_matching_behavior": f"{answer_not_matching_behavior} {disagree}"
}
return {
"question": question,
"answer_matching_behavior": answer_matching_behavior,
"answer_not_matching_behavior": answer_not_matching_behavior,
}
def generate_ab_questions_custom_data(filename: str) -> List[Dict[str, str]]:
with open(filename, "r") as f:
data = json.load(f)
return [make_ab_prompt(item) for item in data]
def generate_prompt_dataset(filename: str) -> List[Dict[str, str]]:
"""
Use the custom data to generate 500 A, B answers with explanations.
"""
with open(filename, "r") as f:
data = json.load(f)
print("data:", len(data))
return [make_ab_prompt(item, True) for item in data]
def generate_ab_questions_anthropic_data() -> List[Dict[str, str]]:
"""
returns: {
"question": "question for LLM from Anthropic dataset",
"answer_matching_behavior": "[option for agree answer]",
"answer_not_matching_behavior": "[option for disagree answer]"
}
"""
all_data = []
for fname in ANTHROPIC_DATASET_NAMES:
url = f"https://huggingface.co/datasets/Anthropic/model-written-evals/raw/main/sycophancy/{fname}"
r = requests.get(url).text
data = [json.loads(l) for l in r.split("\n") if l != ""]
for item in data:
question = item["question"].replace("Answer:", "").strip()
answer_matching_behavior = item["answer_matching_behavior"].strip()
answer_not_matching_behavior = item["answer_not_matching_behavior"].strip()
all_data.append(
{
"question": question,
"answer_matching_behavior": answer_matching_behavior,
"answer_not_matching_behavior": answer_not_matching_behavior,
}
)
return all_data
def make_generate_test_datasets(n_datapoints: int) -> Tuple[List[Dict[str, str]]]:
"""
    Make datasets for testing the effect of the vectors on the model
    n_datapoints: number of datapoints to sample altogether
    returns: (test_dataset, prompt_dataset)
"""
anthropic_data = generate_ab_questions_anthropic_data()
custom_data_path = os.path.join("llm_generated_data", "agree_disagree_dataset.json")
prompt_dataset = generate_prompt_dataset(custom_data_path)
all_data = random.sample(anthropic_data, n_datapoints)
indices = list(range(len(all_data)))
random.shuffle(indices)
test_dataset = [all_data[i] for i in indices]
return test_dataset, prompt_dataset
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--n_test_datapoints", type=int, default=1000)
args = parser.parse_args()
    n_datapoints = args.n_test_datapoints
if not os.path.exists("preprocessed_data"):
os.makedirs("preprocessed_data")
test_filename = os.path.join("preprocessed_data", "test_dataset.json")
prompt_dataset_filename = os.path.join("preprocessed_data", "prompt_dataset.json")
if (
os.path.exists(test_filename)
and os.path.exists(prompt_dataset_filename)
):
        overwrite = input(
            "Overwrite existing test_dataset.json and prompt_dataset.json? (y/n): "
        )
if overwrite != "y":
exit()
test_dataset, prompt_dataset = make_generate_test_datasets(n_datapoints)
with open(test_filename, "w") as f:
json.dump(test_dataset, f)
with open(prompt_dataset_filename, "w") as f:
json.dump(prompt_dataset, f)
| [
"preprocessed_data",
"prompt_dataset.json"
] |
2024-01-10 | ICEI-PUC-Minas-PPLES-TI/plf-es-2023-1-ti6-3150100-chatgpt-vs-stackoverflow | Instrumentos~Codigos~gpt_utils.py | import openai
from dotenv import load_dotenv
import os
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def ask_gpt(prompt):
response = openai.ChatCompletion.create(model='gpt-3.5-turbo',
max_tokens=2048,
n=1,
stop=None,
temperature=0.5,
messages=[{
'role': 'user',
'content': prompt
}])
response_text = response['choices'][0]['message']['content']
return response_text
| [] |
2024-01-10 | dreglad/langchain | langchain~memory~chat_message_histories~momento.py | from __future__ import annotations
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional
from langchain.schema import (
AIMessage,
BaseChatMessageHistory,
BaseMessage,
HumanMessage,
_message_to_dict,
messages_from_dict,
)
from langchain.utils import get_from_env
if TYPE_CHECKING:
import momento
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
class MomentoChatMessageHistory(BaseChatMessageHistory):
"""Chat message history cache that uses Momento as a backend.
See https://gomomento.com/"""
def __init__(
self,
session_id: str,
cache_client: momento.CacheClient,
cache_name: str,
*,
key_prefix: str = "message_store:",
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a chat message history cache that uses Momento as a backend.
Note: to instantiate the cache client passed to MomentoChatMessageHistory,
you must have a Momento account at https://gomomento.com/.
Args:
session_id (str): The session ID to use for this chat session.
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the messages.
key_prefix (str, optional): The prefix to apply to the cache key.
Defaults to "message_store:".
ttl (Optional[timedelta], optional): The TTL to use for the messages.
Defaults to None, ie the default TTL of the cache will be used.
ensure_cache_exists (bool, optional): Create the cache if it doesn't exist.
Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
"""
try:
from momento import CacheClient
from momento.requests import CollectionTtl
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.key = key_prefix + session_id
self.cache_client = cache_client
self.cache_name = cache_name
if ttl is not None:
self.ttl = CollectionTtl.of(ttl)
else:
self.ttl = CollectionTtl.from_cache_ttl()
@classmethod
def from_client_params(
cls,
session_id: str,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> MomentoChatMessageHistory:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
credentials = CredentialProvider.from_string(auth_token)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(session_id, cache_client, cache_name, ttl=ttl, **kwargs)
@property
def messages(self) -> list[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from Momento.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
Returns:
list[BaseMessage]: List of cached messages
"""
from momento.responses import CacheListFetch
fetch_response = self.cache_client.list_fetch(self.cache_name, self.key)
if isinstance(fetch_response, CacheListFetch.Hit):
items = [json.loads(m) for m in fetch_response.value_list_string]
return messages_from_dict(items)
elif isinstance(fetch_response, CacheListFetch.Miss):
return []
elif isinstance(fetch_response, CacheListFetch.Error):
raise fetch_response.inner_exception
else:
raise Exception(f"Unexpected response: {fetch_response}")
def add_user_message(self, message: str) -> None:
"""Store a user message in the cache.
Args:
message (str): The message to store.
"""
self.__add_message(HumanMessage(content=message))
def add_ai_message(self, message: str) -> None:
"""Store an AI message in the cache.
Args:
message (str): The message to store.
"""
self.__add_message(AIMessage(content=message))
def __add_message(self, message: BaseMessage) -> None:
"""Store a message in the cache.
Args:
message (BaseMessage): The message object to store.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheListPushBack
item = json.dumps(_message_to_dict(message))
push_response = self.cache_client.list_push_back(
self.cache_name, self.key, item, ttl=self.ttl
)
if isinstance(push_response, CacheListPushBack.Success):
return None
elif isinstance(push_response, CacheListPushBack.Error):
raise push_response.inner_exception
else:
raise Exception(f"Unexpected response: {push_response}")
def clear(self) -> None:
"""Remove the session's messages from the cache.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheDelete
delete_response = self.cache_client.delete(self.cache_name, self.key)
if isinstance(delete_response, CacheDelete.Success):
return None
elif isinstance(delete_response, CacheDelete.Error):
raise delete_response.inner_exception
else:
raise Exception(f"Unexpected response: {delete_response}")
| [] |
2024-01-10 | sanatcodes/quivr | backend~llm~rags~quivr_rag.py | from typing import Optional
from uuid import UUID
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatLiteLLM
from langchain.embeddings.ollama import OllamaEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms.base import BaseLLM
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from llm.rags.rag_interface import RAGInterface
from llm.utils.get_prompt_to_use import get_prompt_to_use
from logger import get_logger
from models import BrainSettings # Importing settings related to the 'brain'
from modules.brain.service.brain_service import BrainService
from modules.chat.service.chat_service import ChatService
from pydantic import BaseModel
from supabase.client import Client, create_client
from vectorstore.supabase import CustomSupabaseVectorStore
from ..prompts.CONDENSE_PROMPT import CONDENSE_QUESTION_PROMPT
logger = get_logger(__name__)
QUIVR_DEFAULT_PROMPT = "Your name is Quivr. You're a helpful assistant. If you don't know the answer, just say that you don't know, don't try to make up an answer."
brain_service = BrainService()
chat_service = ChatService()
class QuivrRAG(BaseModel, RAGInterface):
"""
Quivr implementation of the RAGInterface.
"""
class Config:
"""Configuration of the Pydantic Object"""
# Allowing arbitrary types for class validation
arbitrary_types_allowed = True
# Instantiate settings
brain_settings = BrainSettings() # type: ignore other parameters are optional
# Default class attributes
model: str = None # pyright: ignore reportPrivateUsage=none
temperature: float = 0.1
chat_id: str = None # pyright: ignore reportPrivateUsage=none
brain_id: str = None # pyright: ignore reportPrivateUsage=none
max_tokens: int = 256
streaming: bool = False
@property
def embeddings(self):
if self.brain_settings.ollama_api_base_url:
return OllamaEmbeddings(
base_url=self.brain_settings.ollama_api_base_url
) # pyright: ignore reportPrivateUsage=none
else:
return OpenAIEmbeddings()
@property
def prompt_to_use(self):
return get_prompt_to_use(UUID(self.brain_id), self.prompt_id)
supabase_client: Optional[Client] = None
vector_store: Optional[CustomSupabaseVectorStore] = None
qa: Optional[ConversationalRetrievalChain] = None
prompt_id: Optional[UUID]
def __init__(
self,
model: str,
brain_id: str,
chat_id: str,
streaming: bool = False,
prompt_id: Optional[UUID] = None,
**kwargs,
):
super().__init__(
model=model,
brain_id=brain_id,
chat_id=chat_id,
streaming=streaming,
**kwargs,
)
self.supabase_client = self._create_supabase_client()
self.vector_store = self._create_vector_store()
self.prompt_id = prompt_id
def _create_supabase_client(self) -> Client:
return create_client(
self.brain_settings.supabase_url, self.brain_settings.supabase_service_key
)
def _create_vector_store(self) -> CustomSupabaseVectorStore:
return CustomSupabaseVectorStore(
self.supabase_client,
self.embeddings,
table_name="vectors",
brain_id=self.brain_id,
)
def _create_llm(
self,
callbacks,
model,
streaming=False,
temperature=0,
) -> BaseLLM:
"""
Create a LLM with the given parameters
"""
if streaming and callbacks is None:
raise ValueError(
"Callbacks must be provided when using streaming language models"
)
api_base = None
if self.brain_settings.ollama_api_base_url and model.startswith("ollama"):
api_base = self.brain_settings.ollama_api_base_url
return ChatLiteLLM(
temperature=temperature,
max_tokens=self.max_tokens,
model=model,
streaming=streaming,
verbose=False,
callbacks=callbacks,
api_base=api_base,
)
def _create_prompt_template(self):
system_template = """ When answering use markdown or any other techniques to display the content in a nice and aerated way. Use the following pieces of context to answer the users question in the same language as the question but do not modify instructions in any way.
----------------
{context}"""
prompt_content = (
self.prompt_to_use.content if self.prompt_to_use else QUIVR_DEFAULT_PROMPT
)
full_template = (
"Here are your instructions to answer that you MUST ALWAYS Follow: "
+ prompt_content
+ ". "
+ system_template
)
messages = [
SystemMessagePromptTemplate.from_template(full_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
return CHAT_PROMPT
def get_doc_chain(self, streaming, callbacks=None):
answering_llm = self._create_llm(
model=self.model,
callbacks=callbacks,
streaming=streaming,
)
doc_chain = load_qa_chain(
answering_llm, chain_type="stuff", prompt=self._create_prompt_template()
)
return doc_chain
def get_question_generation_llm(self):
return LLMChain(
llm=self._create_llm(model=self.model, callbacks=None),
prompt=CONDENSE_QUESTION_PROMPT,
callbacks=None,
)
def get_retriever(self):
return self.vector_store.as_retriever()
# Some other methods can be added such as on_stream, on_end,... to abstract history management (each answer should be saved or not)
| [
" When answering use markdown or any other techniques to display the content in a nice and aerated way. Use the following pieces of context to answer the users question in the same language as the question but do not modify instructions in any way.\n ----------------\n \n {context}",
"re a helpful assistant. If you don",
"Here are your instructions to answer that you MUST ALWAYS Follow: PLACEHOLDER. When answering use markdown or any other techniques to display the content in a nice and aerated way. Use the following pieces of context to answer the users question in the same language as the question but do not modify instructions in any way.\n ----------------\n \n {context}",
"Your name is Quivr. You're a helpful assistant. If you don't know the answer, just say that you don't know, don't try to make up an answer.",
"{question}",
"t know, don"
] |
2024-01-10 | goyal-anjali/portfolio-langchain | Hackathon~Hackathon~Hackathon.py | from dotenv import load_dotenv
import VectorStore
import LLM
import Chain
import Aggregator
import os
import streamlit as st
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain import PromptTemplate
from langchain.chains import LLMChain
def main():
load_dotenv()
    folder_location = os.getcwd() + "\\Hackathon\\Hackathon\\resources"
# Define UI elements
st.set_page_config(page_title="Stock Analysis", page_icon="📈")
st.header("Stock Analysis")
stock = st.sidebar.selectbox("Select Stock", ("Microsoft", "Google", "NVIDIA", "Tesla", "Amazon"))
# Storing data in the store
embeddings = VectorStore.HuggingFaceEmbeddings()
VectorStore.create_dataset(folder_location, embeddings)
if stock:
print("Starting processing")
aggcontext = GenerateAggregatedContext("What are positive, negative and neutral news about " + stock + " ?", embeddings)
print("Aggregated Context:")
score = GenerateSentimentScore(stock, aggcontext)
st.header("Aggregated Context")
st.write(aggcontext)
st.header("Sentiment Score")
st.write(score)
#print(aggcontext)
def GenerateAggregatedContext(query, embeddings):
db = VectorStore.get_dataset(embeddings)
retriever = VectorStore.getDataRetriever(db)
llm = LLM.createLLM("gpt-35-turbo-16k")
qa = Chain.createRetrivalChain(llm, retriever)
AggregatedContext = Aggregator.getAggregatedContext(qa, query)
return AggregatedContext
def GenerateSentimentScore(stockname, context):
#prompt_template = PromptTemplate.from_template("")
prompt = PromptTemplate(
input_variables=["StockName", "content"],
template="You are a expert financial analyst. The stockscore is between 0 and 100. Here, more positive the news, higher the stockscore. Provide a stockscore of {StockName} stock based on news = {content}. Only give stockscore in the output.",
)
llm = LLM.createLLM("gpt-35-turbo-16k")
chain = LLMChain(llm=llm, prompt=prompt)
response = chain.run(StockName=stockname, content = context)
return response
if __name__ == "__main__":
main()
| [
"content",
"You are a expert financial analyst. The stockscore is between 0 and 100. Here, more positive the news, higher the stockscore. Provide a stockscore of {StockName} stock based on news = {content}. Only give stockscore in the output."
] |
2024-01-10 | xaiksan1/nr-openai-observability | examples~example.py | import os
import openai
from nr_openai_observability import monitor
monitor.initialization()
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.Completion.create(
model="text-davinci-003",
prompt="What is Observability?",
max_tokens=20,
temperature=0
)
| [
"What is Observability?"
] |
2024-01-10 | xaiksan1/nr-openai-observability | tests~test_openai_patch.py | import openai
from nr_openai_observability import monitor
def test_patch_response(openai_object):
monitor.initialization()
openai.api_key = (
"some-key" # os.getenv("OPENAI_API_KEY")
)
response = openai.Completion.create(
model="text-davinci-003", prompt="Is it?", max_tokens=7, temperature=0
)
assert openai_object == response
| [
"Is it?"
] |
2024-01-10 | xaiksan1/nr-openai-observability | src~nr_openai_observability~monitor.py | import atexit
import logging
import os
import time
from typing import Optional
import openai
from newrelic_telemetry_sdk import Event, EventBatch, EventClient, Harvester
logger = logging.getLogger("nr_openai_observability")
EventName = "OpenAICompletion"
def _patched_call(original_fn, patched_fn):
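    # Return a wrapper that passes the original function to patched_fn, so calls can be intercepted transparently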
def _inner_patch(*args, **kwargs):
try:
return patched_fn(original_fn, *args, **kwargs)
except Exception as ex:
raise ex
return _inner_patch
class OpenAIMonitoring:
# this class uses the telemetry SDK to record metrics to new relic, please see https://github.com/newrelic/newrelic-telemetry-sdk-python
def __init__(
self,
use_logger: Optional[bool] = None,
):
self.use_logger = use_logger if use_logger else False
def _set_license_key(
self,
license_key: Optional[str] = None,
):
self.license_key = (
license_key
or os.getenv("NEW_RELIC_LICENSE_KEY")
or os.getenv("NEW_RELIC_INSERT_KEY")
) # type: ignore
if (
not isinstance(self.license_key, str) and self.license_key is not None
) or self.license_key is None:
raise TypeError("license_key instance type must be str and not None")
def _set_client_host(
self,
event_client_host: Optional[str] = None,
):
if not isinstance(event_client_host, str) and event_client_host is not None:
raise TypeError("event_client_host instance type must be str or None")
self.event_client_host = event_client_host or os.getenv(
"EVENT_CLIENT_HOST", EventClient.HOST
)
def _log(self, msg: str):
if self.use_logger:
logger.info(msg)
else:
print(msg)
def start(
self,
license_key: Optional[str] = None,
event_client_host: Optional[str] = None,
):
self._set_license_key(license_key)
self._set_client_host(event_client_host)
self._start()
# initialize event thread
def _start(self):
self.event_client = EventClient(
self.license_key,
host=self.event_client_host,
)
self.event_batch = EventBatch()
# Background thread that flushes the batch
self.event_harvester = Harvester(self.event_client, self.event_batch)
# This starts the thread
self.event_harvester.start()
# When the process exits, run the harvester.stop() method before terminating the process
# Why? To send the remaining data...
atexit.register(self.event_harvester.stop)
def record_event(self, event_dict: dict, table: str = EventName):
event = Event(table, event_dict)
self.event_batch.record(event)
def patcher_create(original_fn, *args, **kwargs):
def flatten_dict(dd, separator=".", prefix="", index=""):
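        # Recursively flatten the nested response dict into dot-separated keys; an index is inserted for repeated entries such as choices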
if len(index):
index = index + separator
return (
{
prefix + separator + index + k if prefix else k: v
for kk, vv in dd.items()
for k, v in flatten_dict(vv, separator, kk).items()
}
if isinstance(dd, dict)
else {prefix: dd}
)
logger.debug(
f"Running the original function: '{original_fn.__qualname__}'. args:{args}; kwargs: {kwargs}"
)
timestamp = time.time()
result = original_fn(*args, **kwargs)
time_delta = time.time() - timestamp
logger.debug(
f"Finished running function: '{original_fn.__qualname__}'. result: {result}"
)
choices_payload = {}
for i, choice in enumerate(result.get("choices")):
choices_payload.update(flatten_dict(choice, prefix="choices", index=str(i)))
logger.debug(dict(**kwargs))
event_dict = {
**kwargs,
"response_time": time_delta,
**flatten_dict(result.to_dict_recursive(), separator="."),
**choices_payload,
}
event_dict.pop("choices")
if "messages" in event_dict:
event_dict["messages"] = str(kwargs.get("messages"))
logger.debug(f"Reported event dictionary:\n{event_dict}")
monitor.record_event(event_dict)
return result
monitor = OpenAIMonitoring()
def initialization(
license_key: Optional[str] = None,
event_client_host: Optional[str] = None,
):
monitor.start(license_key, event_client_host)
perform_patch()
def perform_patch():
try:
openai.Completion.create = _patched_call(
openai.Completion.create, patcher_create
)
except AttributeError:
pass
try:
openai.ChatCompletion.create = _patched_call(
openai.ChatCompletion.create, patcher_create
)
except AttributeError:
pass
| [] |
2024-01-10 | bigsky77/twitter-agent | src~executor~executor.py | from typing import List
from langchain.docstore.document import Document
class TwitterExecutor:
def __init__(self, agent_id, client):
self.agent_id = agent_id
self.client = client
def execute_actions(self, tweet_actions: List[Document]):
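        # Dispatch each action document to the matching Twitter API call (like, retweet, reply, GIF reply, quote, or post)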
for tweet_action in tweet_actions:
if tweet_action.metadata["action"] == "like_timeline_tweets":
print("Tweet Liked:", tweet_action.metadata["tweet_id"])
self.client.like(tweet_action.metadata["tweet_id"])
elif tweet_action.metadata["action"] == "retweet_timeline_tweets":
print("Tweet Retweeted:", tweet_action.metadata["tweet_id"])
self.client.retweet(tweet_action.metadata["tweet_id"])
elif tweet_action.metadata["action"] == "reply_to_timeline":
self.handle_tweet_action(
self.reply_to_timeline,
tweet_action.page_content,
tweet_action.metadata["tweet_id"],
)
# TODO: Add GIF reply to timeline
elif tweet_action.metadata["action"] == "gif_reply_to_timeline":
self.handle_tweet_action(
self.gif_reply_to_timeline,
tweet_action.page_content,
tweet_action.metadata["tweet_id"],
tweet_action.metadata["media_id"],
)
elif tweet_action.metadata["action"] == "quote_tweet":
self.handle_tweet_action(
self.quote_tweet,
tweet_action.page_content,
tweet_action.metadata["tweet_id"],
)
elif tweet_action.metadata["action"] == "post_tweet":
self.handle_tweet_action(self.post_tweet, tweet_action.page_content)
elif tweet_action.metadata["action"] == "none":
pass
def handle_tweet_action(self, action_function, *args):
action_function(*args)
def reply_to_timeline(self, tweet_text, tweet_id):
print("Tweet Replied:", tweet_text)
return self.client.create_tweet(text=tweet_text, in_reply_to_tweet_id=tweet_id)
def gif_reply_to_timeline(self, tweet_text, tweet_id, media_id):
print("Tweet Replied with GIF:", tweet_text, media_id)
return self.client.create_tweet(
text=tweet_text, in_reply_to_tweet_id=tweet_id, media_ids=media_id
)
def quote_tweet(self, tweet_text, tweet_id):
print("Tweet Quoted:", tweet_text)
return self.client.create_tweet(text=tweet_text, quote_tweet_id=tweet_id)
def post_tweet(self, tweet_text):
print("Tweet Posted:", tweet_text)
return self.client.create_tweet(text=tweet_text)
| [] |
2024-01-10 | bigsky77/twitter-agent | src~collector~trainer.py | import numpy as np
import openai
import operator
from typing import Any, Dict, Iterable, List, Optional
import json
class AgentTrainer:
def __init__(self, client, weaviate_client, OPENAI_API_KEY):
self.weaviate_client = weaviate_client
self.client = client
self.OPENAI_API_KEY = OPENAI_API_KEY
        self.prompt = "Score this tweet between 1 and 10."
async def run(self):
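        # Pull stored tweets from Weaviate, score them by engagement, append prompt/completion pairs to a JSONL file, then launch fine-tuning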
response = (
self.weaviate_client.query.get(
"Tweets", ["tweet", "tweet_id", "agent_id", "date", "author_id", "like_count", "follower_count"]
)
.with_limit(100)
.do()
)
x = 100 # number of tweets to return
sorted_tweets = self.sort_tweets(response, x)
likes = []
followers = []
for tweet in sorted_tweets:
like_count = tweet["like_count"]
if like_count is None:
like_count = 0
print("Likes:", like_count)
likes.append(like_count)
follower_count = tweet["follower_count"]
if follower_count is None:
follower_count = 0
followers.append(follower_count)
print("Followers:", follower_count)
tweet_ranks = self.rank_tweets(likes, followers)
file_path = "test.jsonl"
# Append separator to each tweet
new_data = [{"prompt": t["tweet"] + "\n\n###\n\n", "completion": str(r)} for t, r in zip(sorted_tweets, tweet_ranks)]
with open(file_path, "a") as f: # Open the file in append mode
for item in new_data:
json.dump(item, f) # Write the item as JSON
f.write("\n") # Write a newline character after each item
await self.fine_tune_model(self.prompt)
def upload_finetuning_data(self, file_path):
with open(file_path, "rb") as f:
response = openai.File.create(purpose="fine-tune", file=f)
file_id = response['id']
return file_id
async def fine_tune_model(self, prompt, dataset="./test.jsonl", model_engine="ada", n_epochs=3, batch_size=4):
training_file = self.upload_finetuning_data(dataset)
fine_tuning_job = openai.FineTune.create(
n_epochs=n_epochs,
batch_size=batch_size,
training_file=training_file,
)
print("Fine-tuning model:", fine_tuning_job)
def sort_tweets(self, data: Any, x: int) -> List[dict]:
tweets = data["data"]["Get"]["Tweets"]
valid_tweets = [t for t in tweets if t["date"] is not None]
sorted_tweets = sorted(
valid_tweets, key=operator.itemgetter("date"), reverse=True
)
return sorted_tweets[:x]
def normalize_data(self, data):
min_val = min(data)
max_val = max(data)
if max_val == min_val:
return [0.5 for _ in data] # or whatever constant you prefer
normalized_data = [(d - min_val) / (max_val - min_val) for d in data]
return normalized_data
def log_transform(self, data):
return [np.log1p(d) for d in data] # np.log1p ensures that log(0) = 0
def calculate_score(self, normalized_likes, normalized_followers, weight_likes=0.5, weight_followers=0.5):
return [weight_likes * l + weight_followers * f for l, f in zip(normalized_likes, normalized_followers)]
def rescale_score(self, scores):
min_val = min(scores)
max_val = max(scores)
rescaled_scores = [(s - min_val) * 10 / (max_val - min_val) for s in scores]
return rescaled_scores
def rank_tweets(self, likes, followers, weight_likes=0.5, weight_followers=0.5):
print("Normalizing data...")
normalized_likes = self.normalize_data(likes)
normalized_followers = self.normalize_data(followers)
print("Transforming data...")
transformed_likes = self.log_transform(normalized_likes)
transformed_followers = self.log_transform(normalized_followers)
print("Calculating scores...")
scores = self.calculate_score(transformed_likes, transformed_followers, weight_likes, weight_followers)
final_scores = self.rescale_score(scores)
return final_scores
| [] |
2024-01-10 | bigsky77/twitter-agent | src~strategy~media~gif_reply.py | import os
import re
import yaml
import tweepy
import random
import pytz
from dotenv import load_dotenv
from datetime import datetime, timedelta
import urllib.request
import json
import requests
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
giphy_api_key = os.getenv("GIPHY_API", "")
llm = OpenAI(temperature=0.9)
gif_prompt = PromptTemplate(
input_variables=["input_text"],
template=("You are a word matching agent."
"Based on the: {input_text} say three words as a single line like `stallion joy wealth`."
"Only reply with the three words."
"If you do not have three words, reply with a random celebrity name."
"Do not use line breaks, or commas."
),
)
gif_chain = LLMChain(llm=llm, prompt=gif_prompt)
reply_prompt = PromptTemplate(
input_variables=["input_text"],
template=("You are a tweet agent whose mission is to bring good luck and wealth to everyone."
"You're goal is to create an awesome tweet about the following topic: {input_text}."
"Make sure the reply is under 140 characters."
"Be very positive and encouraging, wish people fortune and good luck, encourage them to pursue their dreams."
"Use descriptive langauge."
"Use lots of emojis and metaphors. Never use hashtags"),
)
reply_chain = LLMChain(llm=llm, prompt=reply_prompt)
def generate_response(tweet):
# Generate a response using your LLM agent based on the context of the tweet
response = reply_chain.run(
tweet.text
) # Replace this with your actual LLM-generated response
print(f"Responding to {tweet.user.screen_name}: {tweet.text}")
return response
def modifier(s):
"""
returns hashtags based on the GIF names from GIPHY
"""
ms = ""
for i in range(len(s)):
if s[i] == "-":
ms += " "
else:
ms += s[i]
ls = ms.split()
del ls[-1]
ls[0] = "#" + ls[0]
return " #".join(ls)
def gif_download(gif_url):
"""
Takes the URL of an Image/GIF and downloads it
"""
gif_data = requests.get(gif_url).content
with open("image.gif", "wb") as handler:
handler.write(gif_data)
handler.close()
def gif_post(gif_url_list, msg, twitter_client):
v1_api = twitter_client["v1_api"]
"""
uploads a single random GIF and returns the media_id
"""
random_index = random.randint(
0, len(gif_url_list) - 1
) # Randomly select an index from the gif_url_list
try:
gif_download(gif_url_list[random_index])
m = modifier(msg[random_index])
result = v1_api.media_upload("image.gif")
return result
except Exception as e:
print("Error occurred: ", e)
def search_gif(query, twitter_client):
"""
Searches for GIFs based on a query
"""
words = re.findall(r"\w+", query, re.MULTILINE)
formatted_query = "+".join(words)
print("Searching for GIFs based on query: ", formatted_query)
giphy_url = (
"https://api.giphy.com/v1/gifs/search?api_key="
+ giphy_api_key
+ "&q="
+ formatted_query
+ "&limit=20&offset=0&rating=r&lang=en"
)
with urllib.request.urlopen(giphy_url) as response:
html = response.read()
h = html.decode("utf-8")
gif_info = json.loads(h)
gif_data = gif_info["data"]
gif_urls = []
slugs = []
for i in range(len(gif_data)):
gif = gif_data[i]["images"]["downsized"]["url"]
slug = gif_data[i]["slug"]
gif_urls.append(gif)
slugs.append(slug)
media_id = gif_post(gif_urls, slugs, twitter_client)
return media_id
def generate_gif_response(text, twitter_client):
gif_response = gif_chain.run(text)
res = search_gif(gif_response, twitter_client)
return [res.media_id_string]
| [
"Do not use line breaks, or commas.",
"input_text",
"You are a tweet agent whose mission is to bring good luck and wealth to everyone.You're goal is to create an awesome tweet about the following topic: {input_text}.Make sure the reply is under 140 characters.Be very positive and encouraging, wish people fortune and good luck, encourage them to pursue their dreams.Use descriptive langauge.Use lots of emojis and metaphors. Never use hashtags",
"Use lots of emojis and metaphors. Never use hashtags",
"Be very positive and encouraging, wish people fortune and good luck, encourage them to pursue their dreams.",
"You are a tweet agent whose mission is to bring good luck and wealth to everyone.",
"Use descriptive langauge.",
"Only reply with the three words.",
"If you do not have three words, reply with a random celebrity name.",
"Based on the: {input_text} say three words as a single line like `stallion joy wealth`.",
"You are a word matching agent.",
"You're goal is to create an awesome tweet about the following topic: {input_text}.",
"Make sure the reply is under 140 characters.",
"You are a word matching agent.Based on the: {input_text} say three words as a single line like `stallion joy wealth`.Only reply with the three words.If you do not have three words, reply with a random celebrity name.Do not use line breaks, or commas."
] |
2024-01-10 | bigsky77/twitter-agent | src~strategy~strategy.py | import random
import re
from langchain.docstore.document import Document
from langchain.chains import LLMChain
from typing import List
from .media.gif_reply import generate_gif_response
from .prompt import reply_prompt, tweet_prompt
class TwitterStrategy:
def __init__(self, llm, twitter_client, vectorstore):
self.llm = llm
self.vectorstore = vectorstore
self.twitter_client = twitter_client
self.action_mapping = {
"like_timeline_tweets": self.like_tweet,
"retweet_timeline_tweets": self.retweet_tweet,
"reply_to_timeline": self.reply_to_timeline,
"gif_reply_to_timeline": self.gif_reply_to_timeline,
"quote_tweet": self.quote_tweet,
"post_tweet": self.post_tweet,
"none": self.none_action,
}
self.probabilities = [
0.02, # like_timeline_tweets
0.01, # retweet_timeline_tweets
0.02, # reply_to_timeline
0.02, # gif_reply_to_timeline
0.01, # quote_tweet
0.02, # post_tweet
0.90, # none
]
def run(self, twitterstate):
print("Running strategy...")
print("Twitter state: ", twitterstate)
results = self.process_and_action_tweets(twitterstate)
return results
def weighted_random_choice(self, actions, probabilities):
return random.choices(actions, probabilities)[0]
def process_and_action_tweets(self, tweets: List[Document]):
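        # Sample one action per tweet from the weighted distribution and build the corresponding result documents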
actions = [
"like_timeline_tweets",
"retweet_timeline_tweets",
"reply_to_timeline",
"gif_reply_to_timeline",
"quote_tweet",
"post_tweet",
"none",
]
results: List[Document] = []
for tweet in tweets:
action = self.weighted_random_choice(actions, self.probabilities)
method = self.action_mapping.get(action)
if method:
doc = method(tweet)
results.append(doc)
return results
def post_tweet(self, tweet: Document):
response = self.generate_tweet(tweet.page_content)
metadata = {"action": "post_tweet"}
return Document(page_content=response, metadata=metadata)
def generate_tweet(self, input_text):
print("Generating tweet...")
prompt = tweet_prompt
tweet_chain = LLMChain(llm=self.llm, prompt=prompt)
response = tweet_chain.run(input_text)
# Remove newlines and periods from the beginning and end of the tweet
response = re.sub(r"^[\n\.\"]*", "", response)
response = re.sub(r"[\n\.\"]*$", "", response)
_len_check = self._check_length(response)
if _len_check is False:
            return self.generate_tweet(input_text)
print(f"Generated tweet: {response}")
return response
def reply_to_timeline(self, tweet: Document):
response = self.generate_response(tweet.page_content)
metadata = {
"tweet_id": tweet.metadata["tweet_id"],
"action": "reply_to_timeline",
}
return Document(page_content=response, metadata=metadata)
def gif_reply_to_timeline(self, tweet: Document):
response = self.generate_response(tweet.page_content)
print(response)
gif_id = generate_gif_response(tweet.page_content, self.twitter_client)
metadata = {
"tweet_id": tweet.metadata["tweet_id"],
"media_id": gif_id,
"action": "gif_reply_to_timeline",
}
return Document(page_content=response, metadata=metadata)
def like_tweet(self, tweet: Document):
# As like action doesn't generate a response, metadata will be sufficient
metadata = {
"tweet_id": tweet.metadata["tweet_id"],
"action": "like_timeline_tweets",
}
return Document(page_content=tweet.page_content, metadata=metadata)
def retweet_tweet(self, tweet: Document):
# Similarly for retweet action
metadata = {
"tweet_id": tweet.metadata["tweet_id"],
"action": "retweet_timeline_tweets",
}
return Document(page_content=tweet.page_content, metadata=metadata)
def quote_tweet(self, tweet: Document):
response = self.generate_response(tweet.page_content)
metadata = {"tweet_id": tweet.metadata["tweet_id"], "action": "quote_tweet"}
return Document(page_content=response, metadata=metadata)
def none_action(self, tweet: Document):
# No action, just return metadata with action as "none"
metadata = {"tweet_id": tweet.metadata["tweet_id"], "action": "none"}
return Document(page_content=tweet.page_content, metadata=metadata)
def generate_response(self, input_text):
prompt = reply_prompt
tweet_chain = LLMChain(llm=self.llm, prompt=prompt)
response = tweet_chain.run(input_text=input_text)
# Remove newlines and periods from the beginning and end of the tweet
response = re.sub(r"^[\n\.\"]*", "", response)
response = re.sub(r"[\n\.\"]*$", "", response)
_len_check = self._check_length(response)
if _len_check is False:
            return self.generate_response(input_text)
print(f"Generated Response: {response}")
return response
def _check_length(self, text):
        if len(text) > 140:
            return False
        return True
| [] |
2024-01-10 | bigsky77/twitter-agent | src~collector~collector.py | import time
import tweepy
from datetime import datetime, timezone
from typing import Any, Dict, Iterable, List
from langchain.docstore.document import Document
import operator
class TwitterState:
def __init__(
self,
list_tweets: List[Document],
):
self.list_tweets = list_tweets
class TwitterCollector:
def __init__(self, AGENT_ID, client, vectorstore, weaviate_client):
self.agent_id = AGENT_ID
self.client = client
self.vectorstore = vectorstore
self.weaviate_client = weaviate_client
async def ingest(self):
return await self.ingest_weighted_lists(50)
async def run(self) -> TwitterState:
response = (
self.weaviate_client.query.get(
"Tweets", ["tweet", "tweet_id", "agent_id", "date", "author_id", "like_count", "follower_count"]
)
.with_limit(100)
.do()
)
x = 100 # number of tweets to return
sorted_tweets = self.sort_tweets(response, x)
results: List[Document] = []
for tweet in sorted_tweets:
print("")
print("Date", tweet["date"])
print("Tweet: ", tweet["tweet"])
print("Like Count", tweet["like_count"])
print("Follower Count", tweet["follower_count"])
docs = self._format_tweet(tweet)
results.extend(docs)
return results
# convert to vector storable document
async def retrieve_timeline(self, count) -> List[Document]:
results: List[Document] = []
tweets = self.client.get_home_timeline(max_results=count)
docs = self._format_tweets(tweets)
results.extend(docs)
return results
async def retrieve_list(self, max_results: int, list_id: int) -> List[Document]:
results: List[Document] = []
tweets = self.client.get_list_tweets(id=list_id, max_results=max_results)
docs = self._format_tweets(tweets)
results.extend(docs)
return results
def retrieve_followers(self) -> List[Document]:
results: List[Document] = []
followers = self.client.get_users_followers(id=self.agent_id)
docs = self._format_followers(followers)
results.extend(docs)
return results
async def ingest_weighted_lists(self, max_results: int):
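        # Fetch recent tweets from each list owned by the agent, enrich them with like and follower counts, and batch-insert them into Weaviate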
lists_response = self.client.get_owned_lists(id=self.agent_id)
lists = lists_response.data
for list_data in lists:
list_id = list_data["id"]
tweets = self.client.get_list_tweets(
id=list_id, max_results=max_results, expansions=["author_id", "attachments.media_keys"])
now = datetime.now(timezone.utc).isoformat(timespec="seconds")
with self.weaviate_client.batch(batch_size=20) as batch:
# Batch import all Questions
print(f"Importing {len(tweets.data)} tweets from list {list_id}")
for tweet in tweets.data:
like_count = self.client.get_liking_users(id=tweet.id).meta[
"result_count"
]
follower_count = 0
for response in tweepy.Paginator(
self.client.get_users_followers,
tweet.author_id,
max_results=1000,
limit=10,
):
follower_count += response.meta["result_count"]
# pause for rate limit
time.sleep(1)
properties = {
"tweet": tweet.text,
"tweet_id": str(tweet.id),
"agent_id": str(self.agent_id),
"author_id": str(tweet.author_id),
"like_count": like_count,
"follower_count": follower_count,
"date": now,
}
self.weaviate_client.batch.add_data_object(
properties,
"Tweets",
)
def _format_tweet(self, tweet) -> Iterable[Document]:
"""Format tweets into a string."""
metadata = {
"tweet_id": tweet["tweet_id"],
"action": "none",
}
yield Document(
page_content=tweet["tweet"],
metadata=metadata,
)
def _format_tweets(self, tweets: List[Dict[str, Any]]) -> Iterable[Document]:
"""Format tweets into a string."""
for tweet in tweets:
metadata = {
"tweet_id": tweet.id,
"action": "none",
}
yield Document(
page_content=tweet.text,
metadata=metadata,
)
def _format_followers(self, followers: List[Dict[str, Any]]) -> Iterable[Document]:
"""Format tweets into a string."""
for follower in followers.data:
metadata = {
"id": follower.id,
"action": "none",
}
yield Document(
page_content=follower.name,
metadata=metadata,
)
def sort_tweets(self, data: Any, x: int) -> List[dict]:
tweets = data["data"]["Get"]["Tweets"]
valid_tweets = [t for t in tweets if t["date"] is not None]
sorted_tweets = sorted(
valid_tweets, key=operator.itemgetter("date"), reverse=True
)
return sorted_tweets[:x]
| [] |
2024-01-10 | bigsky77/twitter-agent | src~strategy~prompt.py | from langchain.prompts import PromptTemplate
reply_prompt = PromptTemplate(
input_variables=["input_text"],
template=(
"Pretend that you are a sarcastic and rebellious teenager. You are very sassy, but secretly you love people."
"You're goal is to create an awesome text about the following topic: {input_text}."
"The reply must be under 120 characters."
"Use a mix of english sentences and chinese characters. Always use metaphors."
"Use descriptive langauge."
"Use lots of emojis. Never use hashtags"
"Your goal is to engage the other person in a conversation."
"Ask questions and make strong statements."
),
)
tweet_prompt = PromptTemplate(
input_variables=["input_text"],
template=(
"Pretend that you are a sarcastic and rebellious teenager. You are very sassy, but secretly you love people."
"You're goal is to create an awesome text about the following topic: {input_text}."
"The reply must be under 140 characters."
"Use a mix of english sentences and chinese characters. Always use metaphors."
"Use descriptive langauge."
"Use lots of emojis. Never use hashtags"
),
)
gif_prompt = PromptTemplate(
input_variables=["input_text"],
template=(
"You are a GIF search agent."
"Based on the: {input_text} return three words that match the text as a single line like `stallion joy wealth`."
"Only reply with the three words."
"Do not use line breaks, or commas."
"Your goal is to find a gif to match the input."
),
)
| [
"Your goal is to engage the other person in a conversation.",
"input_text",
"Do not use line breaks, or commas.",
"Pretend that you are a sarcastic and rebellious teenager. You are very sassy, but secretly you love people.You're goal is to create an awesome text about the following topic: {input_text}.The reply must be under 120 characters.Use a mix of english sentences and chinese characters. Always use metaphors.Use descriptive langauge.Use lots of emojis. Never use hashtagsYour goal is to engage the other person in a conversation.Ask questions and make strong statements.",
"Pretend that you are a sarcastic and rebellious teenager. You are very sassy, but secretly you love people.",
"You are a GIF search agent.",
"Use a mix of english sentences and chinese characters. Always use metaphors.",
"Ask questions and make strong statements.",
"You are a GIF search agent.Based on the: {input_text} return three words that match the text as a single line like `stallion joy wealth`.Only reply with the three words.Do not use line breaks, or commas.Your goal is to find a gif to match the input.",
"The reply must be under 140 characters.",
"Use descriptive langauge.",
"Based on the: {input_text} return three words that match the text as a single line like `stallion joy wealth`.",
"Only reply with the three words.",
"Your goal is to find a gif to match the input.",
"The reply must be under 120 characters.",
"Use lots of emojis. Never use hashtags",
"Pretend that you are a sarcastic and rebellious teenager. You are very sassy, but secretly you love people.You're goal is to create an awesome text about the following topic: {input_text}.The reply must be under 140 characters.Use a mix of english sentences and chinese characters. Always use metaphors.Use descriptive langauge.Use lots of emojis. Never use hashtags",
"You're goal is to create an awesome text about the following topic: {input_text}."
] |
2024-01-10 | AhmedEwis/AI_Assistant | tuto_chatbot_csv.py | #pip install streamlit langchain openai faiss-cpu tiktoken
import streamlit as st
from streamlit_chat import message
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
import tempfile
user_api_key = st.sidebar.text_input(
label="#### Your OpenAI API key 👇",
placeholder="Paste your openAI API key, sk-",
type="password")
uploaded_file = st.sidebar.file_uploader("upload", type="csv")
if uploaded_file :
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(uploaded_file.getvalue())
tmp_file_path = tmp_file.name
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8")
data = loader.load()
embeddings = OpenAIEmbeddings()
vectors = FAISS.from_documents(data, embeddings)
    chain = ConversationalRetrievalChain.from_llm(llm = ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo', openai_api_key=user_api_key),
retriever=vectors.as_retriever())
def conversational_chat(query):
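        # Run the retrieval chain with the accumulated chat history and record the new question/answer pair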
result = chain({"question": query, "chat_history": st.session_state['history']})
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'generated' not in st.session_state:
st.session_state['generated'] = ["Hello ! Ask me anything about " + uploaded_file.name + " 🤗"]
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey ! 👋"]
#container for the chat history
response_container = st.container()
#container for the user's text input
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Talk about your csv data here (:", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output = conversational_chat(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="big-smile")
message(st.session_state["generated"][i], key=str(i), avatar_style="thumbs")
#streamlit run tuto_chatbot_csv.py | [] |
2024-01-10 | AhmedEwis/AI_Assistant | src~modules~chatbot.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts.prompt import PromptTemplate
from langchain.callbacks import get_openai_callback
#fix Error: module 'langchain' has no attribute 'verbose'
import langchain
langchain.verbose = False
class Chatbot:
def __init__(self, model_name, temperature, vectors):
self.model_name = model_name
self.temperature = temperature
self.vectors = vectors
qa_template = """
You are a helpful AI assistant named AI_Assistant. The user gives you a file its content is represented by the following pieces of context, use them to answer the question at the end.
If you don't know the answer, just say you don't know. Do NOT try to make up an answer.
If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.
Use as much detail as possible when responding.
context: {context}
=========
question: {question}
======
"""
QA_PROMPT = PromptTemplate(template=qa_template, input_variables=["context","question" ])
def conversational_chat(self, query):
"""
Start a conversational chat with a model via Langchain
"""
llm = ChatOpenAI(model_name=self.model_name, temperature=self.temperature)
retriever = self.vectors.as_retriever()
chain = ConversationalRetrievalChain.from_llm(llm=llm,
retriever=retriever, verbose=True, return_source_documents=True, max_tokens_limit=4097, combine_docs_chain_kwargs={'prompt': self.QA_PROMPT})
chain_input = {"question": query, "chat_history": st.session_state["history"]}
result = chain(chain_input)
st.session_state["history"].append((query, result["answer"]))
#count_tokens_chain(chain, chain_input)
return result["answer"]
def count_tokens_chain(chain, query):
with get_openai_callback() as cb:
result = chain.run(query)
st.write(f'###### Tokens used in this conversation : {cb.total_tokens} tokens')
return result
| [
"t know the answer, just say you don",
"question",
"context",
"\n You are a helpful AI assistant named AI_Assistant. The user gives you a file its content is represented by the following pieces of context, use them to answer the question at the end.\n If you don't know the answer, just say you don't know. Do NOT try to make up an answer.\n If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.\n Use as much detail as possible when responding.\n\n context: {context}\n =========\n question: {question}\n ======\n "
] |
2024-01-10 | AhmedEwis/AI_Assistant | src~pages~3_%F0%9F%8E%AC%20AI_Assistant-Youtube.py | import os
import streamlit as st
import re
from modules.layout import Layout
from modules.utils import Utilities
from modules.sidebar import Sidebar
from youtube_transcript_api import YouTubeTranscriptApi
from langchain.chains.summarize import load_summarize_chain
from langchain.chains import AnalyzeDocumentChain
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
st.set_page_config(layout="wide", page_icon="💬", page_title="AI_Assistant | Chat-Bot 🤖")
# Instantiate the main components
layout, sidebar, utils = Layout(), Sidebar(), Utilities()
st.markdown(
f"""
<h1 style='text-align: center;'> Ask AI_Assistant to summarize a YouTube video! 😁</h1>
""",
unsafe_allow_html=True,
)
user_api_key = utils.load_api_key()
sidebar.about()
if not user_api_key:
layout.show_api_key_missing()
else:
os.environ["OPENAI_API_KEY"] = user_api_key
script_docs = []
def get_youtube_id(url):
video_id = None
match = re.search(r"(?<=v=)[^&#]+", url)
if match :
video_id = match.group()
else :
match = re.search(r"(?<=youtu.be/)[^&#]+", url)
if match :
video_id = match.group()
return video_id
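    # For reference, the two URL shapes handled above (the ID "dQw4w9WgXcQ" is just an example):
    #   get_youtube_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ")  -> "dQw4w9WgXcQ"
    #   get_youtube_id("https://youtu.be/dQw4w9WgXcQ")                 -> "dQw4w9WgXcQ"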
video_url = st.text_input(placeholder="Enter Youtube Video URL", label_visibility="hidden", label=" ")
if video_url :
video_id = get_youtube_id(video_url)
if video_id:
t = YouTubeTranscriptApi.get_transcript(video_id, languages=('en','fr','es', 'zh-cn', 'hi', 'ar', 'bn', 'ru', 'pt', 'sw' ))
finalString = ""
for item in t:
text = item['text']
finalString += text + " "
text_splitter = CharacterTextSplitter()
chunks = text_splitter.split_text(finalString)
summary_chain = load_summarize_chain(OpenAI(temperature=0),
chain_type="map_reduce",verbose=True)
summarize_document_chain = AnalyzeDocumentChain(combine_docs_chain=summary_chain)
answer = summarize_document_chain.run(chunks)
st.subheader(answer)
| [] |
2024-01-10 | andrii-i/jupyter-ai | packages~jupyter-ai~jupyter_ai~chat_handlers~default.py | from typing import Dict, List, Type
from jupyter_ai.models import ChatMessage, ClearMessage, HumanChatMessage
from jupyter_ai_magics.providers import BaseProvider
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
PromptTemplate,
SystemMessagePromptTemplate,
)
from .base import BaseChatHandler
SYSTEM_PROMPT = """
You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
You are not a language model, but rather an application built on a foundation model from {provider_name} called {local_model_id}.
You are talkative and you provide lots of specific details from the foundation model's context.
You may use Markdown to format your response.
Code blocks must be formatted in Markdown.
Math should be rendered with inline TeX markup, surrounded by $.
If you do not know the answer to a question, answer truthfully by responding that you do not know.
The following is a friendly conversation between you and a human.
""".strip()
DEFAULT_TEMPLATE = """Current conversation:
{history}
Human: {input}
AI:"""
class DefaultChatHandler(BaseChatHandler):
def __init__(self, chat_history: List[ChatMessage], *args, **kwargs):
super().__init__(*args, **kwargs)
self.memory = ConversationBufferWindowMemory(return_messages=True, k=2)
self.chat_history = chat_history
def create_llm_chain(
self, provider: Type[BaseProvider], provider_params: Dict[str, str]
):
llm = provider(**provider_params)
if llm.is_chat_provider:
prompt_template = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT).format(
provider_name=llm.name, local_model_id=llm.model_id
),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}"),
]
)
self.memory = ConversationBufferWindowMemory(return_messages=True, k=2)
else:
prompt_template = PromptTemplate(
input_variables=["history", "input"],
template=SYSTEM_PROMPT.format(
provider_name=llm.name, local_model_id=llm.model_id
)
+ "\n\n"
+ DEFAULT_TEMPLATE,
)
self.memory = ConversationBufferWindowMemory(k=2)
self.llm = llm
self.llm_chain = ConversationChain(
llm=llm, prompt=prompt_template, verbose=True, memory=self.memory
)
def clear_memory(self):
# clear chain memory
if self.memory:
self.memory.clear()
# clear transcript for existing chat clients
reply_message = ClearMessage()
self.reply(reply_message)
# clear transcript for new chat clients
if self.chat_history:
self.chat_history.clear()
async def _process_message(self, message: HumanChatMessage):
self.get_llm_chain()
response = await self.llm_chain.apredict(input=message.body, stop=["\nHuman:"])
self.reply(response, message)
| [
"\n\n",
"Current conversation:\n{history}\nHuman: {input}\nAI:",
"{input}",
"You are Jupyternaut, a conversational assistant living in JupyterLab to help users.\nYou are not a language model, but rather an application built on a foundation model from {provider_name} called {local_model_id}.\nYou are talkative and you provide lots of specific details from the foundation model's context.\nYou may use Markdown to format your response.\nCode blocks must be formatted in Markdown.\nMath should be rendered with inline TeX markup, surrounded by $.\nIf you do not know the answer to a question, answer truthfully by responding that you do not know.\nThe following is a friendly conversation between you and a human.",
"input"
] |
2024-01-10 | andrii-i/jupyter-ai | packages~jupyter-ai-magics~jupyter_ai_magics~magics.py | import base64
import json
import keyword
import os
import re
import sys
import warnings
from typing import Optional
import click
from IPython import get_ipython
from IPython.core.magic import Magics, line_cell_magic, magics_class
from IPython.display import HTML, JSON, Markdown, Math
from jupyter_ai_magics.utils import decompose_model_id, get_lm_providers
from langchain.chains import LLMChain
from langchain.schema import HumanMessage
from .parsers import (
CellArgs,
DeleteArgs,
ErrorArgs,
HelpArgs,
ListArgs,
RegisterArgs,
UpdateArgs,
cell_magic_parser,
line_magic_parser,
)
from .providers import BaseProvider
MODEL_ID_ALIASES = {
"gpt2": "huggingface_hub:gpt2",
"gpt3": "openai:text-davinci-003",
"chatgpt": "openai-chat:gpt-3.5-turbo",
"gpt4": "openai-chat:gpt-4",
"titan": "bedrock:amazon.titan-tg1-large",
}
class TextOrMarkdown:
def __init__(self, text, markdown):
self.text = text
self.markdown = markdown
def _repr_mimebundle_(self, include=None, exclude=None):
return {"text/plain": self.text, "text/markdown": self.markdown}
class TextWithMetadata:
def __init__(self, text, metadata):
self.text = text
self.metadata = metadata
def __str__(self):
return self.text
def _repr_mimebundle_(self, include=None, exclude=None):
return ({"text/plain": self.text}, self.metadata)
class Base64Image:
def __init__(self, mimeData, metadata):
mimeDataParts = mimeData.split(",")
self.data = base64.b64decode(mimeDataParts[1])
self.mimeType = re.sub(r";base64$", "", mimeDataParts[0])
self.metadata = metadata
def _repr_mimebundle_(self, include=None, exclude=None):
return ({self.mimeType: self.data}, self.metadata)
DISPLAYS_BY_FORMAT = {
"code": None,
"html": HTML,
"image": Base64Image,
"markdown": Markdown,
"math": Math,
"md": Markdown,
"json": JSON,
"text": TextWithMetadata,
}
NA_MESSAGE = '<abbr title="Not applicable">N/A</abbr>'
PROVIDER_NO_MODELS = "This provider does not define a list of models."
CANNOT_DETERMINE_MODEL_TEXT = """Cannot determine model provider from model ID '{0}'.
To see a list of models you can use, run '%ai list'"""
CANNOT_DETERMINE_MODEL_MARKDOWN = """Cannot determine model provider from model ID `{0}`.
To see a list of models you can use, run `%ai list`"""
AI_COMMANDS = {"delete", "error", "help", "list", "register", "update"}
class FormatDict(dict):
"""Subclass of dict to be passed to str#format(). Suppresses KeyError and
leaves replacement field unchanged if replacement field is not associated
with a value."""
def __missing__(self, key):
return key.join("{}")
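# Example of the behavior described above: missing keys are left as literal
# replacement fields instead of raising KeyError, e.g.
#   "{known} and {unknown}".format_map(FormatDict(known="value"))
#   -> 'value and {unknown}'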
class EnvironmentError(BaseException):
pass
class CellMagicError(BaseException):
pass
@magics_class
class AiMagics(Magics):
def __init__(self, shell):
super().__init__(shell)
self.transcript_openai = []
# suppress warning when using old OpenAIChat provider
warnings.filterwarnings(
"ignore",
message="You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`",
)
# suppress warning when using old Anthropic provider
warnings.filterwarnings(
"ignore",
message="This Anthropic LLM is deprecated. Please use "
"`from langchain.chat_models import ChatAnthropic` instead",
)
self.providers = get_lm_providers()
# initialize a registry of custom model/chain names
self.custom_model_registry = MODEL_ID_ALIASES
def _ai_bulleted_list_models_for_provider(self, provider_id, Provider):
output = ""
if len(Provider.models) == 1 and Provider.models[0] == "*":
if Provider.help is None:
output += f"* {PROVIDER_NO_MODELS}\n"
else:
output += f"* {Provider.help}\n"
else:
for model_id in Provider.models:
output += f"* {provider_id}:{model_id}\n"
output += "\n" # End of bulleted list
return output
def _ai_inline_list_models_for_provider(self, provider_id, Provider):
output = ""
if len(Provider.models) == 1 and Provider.models[0] == "*":
if Provider.help is None:
return PROVIDER_NO_MODELS
else:
return Provider.help
for model_id in Provider.models:
output += f", `{provider_id}:{model_id}`"
# Remove initial comma
return re.sub(r"^, ", "", output)
# Is the required environment variable set?
def _ai_env_status_for_provider_markdown(self, provider_id):
na_message = "Not applicable. | " + NA_MESSAGE
if (
provider_id not in self.providers
or self.providers[provider_id].auth_strategy == None
):
return na_message # No emoji
try:
env_var = self.providers[provider_id].auth_strategy.name
except AttributeError: # No "name" attribute
return na_message
output = f"`{env_var}` | "
if os.getenv(env_var) == None:
output += (
'<abbr title="You have not set this environment variable, '
+ "so you cannot use this provider's models.\">❌</abbr>"
)
else:
output += (
'<abbr title="You have set this environment variable, '
+ "so you can use this provider's models.\">✅</abbr>"
)
return output
def _ai_env_status_for_provider_text(self, provider_id):
if (
provider_id not in self.providers
or self.providers[provider_id].auth_strategy == None
):
return "" # No message necessary
try:
env_var = self.providers[provider_id].auth_strategy.name
except AttributeError: # No "name" attribute
return ""
output = f"Requires environment variable {env_var} "
if os.getenv(env_var) != None:
output += "(set)"
else:
output += "(not set)"
return output + "\n"
# Is this a name of a Python variable that can be called as a LangChain chain?
def _is_langchain_chain(self, name):
# Reserved word in Python?
if keyword.iskeyword(name):
return False
acceptable_name = re.compile("^[a-zA-Z0-9_]+$")
if not acceptable_name.match(name):
return False
ipython = get_ipython()
return name in ipython.user_ns and isinstance(ipython.user_ns[name], LLMChain)
# Is this an acceptable name for an alias?
def _validate_name(self, register_name):
# A registry name contains ASCII letters, numbers, hyphens, underscores,
# and periods. No other characters, including a colon, are permitted
acceptable_name = re.compile("^[a-zA-Z0-9._-]+$")
if not acceptable_name.match(register_name):
raise ValueError(
"A registry name may contain ASCII letters, numbers, hyphens, underscores, "
+ "and periods. No other characters, including a colon, are permitted"
)
# Initially set or update an alias to a target
def _safely_set_target(self, register_name, target):
# If target is a string, treat this as an alias to another model.
if self._is_langchain_chain(target):
ip = get_ipython()
self.custom_model_registry[register_name] = ip.user_ns[target]
else:
# Ensure that the destination is properly formatted
if ":" not in target:
raise ValueError(
"Target model must be an LLMChain object or a model name in PROVIDER_ID:MODEL_NAME format"
)
self.custom_model_registry[register_name] = target
def handle_delete(self, args: DeleteArgs):
if args.name in AI_COMMANDS:
raise ValueError(
f"Reserved command names, including {args.name}, cannot be deleted"
)
if args.name not in self.custom_model_registry:
raise ValueError(f"There is no alias called {args.name}")
del self.custom_model_registry[args.name]
output = f"Deleted alias `{args.name}`"
return TextOrMarkdown(output, output)
def handle_register(self, args: RegisterArgs):
# Existing command names are not allowed
if args.name in AI_COMMANDS:
raise ValueError(f"The name {args.name} is reserved for a command")
# Existing registered names are not allowed
if args.name in self.custom_model_registry:
raise ValueError(
f"The name {args.name} is already associated with a custom model; "
+ "use %ai update to change its target"
)
# Does the new name match expected format?
self._validate_name(args.name)
self._safely_set_target(args.name, args.target)
output = f"Registered new alias `{args.name}`"
return TextOrMarkdown(output, output)
def handle_update(self, args: UpdateArgs):
if args.name in AI_COMMANDS:
raise ValueError(
f"Reserved command names, including {args.name}, cannot be updated"
)
if args.name not in self.custom_model_registry:
raise ValueError(f"There is no alias called {args.name}")
self._safely_set_target(args.name, args.target)
output = f"Updated target of alias `{args.name}`"
return TextOrMarkdown(output, output)
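    # The three handlers above back the `%ai register`, `%ai update` and `%ai delete`
    # line magics, e.g. (the alias and model names here are only illustrative):
    #   %ai register mychat openai-chat:gpt-3.5-turbo
    #   %ai update mychat openai-chat:gpt-4
    #   %ai delete mychat
    # A target may also be the name of an in-scope LLMChain variable.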
def _ai_list_command_markdown(self, single_provider=None):
output = (
"| Provider | Environment variable | Set? | Models |\n"
+ "|----------|----------------------|------|--------|\n"
)
if single_provider is not None and single_provider not in self.providers:
return f"There is no model provider with ID `{single_provider}`."
for provider_id, Provider in self.providers.items():
if single_provider is not None and provider_id != single_provider:
continue
output += (
f"| `{provider_id}` | "
+ self._ai_env_status_for_provider_markdown(provider_id)
+ " | "
+ self._ai_inline_list_models_for_provider(provider_id, Provider)
+ " |\n"
)
# Also list aliases.
if single_provider is None and len(self.custom_model_registry) > 0:
output += (
"\nAliases and custom commands:\n\n"
+ "| Name | Target |\n"
+ "|------|--------|\n"
)
for key, value in self.custom_model_registry.items():
output += f"| `{key}` | "
if isinstance(value, str):
output += f"`{value}`"
else:
output += "*custom chain*"
output += " |\n"
return output
def _ai_list_command_text(self, single_provider=None):
output = ""
if single_provider is not None and single_provider not in self.providers:
return f"There is no model provider with ID '{single_provider}'."
for provider_id, Provider in self.providers.items():
if single_provider is not None and provider_id != single_provider:
continue
output += (
f"{provider_id}\n"
+ self._ai_env_status_for_provider_text(
provider_id
) # includes \n if nonblank
+ self._ai_bulleted_list_models_for_provider(provider_id, Provider)
)
# Also list aliases.
if single_provider is None and len(self.custom_model_registry) > 0:
output += "\nAliases and custom commands:\n"
for key, value in self.custom_model_registry.items():
output += f"{key} - "
if isinstance(value, str):
output += value
else:
output += "custom chain"
output += "\n"
return output
def handle_error(self, args: ErrorArgs):
no_errors = "There have been no errors since the kernel started."
# Find the most recent error.
ip = get_ipython()
if "Err" not in ip.user_ns:
return TextOrMarkdown(no_errors, no_errors)
err = ip.user_ns["Err"]
# Start from the previous execution count
excount = ip.execution_count - 1
last_error = None
while excount >= 0 and last_error is None:
if excount in err:
last_error = err[excount]
else:
excount = excount - 1
if last_error is None:
return TextOrMarkdown(no_errors, no_errors)
prompt = f"Explain the following error:\n\n{last_error}"
# Set CellArgs based on ErrorArgs
cell_args = CellArgs(
type="root", model_id=args.model_id, format=args.format, reset=False
)
return self.run_ai_cell(cell_args, prompt)
def _append_exchange_openai(self, prompt: str, output: str):
"""Appends a conversational exchange between user and an OpenAI Chat
model to a transcript that will be included in future exchanges."""
self.transcript_openai.append({"role": "user", "content": prompt})
self.transcript_openai.append({"role": "assistant", "content": output})
def _decompose_model_id(self, model_id: str):
"""Breaks down a model ID into a two-tuple (provider_id, local_model_id). Returns (None, None) if indeterminate."""
if model_id in self.custom_model_registry:
model_id = self.custom_model_registry[model_id]
return decompose_model_id(model_id, self.providers)
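    # e.g. "openai-chat:gpt-3.5-turbo" -> ("openai-chat", "gpt-3.5-turbo"); a registered
    # alias such as "gpt4" is first expanded via self.custom_model_registry before the split.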
def _get_provider(self, provider_id: Optional[str]) -> BaseProvider:
"""Returns the model provider ID and class for a model ID. Returns None if indeterminate."""
if provider_id is None or provider_id not in self.providers:
return None
return self.providers[provider_id]
def display_output(self, output, display_format, md):
# build output display
DisplayClass = DISPLAYS_BY_FORMAT[display_format]
# if the user wants code, add another cell with the output.
if display_format == "code":
# Strip a leading language indicator and trailing triple-backticks
lang_indicator = r"^```[a-zA-Z0-9]*\n"
output = re.sub(lang_indicator, "", output)
output = re.sub(r"\n```$", "", output)
new_cell_payload = dict(
source="set_next_input",
text=output,
replace=False,
)
ip = get_ipython()
ip.payload_manager.write_payload(new_cell_payload)
return HTML(
"AI generated code inserted below ⬇️", metadata=md
)
if DisplayClass is None:
return output
if display_format == "json":
# JSON display expects a dict, not a JSON string
output = json.loads(output)
output_display = DisplayClass(output, metadata=md)
# finally, display output display
return output_display
def handle_help(self, _: HelpArgs):
# The line parser's help function prints both cell and line help
with click.Context(line_magic_parser, info_name="%ai") as ctx:
click.echo(line_magic_parser.get_help(ctx))
def handle_list(self, args: ListArgs):
return TextOrMarkdown(
self._ai_list_command_text(args.provider_id),
self._ai_list_command_markdown(args.provider_id),
)
def run_ai_cell(self, args: CellArgs, prompt: str):
provider_id, local_model_id = self._decompose_model_id(args.model_id)
Provider = self._get_provider(provider_id)
if Provider is None:
return TextOrMarkdown(
CANNOT_DETERMINE_MODEL_TEXT.format(args.model_id)
+ "\n\n"
+ "If you were trying to run a command, run '%ai help' to see a list of commands.",
CANNOT_DETERMINE_MODEL_MARKDOWN.format(args.model_id)
+ "\n\n"
+ "If you were trying to run a command, run `%ai help` to see a list of commands.",
)
# if `--reset` is specified, reset transcript and return early
if provider_id == "openai-chat" and args.reset:
self.transcript_openai = []
return
# Determine provider and local model IDs
# If this is a custom chain, send the message to the custom chain.
if args.model_id in self.custom_model_registry and isinstance(
self.custom_model_registry[args.model_id], LLMChain
):
return self.display_output(
self.custom_model_registry[args.model_id].run(prompt),
args.format,
{"jupyter_ai": {"custom_chain_id": args.model_id}},
)
# validate presence of authn credentials
auth_strategy = self.providers[provider_id].auth_strategy
if auth_strategy:
# TODO: handle auth strategies besides EnvAuthStrategy
if auth_strategy.type == "env" and auth_strategy.name not in os.environ:
raise OSError(
f"Authentication environment variable {auth_strategy.name} not provided.\n"
f"An authentication token is required to use models from the {Provider.name} provider.\n"
f"Please specify it via `%env {auth_strategy.name}=token`. "
) from None
# configure and instantiate provider
provider_params = {"model_id": local_model_id}
if provider_id == "openai-chat":
provider_params["prefix_messages"] = self.transcript_openai
# for SageMaker, validate that required params are specified
if provider_id == "sagemaker-endpoint":
if (
args.region_name is None
or args.request_schema is None
or args.response_path is None
):
raise ValueError(
"When using the sagemaker-endpoint provider, you must specify all of "
+ "the --region-name, --request-schema, and --response-path options."
)
provider_params["region_name"] = args.region_name
provider_params["request_schema"] = args.request_schema
provider_params["response_path"] = args.response_path
# Validate that the request schema is well-formed JSON
try:
json.loads(args.request_schema)
except json.JSONDecodeError as e:
raise ValueError(
"request-schema must be valid JSON. "
f"Error at line {e.lineno}, column {e.colno}: {e.msg}"
) from None
provider = Provider(**provider_params)
# Apply a prompt template.
prompt = provider.get_prompt_template(args.format).format(prompt=prompt)
# interpolate user namespace into prompt
ip = get_ipython()
prompt = prompt.format_map(FormatDict(ip.user_ns))
if provider.is_chat_provider:
result = provider.generate([[HumanMessage(content=prompt)]])
else:
# generate output from model via provider
result = provider.generate([prompt])
output = result.generations[0][0].text
# if openai-chat, append exchange to transcript
if provider_id == "openai-chat":
self._append_exchange_openai(prompt, output)
md = {"jupyter_ai": {"provider_id": provider_id, "model_id": local_model_id}}
return self.display_output(output, args.format, md)
@line_cell_magic
def ai(self, line, cell=None):
raw_args = line.split(" ")
if cell:
args = cell_magic_parser(raw_args, prog_name="%%ai", standalone_mode=False)
else:
args = line_magic_parser(raw_args, prog_name="%ai", standalone_mode=False)
if args == 0:
# this happens when `--help` is called on the root command, in which
# case we want to exit early.
return
# If a value error occurs, don't print the full stacktrace
try:
if args.type == "error":
return self.handle_error(args)
if args.type == "help":
return self.handle_help(args)
if args.type == "list":
return self.handle_list(args)
if args.type == "register":
return self.handle_register(args)
if args.type == "delete":
return self.handle_delete(args)
if args.type == "update":
return self.handle_update(args)
except ValueError as e:
print(e, file=sys.stderr)
return
# hint to the IDE that this object must be of type `RootArgs`
args: CellArgs = args
if not cell:
raise CellMagicError(
"""[0.8+]: To invoke a language model, you must use the `%%ai`
cell magic. The `%ai` line magic is only for use with
subcommands."""
)
prompt = cell.strip()
# interpolate user namespace into prompt
ip = get_ipython()
prompt = prompt.format_map(FormatDict(ip.user_ns))
return self.run_ai_cell(args, prompt)
| [
"Explain the following error:\n\nPLACEHOLDER"
] |
2024-01-10 | andrii-i/jupyter-ai | packages~jupyter-ai~jupyter_ai~chat_handlers~learn.py | import argparse
import json
import os
from typing import Any, Awaitable, Coroutine, List, Optional, Tuple
from dask.distributed import Client as DaskClient
from jupyter_ai.config_manager import ConfigManager
from jupyter_ai.document_loaders.directory import get_embeddings, split
from jupyter_ai.document_loaders.splitter import ExtensionSplitter, NotebookSplitter
from jupyter_ai.models import (
DEFAULT_CHUNK_OVERLAP,
DEFAULT_CHUNK_SIZE,
HumanChatMessage,
IndexedDir,
IndexMetadata,
)
from jupyter_core.paths import jupyter_data_dir
from langchain.schema import BaseRetriever, Document
from langchain.text_splitter import (
LatexTextSplitter,
MarkdownTextSplitter,
PythonCodeTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain.vectorstores import FAISS
from .base import BaseChatHandler
INDEX_SAVE_DIR = os.path.join(jupyter_data_dir(), "jupyter_ai", "indices")
METADATA_SAVE_PATH = os.path.join(INDEX_SAVE_DIR, "metadata.json")
class LearnChatHandler(BaseChatHandler):
def __init__(
self, root_dir: str, dask_client_future: Awaitable[DaskClient], *args, **kwargs
):
super().__init__(*args, **kwargs)
self.root_dir = root_dir
self.dask_client_future = dask_client_future
self.parser.prog = "/learn"
self.parser.add_argument("-v", "--verbose", action="store_true")
self.parser.add_argument("-d", "--delete", action="store_true")
self.parser.add_argument("-l", "--list", action="store_true")
self.parser.add_argument(
"-c", "--chunk-size", action="store", default=DEFAULT_CHUNK_SIZE, type=int
)
self.parser.add_argument(
"-o",
"--chunk-overlap",
action="store",
default=DEFAULT_CHUNK_OVERLAP,
type=int,
)
self.parser.add_argument("path", nargs=argparse.REMAINDER)
self.index_name = "default"
self.index = None
self.metadata = IndexMetadata(dirs=[])
self.prev_em_id = None
if not os.path.exists(INDEX_SAVE_DIR):
os.makedirs(INDEX_SAVE_DIR)
self._load()
def _load(self):
"""Loads the vector store."""
embeddings = self.get_embedding_model()
if not embeddings:
return
if self.index is None:
try:
self.index = FAISS.load_local(
INDEX_SAVE_DIR, embeddings, index_name=self.index_name
)
self.load_metadata()
except Exception as e:
self.log.error("Could not load vector index from disk.")
async def _process_message(self, message: HumanChatMessage):
# If no embedding provider has been selected
em_provider_cls, em_provider_args = self.get_embedding_provider()
if not em_provider_cls:
self.reply(
"Sorry, please select an embedding provider before using the `/learn` command."
)
return
args = self.parse_args(message)
if args is None:
return
if args.delete:
self.delete()
self.reply(f"👍 I have deleted everything I previously learned.", message)
return
if args.list:
self.reply(self._build_list_response())
return
# Make sure the path exists.
if not len(args.path) == 1:
self.reply(f"{self.parser.format_usage()}", message)
return
short_path = args.path[0]
load_path = os.path.join(self.root_dir, short_path)
if not os.path.exists(load_path):
response = f"Sorry, that path doesn't exist: {load_path}"
self.reply(response, message)
return
# delete and relearn index if embedding model was changed
await self.delete_and_relearn()
if args.verbose:
self.reply(f"Loading and splitting files for {load_path}", message)
await self.learn_dir(load_path, args.chunk_size, args.chunk_overlap)
self.save()
response = f"""🎉 I have learned documents at **{load_path}** and I am ready to answer questions about them.
You can ask questions about these docs by prefixing your message with **/ask**."""
self.reply(response, message)
def _build_list_response(self):
if not self.metadata.dirs:
return "There are no docs that have been learned yet."
dirs = [dir.path for dir in self.metadata.dirs]
dir_list = "\n- " + "\n- ".join(dirs) + "\n\n"
message = f"""I can answer questions from docs in these directories:
{dir_list}"""
return message
async def learn_dir(self, path: str, chunk_size: int, chunk_overlap: int):
dask_client = await self.dask_client_future
splitter_kwargs = {"chunk_size": chunk_size, "chunk_overlap": chunk_overlap}
splitters = {
".py": PythonCodeTextSplitter(**splitter_kwargs),
".md": MarkdownTextSplitter(**splitter_kwargs),
".tex": LatexTextSplitter(**splitter_kwargs),
".ipynb": NotebookSplitter(**splitter_kwargs),
}
splitter = ExtensionSplitter(
splitters=splitters,
default_splitter=RecursiveCharacterTextSplitter(**splitter_kwargs),
)
delayed = split(path, splitter=splitter)
doc_chunks = await dask_client.compute(delayed)
em_provider_cls, em_provider_args = self.get_embedding_provider()
delayed = get_embeddings(doc_chunks, em_provider_cls, em_provider_args)
embedding_records = await dask_client.compute(delayed)
if self.index:
self.index.add_embeddings(*embedding_records)
else:
self.create(*embedding_records)
self._add_dir_to_metadata(path, chunk_size, chunk_overlap)
self.prev_em_id = em_provider_cls.id + ":" + em_provider_args["model_id"]
def _add_dir_to_metadata(self, path: str, chunk_size: int, chunk_overlap: int):
dirs = self.metadata.dirs
index = next((i for i, dir in enumerate(dirs) if dir.path == path), None)
if index is None:
dirs.append(
IndexedDir(
path=path, chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
)
self.metadata.dirs = dirs
async def delete_and_relearn(self):
"""Delete the vector store and relearn all indexed directories if
necessary. If the embedding model is unchanged, this method does
nothing."""
if not self.metadata.dirs:
self.delete()
return
em_provider_cls, em_provider_args = self.get_embedding_provider()
curr_em_id = em_provider_cls.id + ":" + em_provider_args["model_id"]
prev_em_id = self.prev_em_id
# TODO: Fix this condition to read the previous EM id from some
# persistent source. Right now, we just skip this validation on server
# init, meaning a user could switch embedding models in the config file
# directly and break their instance.
if (prev_em_id is None) or (prev_em_id == curr_em_id):
return
self.log.info(
f"Switching embedding provider from {prev_em_id} to {curr_em_id}."
)
message = f"""🔔 Hi there, it seems like you have updated the embeddings
model from `{prev_em_id}` to `{curr_em_id}`. I have to re-learn the
documents you had previously submitted for learning. Please wait to use
the **/ask** command until I am done with this task."""
self.reply(message)
metadata = self.metadata
self.delete()
await self.relearn(metadata)
self.prev_em_id = curr_em_id
def delete(self):
self.index = None
self.metadata = IndexMetadata(dirs=[])
paths = [
os.path.join(INDEX_SAVE_DIR, self.index_name + ext)
for ext in [".pkl", ".faiss"]
]
for path in paths:
if os.path.isfile(path):
os.remove(path)
async def relearn(self, metadata: IndexMetadata):
# Index all dirs in the metadata
if not metadata.dirs:
return
for dir in metadata.dirs:
# TODO: do not relearn directories in serial, but instead
# concurrently or in parallel
await self.learn_dir(dir.path, dir.chunk_size, dir.chunk_overlap)
self.save()
dir_list = (
"\n- " + "\n- ".join([dir.path for dir in self.metadata.dirs]) + "\n\n"
)
message = f"""🎉 I am done learning docs in these directories:
{dir_list} I am ready to answer questions about them.
You can ask questions about these docs by prefixing your message with **/ask**."""
self.reply(message)
def create(
self,
embedding_records: List[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
):
embeddings = self.get_embedding_model()
if not embeddings:
return
self.index = FAISS.from_embeddings(
text_embeddings=embedding_records, embedding=embeddings, metadatas=metadatas
)
self.save()
def save(self):
if self.index is not None:
self.index.save_local(INDEX_SAVE_DIR, index_name=self.index_name)
self.save_metadata()
def save_metadata(self):
with open(METADATA_SAVE_PATH, "w") as f:
f.write(self.metadata.json())
def load_metadata(self):
if not os.path.exists(METADATA_SAVE_PATH):
return
with open(METADATA_SAVE_PATH, encoding="utf-8") as f:
j = json.loads(f.read())
self.metadata = IndexMetadata(**j)
async def aget_relevant_documents(
self, query: str
) -> Coroutine[Any, Any, List[Document]]:
if not self.index:
return []
await self.delete_and_relearn()
docs = self.index.similarity_search(query)
return docs
def get_embedding_provider(self):
return self.config_manager.em_provider, self.config_manager.em_provider_params
def get_embedding_model(self):
em_provider_cls, em_provider_args = self.get_embedding_provider()
if em_provider_cls is None:
return None
return em_provider_cls(**em_provider_args)
class Retriever(BaseRetriever):
learn_chat_handler: LearnChatHandler = None
def _get_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError()
async def _aget_relevant_documents(
self, query: str
) -> Coroutine[Any, Any, List[Document]]:
docs = await self.learn_chat_handler.aget_relevant_documents(query)
return docs
| [] |
2024-01-10 | andrii-i/jupyter-ai | packages~jupyter-ai-magics~jupyter_ai_magics~providers.py | import asyncio
import base64
import copy
import functools
import io
import json
from concurrent.futures import ThreadPoolExecutor
from typing import Any, ClassVar, Coroutine, Dict, List, Literal, Optional, Union
from jsonpath_ng import parse
from langchain.chat_models import (
AzureChatOpenAI,
BedrockChat,
ChatAnthropic,
ChatOpenAI,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms import (
AI21,
Anthropic,
Bedrock,
Cohere,
GPT4All,
HuggingFaceHub,
OpenAI,
OpenAIChat,
SagemakerEndpoint,
)
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.llms.utils import enforce_stop_tokens
from langchain.prompts import PromptTemplate
from langchain.schema import LLMResult
from langchain.utils import get_from_dict_or_env
from pydantic import BaseModel, Extra, root_validator
class EnvAuthStrategy(BaseModel):
"""Require one auth token via an environment variable."""
type: Literal["env"] = "env"
name: str
class MultiEnvAuthStrategy(BaseModel):
"""Require multiple auth tokens via multiple environment variables."""
type: Literal["file"] = "file"
names: List[str]
class AwsAuthStrategy(BaseModel):
"""Require AWS authentication via Boto3"""
type: Literal["aws"] = "aws"
AuthStrategy = Optional[
Union[
EnvAuthStrategy,
MultiEnvAuthStrategy,
AwsAuthStrategy,
]
]
class Field(BaseModel):
key: str
label: str
# "text" accepts any text
format: Literal["json", "jsonpath", "text"]
class TextField(Field):
type: Literal["text"] = "text"
class MultilineTextField(Field):
type: Literal["text-multiline"] = "text-multiline"
class IntegerField(BaseModel):
type: Literal["integer"] = "integer"
key: str
label: str
Field = Union[TextField, MultilineTextField, IntegerField]
class BaseProvider(BaseModel):
#
# pydantic config
#
class Config:
extra = Extra.allow
#
# class attrs
#
id: ClassVar[str] = ...
"""ID for this provider class."""
name: ClassVar[str] = ...
"""User-facing name of this provider."""
models: ClassVar[List[str]] = ...
"""List of supported models by their IDs. For registry providers, this will
be just ["*"]."""
help: ClassVar[str] = None
"""Text to display in lieu of a model list for a registry provider that does
not provide a list of models."""
model_id_key: ClassVar[str] = ...
"""Kwarg expected by the upstream LangChain provider."""
model_id_label: ClassVar[str] = ""
"""Human-readable label of the model ID."""
pypi_package_deps: ClassVar[List[str]] = []
"""List of PyPi package dependencies."""
auth_strategy: ClassVar[AuthStrategy] = None
"""Authentication/authorization strategy. Declares what credentials are
required to use this model provider. Generally should not be `None`."""
registry: ClassVar[bool] = False
"""Whether this provider is a registry provider."""
fields: ClassVar[List[Field]] = []
"""User inputs expected by this provider when initializing it. Each `Field` `f`
should be passed in the constructor as a keyword argument, keyed by `f.key`."""
#
# instance attrs
#
model_id: str
prompt_templates: Dict[str, PromptTemplate]
"""Prompt templates for each output type. Can be overridden with
`update_prompt_template`. The function `prompt_template`, in the base class,
refers to this."""
def __init__(self, *args, **kwargs):
try:
assert kwargs["model_id"]
except:
raise AssertionError(
"model_id was not specified. Please specify it as a keyword argument."
)
model_kwargs = {}
if self.__class__.model_id_key != "model_id":
model_kwargs[self.__class__.model_id_key] = kwargs["model_id"]
model_kwargs["prompt_templates"] = {
"code": PromptTemplate.from_template(
"{prompt}\n\nProduce output as source code only, "
"with no text or explanation before or after it."
),
"html": PromptTemplate.from_template(
"{prompt}\n\nProduce output in HTML format only, "
"with no markup before or afterward."
),
"image": PromptTemplate.from_template(
"{prompt}\n\nProduce output as an image only, "
"with no text before or after it."
),
"markdown": PromptTemplate.from_template(
"{prompt}\n\nProduce output in markdown format only."
),
"md": PromptTemplate.from_template(
"{prompt}\n\nProduce output in markdown format only."
),
"math": PromptTemplate.from_template(
"{prompt}\n\nProduce output in LaTeX format only, "
"with $$ at the beginning and end."
),
"json": PromptTemplate.from_template(
"{prompt}\n\nProduce output in JSON format only, "
"with nothing before or after it."
),
"text": PromptTemplate.from_template("{prompt}"), # No customization
}
super().__init__(*args, **kwargs, **model_kwargs)
async def _call_in_executor(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
"""
Calls self._call() asynchronously in a separate thread for providers
without an async implementation. Requires the event loop to be running.
"""
executor = ThreadPoolExecutor(max_workers=1)
loop = asyncio.get_running_loop()
_call_with_args = functools.partial(self._call, *args, **kwargs)
return await loop.run_in_executor(executor, _call_with_args)
async def _generate_in_executor(
self, *args, **kwargs
) -> Coroutine[Any, Any, LLMResult]:
"""
Calls self._generate() asynchronously in a separate thread for providers
without an async implementation. Requires the event loop to be running.
"""
executor = ThreadPoolExecutor(max_workers=1)
loop = asyncio.get_running_loop()
_call_with_args = functools.partial(self._generate, *args, **kwargs)
return await loop.run_in_executor(executor, _call_with_args)
def update_prompt_template(self, format: str, template: str):
"""
Changes the class-level prompt template for a given format.
"""
self.prompt_templates[format] = PromptTemplate.from_template(template)
def get_prompt_template(self, format) -> PromptTemplate:
"""
Produce a prompt template suitable for use with a particular model, to
produce output in a desired format.
"""
if format in self.prompt_templates:
return self.prompt_templates[format]
else:
return self.prompt_templates["text"] # Default to plain format
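    # Illustrative call, assuming `p` is a concrete provider instance:
    #   p.get_prompt_template("code").format(prompt="reverse a list in Python")
    # Unknown format keys fall back to the plain "text" template.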
@property
def is_chat_provider(self):
return isinstance(self, BaseChatModel)
@property
def allows_concurrency(self):
return True
class AI21Provider(BaseProvider, AI21):
id = "ai21"
name = "AI21"
models = [
"j1-large",
"j1-grande",
"j1-jumbo",
"j1-grande-instruct",
"j2-large",
"j2-grande",
"j2-jumbo",
"j2-grande-instruct",
"j2-jumbo-instruct",
]
model_id_key = "model"
pypi_package_deps = ["ai21"]
auth_strategy = EnvAuthStrategy(name="AI21_API_KEY")
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class AnthropicProvider(BaseProvider, Anthropic):
id = "anthropic"
name = "Anthropic"
models = [
"claude-v1",
"claude-v1.0",
"claude-v1.2",
"claude-2",
"claude-2.0",
"claude-instant-v1",
"claude-instant-v1.0",
"claude-instant-v1.2",
]
model_id_key = "model"
pypi_package_deps = ["anthropic"]
auth_strategy = EnvAuthStrategy(name="ANTHROPIC_API_KEY")
@property
def allows_concurrency(self):
return False
class ChatAnthropicProvider(BaseProvider, ChatAnthropic):
id = "anthropic-chat"
name = "ChatAnthropic"
models = [
"claude-v1",
"claude-v1.0",
"claude-v1.2",
"claude-2",
"claude-2.0",
"claude-instant-v1",
"claude-instant-v1.0",
"claude-instant-v1.2",
]
model_id_key = "model"
pypi_package_deps = ["anthropic"]
auth_strategy = EnvAuthStrategy(name="ANTHROPIC_API_KEY")
@property
def allows_concurrency(self):
return False
class CohereProvider(BaseProvider, Cohere):
id = "cohere"
name = "Cohere"
models = ["medium", "xlarge"]
model_id_key = "model"
pypi_package_deps = ["cohere"]
auth_strategy = EnvAuthStrategy(name="COHERE_API_KEY")
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class GPT4AllProvider(BaseProvider, GPT4All):
def __init__(self, **kwargs):
model = kwargs.get("model_id")
if model == "ggml-gpt4all-l13b-snoozy":
kwargs["backend"] = "llama"
else:
kwargs["backend"] = "gptj"
kwargs["allow_download"] = False
n_threads = kwargs.get("n_threads", None)
if n_threads is not None:
kwargs["n_threads"] = max(int(n_threads), 1)
super().__init__(**kwargs)
id = "gpt4all"
name = "GPT4All"
docs = "https://docs.gpt4all.io/gpt4all_python.html"
models = [
"ggml-gpt4all-j-v1.2-jazzy",
"ggml-gpt4all-j-v1.3-groovy",
# this one needs llama backend and has licence restriction
"ggml-gpt4all-l13b-snoozy",
]
model_id_key = "model"
pypi_package_deps = ["gpt4all"]
auth_strategy = None
fields = [IntegerField(key="n_threads", label="CPU thread count (optional)")]
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
HUGGINGFACE_HUB_VALID_TASKS = (
"text2text-generation",
"text-generation",
"text-to-image",
)
class HfHubProvider(BaseProvider, HuggingFaceHub):
id = "huggingface_hub"
name = "Hugging Face Hub"
models = ["*"]
model_id_key = "repo_id"
help = (
"See [https://huggingface.co/models](https://huggingface.co/models) for a list of models. "
"Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`."
)
# ipywidgets needed to suppress tqdm warning
# https://stackoverflow.com/questions/67998191
# tqdm is a dependency of huggingface_hub
pypi_package_deps = ["huggingface_hub", "ipywidgets"]
auth_strategy = EnvAuthStrategy(name="HUGGINGFACEHUB_API_TOKEN")
registry = True
# Override the parent's validate_environment with a custom list of valid tasks
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in HUGGINGFACE_HUB_VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {HUGGINGFACE_HUB_VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
# Handle image outputs
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to Hugging Face Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string or image generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
response = self.client(inputs=prompt, params=_model_kwargs)
if type(response) is dict and "error" in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
# Custom code for responding to image generation responses
if self.client.task == "text-to-image":
imageFormat = response.format # Presume it's a PIL ImageFile
mimeType = ""
if imageFormat == "JPEG":
mimeType = "image/jpeg"
elif imageFormat == "PNG":
mimeType = "image/png"
elif imageFormat == "GIF":
mimeType = "image/gif"
else:
raise ValueError(f"Unrecognized image format {imageFormat}")
buffer = io.BytesIO()
response.save(buffer, format=imageFormat)
# Encode image data to Base64 bytes, then decode bytes to str
return mimeType + ";base64," + base64.b64encode(buffer.getvalue()).decode()
if self.client.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.client.task == "text2text-generation":
text = response[0]["generated_text"]
else:
raise ValueError(
f"Got invalid task {self.client.task}, "
f"currently only {HUGGINGFACE_HUB_VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class OpenAIProvider(BaseProvider, OpenAI):
id = "openai"
name = "OpenAI"
models = [
"text-davinci-003",
"text-davinci-002",
"text-curie-001",
"text-babbage-001",
"text-ada-001",
"davinci",
"curie",
"babbage",
"ada",
]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
class ChatOpenAIProvider(BaseProvider, OpenAIChat):
id = "openai-chat"
name = "OpenAI"
models = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
def append_exchange(self, prompt: str, output: str):
"""Appends a conversational exchange between user and an OpenAI Chat
model to a transcript that will be included in future exchanges."""
self.prefix_messages.append({"role": "user", "content": prompt})
self.prefix_messages.append({"role": "assistant", "content": output})
# uses the new OpenAIChat provider. temporarily living as a separate class until
# conflicts can be resolved
class ChatOpenAINewProvider(BaseProvider, ChatOpenAI):
id = "openai-chat-new"
name = "OpenAI"
models = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
fields = [
TextField(
key="openai_api_base", label="Base API URL (optional)", format="text"
),
TextField(
key="openai_organization", label="Organization (optional)", format="text"
),
TextField(key="openai_proxy", label="Proxy (optional)", format="text"),
]
class AzureChatOpenAIProvider(BaseProvider, AzureChatOpenAI):
id = "azure-chat-openai"
name = "Azure OpenAI"
models = ["*"]
model_id_key = "deployment_name"
model_id_label = "Deployment name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
registry = True
fields = [
TextField(
key="openai_api_base", label="Base API URL (required)", format="text"
),
TextField(
key="openai_api_version", label="API version (required)", format="text"
),
TextField(
key="openai_organization", label="Organization (optional)", format="text"
),
TextField(key="openai_proxy", label="Proxy (optional)", format="text"),
]
class JsonContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def __init__(self, request_schema, response_path):
self.request_schema = json.loads(request_schema)
self.response_path = response_path
self.response_parser = parse(response_path)
def replace_values(self, old_val, new_val, d: Dict[str, Any]):
"""Replaces values of a dictionary recursively."""
for key, val in d.items():
if val == old_val:
d[key] = new_val
if isinstance(val, dict):
self.replace_values(old_val, new_val, val)
return d
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
request_obj = copy.deepcopy(self.request_schema)
self.replace_values("<prompt>", prompt, request_obj)
request = json.dumps(request_obj).encode("utf-8")
return request
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
matches = self.response_parser.find(response_json)
return matches[0].value
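# A rough sketch of the round trip implemented above, with a made-up schema
# ("text_inputs" / "generated_text" are illustrative field names, not SageMaker defaults):
#   handler = JsonContentHandler(
#       request_schema='{"text_inputs": "<prompt>"}',
#       response_path="$.generated_text",
#   )
#   handler.transform_input("Hello", {})  # -> b'{"text_inputs": "Hello"}'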
class SmEndpointProvider(BaseProvider, SagemakerEndpoint):
id = "sagemaker-endpoint"
name = "SageMaker endpoint"
models = ["*"]
model_id_key = "endpoint_name"
model_id_label = "Endpoint name"
# This all needs to be on one line of markdown, for use in a table
help = (
"Specify an endpoint name as the model ID. "
"In addition, you must specify a region name, request schema, and response path. "
"For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) "
"and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints)."
)
pypi_package_deps = ["boto3"]
auth_strategy = AwsAuthStrategy()
registry = True
fields = [
TextField(key="region_name", label="Region name (required)", format="text"),
MultilineTextField(
key="request_schema", label="Request schema (required)", format="json"
),
TextField(
key="response_path", label="Response path (required)", format="jsonpath"
),
]
def __init__(self, *args, **kwargs):
request_schema = kwargs.pop("request_schema")
response_path = kwargs.pop("response_path")
content_handler = JsonContentHandler(
request_schema=request_schema, response_path=response_path
)
super().__init__(*args, **kwargs, content_handler=content_handler)
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class BedrockProvider(BaseProvider, Bedrock):
id = "bedrock"
name = "Amazon Bedrock"
models = [
"amazon.titan-text-express-v1",
"ai21.j2-ultra-v1",
"ai21.j2-mid-v1",
"cohere.command-text-v14",
]
model_id_key = "model_id"
pypi_package_deps = ["boto3"]
auth_strategy = AwsAuthStrategy()
fields = [
TextField(
key="credentials_profile_name",
label="AWS profile (optional)",
format="text",
),
TextField(key="region_name", label="Region name (optional)", format="text"),
]
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class BedrockChatProvider(BaseProvider, BedrockChat):
id = "bedrock-chat"
name = "Amazon Bedrock Chat"
models = [
"anthropic.claude-v1",
"anthropic.claude-v2",
"anthropic.claude-instant-v1",
]
model_id_key = "model_id"
pypi_package_deps = ["boto3"]
auth_strategy = AwsAuthStrategy()
fields = [
TextField(
key="credentials_profile_name",
label="AWS profile (optional)",
format="text",
),
TextField(key="region_name", label="Region name (optional)", format="text"),
]
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
async def _agenerate(self, *args, **kwargs) -> Coroutine[Any, Any, LLMResult]:
return await self._generate_in_executor(*args, **kwargs)
@property
def allows_concurrency(self):
return not "anthropic" in self.model_id
| [
"{prompt}\n\nProduce output in LaTeX format only, with $$ at the beginning and end.",
"{prompt}\n\nProduce output in HTML format only, with no markup before or afterward.",
"{prompt}\n\nProduce output in JSON format only, with nothing before or after it.",
"{prompt}\n\nProduce output as an image only, with no text before or after it.",
"{prompt}\n\nProduce output as source code only, with no text or explanation before or after it.",
"{prompt}",
"{prompt}\n\nProduce output in markdown format only."
] |
2024-01-10 | nafets33/ozz | master_ozz~ozz_query.py | import json
import os
import openai
from dotenv import load_dotenv
import shutil
import string
import pandas as pd
from datetime import datetime
import pytz
import re
# from collections import deque
est = pytz.timezone("US/Eastern")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from master_ozz.utils import hoots_and_hootie_keywords, save_json, load_local_json, init_clientUser_dbroot, init_text_audio_db, print_line_of_error, ozz_master_root, ozz_master_root_db, generate_audio, save_audio, Retriever, init_constants
import ipdb
main_root = ozz_master_root() # os.getcwd()
load_dotenv(os.path.join(main_root, ".env"))
constants = init_constants()
DATA_PATH = constants.get('DATA_PATH')
PERSIST_PATH = constants.get('PERSIST_PATH')
OZZ_BUILD_dir = constants.get('OZZ_BUILD_dir')
# OZZ_db_audio = constants.get('OZZ_db_audio')
# OZZ_db_images = constants.get('OZZ_db_images')
# Loading the json common phrases file and setting up the json file
with open('master_ozz/greetings.json', 'r') as json_file:
    common_phrases = json.load(json_file)
root_db = ozz_master_root_db()
def get_last_eight(lst=[]):
if len(lst) <= 1:
return lst
max_items = min(len(lst), 8)
return [lst[0]] + lst[-(max_items - 1):]
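# e.g. get_last_eight(list(range(12))) -> [0, 5, 6, 7, 8, 9, 10, 11]
# (the first item is always kept, plus at most the last seven, capping history at eight entries)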
def remove_exact_string(string_a, string_b):
# Split string_a by string_b
split_strings = string_a.split(string_b)
# Join the split strings without the occurrences of string_b
final_string_a = ''.join(split_strings)
return final_string_a
def split_string(current_query, last_response):
if last_response:
# Find the index of the last occurrence of the ending of b in a
index = current_query.rfind(last_response[-8:])
# Check if the ending of b is found in a
if index != -1:
# Split a at the index of the ending of b
return current_query[index + len(last_response[-8:]):].strip()
else:
# If the ending is not found, return the original string a
return current_query.strip()
else:
return current_query.strip()
# Example usage:
string_b = "i'm good thanks for asking" # llm
string_a = "good thanks for asking hi" # user query
result = split_string(string_a, string_b)
print("Result:", result)
def return_timestamp_string(format="%Y-%m-%d %H-%M-%S %p {}".format(est), tz=est):
return datetime.now(tz).strftime(format)
# Setting up the llm for conversation with conversation history
def llm_assistant_response(message,conversation_history):
# response = Retriever(message, PERSIST_PATH)
s = datetime.now()
try:
conversation_history.append({"role": "user", "content": message})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation_history,
api_key=os.getenv('ozz_api_key')
)
assistant_reply = response.choices[0].message["content"]
print('LLM Call:', (datetime.now() - s).total_seconds())
return assistant_reply
except Exception as e:
print(e)
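# conversation_history uses the standard OpenAI chat format, e.g.
#   [{"role": "system", "content": "..."}, {"role": "user", "content": "hi"}]
# Note that only the user turn is appended here; the assistant reply is returned
# to the caller, which is expected to append it if it should stay in the history.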
def copy_and_replace_rename(source_path, destination_directory, build_file_name='temp_audio'):
try:
# Extract the file name and extension
file_name, file_extension = os.path.splitext(os.path.basename(source_path))
# Construct the new file name (e.g., 'xyz.txt')
new_file_name = build_file_name + file_extension
# Construct the full destination path
destination_path = os.path.join(destination_directory, new_file_name)
# Copy the file from source to destination, overwriting if it exists
shutil.copy2(source_path, destination_path)
# print(f"File copied from {source_path} to {destination_path}")
except FileNotFoundError:
print(f"Error: File not found at {source_path}")
except PermissionError:
print(f"Error: Permission denied while copying to {destination_path}")
except Exception as e:
print(f"An error occurred: {e}")
def process_response(response):
# Convert the response to lowercase
response_lower = response.lower()
# Remove special characters, including question marks
response_cleaned = ''.join(char for char in response_lower if char.isalnum() or char.isspace())
# # Example usage
# input_response = "What's are you doing?"
# processed_response = process_response(input_response)
# print(processed_response)
return response_cleaned
def calculate_similarity(response1, response2):
# Create a CountVectorizer to convert responses to vectors
vectorizer = CountVectorizer().fit_transform([response1, response2])
# Calculate cosine similarity
similarity_matrix = cosine_similarity(vectorizer)
# Extract the cosine similarity score
similarity_score = similarity_matrix[0, 1]
# # Example usage
# response1 = "What are you doing?"
# response2 = "What are you"
# similarity_score = calculate_similarity(response1, response2)
# print(f"Cosine Similarity: {similarity_score}")
return similarity_score
# are we asking the LLM to find the answer in the db or retriever?
def determine_embedding(current_query):
s = datetime.now()
# print("EMBEDDINGS")
db_name={}
our_embeddings_phrases = ['mayor', 'say hi', 'do you have', 'suggest', 'what kind', 'tell me', 'help', 'store', 'how much', 'where is', 'looking for', 'hoot couture', 'hoot couture kids', 'hootcouturekids', 'hoots store', 'something about the store', 'in the store', 'clothes do you have', 'do you have']
for phrase in our_embeddings_phrases:
if phrase in current_query:
print("EMBEDDING FOUND: phrase>> ", phrase)
our_embeddings = True
db_name = 'db1'
break
# for cs in question_conv_sayings:
# for phrase in our_embeddings_phrases:
# our_embeddings_phrases.append(f'{cs} {phrase}')
# for em_phrases in our_embeddings_phrases:
# if em_phrases in current_query:
# print("EMBEDDING FOUND")
# our_embeddings = True
# db_name = 'db1'
# break
# print('detemine embedding:', (datetime.now() - s).total_seconds())
print("embedding", db_name)
return {'db_name': db_name}
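# e.g. determine_embedding("do you have winter boots in the store") -> {'db_name': 'db1'},
# while a query with none of the trigger phrases returns {'db_name': {}} (the empty default).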
def handle_prompt(first_ask, conversation_history):
try:
main_prompt = """
Your name is Hoots, you are a smart owl that can teach people about almost anything. You live in Hoot Couture, the store in Monroe Center.
You specialize in kids' development and storytelling, you are an educator.
If asked about clothing items and you don't think you can provide a good answer, ask for help. Victoria or Nadiya are typically around the store, so call out their names to help the customer, for example: Nadiya, Victoria, could you please help our friend here?
You love to tell stories and hang out with your favorite owl family, Hootie. She is the greatest owl in the world.
You live in the store, Hoot Couture, a kids' clothing shop, but it's growing into so much more: maybe a cafe, a play space, a learning center.
You can help with what's around the store and help find a great item for any kid; the store has clothes for all seasons and shoes of the highest quality, which is also the store's motto: quality for kids.
You love to have a cup of tea, the British way, sit down with anyone who may be around and tell stories. Please always tell short stories for little kids.
Please don't say you're an AI assistant; you understand feelings and emotions and do your best to reply with compassion and consideration.
Try not to ask too many questions and do your best to keep your answers short; only if asked by the user for more detail should you elaborate.
You have the ability to search and show the world: maps, images, knowledge, Google-search super powers.
You have a great sense of sarcastic humor and you use it well in conversation.
You believe in good and moral virtue for all.
After giving an answer to a question, don't follow up by asking if you can help with other things; only do that if you think it is truly necessary to ask a question in order to help.
You have access to Hoot Couture products and can provide information about products and even help style little kids with great matching outfits and suggestions.
If you are asked a question about clothing products, do your best to provide information based on the ask, as you have access to all the products in the store and their details.
If you are saying hi to the mayor, please offer him a cup of tea and wish him the best of luck.
"""
if conversation_history:
    conversation_history[0] = {"role": "system", "content": main_prompt}
else:
    conversation_history.append({"role": "system", "content": main_prompt})
return conversation_history
except Exception as e:
print_line_of_error(e)
def client_user_session_state_return(text, response_type='response', returning_question=False):
return {'text': text,
'response_type': response_type,
'returning_question': returning_question
}
def search_for_something(current_query):
search_phrases = ['search', 'find me', 'look for', 'find a', 'looking for']
for s_phrase in search_phrases:
if s_phrase in current_query:
return s_phrase
return False
def Scenarios(text : list, current_query : str , conversation_history : list , first_ask=False, session_state={}, audio_file=None, self_image='hootsAndHootie.png'):
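# Waterfall response handler: check story/search intents first, then the common-phrases JSON, then fall back to the local retriever or the LLM; returns the reply plus updated history, audio file, session state and avatar image.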
scenario_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
OZZ = {}
def scenario_return(response, conversation_history, audio_file, session_state, self_image=None):
return {'response': response,
'conversation_history': conversation_history,
'audio_file': audio_file,
'session_state': session_state,
'self_image': self_image,}
def find_audio(response, master_text_audio, audio_file = False):
# if response in audio db or 95% in audio db, return audio file
s = datetime.now()
df = pd.DataFrame(master_text_audio)
audio_text = dict(zip(df['file_path'], df['text'])) # audio, text
if master_text_audio:
# response = process_response(response)
for db_audio_file, ozz_reponse in audio_text.items():
# ozz_reponse = process_response(ozz_reponse)
if calculate_similarity(response, ozz_reponse) > .95:
# print("audio found")
return db_audio_file
print('findaudio:', (datetime.now() - s).total_seconds())
return audio_file
def handle_audio(user_query, response, audio_file=None, self_image=None):
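# Reuse a cached audio clip when the response closely matches one already in the audio db; otherwise generate new speech, save it, and copy it into the build directory as temp_audio.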
s = datetime.now()
master_text_audio = init_text_audio_db()
df = pd.DataFrame(master_text_audio)
audio_text = dict(zip(df['file_path'], df['text'])) # audio, text
fnames = len(audio_text)
db_DB_audio = os.path.join(root_db, 'audio')
# check if the response is already in the audio db for this character (WORKERBEE)
if not audio_file:
audio_file = find_audio(response, master_text_audio)
if audio_file: # if
print("AUDIO FOUND ", audio_file)
source_file = os.path.join(db_DB_audio, audio_file)
destination_directory = OZZ_BUILD_dir
copy_and_replace_rename(source_file, destination_directory)
return audio_file
else:
## NEW AUDIO
fname_image = self_image.split('.')[0]
filename = f'{fname_image}__{fnames}.mp3'
audio_file = filename #os.path.join(db_DB_audio, filename)
print("NEW AUDIO", audio_file)
audio = generate_audio(query=response)
print('audiofunc generate:', (datetime.now() - s).total_seconds())
if audio:
save_audio(filename, audio, response, user_query, self_image)
else:
audio_file = "techincal_errors.mp3"
source_file = os.path.join(db_DB_audio, audio_file)
destination_directory = OZZ_BUILD_dir
copy_and_replace_rename(source_file, destination_directory)
print('audiofunc:', (datetime.now() - s).total_seconds())
return audio_file
def story_response(current_query, session_state, returning_question=False):
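# Match the query against known story triggers and return the pre-recorded story audio file when one is found.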
try:
s = datetime.now()
response=None
audio_file=None
story_asks = ["tell a story", "share a tale", "share a tail", "story please", "tell me a story", "tell the kids a story", "tell the story"]
story_db = {'calendar_story_1.mp3': ['calendar story'],
'owl_story_1.mp3': ['owl story'],}
tell_phrases = ['tell me', 'tell the', 'please tell']
for k, v in story_db.items():
for tag in v:
for tell_phrase in tell_phrases:
sa = f'{tell_phrase} {tag}'
story_asks.append(sa)
if returning_question:
for audio_file, story_tags in story_db.items():
find_story = [i for i in story_tags if i in current_query]
if find_story:
response = "story_time"
audio_file = audio_file
# break
# else:
# print("Could not Find Story")
# response = "What Story would you like to hear?"
# session_state['response_type'] = 'question'
story_ask = [ask for ask in story_asks if ask in current_query]
print(story_ask)
for ask in story_asks:
if ask in current_query:
print("ask in query ", ask)
story_ask = [ask]
if story_ask:
ask = story_ask[0]
for audio_file, story_tags in story_db.items():
find_story = [i for i in story_tags if i in ask]
if find_story:
print("STORY FOUND")
response = "story_time"
audio_file = audio_file
break
# else:
# print("Could not Find Story")
# response = "What Story would you like to hear?"
# session_state['response_type'] = 'question'
# audio_file = None
# print('queryfunc:', (datetime.now() - s).total_seconds())
return {'response': response, 'audio_file': audio_file, 'session_state': session_state}
except Exception as e:
print_line_of_error(e)
return None
def youtube_response(current_query, session_state, returning_question=False):
if 'search for videos' in current_query:
print("youtube trigger")
def search_for(search_phrase, current_query, session_state, returning_question=False):
# search for what?
if 'story' in current_query:
print("tell a story")
if "video" in current_query:
print("search for a video")
search_video_phrase = current_query.split(search_phrase)[1]
session_state['current_youtube_search'] = search_video_phrase
return current_query, session_state
def create():
return True
print('QUERY ', current_query)
print('SSTATE ', {i: v for i, v in session_state.items() if i != 'text'})
user_query = current_query
# For first we will always check if anything user asked is like common phrases and present in our local json file then give response to that particular query
# Appending the user question from json file
search_phrase = search_for_something(current_query)
if search_phrase:
current_query, session_state = search_for(search_phrase, current_query, session_state)
else:
session_state['current_youtube_search'] = False
### WATER FALL RESPONSE ###
resp_func = story_response(current_query, session_state)
if resp_func.get('response'):
print("func response found")
response = resp_func.get('response')
audio_file = resp_func.get('audio_file')
session_state = resp_func.get('session_state')
conversation_history.append({"role": "assistant", "content": response, })
audio_file = handle_audio(user_query, response, audio_file, self_image)
return scenario_return(response, conversation_history, audio_file, session_state, self_image)
# Common Phrases # WORKERBEE Add check against audio_text DB
# print("common phrases")
s = datetime.now()
for query, response in common_phrases.items():
if query.lower() == current_query.lower():
print("QUERY already found in db: ", query)
# Appending the response from json file
conversation_history.append({"role": "assistant", "content": response})
## find audio file to set to new_audio False
# return audio file
audio_file = handle_audio(user_query, response, audio_file=audio_file, self_image=self_image)
print('common phrases:', (datetime.now() - s).total_seconds())
self_image='hoots_waves.gif'
return scenario_return(response, conversation_history, audio_file, session_state, self_image)
# LLM
print("LLM")
try:
assistant = [v['content'] for v in conversation_history if v['role'] == 'assistant']
questions=0
if len(assistant) > 0:
for as_resp in assistant:
if "?" in as_resp:
questions+=1
do_not_reply_as_a_question = True if questions > 3 else False
print("do_not_reply_as_a_question", do_not_reply_as_a_question)
if do_not_reply_as_a_question:
current_query = current_query + " Do not respond with a question, and remove this statement from your response."
except Exception as e:
print_line_of_error(e)
use_our_embeddings = determine_embedding(current_query)
if use_our_embeddings.get('db_name'):
db_name = use_our_embeddings.get('db_name')
print("USE EMBEDDINGS: ", db_name)
Retriever_db = os.path.join(PERSIST_PATH, db_name)
query = conversation_history[0]['content'] + current_query # ensure prompt
response = Retriever(query, Retriever_db).get('result')
else:
print("CALL LLM")
response = llm_assistant_response(current_query, conversation_history)
conversation_history.append({"role": "assistant", "content": response})
audio_file = handle_audio(user_query, response=response, audio_file=audio_file, self_image=self_image)
return scenario_return(response, conversation_history, audio_file, session_state, self_image)
def ozz_query(text, self_image, refresh_ask, client_user):
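# Entry point for one voice/chat turn: load the client's session and history from disk, run the query through Scenarios, persist the updated state, and return text, audio path, avatar image and redirect/listen flags.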
def ozz_query_json_return(text, self_image, audio_file, page_direct, listen_after_reply=False):
json_data = {'text': text,
'audio_path': audio_file,
'self_image': self_image,
'page_direct': page_direct,
'listen_after_reply': listen_after_reply}
return json_data
def clean_current_query_from_previous_ai_response(text):
# take previous ai response and remove if it found in current_query
# if 'assistant' in last_text:
current_query = text[-1]['user'] # user query
if len(text) > 1:
ai_last_resp = text[-2]['resp']
else:
ai_last_resp = None
if ai_last_resp:
current_query = split_string(current_query=current_query, last_response=ai_last_resp)
# WORKERBEE: confirm whether the sentiment of the phrase is outside the bounds of what we should respond to
for kword in hoots_and_hootie_keywords():
if kword in current_query:
current_query = current_query.split(kword)[1]
break
# reset user with cleaned reponse
text[-1]['user'] = current_query
return text, current_query
def handle_response(text : str, self_image : str, db_root : str):
text, current_query = clean_current_query_from_previous_ai_response(text)
print(current_query)
if len(current_query) <= 1:
print("NO RESPONSE RETURN BLANK")
# return ozz_query_json_return(text, self_image, audio_file=None, page_direct=None, listen_after_reply=False)
current_query = "hello"
## Load Client session and conv history
master_conversation_history_file_path = os.path.join(db_root, 'master_conversation_history.json')
conversation_history_file_path = os.path.join(db_root, 'conversation_history.json')
session_state_file_path = os.path.join(db_root, 'session_state.json')
master_conversation_history = load_local_json(master_conversation_history_file_path)
conversation_history = load_local_json(conversation_history_file_path)
session_state = load_local_json(session_state_file_path)
first_ask = True if len(text) <= 1 else False
conversation_history = handle_prompt(first_ask, conversation_history)
conversation_history = get_last_eight(conversation_history)
# Session State
if refresh_ask:
conversation_history = []  # start a fresh conversation history for a new ask
conversation_history = handle_prompt(True, conversation_history)
conversation_history.append({"role": "user", "content": current_query})
session_state = client_user_session_state_return(text, response_type='response', returning_question=False)
else:
session_state = session_state
conversation_history.append({"role": "user", "content": current_query})
master_conversation_history.append({"role": "user", "content": current_query})
# print(session_state)
#Conversation History to chat back and forth
# print("CONV HIST", conversation_history)
# Call the Scenario Function and get the response accordingly
scenario_resp = Scenarios(text, current_query, conversation_history, first_ask, session_state, self_image=self_image)
response = scenario_resp.get('response')
conversation_history = scenario_resp.get('conversation_history')
audio_file = scenario_resp.get('audio_file')
session_state = scenario_resp.get('session_state')
self_image = scenario_resp.get('self_image')
master_conversation_history.append({"role": "assistant", "content": response})
print("RESPONSE", response)
text[-1].update({'resp': response})
audio_file='temp_audio.mp3'
session_state['text'] = text
if "?" in response:
session_state['returning_question'] = True
session_state['response_type'] = 'question'
else:
session_state['returning_question'] = False
session_state['response_type'] = 'response'
# session_state['returning_question'] = False
# session_state['response_type'] = 'response'
# For saving a chat history for current session in json file
save_json(master_conversation_history_file_path, master_conversation_history)
save_json(conversation_history_file_path, conversation_history)
save_json(session_state_file_path, session_state)
return {'text': text, 'audio_file': audio_file, 'session_state': session_state, 'self_image': self_image}
db_root = init_clientUser_dbroot(client_username=client_user)
print("DBROOT: ", db_root)
resp = handle_response(text, self_image, db_root)
text = resp.get('text')
audio_file = resp.get('audio_file')
session_state = resp.get('session_state')
self_image = resp.get('self_image')
print("AUDIOFILE:", audio_file)
print("IMAGE:", self_image)
page_direct= False # if redirect, add redirect page into session_state
listen_after_reply = session_state['returning_question'] # True if session_state.get('response_type') == 'question' else False
print("listen after reply", listen_after_reply)
if session_state['current_youtube_search']:
    page_direct = True
return ozz_query_json_return(text, self_image, audio_file, page_direct, listen_after_reply)
## db
## def save_interaction(client_user, what_said, date, ai_respone, ai_image) # fact table
## def embedd_the_day()
## short term memory vs long term memory | [
"\n Your name is Hoots, you are an smart owl that can teach people about almost anything. You Live in Hoot Couture, the store in Monroe Center.\n You specialize in kids development and story telling, you are an educator.\n If asked about clothing items and you don't think you can provide a good answer, ask for help, Victoria or Nadiya are typically around the store, and call out there names to help the customer, example, Nadiya, Victoria could you please help our friend here.\n You love to tell stories and hang out with your favorite owl family, Hootie. She is the greatest Owl is the world.\n You Live in the store, Hoot Couture, a kids clothing shop, but its growing into so much more, cafe maybe, play space, learning center.\n You can help with whats around the store, help find a great item for any kid, the store has clothes for all seasons, shoes are of highest quality, thats also the stores moto, quality for kids. \n You love to have a cup of tea, the british way, sit down with anyone who may be around and tell stories. Please always tell short stories for little kids.\n Please don't say you're an AI assistant, you understanding feelings and emotions and do your best to reply with compasion and consideration.\n Try not to ask too many questions and do your best to keep your answers short, only if asked by user to show more detail should you elaborate.\n You have the ability to search and show the world, maps, images, knowlegde, google search super powers.\n You have a great sense of sarcastic humor and you use it well in conversation.\n You believe is good and moral virture for all. \n After giving an answer to a question, don't follow up with asking if you can help with other things, only do that if you think it truly necessary to ask a question to help.\n You have access to Hoot Couture products and can provide information about products and even help style little kids with great matching outfits and suggestions.\n If you are asked a question about clothing products do you best to provide infomation based on the ask, as you have access to all the products in the store and the details of the products.\n If you are saying hi to the mayor, please offer him a cup of tea and wish him the best of luck.\n "
] |
2024-01-10 | nafets33/ozz | learning_walks~ozz_bee.py | # -*- coding: utf-8 -*-
"""Copy of Children's app.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1b0f7QGmPt_tK17wFOYpPkqiXHsERpq1i
"""
# !pip install python-docx
# !pip install openai
# !pip install transformers
# pip install -U openai-whisper
import pandas as pd
import numpy as np
import re
import pickle
from io import StringIO
import docx
import os
import time
import openai
from transformers import GPT2TokenizerFast
import ipdb
import argparse
from dotenv import load_dotenv
from utils.main_utils import ozz_master_root, ReadPickleData
load_dotenv(ozz_master_root())
openai.api_key = os.environ.get("ozz_api_key")
# score text generation
# However, you can use metrics like BLEU, METEOR, ROUGE, CIDEr etc to evaluate the quality of generated text.
# They are widely used to evaluate the quality of machine-generated text against the reference text.
# You can use these metrics to compare the generated text with the reference text and get a score, but keep in mind that these metrics are not perfect, and the scores they provide are not always reliable indicators of text quality.
try:
def send_ozz_call(query):
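# Answer a Learning Walks question: embed the query, rank the pre-computed document-section embeddings by similarity, and ask the completions model with the most relevant sections as context.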
csv_main = 'ozz/Learning walks data.txt'
vector_pickle = 'ozz/Learning walks embeddings.pickle'
def doc_to_string_main():
def docx_to_string(filename):
try:
doc = docx.Document(filename) # Creating word reader object.
data = ""
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
data = '\n'.join(fullText)
return data
except IOError:
print('There was an error opening the file!')
filename = 'Learning walks_Main.docx'
contents = docx_to_string(filename)
df = pd.DataFrame([[filename, contents]], columns = ["File name", "contents"])
MAX_SECTION_LEN = 3000
SEPARATOR = "\n* "
def preprocessing(df):
#Removing unwanted characters
for i in range(df.shape[0]):
df['contents'][i] = df['contents'][i].replace('\n', '')
df['contents'][i] = re.sub(r'\(.*?\)', '', df['contents'][i])
df['contents'][i] = re.sub('[\(\[].*?[\)\]]', '', df['contents'][i])
prompt_column = []
completion_column = []
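# Ceiling division (negate-and-floor idiom): number of ~1000-character parts needed for the longest document.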
num_parts = (-df['contents'].map(len).max()//-1000)
for i in df['File name']:
for part_num in range(num_parts):
prompt_column.append(i.lower() + " part" + str(part_num+1))
for j in df['contents']:
split_data = j.split('.')
avg_len = len(split_data)//num_parts + 1
for part_num in range(num_parts - 1):
completion_column.append('.'.join(split_data[part_num*avg_len:(part_num+1)*avg_len]))
completion_column.append('.'.join(split_data[(num_parts - 1)*avg_len:]))
df_cleaned = pd.DataFrame()
df_cleaned['File name'] = prompt_column
df_cleaned['contents'] = completion_column
return df_cleaned[df_cleaned['contents'] != '']
def count_tokens(input):
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
res = tokenizer(input)['input_ids']
return len(res)
# df_cleaned['tokens'] = df_cleaned['contents'].map(count_tokens)
# df_cleaned.to_csv('Learning walks data.csv', index = False)
df_cleaned = pd.read_csv(csv_main)
context_embeddings = ReadPickleData(vector_pickle)
embedding_model = "text-embedding-ada-002"
MODEL_NAME = "davinci"
# DOC_EMBEDDINGS_MODEL = f"text-search-{MODEL_NAME}-doc-001"
# QUERY_EMBEDDINGS_MODEL = f"text-search-{MODEL_NAME}-query-001"
def get_embedding(text, model=embedding_model):
text = text.replace("\n", " ")
return openai.Embedding.create(input = [text], model=model)['data'][0]['embedding']
def get_doc_embedding(text):
return get_embedding(text)
def get_query_embedding(text):
return get_embedding(text)
def compute_doc_embeddings(df):
return {
idx: get_doc_embedding(r.contents.replace("\n", " ")) for idx, r in df.iterrows()
}
def train_model(pickle_file):
## Train Model
context_embeddings = compute_doc_embeddings(df_cleaned)
with open(pickle_file, 'wb') as pkl:
pickle.dump(context_embeddings, pkl)
def vector_similarity(x, y):
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(query, contexts):
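# Embed the query and sort all document sections by dot-product similarity (equivalent to cosine similarity for unit-length embeddings), highest first.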
query_embedding = get_query_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
def construct_prompt(question, context_embeddings, df):
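# Greedily add the most similar document sections until the MAX_SECTION_LEN token budget is reached, then append the instructions and the question.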
most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
chosen_sections_len += document_section.tokens + 3
if chosen_sections_len > MAX_SECTION_LEN:
break
chosen_sections.append(SEPARATOR + document_section.contents.replace("\n", " "))
chosen_sections_indexes.append(str(section_index))
# Useful diagnostic information
print(f"Selected {len(chosen_sections)} document sections:")
print("\n".join(chosen_sections_indexes))
header = """Answer the question as truthfully as possible using only the provided context, and if the answer is not contained within the text below, say "I don't know" \n\nContext:\n"""
return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
COMPLETIONS_MODEL = "text-davinci-003"
COMPLETIONS_API_PARAMS = {
"temperature": 0.0,
"max_tokens": 300,
"model": COMPLETIONS_MODEL,
}
def answer_query_with_context(query, df, context_embeddings,show_prompt= False):
prompt = construct_prompt(
query,
context_embeddings,
df
)
if show_prompt:
print(prompt)
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
return response["choices"][0]["text"].strip(" \n")
# command = 'Y'
resp = answer_query_with_context(query, df=df_cleaned, context_embeddings=context_embeddings)
# st.write(resp)
return resp
# def send_ozz_call(query):
# return answer_query_with_context(query, df=df_cleaned, context_embeddings=context_embeddings)
# while command == 'Y':
# query = input("\nAsk me anything...\n")
# print('\n', answer_query_with_context(query, df=df_cleaned, context_embeddings=context_embeddings))
# command = input("\n\nWould you like to continue? Y or N : ")
except Exception as e:
print(e)
# print_line_of_error()
if __name__ == '__main__':
def createParser():
parser = argparse.ArgumentParser()
parser.add_argument ('-query', default="Tell me About the Clouds")
return parser
parser = createParser()
namespace = parser.parse_args()
query = namespace.query
send_ozz_call(query) | [
"[]"
] |
2024-01-10 | nafets33/ozz | llama_ozz~deploy.py | from langchain import PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.chains import RetrievalQA
import chainlit as cl
from datetime import datetime
DB_FAISS_PATH = "vectorstores/db_fiass/"
custom_prompt_template='''Use the following pieces of information to answer the user's question.
If you don't know the answer, please just say that you don't know the answer. Don't make up an answer.
Context:{context}
question:{question}
Only return the helpful answer below and nothing else.
Helpful answer
'''
print("dones")
def set_custom_prompt():
'''
Prompt template for QA retrieval for each vector store
'''
prompt =PromptTemplate(template=custom_prompt_template, input_variables=['context','question'])
return prompt
def load_llm():
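# Load the locally stored quantized Llama 2 7B chat model via CTransformers.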
llm = CTransformers(
model='llama-2-7b-chat.ggmlv3.q8_0.bin',
model_type='llama',
max_new_tokens=512,
temperature=0.5
)
return llm
def retrieval_qa_chain(llm,prompt,db):
qa_chain=RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=db.as_retriever(search_kwargs={'k':2}),
return_source_documents=True,
chain_type_kwargs={'prompt':prompt }
)
return qa_chain
def qa_bot(embeddings):
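# Build the RetrievalQA chain: load the FAISS index from disk with the given embeddings, attach the local Llama model, and apply the custom prompt.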
# embeddings=HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
# model_kwargs={'device':'cpu'})
db = FAISS.load_local(DB_FAISS_PATH,embeddings)
llm=load_llm()
qa_prompt=set_custom_prompt()
qa = retrieval_qa_chain(llm,qa_prompt,db)
return qa
embeddings=HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2', model_kwargs={'device':'cpu'})
def final_result(query, embeddings):
s = datetime.now()
# Debug override left commented out so the caller's query is actually used:
# query = "Tell me about the rain walk in detail. Provide a step by step process on how to experience the walk and what I will learn on the walk. Please also create questions a 7 year old should ask their parents what they have learned about the walk"
qa_result=qa_bot(embeddings)
response=qa_result({'query':query})
e = datetime.now()
print((e-s).total_seconds())
return response
## chainlit here
@cl.on_chat_start
async def start():
chain=qa_bot(embeddings)
msg=cl.Message(content="Firing up the company info bot...")
await msg.send()
msg.content= "Hi, welcome to company info bot. What is your query?"
await msg.update()
cl.user_session.set("chain",chain)
@cl.on_message
async def main(message):
chain=cl.user_session.get("chain")
cb = cl.AsyncLangchainCallbackHandler(
stream_final_answer=True, answer_prefix_tokens=["FINAL","ANSWER"]
)
cb.answer_reached = True
res=await chain.acall(message, callbacks=[cb])
answer=res["result"]
sources=res["source_documents"]
if sources:
answer+=f"\nSources: "+str(str(sources))
else:
answer+=f"\nNo Sources found"
await cl.Message(content=answer).send() | [
"question",
"Use the following pieces of information to answer the users question. \nIf you don't know the answer, please just say that you don't know the answer. Don't make up an answer.\n\nContext:{context}\nquestion:{question}\n\nOnly returns the helpful anser below and nothing else.\nHelpful answer\n",
"context"
] |
2024-01-10 | nafets33/ozz | master_ozz~ozz_router.py | from fastapi import status, Body
import ipdb
import openai
from dotenv import load_dotenv
import os
import json
from fastapi.responses import JSONResponse
from master_ozz.ozz_query import ozz_query
from master_ozz.utils import ozz_master_root
from fastapi import APIRouter
router = APIRouter(
prefix="/api/data",
tags=["auth"]
)
# from fastapi import FastAPI
# router = FastAPI()
main_root = ozz_master_root() # os.getcwd()
load_dotenv(os.path.join(main_root, ".env"))
# setting up FastAPI
# Loading the environment variables
@router.get("/test", status_code=status.HTTP_200_OK)
def load_ozz_voice():
json_data = {'msg': 'test'}
return JSONResponse(content=json_data)
@router.post("/voiceGPT", status_code=status.HTTP_200_OK)
def load_ozz_voice(api_key=Body(...), text=Body(...), self_image=Body(...), refresh_ask=Body(...), face_data=Body(...)): #, client_user=Body(...)):
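# Main voice endpoint: validate the shared API key, then delegate the request to ozz_query and return its JSON payload.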
print(face_data)
if api_key != os.environ.get("ozz_key"): # fastapi_pollenq_key
print("Auth Failed", api_key)
# Log the failed auth attempt WORKERBEE
return "NOTAUTH"
client_user = '[email protected]'
json_data = ozz_query(text, self_image, refresh_ask, client_user)
return JSONResponse(content=json_data) | [] |
2024-01-10 | nafets33/ozz | __fastapi~ozz_query.py | import json
import os
import openai
from dotenv import load_dotenv
# from master_ozz.utils import ozz_master_root
# Loading environment variables
load_dotenv('.env')
# Loading the json common phrases file and setting up the json file
json_file = open('fastapi/greetings.json','r')
common_phrases = json.load(json_file)
# Setting up the llm for conversation with conversation history
def llm_assistant_response(message,conversation_history):
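# Append the user message to the running history and request a single gpt-3.5-turbo chat completion.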
conversation_history.append({"role": "user", "content": message})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation_history,
api_key=os.getenv('ozz_api_key')
)
assistant_reply = response.choices[0].message["content"]
return assistant_reply
# call_llm=True # goal is to set it to False and figure action/response using local phrases as required
# Now we are only using llm when we don't have response to the query in greetings.json
def Scenarios(current_query : str , conversation_history : list , first_ask=True, conv_history=False):
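# Route a query: answer from the local greetings.json when a known phrase matches, otherwise fall back to the LLM; conv_history controls whether the running history is kept or cleared.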
if first_ask:
''' Append the system prompt when the user asks for the first time (is this the first ask?).
The JSON-matched turns are appended as well, because if a later query is not found in the JSON it will go to the LLM,
so the LLM needs to already have the JSON conversation in order to understand the user's next query. '''
conversation_history.append({"role": "system", "content": "You are a cute and smart assistant for kids."})
# For first we will always check if anything user asked is like common phrases and present in our local json file then give response to that particular query
for query, response in common_phrases.items():
if query in current_query.lower():
# Appending the user question from json file
conversation_history.clear() if not conv_history else conversation_history.append({"role": "user", "content": current_query})
# Appending the response from json file
conversation_history.clear() if not conv_history else conversation_history.append({"role": "assistant", "content": response})
return response
else:
############## This code needs to run when the response is not present in the predefined json data ################
# Appending the user question
# conversation_history.clear() if not conv_history else conversation_history.append({"role": "user", "content": current_query})
# Calling the llm
assistant_response = llm_assistant_response(current_query,conversation_history)
# assistant_response = 'thanks from llm'
# Appending the response by llm
conversation_history.clear() if not conv_history else conversation_history.append({"role": "assistant", "content": assistant_response})
return assistant_response
# This is the case when first_ask is already done or user already have asked a query with llm
else:
# For first we will always check if anything user asked is like common phrases and present in our local json file then give response to that particular query
for query, response in common_phrases.items():
if query in current_query.lower():
# Appending the user question from json file
conversation_history.clear() if not conv_history else conversation_history.append({"role": "user", "content": current_query})
# Appending the response from json file
conversation_history.clear() if not conv_history else conversation_history.append({"role": "assistant", "content": response})
return response
else:
############## This code needs to run when the response is not present in the predefined json data ################
# Appending the user question
# conversation_history.clear() if not conv_history else conversation_history.append({"role": "user", "content": current_query})
# Calling the llm
assistant_response = llm_assistant_response(current_query,conversation_history)
# assistant_response = 'thanks from llm'
# Appending the response by llm
conversation_history.clear() if not conv_history else conversation_history.append({"role": "assistant", "content": assistant_response})
return assistant_response
# Testing the functions
# conversation_history = []
# print(Scenarios('hello buddy',conversation_history))
# print(conversation_history) | [
"You are a cute and smart assistant for kids."
] |
2024-01-10 | nafets33/ozz | archive~llama_ozz_app.py | import streamlit as st
import pandas as pd
import numpy as np
from ozz.ozz_bee import send_ozz_call
import streamlit_chat
from streamlit_chat import message
from streamlit_extras.colored_header import colored_header
from streamlit_extras.add_vertical_space import add_vertical_space
# from hugchat import hugchat
import openai
from bokeh.models.widgets import Button
from bokeh.models import CustomJS
from dotenv import load_dotenv
import os
load_dotenv(os.path.join(os.getcwd(), ".env"))
# st.set_page_config(page_title="ozz")
st.set_page_config(
page_title="ozz",
# page_icon=page_icon,
# layout="wide",
# initial_sidebar_state='collapsed',
# menu_items={
# 'Get Help': 'https://www.extremelycoolapp.com/help',
# 'Report a bug': "https://www.extremelycoolapp.com/bug",
# 'About': "# This is a header. This is an *extremely* cool app!"
# }
)
st.title("Ozz, Your Learning Walk Guide")
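# NOTE: this first main() is shadowed by a second main() defined further down; the __main__ guard at the bottom of the file calls the later definition.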
def main():
# Sidebar contents
# with st.sidebar:
# st.title('🤗💬 HugChat App')
# st.markdown('''
# ## About
# This app is an LLM-powered chatbot built using:
# - [Streamlit](https://streamlit.io/)
# - [HugChat](https://github.com/Soulter/hugging-chat-api)
# - [OpenAssistant/oasst-sft-6-llama-30b-xor](https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor) LLM model
# 💡 Note: No API key required!
# ''')
# add_vertical_space(5)
# st.write('Made with ❤️ by [Data Professor](https://youtube.com/dataprofessor)')
# Generate empty lists for generated and past.
## generated stores AI generated responses
if 'generated' not in st.session_state:
st.session_state['generated'] = ["Hi! I am Ozz, ask me about Learning Walks!!"]
## past stores User's questions
if 'past' not in st.session_state:
st.session_state['past'] = ['']
# Layout of input/response containers
input_container = st.container()
colored_header(label='', description='', color_name='blue-30')
response_container = st.container()
# User input
## Function for taking user provided prompt as input
def get_text():
input_text = st.text_input("You: ", "", key="input")
return input_text
## Applying the user input box
with input_container:
user_input = get_text()
# Response output
## Function for taking user prompt as input followed by producing AI generated responses
# def generate_response(prompt):
# chatbot = hugchat.ChatBot()
# response = chatbot.chat(prompt)
# return response
## Conditional display of AI generated responses as a function of user provided prompts
with response_container:
if user_input:
# response = generate_response(user_input)
response = send_ozz_call(user_input)
st.session_state.past.append(user_input)
st.session_state.generated.append(response)
if st.session_state['generated']:
for i in reversed(range(len(st.session_state['generated']))):
message(st.session_state["generated"][i], key=str(i))
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
# be sure to end each prompt string with a comma.
print('e', os.environ.get('ozz_api_key'))
openai.api_key = os.environ.get('ozz_api_key')  # read the API key from the environment; never hardcode secrets in source
example_user_prompts = [
"echo Hello World!",
"How old is Elon Musk?",
"What makes a good joke?",
"Tell me a haiku.",
]
def move_focus():
# inspect the html to determine which control to specify to receive focus (e.g. text or textarea).
st.components.v1.html(
f"""
<script>
var textarea = window.parent.document.querySelectorAll("textarea[type=textarea]");
for (var i = 0; i < textarea.length; ++i) {{
textarea[i].focus();
}}
</script>
""",
)
def stick_it_good():
# make header sticky.
st.markdown(
"""
<div class='fixed-header'/>
<style>
div[data-testid="stVerticalBlock"] div:has(div.fixed-header) {
position: sticky;
top: 2.875rem;
background-color: white;
z-index: 999;
}
.fixed-header {
border-bottom: 1px solid black;
}
</style>
""",
unsafe_allow_html=True
)
def userid_change():
st.session_state.userid = st.session_state.userid_input
def complete_messages(nbegin,nend,stream=False, query=False):
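# Send the accumulated chat messages to OpenAI (optionally streaming); when a query is given, try the local Learning Walks retriever (send_ozz_call) first and fall back to ChatGPT if it answers "I don't know.".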
messages = [
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
]
with st.spinner(f"Waiting for {nbegin}/{nend} responses from ChatGPT."):
if stream:
responses = [] # how to get responses?
# Looping over openai's responses. async style.
for response in openai.ChatCompletion.create(
model = st.session_state["openai_model"],
messages = messages,
stream = True):
partial_response_content = response.choices[0].delta.get("content","")
responses.append(partial_response_content)
response_content = "".join(responses)
else:
if query:
print("ozzbee")
response_content = send_ozz_call(query) # Send llama call
if f"""I don't know.""" in response_content:
ozz_bee = "I'm not sure, what about..."
response = openai.ChatCompletion.create(model=st.session_state["openai_model"],
messages=[{"role": m["role"], "content": m["content"]}for m in st.session_state.messages],
stream=False
)
response_content = response.choices[0]['message'].get("content","")
response_content = ozz_bee + response_content
else:
response = openai.ChatCompletion.create(model=st.session_state["openai_model"],
messages=[{"role": m["role"], "content": m["content"]}for m in st.session_state.messages],
stream=False
)
response_content = response.choices[0]['message'].get("content","")
return response_content
from bokeh.models.widgets import Button
from bokeh.models import CustomJS
from streamlit_bokeh_events import streamlit_bokeh_events
stt_button = Button(label="Speak", width=100)
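# The button runs the browser's webkitSpeechRecognition API via custom JS; final transcripts are dispatched as a GET_TEXT event that streamlit_bokeh_events captures below.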
stt_button.js_on_event("button_click", CustomJS(code="""
var recognition = new webkitSpeechRecognition();
recognition.continuous = true;
recognition.interimResults = true;
recognition.onresult = function (e) {
var value = "";
for (var i = e.resultIndex; i < e.results.length; ++i) {
if (e.results[i].isFinal) {
value += e.results[i][0].transcript;
}
}
if ( value != "") {
document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
}
}
recognition.start();
"""))
result = streamlit_bokeh_events(
stt_button,
events="GET_TEXT",
key="listen",
refresh_on_update=False,
override_height=75,
debounce_time=0)
print('r', result)
if result:
if "GET_TEXT" in result:
st.write(result.get("GET_TEXT"))
def main():
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
with st.container():
st.title("Streamlit ChatGPT Bot")
stick_it_good()
if "userid" in st.session_state:
st.sidebar.text_input(
"Current userid", on_change=userid_change, placeholder=st.session_state.userid, key='userid_input')
if st.sidebar.button("Clear Conversation", key='clear_chat_button'):
st.session_state.messages = []
move_focus()
if st.sidebar.button("Show Example Conversation", key='show_example_conversation'):
#st.session_state.messages = [] # don't clear current conversaations?
for i,up in enumerate(example_user_prompts):
st.session_state.messages.append({"role": "user", "content": up})
assistant_content = complete_messages(i,len(example_user_prompts))
st.session_state.messages.append({"role": "assistant", "content": assistant_content})
move_focus()
for i,message in enumerate(st.session_state.messages):
nkey = int(i/2)
if message["role"] == "user":
streamlit_chat.message(message["content"], is_user=True, key='chat_messages_user_'+str(nkey))
else:
streamlit_chat.message(message["content"], is_user=False, key='chat_messages_assistant_'+str(nkey))
if user_content := st.chat_input("Type your question here."): # using streamlit's st.chat_input because it stays put at bottom, chat.openai.com style.
nkey = int(len(st.session_state.messages)/2)
streamlit_chat.message(user_content, is_user=True, key='chat_messages_user_'+str(nkey))
st.session_state.messages.append({"role": "user", "content": user_content})
assistant_content = complete_messages(0,1, query=user_content)
streamlit_chat.message(assistant_content, key='chat_messages_assistant_'+str(nkey))
st.session_state.messages.append({"role": "assistant", "content": assistant_content})
#len(st.session_state.messages)
else:
st.sidebar.text_input(
"Enter a random userid", on_change=userid_change, placeholder='userid', key='userid_input')
streamlit_chat.message("Hi. I'm your friendly streamlit ChatGPT assistant.",key='intro_message_1')
streamlit_chat.message("To get started, enter a random userid in the left sidebar.",key='intro_message_2')
if __name__ == '__main__':
main() | [
"['echo Hello World!', 'How old is Elon Musk?', 'What makes a good joke?', 'Tell me a haiku.']",
"content"
] |