date_collected (stringclasses, 1 value) | repo_name (stringlengths, 6-116) | file_name (stringlengths, 2-220) | file_contents (stringlengths, 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | blank-manash/mailer | flow_chart.py | from config import GPTModels, get_chat_client, get_logger
import json
logger = get_logger()
def create_prompt(text: str) -> str:
return """Create a flow chart based on the following information and provide the output in JSON format compatible with GoJS, including nodes and links. The flow chart should represent key infomation in the text:
{description}
Return data in the following JSON Structure:
{{ "nodes": [...], "links": [...] }}
The flow chart should optimize learning. Keep it simple, clear and detailed. Keep colors light.""".format(
description=text
)
gpt = get_chat_client(model=GPTModels.GPT4.value, response_type="json_object")
def flow_reponse(text):
prompt = create_prompt(text)
answer = str(gpt(prompt)).strip()
try:
response = json.loads(answer)
logger.info("Fetched Response from OPENAI")
if (not response["nodes"]) or (not response["links"]):
return {"data": "Invalid Data From OpenAI", "success": False}
return {"data": response, "success": True}
except Exception as e:
return {"data": f"Error Parsing Json: {str(e)}", "success": False}
| [] |
2024-01-10 | AleksZhelo/pytorch-rl | utils~factory.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from core.envs.gym import GymEnv
from core.envs.atari_ram import AtariRamEnv
from core.envs.atari import AtariEnv
from core.envs.lab import LabEnv
from core.minisim.minisim_env import MinisimEnv
EnvDict = {"gym": GymEnv, # classic control games from openai w/ low-level input
"atari-ram": AtariRamEnv, # atari integrations from openai, with low-level input
"atari": AtariEnv, # atari integrations from openai, with pixel-level input
"lab": LabEnv,
"minisim": MinisimEnv}
from core.models.empty import EmptyModel
from core.models.dqn_mlp import DQNMlpModel
from core.models.dqn_cnn import DQNCnnModel
from core.models.a3c_mlp_con import A3CMlpConModel
from core.models.a3c_cnn_dis import A3CCnnDisModel
from core.models.acer_mlp_dis import ACERMlpDisModel
from core.models.acer_cnn_dis import ACERCnnDisModel
from core.minisim.models.mini_narrowing import A3CMlpNarrowingMinisimModel
from core.minisim.models.mini_two_lstm_separate import A3CMlpDeeperSeparateHiddenMinisimModel
from core.minisim.models.mini_two_lstm_separate_target_to_lstm import A3CMlpDeeper2MinisimModel
from core.minisim.models.mini_two_lstm_shared import A3CMlpDeeperMinisimModel
from core.minisim.models.mini_two_lstm_separate_two_levels import A3CMlpDeeperSeparateHiddenTwoLevelsMinisimModel
from core.minisim.models.mini_wide import A3CMlpMinisimModel
from core.minisim.models.mini_no_lstm import A3CMlpNoLSTMMinisimModel
from core.minisim.models.mini_conv import A3CCnvMinisimModel
from core.minisim.models.mini_target_only import A3CTargetOnlyMinisimModel
from core.minisim.models.mini_conv_lstm import A3CCnvLSTMMinisimModel
ModelDict = {"empty": EmptyModel, # contains nothing, only should be used w/ EmptyAgent
"dqn-mlp": DQNMlpModel, # for dqn low-level input
"dqn-cnn": DQNCnnModel, # for dqn pixel-level input
"a3c-mlp-con": A3CMlpConModel, # for a3c low-level input (NOTE: continuous must end in "-con")
"a3c-cnn-dis": A3CCnnDisModel, # for a3c pixel-level input
"acer-mlp-dis": ACERMlpDisModel, # for acer low-level input
"acer-cnn-dis": ACERCnnDisModel, # for acer pixel-level input
"none": None,
"a3c-mlp-minisim": A3CMlpMinisimModel,
"a3c-mlp-minisim-narrowing": A3CMlpNarrowingMinisimModel,
"a3c-mlp-deeper": A3CMlpDeeperMinisimModel,
"a3c-mlp-deeper2": A3CMlpDeeper2MinisimModel,
"a3c-mlp-deeper-sep-hid": A3CMlpDeeperSeparateHiddenMinisimModel,
"a3c-mlp-deeper-sep-hid-two": A3CMlpDeeperSeparateHiddenTwoLevelsMinisimModel,
"a3c-mlp-no-lstm": A3CMlpNoLSTMMinisimModel,
"a3c-cnv": A3CCnvMinisimModel,
"a3c-target-only": A3CTargetOnlyMinisimModel,
"a3c-cnv-lstm": A3CCnvLSTMMinisimModel}
from core.memories.sequential import SequentialMemory
from core.memories.episode_parameter import EpisodeParameterMemory
from core.memories.episodic import EpisodicMemory
MemoryDict = {"sequential": SequentialMemory, # off-policy
"episode-parameter": EpisodeParameterMemory, # not in use right now
"episodic": EpisodicMemory, # on/off-policy
"none": None} # on-policy
from core.agents.empty import EmptyAgent
from core.agents.dqn import DQNAgent
from core.agents.a3c import A3CAgent
from core.agents.acer import ACERAgent
AgentDict = {"empty": EmptyAgent, # to test integration of new envs, contains only the most basic control loop
"dqn": DQNAgent, # dqn (w/ double dqn & dueling as options)
"a3c": A3CAgent, # a3c (multi-process, pure cpu version)
"acer": ACERAgent} # acer (multi-process, pure cpu version) | [] |
2024-01-10 | AI-ApeX-DeV/code_titans | diet_predictor.py | from flask import Flask, render_template, request
import pyrebase
from flask_cors import CORS
import openai
app = Flask(__name__)
openai.api_key = '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'
CORS(app)
# Define your Firebase configuration data
config = {
"apiKey": "AIzaSyAvYvSqBoQzCUDK2oloq79JhPJGTw1DIUk",
"authDomain": "dashboard-50078.firebaseapp.com",
"databaseURL": "https://dashboard-50078-default-rtdb.firebaseio.com",
"storageBucket": "dashboard-50078.appspot.com"
}
# Initialize Firebase
firebase = pyrebase.initialize_app(config)
# Get a reference to the database service
db = firebase.database()
user='shah'
@app.route('/')
def home():
return render_template('index.html')
@app.route('/diet', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
age = request.form.get('age')
gender = request.form.get('gender')
disease = request.form.get('disease')
calories = request.form.get('calories')
location=request.form.get('location')
# Generate the diet plan based on the received form data (replace with your actual logic)
diet_plan = generate_diet_plan(age, gender, disease, calories,location)
# Render the HTML template with the diet plan
return render_template('index.html', diet_plan=diet_plan)
else:
return render_template('index.html')
def generate_diet_plan(age, gender, disease, calories,location):
# Get the list of values under physical act data/kaushal
physical = db.child("physActData").child(user).get().val()
sleep = db.child("sleepData").child(user).get().val()
mood = db.child("moodData").child(user).get().val()
social = db.child("socIntData").child(user).get().val()
stress = db.child("stressData").child(user).get().val()
heart = db.child("heartRateData").child(user).get().val()
# Get the value of the last element of the list
physical = str(physical[-1])
sleep = str(sleep[-1])
mood = str(mood[-1])
social = str(social[-1])
stress = str(stress[-1])
heart = str(heart[-1])
age=str(age)
calories=str(calories)
message="sleep : " + sleep + ", heart rate : " + heart + ", mood : "+ mood+ ", physical activity level : " + physical+' calories : '+calories+', social: '+social + ", stress :" +stress+ ", age: "+ age+ ", gender: "+gender+ ', disease: '+disease+', location : '+location+", prepare a diet plan for me based on these mentioned details , give answer as if you are a doctor and a dietician , prepare diet plan along with calories and all like calories intake ,energy intake ,vitamin intake for breakfast,lunch dinner and snacks etc, also mention the total calorie,vitamins,energy,protien intake at braekfast,lunch and dinner, intotal ,at the end tell a quote regarding mental fitness , and a joke to relieve my stress and then tell the user to shift to a vegetarian diet with reasons. NOTE : ANSWER LIKE A DOCTOR AND A DIETICIAN "
if message:
try:
if len(message.split()) > 300:
raise ValueError("Input contains more than 300 words. Please try again.")
chat = openai.Completion.create(engine="text-davinci-003",prompt=message,max_tokens=3896,temperature=0.2)
except ValueError as e:
print(f"Error: {e}")
reply = chat.choices[0].text
response_message=f"{reply}"
return response_message
if __name__ == '__main__':
app.run(debug=True)
| [
"sleep : \" + sleep + \", heart rate : \" + heart + \", mood : \"+ mood+ \", physical activity level : \" + physical+' calories : '+calories+', social: '+social + \", stress :\" +stress+ \", age: \"+ age+ \", gender: \"+gender+ ', disease: '+disease+', location : '+location+\", prepare a diet plan for me based on these mentioned details , give answer as if you are a doctor and a dietician , prepare diet plan along with calories and all like calories intake ,energy intake ,vitamin intake for breakfast,lunch dinner and snacks etc, also mention the total calorie,vitamins,energy,protien intake at braekfast,lunch and dinner, intotal ,at the end tell a quote regarding mental fitness , and a joke to relieve my stress and then tell the user to shift to a vegetarian diet with reasons. NOTE : ANSWER LIKE A DOCTOR AND A DIETICIAN "
] |
2024-01-10 | sinhaGuild/llm-retreival-augmentation-streamlit | retreival~init_vector_db.py | import logging
import os
import pinecone
# load environment variable
from dotenv import load_dotenv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
load_dotenv()
# Initialize all required env variables
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or "OPENAI_API_KEY"
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY") or "PINECONE_API_KEY"
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT") or "PINECONE_ENVIRONMENT"
# PINECONE index name (knowledgebase)
PINECONE_INDEX_NAME = os.getenv("PINECONE_INDEX_NAME") or "PINECONE_INDEX_NAME"
def initialize_vector_store(
index_name=PINECONE_INDEX_NAME, model_name="text-embedding-ada-002"
):
# model_name = 'text-embedding-ada-002'
# index_name = 'sutton-barto-retrieval-augmentation'
text_field = "text"
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
# Initialize pinecone index
index = pinecone.Index(index_name)
# embedding model for input
embed = OpenAIEmbeddings(model=model_name, openai_api_key=OPENAI_API_KEY)
# Initialize vector store for querying
vectorstore = Pinecone(index, embed.embed_query, text_field)
logging.info(f"Index Found. Stats: {index.describe_index_stats()}")
return [vectorstore, index]
| [] |
2024-01-10 | sinhaGuild/llm-retreival-augmentation-streamlit | retreival~completions.py | import logging
import os
import pinecone
# load environment variable
from dotenv import load_dotenv
# For streaming
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import RetrievalQA, RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Pinecone
from .init_vector_db import initialize_vector_store
load_dotenv()
# Initialize all required env variables
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or "OPENAI_API_KEY"
class LLMCompletion:
def __init__(
self,
# vectorstore: Pinecone,
model_name="gpt-3.5-turbo-16k",
callbacks=[StreamingStdOutCallbackHandler()],
top_p=0.2,
):
# self.vectorstore = vectorstore
[self.vectorstore, self.index] = initialize_vector_store()
self.llm = ChatOpenAI(
model_name=model_name,
openai_api_key=OPENAI_API_KEY,
callbacks=callbacks,
temperature=1.0,
streaming=True,
max_tokens=500,
model_kwargs={"top_p": top_p},
)
self.qa = RetrievalQA.from_chain_type(
llm=self.llm, chain_type="stuff", retriever=self.vectorstore.as_retriever()
)
self.qa_with_source = RetrievalQAWithSourcesChain.from_chain_type(
llm=self.llm, chain_type="stuff", retriever=self.vectorstore.as_retriever()
)
def complete(self, query):
return self.qa.run(query)
def complete_with_source(self, query):
return self.qa_with_source(query)
| [] |
2024-01-10 | danikagupta/literalearn | pages_back~88_old_literalearn_hello.py | import streamlit as st
aaa="""
from audio_recorder_streamlit import audio_recorder
import os
from openai import OpenAI
import nltk
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from nltk.tokenize import word_tokenize
import pandas as pd
from google.cloud import speech
from google.oauth2 import service_account
import base64
import json
def transcribe_audio_whisper(file_path,language_iso):
client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
audio_file = open(file_path, "rb")
transcript = client.audio.transcriptions.create(
model="whisper-1",
file=audio_file,
language=language_iso,
response_format="text"
)
print(f"Transcript: {transcript}")
return transcript
def transcribe_audio(file_path,language_iso):
gcs_credentials = st.secrets["connections"]["gcs"]
credentials = service_account.Credentials.from_service_account_info(gcs_credentials)
client = speech.SpeechClient(credentials=credentials)
with open(file_path, 'rb') as audio_file:
content = audio_file.read()
# Configure the request with the language and audio file
audio = speech.RecognitionAudio(content=content)
config = speech.RecognitionConfig(
#encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
language_code=language_iso,
audio_channel_count=2,
)
# Send the request to Google's Speech-to-Text API
google_response = client.recognize(config=config, audio=audio)
response=""
for result in google_response.results:
response+=result.alternatives[0].transcript
print(f"Response: {google_response}")
print(f"Response.results: {google_response.results}")
st.sidebar.markdown(f"Response: {google_response}")
# Print the transcription of the first alternative of the first result
for result in google_response.results:
print(f"Result: {result}")
print(f"Result.alternatives: {result.alternatives}")
print(f"Result.alternatives[0]: {result.alternatives[0]}")
print("Transcription: {}".format(result.alternatives[0].transcript))
return response
def bleu(hypothesis, reference):
# Tokenize the hypothesis and reference strings.
hypothesis_tokens = word_tokenize(hypothesis)
reference_tokens = word_tokenize(reference)
print(f"Reference tokens: {reference_tokens}, hypothesis tokens: {hypothesis_tokens}, reference: {reference}, hypothesis: {hypothesis}")
st.markdown(f"Reference tokens: {reference_tokens}, hypothesis tokens: {hypothesis_tokens}, reference: {reference}, hypothesis: {hypothesis}")
# Calculate the BLEU score.
smoothie=SmoothingFunction().method1
bleu_score = sentence_bleu([reference_tokens], hypothesis_tokens,smoothing_function=smoothie)
return bleu_score
def function_print_similarity_score(str1: str, str2: str) -> str:
client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
functions = [{
"name": "print_similarity_score",
"description": "A function that prints the similarity score of two strings",
"parameters": {
"type": "object",
"properties": {
"similarity_score": {
"type": "integer",
"enum": [1,2,3,4,5,6,7,8,9,10],
"description": "The similarity score."
},
}, "required": ["similarity_score"],
}
}]
"""
llm_input=f"""
You are a language reviewer responsible for reviewing the similarity of two sentences.
Please note that the specifc words and word-order are important, not just the meaning.
On a scale of 1-10, with 10 being the most similar, how similar are these: "{str1}", and "{str2}".
"""
aaa="""
messages = [{"role": "user", "content": llm_input}]
response = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages, functions=functions, function_call={"name": "print_similarity_score"})
#print(f"Response is: {response}")
function_call = response. choices [0].message.function_call
#print(f"Function call is: {function_call}")
argument = json.loads(function_call.arguments)
#print(f"Response function parameters are: {argument}")
print(f"For inputs {str1} and {str2}, the similarity score is: {argument}")
return argument
# Removing for the time-being as we change the input file data @ st.cache_data
def getCSV():
return pd.read_csv("assets/sentences.csv")
def generate_download_link(audio_file_path):
with open(audio_file_path, "rb") as file:
base64_file = base64.b64encode(file.read()).decode()
href = f'<a href="data:file/wav;base64,{base64_file}" download="your_audio.wav"><img src="https://img.icons8.com/emoji/48/000000/play-button-emoji.png"/></a>'
return href
def getSentence(language,difficulty):
df=getCSV()
with st.sidebar.expander("Show more"):
st.dataframe(df)
df=df[df["language"]==language]
df=df[df["difficulty"]==difficulty]
rec=df.sample().iloc[0]
sentence=rec["sentence"]
audiofile=rec["audiofile"]
st.markdown(f"## Sentence={sentence}")
fname=os.path.join(os.getcwd(),"audiofiles",audiofile)
af = open(fname, 'rb')
audiobytes = af.read()
return sentence,audiobytes
#
# Main code
#
languages={"เคนเคฟเคเคฆเฅ":"hi","English":"en","เดฎเดฒเดฏเดพเดณเด":"ml","เทเทเถเทเถฝ":"si"}
main_instruction={"hi":"เคธเคพเคซเคผ เคธเฅ เคฌเฅเคฒเฅเค","en":"Speak clearly","ml":"เดตเตเดฏเดเตเดคเดฎเดพเดฏเดฟ เดธเดเดธเดพเดฐเดฟเดเตเดเตเด","si":"เถดเทเทเทเถฏเทเถฝเทเท เถเถญเท เถเถปเถฑเทเถฑ"}
nltk_data_path = os.path.join(os.getcwd(),"nltk_data")
if nltk_data_path not in nltk.data.path:
nltk.data.path.append(nltk_data_path)
st.sidebar.title("LiteraLearn")
st.sidebar.image("assets/icon128px-red.png")
language_select=st.sidebar.selectbox("Language",options=languages.keys())
language_iso=languages[language_select]
instruction=main_instruction[language_iso]
sentence,audiobytes=getSentence(language_iso,1)
st.markdown(f"## {instruction}")
col1,col2=st.columns(2)
col1.markdown(f"{sentence}")
col2.audio(audiobytes, format="audio/wav")
path_myrecording = os.path.join(os.getcwd(),"audiofiles","myrecording.wav")
audio_bytes = audio_recorder(text="")
if audio_bytes:
st.audio(audio_bytes, format="audio/wav")
with open(path_myrecording, mode='bw') as f:
f.write(audio_bytes)
f.close()
transcription = transcribe_audio(path_myrecording,language_iso)
st.markdown(f"Transcription: {transcription}")
sc=bleu(transcription,sentence)
sc2=function_print_similarity_score(transcription,sentence)
st.markdown(f"BLEU score: {sc}")
st.markdown(f"OpenAI score: {sc2}")
""" | [] |
2024-01-10 | danikagupta/literalearn | quizworld.py | import streamlit as st
from openai import OpenAI
from google.cloud import speech
from google.oauth2 import service_account
from audio_recorder_streamlit import audio_recorder
import os
import json
import cookiestore
import datastore
def transcribe_audio(file_path,language_iso,debugging):
gcs_credentials = st.secrets["connections"]["gcs"]
credentials = service_account.Credentials.from_service_account_info(gcs_credentials)
client = speech.SpeechClient(credentials=credentials)
with open(file_path, 'rb') as audio_file:
content = audio_file.read()
# Configure the request with the language and audio file
audio = speech.RecognitionAudio(content=content)
config = speech.RecognitionConfig(
#encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
language_code=language_iso,
audio_channel_count=2,
)
# Send the request to Google's Speech-to-Text API
google_response = client.recognize(config=config, audio=audio)
response=""
for result in google_response.results:
response+=result.alternatives[0].transcript
if debugging:
print(f"Response: {google_response}")
print(f"Response.results: {google_response.results}")
st.sidebar.markdown(f"Response: {google_response}")
# Print the transcription of the first alternative of the first result
for result in google_response.results:
print(f"Result: {result}")
print(f"Result.alternatives: {result.alternatives}")
print(f"Result.alternatives[0]: {result.alternatives[0]}")
print("Transcription: {}".format(result.alternatives[0].transcript))
return response
def function_print_similarity_score(str1: str, str2: str) -> str:
client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
functions = [{
"name": "print_similarity_score",
"description": "A function that prints the similarity score of two strings",
"parameters": {
"type": "object",
"properties": {
"similarity_score": {
"type": "integer",
"description": "The similarity score."
},
}, "required": ["similarity_score"],
}
}]
llm_input=f"""
You are a language reviewer responsible for reviewing the similarity of two sentences.
The user is being given a sentence, and asked to repeat the sentence themselves.
As such, the scoring has to be lenient.
Please note that the specifc words and word-order are important, not just the meaning.
On a scale of 1-100, with 100 being the most similar, how similar are these: "{str1}", and "{str2}".
"""
messages = [{"role": "user", "content": llm_input}]
response = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages, functions=functions, function_call={"name": "print_similarity_score"})
#print(f"Response is: {response}")
function_call = response.choices[0].message.function_call
#print(f"Function call is: {function_call}")
argument = json.loads(function_call.arguments)
#print(f"Response function parameters are: {argument}")
print(f"For inputs {str1} and {str2}, the similarity score is: {argument} and type is {type(argument)}")
result=argument['similarity_score']
return result
def ask_question(user_sub,user_name, selected_question,audiofile,language, level, languages, debugging):
if debugging:
st.markdown(f"## Ask Question: {selected_question} for user {user_sub}")
main_instruction={"hi":"เคธเคพเคซเคผ เคธเฅ เคฌเฅเคฒเฅเค","en":"Speak clearly","ml":"เดตเตเดฏเดเตเดคเดฎเดพเดฏเดฟ เดธเดเดธเดพเดฐเดฟเดเตเดเตเด","si":"เถดเทเทเทเถฏเทเถฝเทเท เถเถญเท เถเถปเถฑเทเถฑ"}
language_iso=language
instruction=datastore.get_i18n('speakClearly',language_iso,debugging)
inverted_lang = {value: key for key, value in languages.items()}
#main_instruction[language_iso]
sentence=selected_question
st.sidebar.write(f"{user_name} {inverted_lang[language_iso]} {level}")
# st.divider()
st.markdown(f"# {instruction}")
st.markdown(f"## {sentence}")
# Single audio at this time; will investigate TTS.
fname=os.path.join(os.getcwd(),"audiofiles",audiofile)
af = open(fname, 'rb')
audiobytes = af.read()
button_id = "green_button"
button_css = f"""
<style>
#{button_id} {{
background-color: #4CAF50;
color: white;
border: none;
padding: 10px 20px;
text-align: center;
text-decoration: none;
display: inline-block;
font-size: 16px;
margin: 4px 2px;
cursor: pointer;
border-radius: 12px;
}}
</style>
"""
# Inject custom CSS with the HTML
st.markdown(button_css, unsafe_allow_html=True)
bu = st.button("๐ Help",key=button_id)
if bu:
st.audio(audiobytes, format="audio/wav")
# music_code=cookiestore.get_music_code(audiofile)
# st.markdown(music_code, unsafe_allow_html=True)
path_myrecording = os.path.join(os.getcwd(),"audiofiles","myrecording.wav")
audio_bytes = audio_recorder(text="")
if audio_bytes:
with open(path_myrecording, mode='bw') as f:
f.write(audio_bytes)
f.close()
transcription = transcribe_audio(path_myrecording,language_iso,debugging)
sc2=function_print_similarity_score(transcription,sentence)
st.sidebar.audio(audio_bytes, format="audio/wav")
st.sidebar.markdown(f"Transcription: {transcription}")
st.sidebar.markdown(f"Score: {sc2}")
return sc2
else:
return -1 | [
"\n You are a language reviewer responsible for reviewing the similarity of two sentences.\n The user is being given a sentence, and asked to repeat the sentence themselves.\n As such, the scoring has to be lenient.\n Please note that the specifc words and word-order are important, not just the meaning.\n On a scale of 1-100, with 100 being the most similar, how similar are these: \"PLACEHOLDER\", and \"PLACEHOLDER\".\n "
] |
2024-01-10 | rafvasq/cage-challenge-2 | CybORG~CybORG~Evaluation~evaluation.py | import subprocess
import inspect
import time
from statistics import mean, stdev
from CybORG import CybORG #, CYBORG_VERSION
from CybORG.Agents import B_lineAgent, SleepAgent
from CybORG.Agents.SimpleAgents.BlueLoadAgent import BlueLoadAgent
from CybORG.Agents.SimpleAgents.BlueReactAgent import BlueReactRemoveAgent
from CybORG.Agents.SimpleAgents.Meander import RedMeanderAgent
from CybORG.Agents.Wrappers.EnumActionWrapper import EnumActionWrapper
from CybORG.Agents.Wrappers.FixedFlatWrapper import FixedFlatWrapper
from CybORG.Agents.Wrappers.OpenAIGymWrapper import OpenAIGymWrapper
from CybORG.Agents.Wrappers.ReduceActionSpaceWrapper import ReduceActionSpaceWrapper
from CybORG.Agents.Wrappers import ChallengeWrapper
from stable_baselines3 import PPO, A2C, DQN, HER, DDPG, SAC, TD3
MAX_EPS = 100
agent_name = 'Blue'
def wrap(env):
return ChallengeWrapper(env=env, agent_name='Blue')
def get_git_revision_hash() -> str:
return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
if __name__ == "__main__":
cyborg_version = '2.0' #CYBORG_VERSION
scenario = 'Scenario2'
commit_hash = get_git_revision_hash()
# ask for a name
name = input('Name: ')
# ask for a team
team = input("Team: ")
# ask for a name for the agent
name_of_agent = input("Name of technique: ")
lines = inspect.getsource(wrap)
wrap_line = lines.split('\n')[1].split('return ')[1]
# Change this line to load your agent
agent = DQN.load("DQN against RedMeanderAgent")
print(f'Using agent {agent.__class__.__name__}, if this is incorrect please update the code to load in your agent')
file_name = str(inspect.getfile(CybORG))[:-10] + '/Evaluation/' + time.strftime("%Y%m%d_%H%M%S") + f'_{agent.__class__.__name__}.txt'
print(f'Saving evaluation results to {file_name}')
with open(file_name, 'a+') as data:
data.write(f'CybORG v{cyborg_version}, {scenario}, Commit Hash: {commit_hash}\n')
data.write(f'author: {name}, team: {team}, technique: {name_of_agent}\n')
data.write(f"wrappers: {wrap_line}\n")
path = str(inspect.getfile(CybORG))
path = path[:-10] + f'/Shared/Scenarios/{scenario}.yaml'
print(f'using CybORG v{cyborg_version}, {scenario}\n')
for num_steps in [30, 50, 100]:
for red_agent in [B_lineAgent, RedMeanderAgent, SleepAgent]:
cyborg = CybORG(path, 'sim', agents={'Red': red_agent})
wrapped_cyborg = wrap(cyborg)
observation = wrapped_cyborg.reset()
# observation = cyborg.reset().observation
action_space = wrapped_cyborg.get_action_space(agent_name)
# action_space = cyborg.get_action_space(agent_name)
total_reward = []
actions = []
for i in range(MAX_EPS):
r = []
a = []
# cyborg.env.env.tracker.render()
for j in range(num_steps):
# action = agent.get_action(observation, action_space)
action, _states = agent.predict(observation)
observation, rew, done, info = wrapped_cyborg.step(action)
# result = cyborg.step(agent_name, action)
r.append(rew)
# r.append(result.reward)
a.append((str(cyborg.get_last_action('Blue')), str(cyborg.get_last_action('Red'))))
agent.end_episode()
total_reward.append(sum(r))
actions.append(a)
# observation = cyborg.reset().observation
observation = wrapped_cyborg.reset()
print(f'Average reward for red agent {red_agent.__name__} and steps {num_steps} is: {mean(total_reward)} with a standard deviation of {stdev(total_reward)}')
with open(file_name, 'a+') as data:
data.write(f'steps: {num_steps}, adversary: {red_agent.__name__}, mean: {mean(total_reward)}, standard deviation {stdev(total_reward)}\n')
for act, sum_rew in zip(actions, total_reward):
data.write(f'actions: {act}, total reward: {sum_rew}\n')
| [] |
2024-01-10 | rafvasq/cage-challenge-2 | CybORG~CybORG~Evaluation~attention_ppo_evaluation.py | import subprocess
import inspect
import time
from statistics import mean, stdev
from CybORG import CybORG #, CYBORG_VERSION
from CybORG.Agents import B_lineAgent, SleepAgent
from CybORG.Agents.SimpleAgents.BlueLoadAgent import BlueLoadAgent
from CybORG.Agents.SimpleAgents.BlueReactAgent import BlueReactRemoveAgent
from CybORG.Agents.SimpleAgents.Meander import RedMeanderAgent
from CybORG.Agents.Wrappers.EnumActionWrapper import EnumActionWrapper
from CybORG.Agents.Wrappers.FixedFlatWrapper import FixedFlatWrapper
from CybORG.Agents.Wrappers.OpenAIGymWrapper import OpenAIGymWrapper
from CybORG.Agents.Wrappers.ReduceActionSpaceWrapper import ReduceActionSpaceWrapper
from CybORG.Agents.Wrappers import ChallengeWrapper
import ray
from ray import tune
import ray.rllib.algorithms.ppo as ppo
from ray.tune.registry import register_env
import numpy as np
MAX_EPS = 100
agent_name = 'Blue'
def env_creator(env_config):
path = str(inspect.getfile(CybORG))
path = path[:-10] + '/Shared/Scenarios/Scenario1b.yaml'
cyborg = CybORG(path, 'sim', agents={'Red': B_lineAgent})
env = ChallengeWrapper(env=cyborg, agent_name='Blue')
return env
register_env("cyborg", env_creator)
def wrap(env):
return ChallengeWrapper(env=env, agent_name='Blue')
def get_git_revision_hash() -> str:
return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
if __name__ == "__main__":
cyborg_version = '2.0' #CYBORG_VERSION
scenario = 'Scenario2'
commit_hash = get_git_revision_hash()
# ask for a name
name = input('Name: ')
# ask for a team
team = input("Team: ")
# ask for a name for the agent
name_of_agent = input("Name of technique: ")
lines = inspect.getsource(wrap)
wrap_line = lines.split('\n')[1].split('return ')[1]
# Change this line to load your agent
# agent = DQN.load("DQN against RedMeanderAgent")
ray.init(num_cpus=3)
config = {
"env": "cyborg",
"gamma": 0.99,
"num_envs_per_worker": 20,
"entropy_coeff": 0.001,
"num_sgd_iter": 10,
"vf_loss_coeff": 1e-5,
"model": {
# Attention net wrapping (for tf) can already use the native keras
# model versions. For torch, this will have no effect.
"_use_default_native_models": True,
"use_attention": True,
"max_seq_len": 10,
"attention_num_transformer_units": 1,
"attention_dim": 32,
"attention_memory_inference": 10,
"attention_memory_training": 10,
"attention_num_heads": 1,
"attention_head_dim": 32,
"attention_position_wise_mlp_dim": 32,
}
}
trained_model_path = "C:/Users/Rafael/ray_results/experiment_2022-06-20_12-54-22/experiment_cyborg_a23f4_00000_0_2022-06-20_12-54-22/checkpoint_001000/checkpoint-1000"
agent = ppo.PPO(config=config, env="cyborg")
agent.restore(trained_model_path)
# start with all zeros as state
num_transformers = config["model"]["attention_num_transformer_units"]
attention_dim = config["model"]["attention_dim"]
memory = config["model"]["attention_memory_inference"]
init_state = state = [
np.zeros([memory, attention_dim], np.float32)
for _ in range(num_transformers)
]
print(f'Using agent {agent.__class__.__name__}, if this is incorrect please update the code to load in your agent')
file_name = str(inspect.getfile(CybORG))[:-10] + '/Evaluation/' + time.strftime("%Y%m%d_%H%M%S") + f'_{agent.__class__.__name__}.txt'
print(f'Saving evaluation results to {file_name}')
with open(file_name, 'a+') as data:
data.write(f'CybORG v{cyborg_version}, {scenario}, Commit Hash: {commit_hash}\n')
data.write(f'author: {name}, team: {team}, technique: {name_of_agent}\n')
data.write(f"wrappers: {wrap_line}\n")
path = str(inspect.getfile(CybORG))
path = path[:-10] + '/Shared/Scenarios/Scenario1b.yaml'
print(f'using CybORG v{cyborg_version}, {scenario}\n')
for num_steps in [30, 50, 100]:
for red_agent in [B_lineAgent, RedMeanderAgent, SleepAgent]:
cyborg = CybORG(path, 'sim', agents={'Red': red_agent})
wrapped_cyborg = wrap(cyborg)
observation = wrapped_cyborg.reset()
# observation = cyborg.reset().observation
action_space = wrapped_cyborg.get_action_space(agent_name)
# action_space = cyborg.get_action_space(agent_name)
total_reward = []
actions = []
for i in range(MAX_EPS):
r = []
a = []
# cyborg.env.env.tracker.render()
for j in range(num_steps):
# action = agent.get_action(observation, action_space)
# action, _states = agent.predict(observation)
# action = agent.compute_single_action(observation) # RLlib
action, state_out, _ = agent.compute_single_action(observation, state) # RLlib with Attention
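# NOTE: 'state' is never updated from 'state_out' here, so the attention memory passed to the model stays at its initial all-zero value.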
observation, rew, done, info = wrapped_cyborg.step(action)
# result = cyborg.step(agent_name, action)
r.append(rew)
# r.append(result.reward)
a.append((str(cyborg.get_last_action('Blue')), str(cyborg.get_last_action('Red'))))
total_reward.append(sum(r))
actions.append(a)
# observation = cyborg.reset().observation
observation = wrapped_cyborg.reset()
print(f'Average reward for red agent {red_agent.__name__} and steps {num_steps} is: {mean(total_reward)} with a standard deviation of {stdev(total_reward)}')
with open(file_name, 'a+') as data:
data.write(f'steps: {num_steps}, adversary: {red_agent.__name__}, mean: {mean(total_reward)}, standard deviation {stdev(total_reward)}\n')
for act, sum_rew in zip(actions, total_reward):
data.write(f'actions: {act}, total reward: {sum_rew}\n')
| [] |
2024-01-10 | krishnaik06/Langchain-Tutorials | example1.py | ## Integrate our code OpenAI API
import os
from constants import openai_key
from langchain.llms import OpenAI
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.chains import SequentialChain
import streamlit as st
os.environ["OPENAI_API_KEY"]=openai_key
# streamlit framework
st.title('Celebrity Search Results')
input_text=st.text_input("Search the topic u want")
# Prompt Templates
first_input_prompt=PromptTemplate(
input_variables=['name'],
template="Tell me about celebrity {name}"
)
# Memory
person_memory = ConversationBufferMemory(input_key='name', memory_key='chat_history')
dob_memory = ConversationBufferMemory(input_key='person', memory_key='chat_history')
descr_memory = ConversationBufferMemory(input_key='dob', memory_key='description_history')
## OPENAI LLMS
llm=OpenAI(temperature=0.8)
chain=LLMChain(
llm=llm,prompt=first_input_prompt,verbose=True,output_key='person',memory=person_memory)
# Prompt Templates
second_input_prompt=PromptTemplate(
input_variables=['person'],
template="when was {person} born"
)
chain2=LLMChain(
llm=llm,prompt=second_input_prompt,verbose=True,output_key='dob',memory=dob_memory)
# Prompt Templates
third_input_prompt=PromptTemplate(
input_variables=['dob'],
template="Mention 5 major events happened around {dob} in the world"
)
chain3=LLMChain(llm=llm,prompt=third_input_prompt,verbose=True,output_key='description',memory=descr_memory)
parent_chain=SequentialChain(
chains=[chain,chain2,chain3],input_variables=['name'],output_variables=['person','dob','description'],verbose=True)
if input_text:
st.write(parent_chain({'name':input_text}))
with st.expander('Person Name'):
st.info(person_memory.buffer)
with st.expander('Major Events'):
st.info(descr_memory.buffer)
| [
"name",
"Mention 5 major events happened around {dob} in the world",
"Tell me about celebrity {name}",
"when was {person} born",
"person"
] |
2024-01-10 | Mogger-Time/menuvenu | project~backend~app~chatbot~services.py | import os
from http import HTTPStatus
from flask import jsonify
from langchain.document_loaders import TextLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chat_models import ChatOpenAI
from .. import db
from ..menu.models import Items, Ingredients
from ..orders.models import DiningTables, OrderedItems
from ..restaurant.models import Restaurants
class ChatbotService:
@staticmethod
def chatbot_query(data):
query = data['query']
# converts text file to a format that is processable by the bot
loader = TextLoader('data.txt')
# creates an index from the formatted data to generate responses
index = VectorstoreIndexCreator().from_loaders([loader])
return jsonify({'status': HTTPStatus.OK, 'message': index.query(query, llm=ChatOpenAI())})
@staticmethod
def data_update():
f = open('data.txt', 'w')
restaurant = Restaurants.query.first()
f.write("OUR RESTAURANT\n")
f.write("Name - " + restaurant.name + "\n")
f.write("Phone number - " + restaurant.phone + "\n")
f.write("\n")
# Lists all items with their ingredients, price and calories
item_list = Items.query.all()
for item in item_list:
f.write(item.name.upper() + "\n")
#Ingredients
f.write("Ingredients/Has - ")
if item.ingredients:
end = len(item.ingredients)
index = 1
for ingredient in item.ingredients:
f.write(ingredient.name)
if index < end:
f.write(", ")
index += 1
f.write("\n")
else:
f.write("There is no listed ingredients\n")
#Price
f.write("Price - $" + str(item.price) + "\n")
#Calories
f.write("Calories - " + str(item.calories) + "\n")
#Points to redeem
if item.points_to_redeem:
f.write("Points to redeem - " + str(item.points_to_redeem) + "\n")
else:
f.write("Points to redeem - Not redeemable with points\n")
#Points earned
if item.points_earned:
f.write("Points earned - " + str(item.points_earned) + "\n")
else:
f.write("Points earned - No points earnable\n")
f.write("\n")
#List all ingredients
ingredient_list = Ingredients.query.all()
for ingredient in ingredient_list:
if ingredient.items:
f.write("HAS " + ingredient.name.upper() + "\n")
for item in ingredient.items:
f.write(item.name + "\n")
else:
f.write("NO ITEMS HAVE" + ingredient.name.upper() + "\n")
f.write("\n")
# Availability of tables
f.write("BUSY/AVAILABLE\n")
count = DiningTables.query.filter_by(occupied=False).count()
f.write("There are currently " + str(count) + " tables available\n")
f.write("\n")
# Most popular dish
f.write("OUR MOST POPULAR DISH\n")
popular = db.session.query(OrderedItems.item, db.func.count(OrderedItems.item).label('popularity')). \
group_by(OrderedItems.item).order_by(db.desc('popularity')).first()
popular_item = Items.query.filter_by(id=popular.item).first()
f.write(popular_item.name + "\n")
f.write("\n")
# Helper lines for chatbot to learn
f.write("HOW DO I?\n")
f.write("Fitness - You can connect your fitness app through your profile in the top right of the screen\n")
f.write("Assistance - You can request assistance with the 'Assist' button below your order list\n")
f.write("Bill - You can request the bill with the 'Bill' button below your order list\n")
f.write("\n")
# Family Guy 3 hour compilation
f.write("FAMILY GUY\n")
f.write("Here you go: https://www.youtube.com/watch?v=qrhFlCoNun0\n")
f.close()
return jsonify({'status': HTTPStatus.OK, 'message': 'Data updated'})
| [] |
2024-01-10 | HanzPo/intellimailr | email_utils.py | from bs4 import BeautifulSoup
import pandas as pd
import requests
import regex as re
import cohere
import os
import json
from flask import Flask, jsonify, request, Response
from flask_cors import CORS, cross_origin
app = Flask(__name__)
co = cohere.Client(os.environ['CO_API_KEY'])
email = re.compile('[A-Za-z0-9._%+-]+@[A-Za-z0-9-]+[.][A-Za-z.]{2,}')
name = re.compile('[A-Z][a-zA-Z]')
@app.route("/api/v1/emails", methods=["POST"])
def get_emails():
data = json.loads(request.data)
url_format = re.compile("((http|https)://)(www.)?[a-zA-Z0-9@:%._\\+~#?&//=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%._\\+~#?&//=]*)")
if not "url" in data:
return "Invalid json data format"
url = data['url']
if not url_format.match(url):
return "Invalid URL"
req = requests.get(url)
content = req.text
soup = BeautifulSoup(content, features="html.parser")
emails = email.findall(str(soup.find('main')))
emails = list(dict.fromkeys(emails))
return jsonify(emails)
@app.route("/api/v1/csv", methods=["POST"])
def generate_csv():
data = json.loads(request.data)
names = data['names']
emails = data['emails']
if (not type(names) is list):
return "Invalid names"
if (not type(emails) is list):
return "Invalid emails"
size = len(emails)
status = ["Not Applied" for i in range(size)]
contacted = ["No" for i in range(size)]
recruiters = { "names" : names, "emails": emails, "status": status, "contacted": contacted }
df = pd.DataFrame(recruiters, columns=["names", "emails", "status", "contacted"]).set_index('names')
if not os.path.isfile('recruiters.csv'):
df.to_csv('recruiters.csv', header=['emails', 'status', 'contacted'])
else: # else it exists so append without writing the header
pass
# df.to_csv('recruiters.csv', mode='a', header=False)
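# NOTE: this view has no return statement, so Flask will report that the view function returned None.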
@app.route("/api/v1/csv")
def getPlotCSV():
if not os.path.isfile('recruiters.csv'):
return None
df = pd.read_csv('recruiters.csv')
csv = df.to_csv()[1:]
return Response(
csv,
mimetype="text/csv",
headers={"Content-disposition":
"attachment; filename=myplot.csv"})
@app.route("/api/v1/research", methods=["POST"])
@cross_origin()
def get_research_template():
data = json.loads(request.data)
student_name = data['student_name']
student_field = data['student_field']
student_experience = data['student_experience']
student_uni = data['student_uni']
prof_name = data['prof_name']
prompt = f"Write a cold outreach email to a professor named {prof_name} from a student named {student_name}, who is a {student_experience} at {student_uni}, asking if {prof_name} is interested in hiring {student_name} as a research assistant regarding {student_field}. Do not generate emails or phone numbers. Only ask if they are open to hiring people."
response = co.generate(
model='command-xlarge-nightly',
prompt=prompt,
max_tokens=300,
temperature=0.5,
k=0,
stop_sequences=[],
return_likelihoods='NONE')
return jsonify({ "text": response.generations[0]})
@app.route("/api/v1/internship", methods=["POST"])
@cross_origin()
def get_internship_template():
data = json.loads(request.data)
student_name = data['student_name']
student_field = data['student_field']
student_experience = data['student_experience']
student_uni = data['student_uni']
company_name = data['company_name']
prompt = f"Write an outreach cold email written by a student named {student_name} interested in working in the field of {student_field}. The person is a {student_experience} at {student_uni}, and is asking about an internship at a company called {company_name}. Do not generate emails or phone numbers. Only ask if they are open to hiring people."
response = co.generate(
model='command-xlarge-nightly',
prompt=prompt,
max_tokens=300,
temperature=0.5,
k=0,
stop_sequences=[],
return_likelihoods='NONE')
return jsonify({ "text": response.generations[0] })
def get_names(url):
req = requests.get(url)
content = req.text
soup = BeautifulSoup(content, features="html.parser")
names = []
for div in soup.findAll('td'):
try:
names.append(div.find('a').contents[0])
except:
continue
names = [i for i in names if not email.match(i)]
return jsonify(names)
app.run(debug=False)
| [
"Write a cold outreach email to a professor named PLACEHOLDER from a student named PLACEHOLDER, who is a PLACEHOLDER at PLACEHOLDER, asking if PLACEHOLDER is interested in hiring PLACEHOLDER as a research assistant regarding PLACEHOLDER. Do not generate emails or phone numbers. Only ask if they are open to hiring people.",
"Write an outreach cold email written by a student named PLACEHOLDER interested in working in the field of PLACEHOLDER. The person is a PLACEHOLDER at PLACEHOLDER, and is asking about an internship at a company called PLACEHOLDER. Do not generate emails or phone numbers. Only ask if they are open to hiring people."
] |
2024-01-10 | kaydo-g/yeagerai-agent | yeagerai~toolkit~load_n_fix_new_tool~load_n_fix_new_tool.py | """Creates the source code of a new LangChain Tool on-the-fly and writes it into session cwd."""
import importlib.util
import os
import re
from typing import List
from pydantic import BaseModel
from yeagerai.toolkit.yeagerai_tool import YeagerAITool
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from yeagerai.toolkit.load_n_fix_new_tool.load_n_fix_new_tool_master_prompt import (
LOAD_N_FIX_NEW_TOOL_MASTER_PROMPT,
)
from yeagerai.toolkit import YeagerAIToolkit
class LoadNFixNewToolAPIWrapper(BaseModel):
session_path: str
model_name: str
request_timeout: int
openai_api_key: str = os.getenv("OPENAI_API_KEY")
toolkit: YeagerAIToolkit
class Config:
arbitrary_types_allowed = True
def run(self, new_tool_path: str) -> str:
# try to load the file
try:
with open(
new_tool_path.strip(")").strip('"').strip(" ").strip("\n"), "r"
) as f:
source_code = f.read()
f.close()
except FileNotFoundError as traceback:
return f"Error: The provided path is not correct. Please try again.\n Traceback: {traceback}"
class_name = new_tool_path.split("/")[-1].split(".")[0]
try:
spec = importlib.util.spec_from_file_location(class_name, new_tool_path)
myfile = importlib.util.module_from_spec(spec)
spec.loader.exec_module(myfile)
# load the imported classes into the toolkit
class_api_wrapper = getattr(myfile, class_name + "APIWrapper")
class_run = getattr(myfile, class_name + "Run")
self.toolkit.register_tool(class_run(api_wrapper=class_api_wrapper()))
except Exception as traceback:
# Initialize ChatOpenAI with API key and model name
chat = ChatOpenAI(
openai_api_key=self.openai_api_key,
model_name=self.model_name,
request_timeout=self.request_timeout,
)
# Create a PromptTemplate instance with the read template
y_tool_master_prompt = PromptTemplate(
input_variables=["source_code", "traceback"],
template=LOAD_N_FIX_NEW_TOOL_MASTER_PROMPT,
)
# Create a HumanMessagePromptTemplate instance with the master prompt
human_message_prompt = HumanMessagePromptTemplate(
prompt=y_tool_master_prompt
)
chat_prompt = ChatPromptTemplate.from_messages([human_message_prompt])
# Create an LLMChain instance and run the command
chain = LLMChain(llm=chat, prompt=chat_prompt)
out = chain.predict(source_code=source_code, traceback=traceback)
# Parse the Python block inside the output, handling different code block formats
code_block_pattern = re.compile(r"(```.*?```)", re.DOTALL)
code_block = re.search(code_block_pattern, out)
if code_block:
code = code_block.group(1).strip()
if code.startswith("```python"):
code = code[9:]
elif code.startswith("```"):
code = code[3:]
if code.endswith("```"):
code = code[:-3]
# Write the {class_name}.py file inside the user-defined session_path
output_file = f"{class_name}.py"
with open(os.path.join(self.session_path, output_file), "w") as f:
f.write(code)
f.close()
return f"The file {class_name}.py has been improved but it was not loaded into the toolkit.\n Traceback: {traceback}"
else:
# Write the {class_name}.py file inside the user-defined session_path
output_file = f"{class_name}.py"
with open(os.path.join(self.session_path, output_file), "w") as f:
f.write(out)
f.close()
return f"The file {class_name}.py has been improved but it was not loaded into the toolkit.\n Traceback: {traceback}"
return f"The {class_name} tool has been loaded into your toolkit, Now you can use it as any other tool."
class LoadNFixNewToolRun(YeagerAITool):
"""Tool that adds the capability of creating the source code of other Tools on-the-fly and writing it into cwd."""
name = "Load and Fix New Tool"
description = """Useful for when you want to load a YeagerAITool into your toolkit.
Input MUST BE a string containing the path to the YeagerAITool file. Example: "/home/user/.yeagerai-sessions/session_id/class_name.py"
It should be defined earlier in the conversation.
This tool is perfect for loading and executing Python scripts on local machines.
YOU CAN NOT ANSWER: As an AI, I am unable to access files on your local machine or perform actions beyond my capabilities. Or similar sentences.
"""
final_answer_format = """Final answer: just return the message explaining:
if the tool still has errors but has been improved or
if the new tool has been loaded into the toolkit and now is available for you to use."""
api_wrapper: LoadNFixNewToolAPIWrapper
def _run(self, new_tool_path: str) -> str:
"""Use the tool."""
return self.api_wrapper.run(new_tool_path=new_tool_path)
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("LoadNFixNewToolRun does not support async")
| [
"Useful for when you want to load a YeagerAITool into your toolkit. \n Input MUST BE a string containing the path to the YeagerAITool file. Example: \"/home/user/.yeagerai-sessions/session_id/class_name.py\" \n It should be defined earlier in the conversation.\n This tool is perfect for loading and executing Python scripts on local machines.\n YOU CAN NOT ANSWER: As an AI, I am unable to access files on your local machine or perform actions beyond my capabilities. Or similar sentences.\n ",
"source_code"
] |
2024-01-10 | KernAlan/pineconegui | services~csv_service.py | import json
import os
import tiktoken
import logging
import pandas as pd
from openai import OpenAI
from dotenv import load_dotenv
class CSVService:
def __init__(self, logger=None):
load_dotenv()
self.CSV_FILE = ""
self.OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
self.ENC = tiktoken.encoding_for_model("gpt-3.5-turbo")
self.embeddings_model = "text-embedding-ada-002"
self.openai_client = OpenAI()
self.logger = logger
self.selected_columns = []
self.max_embedded_tokens = 2000
def setup_logging(self):
if self.logger:
self.logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s: %(message)s")
handler.setFormatter(formatter)
self.logger.addHandler(handler)
def set_file_path(self, file_path):
self.CSV_FILE = file_path
def set_selected_columns(self, columns, main_content_column=None):
self.selected_columns = columns
self.main_content_column = (
main_content_column if main_content_column else columns[0]
)
def read_csv_file(self, file_path):
if not os.path.exists(file_path) or os.path.getsize(file_path) == 0:
raise Exception("CSV file missing or empty")
return pd.read_csv(file_path)
def process_csv_dataframe(self, df, main_content_column=None, metadata_column=None):
# Check if the main content column is in the DataFrame
if main_content_column not in df.columns:
self.logger.error(
f"The column '{main_content_column}' is missing from the CSV file."
)
raise Exception(f"CSV file is missing the '{main_content_column}' column.")
# Check if the metadata column is in the DataFrame
if metadata_column and metadata_column not in df.columns:
self.logger.error(
f"The column '{metadata_column}' is missing from the CSV file."
)
raise Exception(f"CSV file is missing the '{metadata_column}' column.")
# If metadata column is the same as main content column, only select main content column
if main_content_column != metadata_column:
df = df[[main_content_column, metadata_column]].copy()
else:
df = df[[main_content_column]].copy()
# Add tokens column to measure the size of the content
self.logger.info("Calculating tokens and adding to dataframe...")
df["tokens"] = df[main_content_column].apply(
lambda x: len(self.ENC.encode(str(x)))
)
if df["tokens"].max() > self.max_embedded_tokens:
raise ValueError(f"Content exceeds {self.max_embedded_tokens} tokens")
return df
def create_vectors(self, df, main_content_column=None, metadata_column=None):
vectors = []
self.logger.info("Creating vectors...")
for index, row in df.iterrows():
content = str(row[main_content_column])
if content.lower() in ("nan", "null"):
self.logger.warning(f"Skipping row {index} due to missing content")
continue
if len(content) == 0:
self.logger.warning(f"Skipping row {index} due to empty content")
continue
try:
response = (
self.openai_client.embeddings.create(
input=content, model=self.embeddings_model
)
.data[0]
.embedding
)
except Exception as e:
self.logger.error(f"Error in embedding content for row {index}: {e}")
continue
# Build metadata dictionary from selected columns
metadata = (
{metadata_column: str(row[metadata_column])}
if metadata_column in df.columns
else {}
)
vectors.append({"id": str(index), "values": response, "metadata": metadata})
self.logger.info(f"Created new JSON object for index {index}")
return vectors
def save_json_to_file(self, json_data, file_path):
try:
with open(file_path, "w") as json_file:
json.dump(json_data, json_file)
self.logger.info(f"JSON file created successfully at {file_path}")
except Exception as e:
self.logger.error(f"Failed to save JSON file at {file_path}: {e}")
def main(self):
self.setup_logging()
df = self.read_csv_file(self.CSV_FILE)
df = self.process_csv_dataframe(df)
vectors = self.create_vectors(df, self.main_content_column)
json_output = {"vectors": vectors}
print("Writing JSON to file...")
self.save_json_to_file(json_output, "output.json")
backup_path = "backup_output.json"
if not os.path.exists("output.json"):
self.logger.warning("Attempting to save JSON to backup location...")
self.save_json_to_file(json_output, backup_path)
| [] |
2024-01-10 | zhupite233/BilingualHTML | BilingualHTML.py | from bs4 import BeautifulSoup as bs
import openai
import time
import json
import logging
import os
from pathlib import Path
logging.basicConfig(
level=logging.INFO,
filename="./log.txt",
filemode="a",
format="%(asctime)s - %(levelname)s - %(funcName)s: %(message)s",
)
def contains_only_code_tag(tag):
"""
Check whether the tag's text consists solely of a single <code> tag.
"""
code_tag = tag.find('code')
if code_tag is None:
return False
code_text = code_tag.text.replace('\n', '').replace(' ', '')
p_text = tag.text.replace('\n', '').replace(' ', '')
if code_text == p_text:
return True
def is_single_character_tag(tag):
"""
Determine whether the text inside a tag is only a single character.
If it is just one character, it does not need to be translated.
"""
text = tag.text.replace('\n', '').replace(' ', '')
return len(text) == 1
def is_jump_translate(tag):
"""
ๅคๆญๆฏๅฆๅฏไปฅ่ทณ่ฟๅฝๅๆ ็ญพ๏ผไธ็จ็ฟป่ฏใ
"""
return contains_only_code_tag(tag) or is_single_character_tag(tag)
def get_translation(prompt, code):
"""
Translate the HTML page into Simplified Chinese and return the translated HTML code;
text wrapped in <code> tags is not translated.
If translation fails, the original HTML code is returned.
"""
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": f"{prompt} {code}"}
]
)
return completion.choices[0].message.content
def translate_tag(prompt, code):
"""
Translate the given HTML page into Simplified Chinese, retrying if the translation fails.
"""
max_attempts = 5
for i in range(max_attempts):
try:
t_text = get_translation(prompt, code)
time.sleep(3)
print(t_text)
logging.info(t_text)
return t_text
except Exception as e:
sleep_time = 60
print(e)
logging.error(e)
print(f"่ฏทๆฑๅคฑ่ดฅ๏ผๅฐ็ญๅพ
{sleep_time} ็งๅ้่ฏ")
logging.info(f"่ฏทๆฑๅคฑ่ดฅ๏ผๅฐ็ญๅพ
{sleep_time} ็งๅ้่ฏ")
time.sleep(sleep_time)
print(f"ๅผๅง้่ฏ็ฌฌ {i + 1}/{max_attempts}")
logging.info(f"ๅผๅง้่ฏ็ฌฌ {i + 1}/{max_attempts}")
print(f"่ฏทๆฑๅคฑ่ดฅ๏ผ้่ฏๆฌกๆฐ{max_attempts}/{max_attempts}๏ผๆพๅผ่ฏทๆฑ")
logging.error(f"่ฏทๆฑๅคฑ่ดฅ๏ผ้่ฏๆฌกๆฐ{max_attempts}/{max_attempts}๏ผๆพๅผ่ฏทๆฑ")
def read_api_key(path):
with open(path, 'r') as f:
api_key = f.read().strip()
return api_key
def read_chatGPT_prompt(path):
with open(path, 'r') as f:
prompt = f.read().strip()
return prompt
def read_page(path):
"""
Open the HTML page and return a BeautifulSoup object.
"""
with open(path, 'r') as f:
soup = bs(f, "html.parser")
return soup
def read_json(path):
"""
Open the JSON file and return a JSON object.
"""
with open(path, 'r') as f:
json_obj = json.load(f)
return json_obj
def write_json(path, code, mode='w'):
"""
Write the JSON object to a JSON file.
"""
with open(path, mode) as f:
json.dump(code, f, ensure_ascii=False)
def write_page(path, soup):
with open(path, 'w') as f:
f.write(soup.prettify())
def resume_translate():
"""
Resume the previous translation progress.
"""
try:
translated_texts = read_json('translated.json')
start_index = len(translated_texts)
print(f"ไป็ดขๅผ {start_index + 1} ๅค็ปง็ปญ็ฟป่ฏ")
logging.info(f"ไป็ดขๅผ {start_index + 1} ๅค็ปง็ปญ็ฟป่ฏ")
except:
print("ๆฒกๆๆพๅฐไนๅ็็ฟป่ฏ็ปๆ๏ผไปๅคดๅผๅง็ฟป่ฏ")
logging.info("ๆฒกๆๆพๅฐไนๅ็็ฟป่ฏ็ปๆ๏ผไปๅคดๅผๅง็ฟป่ฏ")
start_index = 0
translated_texts = []
return translated_texts, start_index
def translate_page(start_time, page_num, page_count, page, prompt):
"""
Read the HTML page and translate the content of all of its <p> tags into Chinese.
If previous translation results already exist, resume from where the last run stopped.
Translation results are saved to the 'translated.json' file.
"""
soup = read_page(page)
p_list = soup.find_all('p')
count = len(p_list)
translated_texts, start_index = resume_translate()
page_start_time = time.time()
for i, p in enumerate(p_list[start_index:]):
print("โ " * 10 + "ๅผๅง็ฟป่ฏ " + "โ " * 10)
logging.info("โ " * 10 + "ๅผๅง็ฟป่ฏ " + "โ " * 10)
p_start_time = time.time()
p_code = p.prettify()
print(p_code)
logging.info(p_code)
if p_code:
if is_jump_translate(p):
translated_texts.append(p_code)
else:
translated_texts.append(translate_tag(prompt, p_code))
write_json('translated.json', translated_texts)
p_end_time = time.time()
print(f"ๅทฒ็ป็ฟป่ฏ {i + start_index + 1}/{count}", end='๏ผ')
logging.info(f"ๅทฒ็ป็ฟป่ฏ {i + start_index + 1}/{count}")
p_elapsed_time = int(p_end_time - p_start_time)
print(f"ๆฌๆฎต่ๆถ {p_elapsed_time} ็ง", end='๏ผ')
logging.info(f"ๆฌๆฎต่ๆถ {p_elapsed_time} ็ง")
every_p_time = int((p_end_time - page_start_time) / (i + start_index + 1))
print(f"ๅนณๅๆฏๆฎต่ๆถ {every_p_time} ็ง")
logging.info(f"ๅนณๅๆฏๆฎต่ๆถ {every_p_time} ็ง")
page_elapsed_time = int((p_end_time - page_start_time) / 60)
print(f"ๆฌ้กต่ๆถ {page_elapsed_time} ๅ้", end='๏ผ')
logging.info(f"ๆฌ้กต่ๆถ {page_elapsed_time} ๅ้")
remaining_time = int(((count - i - start_index - 1) * every_p_time) / 60)
print(f"ๆฌ้กต้ข่ฎกๅฉไฝๆถ้ด {remaining_time} ๅ้")
logging.info(f"ๆฌ้กต้ข่ฎกๅฉไฝๆถ้ด {remaining_time} ๅ้")
elapsed_time = int((p_end_time - start_time) / 60)
print(f"ๆป่ๆถ {elapsed_time} ๅ้", end='๏ผ')
logging.info(f"ๆป่ๆถ {elapsed_time} ๅ้")
print(f"ๆญฃๅจ็ฟป่ฏ็ฌฌ {page_num + 1}/{page_count} ้กต")
logging.info(f"ๆญฃๅจ็ฟป่ฏ็ฌฌ {page_num + 1}/{page_count} ้กต")
print("โ " * 10 + "็ฟป่ฏๅฎๆ " + "โ " * 10)
logging.info("โ " * 10 + "็ฟป่ฏๅฎๆ " + "โ " * 10)
return count
def save_translated_page(page, json):
"""
Insert the translated <p> tags into the HTML file.
"""
translated_texts = read_json(json)
soup = read_page(page)
p_list = soup.find_all('p')
for i, p in enumerate(p_list):
text = p.prettify()
if text:
translated_p = bs(translated_texts[i], 'html.parser')
p.insert_after(translated_p)
page_cn = page.parent / (page.stem + '_cn' + page.suffix)
write_page(page_cn, soup)
def get_translated_page(path):
path = Path(path)
return list(path.glob('**/*.html'))
def resume_translate_page():
"""
Resume the previous per-file translation progress.
"""
try:
translated_index = read_json('index.json')
start_index = len(translated_index)
print(f"ไป็ดขๅผ {start_index + 1} ๅค็ปง็ปญ็ฟป่ฏ")
logging.info(f"ไป็ดขๅผ {start_index + 1} ๅค็ปง็ปญ็ฟป่ฏ")
except:
print("ๆฒกๆๆพๅฐไนๅ็็ฟป่ฏ็ดขๅผ๏ผไป็ฌฌไธไธชๆไปถๅผๅง็ฟป่ฏ")
logging.info("ๆฒกๆๆพๅฐไนๅ็็ฟป่ฏ็ดขๅผ๏ผไป็ฌฌไธไธชๆไปถๅผๅง็ฟป่ฏ")
start_index = 0
return start_index
openai.api_key = read_api_key('api_key.txt')
prompt = read_chatGPT_prompt('chatGPT_prompt.txt')
path = Path('translatable')
pages = get_translated_page('translatable')
pages_count = len(list(pages))
start_time = time.time()
start_index = resume_translate_page()
for i, page in enumerate(pages[start_index:]):
count = translate_page(start_time, i, pages_count, page, prompt)
translated_texts = read_json('translated.json')
if count == len(translated_texts):
save_translated_page(page, 'translated.json')
print(f"{page.stem} ็ฟป่ฏๅฎๆ, ่ฟๅบฆ{i + 1}/{pages_count}")
logging.info(f"{page.stem} ็ฟป่ฏๅฎๆ, ่ฟๅบฆ{i + 1}/{pages_count}")
os.remove('translated.json')
write_json('index.json', str(page), 'a')
os.remove('index.json')
print("ๅ
จ้จๆไปถ็ฟป่ฏๅฎๆ")
| [
"chatGPT_prompt.txt",
"PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | xysong1201/Incubation_watsonx_Chinese | lab05_Building%20Question-Answering%20with%20watsonx.ai%20and%20Streamlit~Level%202%20Using%20Chinese~app_cn.py | # Import environment loading library
from dotenv import load_dotenv
# Import IBMGen Library
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from langchain.llms.base import LLM
# Import lang Chain Interface object
from langChainInterface import LangChainInterface
# Import langchain prompt templates
from langchain.prompts import PromptTemplate
# Import system libraries
import os
# Import streamlit for the UI
import streamlit as st
import re
#language_processor
from language_process import *
# Sample questions to try (kept in Chinese because the app targets Chinese input):
# ๆฟๅฑ่ดทๆฌพๆ้ๆฏๅค้ฟ๏ผ (How long is a housing loan term?)
# ๅบๅกๆไปไนๅฅฝๅค๏ผ (What are the benefits of debt?)
# ไธ็้ถ่กๅจๅช้๏ผ (Where is the World Bank located?)
# ้ๅฝ็ๅจ่ๆฏไพๆฏๅคๅฐ๏ผ (What is South Korea's debt ratio?)
# Load environment vars
load_dotenv()
# Define credentials
api_key = os.getenv("API_KEY", None)
ibm_cloud_url = os.getenv("IBM_CLOUD_URL", None)
project_id = os.getenv("PROJECT_ID", None)
#bam credentials
bam_api_key = os.getenv("bam_api_key", None)
bam_api_url = os.getenv("bam_api_url", None)
if api_key is None or ibm_cloud_url is None or project_id is None:
print("Ensure you copied the .env file that you created earlier into the same directory as this notebook")
else:
creds = {
"url": ibm_cloud_url,
"apikey": api_key
}
# print(project_id)
# translator = get_translator_model(creds, project_id)
direct_model = get_translator_model(creds, project_id)
# Define generation parameters
params = {
GenParams.DECODING_METHOD: "sample",
GenParams.MIN_NEW_TOKENS: 1,
GenParams.MAX_NEW_TOKENS: 300,
GenParams.TEMPERATURE: 0.2,
# GenParams.TOP_K: 100,
# GenParams.TOP_P: 1,
GenParams.REPETITION_PENALTY: 1
}
models = {
"granite_chat":"ibm/granite-13b-chat-v1",
"flanul": "google/flan-ul2",
"llama2": "meta-llama/llama-2-70b-chat"
}
# define LangChainInterface model
llm = LangChainInterface(model=models["llama2"], credentials=creds, params=params, project_id=project_id)
# Title for the app
st.title('๐ค Our First Q&A Front End')
# Prompt box
prompt = st.text_input('Enter your prompt here')
print(prompt)
# If a user hits enter
if prompt:
# Pass the prompt to the llm
# prompt_sentence_to_model = llm_translator_prompt(financial_word_list_TH2EN,prompt, mode='TH2EN')
# print('prompt_sentence_to_model')
# print(prompt_sentence_to_model)
# text_to_model = llm_translator(prompt_sentence_to_model, translator,mode='TH2EN')
# print('text_to_model')
# print(text_to_model)
text_to_model = question_prompt(prompt)
print(text_to_model)
# response_from_model = llm(text_to_model)
response_from_model = direct_model.generate_text(text_to_model)
# response_from_model = llm(prompt)
print('response_from_model')
print(response_from_model)
# print(response_from_model.split('.')[0])
# prompt_sentence_to_user = llm_translator_prompt(financial_word_list_EN2TH, response_from_model.split('.')[0], mode='EN2TH')
# print('prompt_sentence_to_user')
# print(prompt_sentence_to_user)
# text_to_user = llm_translator(prompt_sentence_to_user, translator, mode='EN2TH')
# print(text_to_user)
# Write the output to the screen
st.write(response_from_model)
# flanul
# ๆฟๅฑ่ดทๆฌพๆ้ๆฏๅค้ฟ๏ผ (How long is a housing loan term?)
# ๅบๅกๆไปไนๅฅฝๅค๏ผ (What are the benefits of debt?)
# ไธ็้ถ่กๅจๅช้๏ผ (Where is the World Bank located?)
# ้ๅฝ็ๅจ่ๆฏไพๆฏๅคๅฐ๏ผ (What is South Korea's debt ratio?)
## llama70b-chat
# ไปไนๆฏๆ่ต๏ผ (What is investing?)
# ไฟ้ฉ็ฑปๅ (Types of insurance)
"Enter your prompt here"
] |
2024-01-10 | aaryalotke/sih_model | flask-server~server.py | from flask import Flask, jsonify, request
from flask_cors import CORS
from firebase_admin import credentials, firestore, db
import firebase_admin
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from datetime import datetime, timedelta
import holidays
import plotly.express as px
import joblib
from xgboost import XGBRegressor
import requests
from bs4 import BeautifulSoup
import pytz
import time
from pandasai import SmartDataframe
import pandas as pd
from pandasai.llm import OpenAI
cred = credentials.Certificate("./permissions.json")
firebase_admin.initialize_app(cred)
app = Flask(__name__)
cors = CORS(app)
df = pd.read_csv("./food_sales2.csv")
df = df.dropna()
all_dish_id = df['DishID'].unique()
db = firestore.client()
dishes = db.collection("products")
le = LabelEncoder()
def generate_transactions(date, vegetarian_value, price_value, dishID_value):
return [{'Date': date,
'DishID': dishID_value,
'Vegetarian': vegetarian_value,
'Price': price_value,
'DayOfWeek': date.strftime('%A'),
'Occasion': get_occasion_name(date)}]
def get_occasion_name(date):
india_holidays = holidays.India(years=[date.year])
occasion_name = india_holidays.get(date)
return str(occasion_name) if occasion_name else 'None'
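# Editorial example (not part of the original script): get_occasion_name returns
# the holiday name for dates that fall on an Indian public holiday and the string
# 'None' otherwise; exact names depend on the installed `holidays` package data.
#
#     get_occasion_name(datetime(2023, 1, 26))  # -> 'Republic Day' (or similar)
#     get_occasion_name(datetime(2023, 1, 3))   # -> 'None'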
def generate_dates(start_date, end_date):
date_range = (end_date - start_date).days
return [start_date + timedelta(days=i) for i in range(date_range + 1)]
start_date = datetime(2023, 1, 1)
end_date = datetime(2023, 1, 31)
date_range = pd.date_range(start_date, end_date)
all_dates = generate_dates(start_date, end_date)
next_day_date = datetime.strptime('02-01-2023', '%d-%m-%Y').strftime('%Y-%m-%d')
def get_next_id(collection_name):
# Query the collection to find the maximum ID
docs = db.collection(collection_name).stream()
max_id = 0
for doc in docs:
current_id = int(doc.id)
if current_id > max_id:
max_id = current_id
return max_id + 1
@app.route('/add-recipe/', methods=['POST'])
def create_product():
try:
# Assuming the request body is in JSON format
req_data = request.get_json()
# Generate the next ID for the 'products' collection
next_id = get_next_id('recipies')
# Add a new document to the 'products' collection with the generated ID
db.collection('recipies').document(str(next_id)).set({
'name': req_data['name'],
'ingredient_list': req_data['ingredient_list'],
'price_list': req_data['price_list'],
'quantity_list': req_data['quantity_list'],
'cost_price': req_data['cost_price'],
'selling_price': req_data['selling_price'],
'num_of_dishes': req_data['num_of_dishes'],
'is_veg': req_data['is_veg']
})
return jsonify({'message': 'Product created successfully'}), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route('/add-collaboration/', methods=['POST'])
def add_collaboration():
try:
# Assuming the request body is in JSON format
req_data = request.get_json()
# Add a new document to the 'collaborations' collection
db.collection('collaborations').document().set({
'restaurantName': req_data['restaurantName'],
'collaborationDuration': req_data['collaborationDuration'],
'collaborationDetails': req_data['collaborationDetails'],
'contactPerson': req_data['contactPerson'],
'contactEmail': req_data['contactEmail'],
})
return jsonify({'message': 'Collaboration details added successfully'}), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route('/get-collaborations/', methods=['GET'])
def get_collaborations():
try:
# Reference to the "collaborations" collection in Firebase
collaborations_ref = db.collection('collaborations')
# Fetch all documents from the collection
collaborations = collaborations_ref.get()
# Extract data from documents
data = []
for doc in collaborations:
data.append({**doc.to_dict(), 'id': doc.id})
return jsonify(data), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route('/add-fixed-expense/', methods=['POST'])
def add_fixed_expense():
try:
# Assuming the request body is in JSON format
req_data = request.get_json()
# Generate the next ID for the 'products' collection
next_id = get_next_id('restaurants')
# Add a new document to the 'products' collection with the generated ID
db.collection('restaurants').document(str(next_id)).set({
'rent': req_data['rent'],
'employeeSalaries': req_data['employeeSalaries'],
'utilities': req_data['utilities'],
'desiredProfitPercentage': req_data['desiredProfitPercentage'],
'total_exp': req_data['total_exp'],
'expected_fluctuation': req_data['expected_fluctuation']
})
return jsonify({'message': 'Product created successfully'}), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route("/dishes/update/<int:dishId>")
def update(dishId):
dish_ref = dishes.document(str(dishId))
dish = dish_ref.get().to_dict()
dish["price"] = 100
dish_ref.set(dish)
return jsonify({"success": True}), 200
@app.route("/dishes/read/<string:dishId>")
def read(dishId):
dish = dishes.document(dishId).get()
return jsonify(dish.to_dict()), 200
@app.route("/dishes/create")
def create():
all_dish_data = []
for dish in dishes.stream():
dish_data = dish.to_dict()
all_dish_data.append(dish_data)
last_element_id = all_dish_data[-1]['id']
description = "Created"
name = "Tandoori"
price = 300
dishes.document(str(last_element_id + 1)).set({"description": description, "name": name, "id": last_element_id + 1, "price": price})
return jsonify({"success": True}), 200
@app.route("/dishes/delete/<string:dishId>")
def delete(dishId):
dishes.document(dishId).delete()
return jsonify({"success": True}), 200
@app.route('/api/read/', methods=['GET'])
def read_products():
try:
query = db.collection('recipies')
response = []
docs = query.stream()
for doc in docs:
selected_item = {
'id': doc.id,
'name': doc.to_dict()['name'],
'ingredient_list': doc.to_dict()['ingredient_list'],
'price_list': doc.to_dict()['price_list'],
'quantity_list': doc.to_dict()['quantity_list'],
'cost_price': doc.to_dict()['cost_price'],
'selling_price': doc.to_dict()['selling_price'],
'num_of_dishes': doc.to_dict()['num_of_dishes'],
'is_veg': doc.to_dict().get('is_veg', None),
}
response.append(selected_item)
return jsonify(response), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route('/save-selected-data', methods=['POST'])
def save_selected_data():
try:
req_data = request.get_json()
# Assuming your Firebase collection is named 'selectedDishes'
selected_dishes_ref = db.collection('selectedDishes')
# Get the current date in DD-MM-YY format
current_date = datetime.now().strftime('%d-%m-%Y')
# Loop through the array and add each object to the collection with the current date
for item in req_data:
selected_dishes_ref.add({
'name': item['name'],
'cost_price': item['cost_price'],
'selling_price': item['selling_price'],
'quantity': item['quantity'],
'id': item['id'],
'date_added': current_date
})
return jsonify({'message': 'Data saved successfully'}), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
# get all the inventory items
@app.route('/get-inventory', methods=['GET'])
def get_inventory():
try:
# Assuming your Firebase collection is named 'inventory'
inventory_ref = db.collection('inventory')
# Get all documents from the 'inventory' collection
inventory_data = inventory_ref.stream()
# Convert data to a list of dictionaries
inventory_list = []
for doc in inventory_data:
item_data = doc.to_dict()
item_data['id'] = doc.id # Include the document ID
inventory_list.append(item_data)
return jsonify({'inventory': inventory_list}), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
# send inventory items
@app.route('/save-inventory', methods=['POST'])
def save_inventory():
try:
req_data = request.get_json()
# Assuming your Firebase collection is named 'selectedDishes'
selected_dishes_ref = db.collection('inventory')
# Get the current date in DD-MM-YY format
current_date = datetime.now().strftime('%d-%m-%Y')
# Loop through the array and add each object to the collection with the current date
for item in req_data:
selected_dishes_ref.add({
'commodity_id': item['commodity_id'],
'name': item['name'],
'category': item['category'],
'unitOfMeasurement': item['unitOfMeasurement'],
'currentStock': item['currentStock'],
'minStockThreshold': item['minStockThreshold'],
'reorderQuantity': item['reorderQuantity'],
'unitCost': item['unitCost'],
'date_added': current_date
})
return jsonify({'message': 'Data saved successfully'}), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route('/get-all-selected-dishes/', methods=['GET'])
def get_all_selected_dishes():
try:
# Assuming your Firebase collection is named 'selectedDishes'
selected_dishes_ref = db.collection('selectedDishes')
# Retrieve all documents from the 'selectedDishes' collection
selected_dishes = selected_dishes_ref.stream()
# Convert Firestore documents to a list of dictionaries
selected_dishes_list = []
holiday_calendar = holidays.CountryHoliday('IND')
for doc in selected_dishes:
date_weekday = doc.to_dict()['date_added']
date_object = datetime.strptime(date_weekday, "%d-%m-%Y")
day_of_week = date_object.strftime("%A")
holiday_calendar = holidays.CountryHoliday('IND')
if date_object in holiday_calendar:
occasion = holiday_calendar.get(date_object)
else:
occasion = "None"
selected_dishes_list.append({
'DishID': int(doc.to_dict()['id']) + 1,
'Price': doc.to_dict()['selling_price'],
'QuantitySold': doc.to_dict()['quantity'],
'Date': doc.to_dict()['date_added'],
'Vegetarian': doc.to_dict()['isveg'],
'DayOfWeek': day_of_week,
'Occasion': occasion,
})
df_append_foods = pd.DataFrame(selected_dishes_list)
result_df = pd.concat([df, df_append_foods], ignore_index=True)
print(result_df.tail())
result_df.to_csv('food_sales2.csv', index=False)
return jsonify({'selected_dishes': selected_dishes_list}), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route('/get-all-dishes-openai', methods=['GET'])
def get_all_dishes_openai():
try:
# Assuming your Firebase collection is named 'selectedDishes'
selected_dishes_ref = db.collection('selectedDishes')
# Retrieve all documents from the 'selectedDishes' collection
selected_dishes = selected_dishes_ref.stream()
# Convert Firestore documents to a list of dictionaries
selected_dishes_list = []
holiday_calendar = holidays.CountryHoliday('IND')
for doc in selected_dishes:
date_weekday = doc.to_dict()['date_added']
date_object = datetime.strptime(date_weekday, "%d-%m-%Y")
day_of_week = date_object.strftime("%A")
holiday_calendar = holidays.CountryHoliday('IND')
if date_object in holiday_calendar:
occasion = holiday_calendar.get(date_object)
else:
occasion = "None"
selected_dishes_list.append({
'dish_id': int(doc.to_dict()['id']) + 1,
'dish_name': doc.to_dict()['name'],
'sellingPrice': doc.to_dict()['selling_price'],
'quantity': doc.to_dict()['quantity'],
'order_date': doc.to_dict()['date_added'],
'costPrice': doc.to_dict()['cost_price'],
'DayOfWeek': day_of_week,
'Occasion': occasion,
})
df_append_foods = pd.DataFrame(selected_dishes_list)
df_append_foods.to_csv('./past_month_data.csv', index=False)
return jsonify({'selected_dishes': selected_dishes_list}), 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route('/read-fixed-exp/', methods=['GET'])
def read_fixed_exp():
try:
query = db.collection('restaurants')
docs = query.stream()
val = 0
for doc in docs:
if(int(doc.id) > int(val)):
val = doc.id
selected_item = {
'rent': doc.to_dict().get('rent', None),
'employeeSalaries': doc.to_dict().get('employeeSalaries', None),
'utilities': doc.to_dict().get('utilities', None),
'desiredProfitPercentage': doc.to_dict().get('desiredProfitPercentage', None),
'total_exp': doc.to_dict().get('total_exp', None),
'expected_fluctuation': doc.to_dict().get('expected_fluctuation', None)
}
response = selected_item
return response, 200
except Exception as e:
print(e)
return jsonify({'error': str(e)}), 500
@app.route("/dishes/alldishes")
def all_dish():
all_dish_data = []
recipies = db.collection("recipies")
for doc_snapshot in recipies.stream():
doc_data = doc_snapshot.to_dict()
all_dish_data.append(doc_data)
return jsonify({"documents": all_dish_data}), 200
@app.route("/dishes/topdish", methods=['GET', 'POST'])
def top_dish():
df = pd.read_csv("./food_sales2.csv")
df = df.dropna()
all_dish_id = df['DishID'].unique()
df_with_id = df
df_with_id['Vegetarian'] = le.fit_transform(df_with_id['Vegetarian'])
df_with_id['DayOfWeek'] = le.fit_transform(df_with_id['DayOfWeek'])
future_df_for_all_dishes = pd.DataFrame(columns=['DishID', 'Total Quantity Sales'])
next_day_df = pd.DataFrame(columns=['DishID', 'Quantity Sales'])
for i in all_dish_id:
dish_1_data = df_with_id[df_with_id['DishID'] == i]
vegetarian_value = dish_1_data.at[dish_1_data.index[1], 'Vegetarian']
price_value = dish_1_data.at[dish_1_data.index[1], 'Price']
dishID_value = dish_1_data.at[dish_1_data.index[1], 'DishID']
all_transactions = [transaction for date in all_dates for transaction in generate_transactions(date, vegetarian_value, price_value, dishID_value)]
# all_transactions_df = pd.DataFrame([all_transactions])
# future_X = pd.DataFrame(columns=['Date','DishID', 'Vegetarian', 'Price', 'DayOfWeek', 'Occasion'])
future_X = pd.DataFrame(all_transactions, columns=['Date', 'DishID', 'Vegetarian', 'Price', 'DayOfWeek', 'Occasion'])
# future_X = pd.concat([future_X, all_transactions_df], ignore_index=True)
future_X.set_index('Date', inplace=True)
no_of_unique_occasion = future_X['Occasion'].unique()
dish_1_data = dish_1_data[dish_1_data['Occasion'].isin(no_of_unique_occasion)]
future_X = pd.get_dummies(future_X, columns=['Occasion'], prefix='Occasion')
future_X['DayOfWeek'] = le.fit_transform(future_X['DayOfWeek'])
dish_1_data['Date'] = pd.to_datetime(dish_1_data['Date'])
dish_1_data = dish_1_data.sort_values(by='Date')
features = ['DishID', 'Vegetarian', 'Price', 'DayOfWeek', 'Occasion']
target = 'QuantitySold'
df_encoded = pd.get_dummies(dish_1_data[features + [target]])
train_size = int(0.8 * len(df_encoded))
train, test = df_encoded.iloc[:train_size, :], df_encoded.iloc[train_size:, :]
X_train, y_train = train.drop(target, axis=1), train[target]
X_test, y_test = test.drop(target, axis=1), test[target]
model_rf = RandomForestRegressor()
model_rf.fit(X_train, y_train)
y_pred = model_rf.predict(X_test)
future_y_pred = model_rf.predict(future_X)
xgb_model = xgb.XGBRegressor(objective="reg:squarederror", random_state=42)
xgb_model.fit(X_train, y_train)
# XGBoost predictions feed the ensemble alongside the random-forest predictions
y_pred_xgb = xgb_model.predict(X_test)
future_y_pred_xgb = xgb_model.predict(future_X)
ensemble_model = GradientBoostingRegressor(n_estimators=100, random_state=42)
ensemble_train_data = np.column_stack((y_pred_xgb, y_pred))
ensemble_model.fit(ensemble_train_data, y_test)
ensemble_predictions_gbr = ensemble_model.predict(ensemble_train_data)
ensemble_future_data = np.column_stack((future_y_pred_xgb, future_y_pred))
future_y_pred_ensemble_gbr = ensemble_model.predict(ensemble_future_data)
future_results_df_ensemble_gbr = pd.DataFrame({'Predicted': future_y_pred_ensemble_gbr}, index=future_X.index)
future_results_df_ensemble_gbr['Predicted'] = future_results_df_ensemble_gbr['Predicted'].round().astype(int)
row_next_day = future_results_df_ensemble_gbr.loc[next_day_date]
if not row_next_day.empty:
next_day_sales = row_next_day['Predicted']
total_quant = future_results_df_ensemble_gbr["Predicted"].sum()
add_dish_in_total_pred = {"DishID": i, "Total Quantity Sales": total_quant}
add_dish_in_total_pred = pd.DataFrame([add_dish_in_total_pred])
add_dish_in_next_day = {"DishID": i, "Quantity Sales": next_day_sales}
add_dish_in_next_day = pd.DataFrame([add_dish_in_next_day])
# future_df_for_all_dishes = future_df_for_all_dishes.append(add_dish_in_total_pred, ignore_index=True)
future_df_for_all_dishes = pd.concat([future_df_for_all_dishes, add_dish_in_total_pred], ignore_index=True)
# next_day_df = next_day_df.append(add_dish_in_next_day, ignore_index=True)
next_day_df = pd.concat([next_day_df, add_dish_in_next_day], ignore_index=True)
json_data_future_df_for_all_dishes = future_df_for_all_dishes.to_json(orient='records')
json_data_next_day_df = next_day_df.to_json(orient='records')
return {"document1": json_data_future_df_for_all_dishes, "document2": json_data_next_day_df}
@app.route('/chart', methods=['GET', 'POST'])
def chart_predict():
try:
# Read dataset from a CSV file
dataset_path = './../src/static/agmarket_dataset.csv'
dataset = pd.read_csv(dataset_path)
# Retrieve data from the request (commodity, district, market, and training data)
data = request.get_json()
print(data)
# Get input data from the frontend
Commodity = int(data['commodity'])
start_day = int(data['start_day'])
start_month = int(data['start_month'])
start_year = int(data['start_year'])
end_day = int(data['end_day'])
end_month = int(data['end_month'])
end_year = int(data['end_year'])
state = int(data.get('state')) # Default value is 1, update as needed
district = int(data.get('district')) # Default value is 17, update as needed
market = int(data.get('market')) # Default value is 109, update as needed
# Create a start date and end date object
start_date = datetime(start_year, start_month, start_day)
end_date = datetime(end_year, end_month, end_day)
state_name = state
district_name = district
market_center_name = market
# Initialize an empty list to store predictions
predictions = []
# Loop through the date range and make predictions
while start_date <= end_date:
# Extract relevant features from the current date
Commodity = Commodity
day = start_date.day
month = start_date.month
year = start_date.year
# Filter the training data based on the selected commodity, district, and market
selected_data = dataset[(dataset['Commodity'] == Commodity) &
(dataset['District'] == district_name) &
(dataset['Market'] == market_center_name)]
# Check if there is data to train the models
if selected_data.empty:
return jsonify({'error': 'No data available for the specified conditions'})
# Feature selection
selected_features = selected_data[['Day', 'Month', 'Year']]
target = selected_data[['MODAL', 'MIN', 'MAX']]
# Train Random Forest model
rf_model = RandomForestRegressor()
rf_model.fit(selected_features, target)
# Train XGBoost model
xgb_reg = XGBRegressor(random_state=42)
xgb_reg.fit(selected_features, target)
# Save the trained models (you might want to use a more robust serialization method)
joblib.dump(rf_model, 'rf_model.joblib')
joblib.dump(xgb_reg, 'xgb_model.joblib')
# Perform predictions using your model
feature_values = [day, month, year]
prediction_rf = rf_model.predict([feature_values])
prediction_xgb = xgb_reg.predict([feature_values])
# Append the prediction to the list
predictions.append({
'date': start_date.strftime('%d-%m-%Y'),
'modal': (prediction_rf[0][0] + prediction_xgb[0][0]) / 2,
'min': (prediction_rf[0][1] + prediction_xgb[0][1]) / 2,
'max': (prediction_rf[0][2] + prediction_xgb[0][2]) / 2
})
# Increment the date by one day
start_date += timedelta(days=1)
# Construct the response with predictions
response = {'predictions': predictions}
return jsonify(response)
except Exception as e:
# Handle exceptions
error_response = {
'error_message': str(e)
}
return jsonify(error_response), 400
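# Illustrative request body for the /chart endpoint above (values are made up;
# the field names mirror exactly what chart_predict() reads from request.get_json()):
#
#   POST /chart
#   {
#     "commodity": 1,
#     "start_day": 1, "start_month": 1, "start_year": 2023,
#     "end_day": 7,   "end_month": 1,   "end_year": 2023,
#     "state": 1, "district": 1, "market": 1
#   }
#
# The response is {"predictions": [{"date": "01-01-2023", "modal": ..., "min": ..., "max": ...}, ...]},
# one entry per day in the requested range.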
@app.route('/predict', methods=['GET', 'POST'])
def predict_price():
try:
# Read dataset from a CSV file
dataset_path = './../src/static/agmarket_dataset.csv'
dataset = pd.read_csv(dataset_path)
print(dataset)
# Retrieve data from the request (commodity, district, market, and training data)
data = request.get_json()
commodity = int(data['Commodity'])
district = int(data['district_name'])
market = int(data['market_center_name'])
day = int(data['day'])
month = int(data['month'])
year = int(data['year'])
# training_data = pd.DataFrame(data['training_data'])
# Filter the training data based on the selected commodity, district, and market
selected_data = dataset[(dataset['Commodity'] == int(commodity)) &
(dataset['District'] == int(district)) &
(dataset['Market'] == int(market))]
print("Unique Commodity values in the dataset:", dataset['Commodity'].unique())
print("Selected Commodity value:", commodity)
print(selected_data)
# Check if there is data to train the models
if selected_data.empty:
return jsonify({'error': 'No data available for the specified conditions'})
# Feature selection
selected_features = selected_data[[ 'Day', 'Month', 'Year']]
target = selected_data[['MODAL','MIN', 'MAX']]
# Train Random Forest model
rf_model = RandomForestRegressor()
rf_model.fit(selected_features, target)
# Train XGBoost model
xgb_reg = XGBRegressor(random_state=42)
xgb_reg.fit(selected_features, target)
# Save the trained models (you might want to use a more robust serialization method)
joblib.dump(rf_model, 'rf_model.joblib')
joblib.dump(xgb_reg, 'xgb_model.joblib')
# feature_values = [Commodity, state_name, district_name, market_center_name,Variety, group_name, Arrival, day, month, year]
input_data = pd.DataFrame({'Day': day, 'Month': month, 'Year': year} , index=[0])
rf_prediction = rf_model.predict(input_data)
print(rf_prediction)
xgb_prediction = xgb_reg.predict(input_data)
print(xgb_prediction)
# Construct the response with the prediction result
response = {
'modal': (rf_prediction[0][0] + xgb_prediction[0][0]) / 2,
'min': (rf_prediction[0][1] + xgb_prediction[0][1]) / 2,
'max': (rf_prediction[0][2] + xgb_prediction[0][2]) / 2,
}
return jsonify(response)
except Exception as e:
# Handle exceptions
error_response = {
'error_message': str(e)
}
return jsonify(error_response), 400
@app.route('/notifs', methods=['GET', 'POST'])
def notifs_predict():
try:
# Read dataset from a CSV file
dataset_path = './../src/static/agmarket_dataset.csv'
dataset = pd.read_csv(dataset_path)
print(dataset)
# Calculate the end date as 10 days from the current date
current_date = datetime.now().date()
end_date = current_date + timedelta(days=3)
# Initialize an empty list to store predictions
predictions = []
# Loop through commodities 1 to 3
for commodity in range(1, 4): # Assumes 1 is tomatoes, 2 is onions, 3 is potatoes
commodity_predictions = []
# Reset the current_date for each commodity
current_date = datetime.now().date()
# Loop through the date range and make predictions
while current_date <= end_date:
# Extract relevant features from the current date
# Modify these as needed to match your dataset
state_name = 1
district_name = 1
market_center_name = 1
day = current_date.day
month = current_date.month
year = current_date.year
# Filter the training data based on the selected commodity, district, and market
selected_data = dataset[(dataset['Commodity'] == int(commodity)) &
(dataset['District'] == int(district_name)) &
(dataset['Market'] == int(market_center_name))]
# Perform predictions using your model
feature_values = [day, month, year]
print(selected_data)
# Check if there is data to train the models
if selected_data.empty:
return jsonify({'error': 'No data available for the specified conditions'})
# Feature selection
selected_features = selected_data[[ 'Day', 'Month', 'Year']]
target = selected_data[['MODAL','MIN', 'MAX']]
# Train Random Forest model
rf_model = RandomForestRegressor()
rf_model.fit(selected_features, target)
# Train XGBoost model
xgb_reg = XGBRegressor(random_state=42)
xgb_reg.fit(selected_features, target)
# Save the trained models (you might want to use a more robust serialization method)
joblib.dump(rf_model, 'rf_model.joblib')
joblib.dump(xgb_reg, 'xgb_model.joblib')
prediction_rf = rf_model.predict([feature_values])
prediction_xgb = xgb_reg.predict([feature_values])
# Store the prediction in the dictionary
commodity_predictions.append ({
'date': current_date.strftime('%d-%m-%Y'),
'modal': (prediction_rf[0][0] + prediction_xgb[0][0]) / 2,
'min': (prediction_rf[0][1] + prediction_xgb[0][1]) / 2,
'max': (prediction_rf[0][2] + prediction_xgb[0][2]) / 2,
'commodity': commodity
})
print(commodity_predictions)
# # Append the prediction to the list
# commodity_predictions.append({
# 'date': current_date.strftime('%d-%m-%Y'),
# 'modal': prediction[0][0],
# 'min': prediction[0][1],
# 'max': prediction[0][2],
# 'commodity': commodity
# })
# Increment the date by one day
current_date += timedelta(days=1)
# Append the commodity predictions to the all_predictions list
predictions.extend(commodity_predictions)
# Construct the response with all predictions
response = {'predictions': predictions}
return jsonify(response)
except Exception as e:
# Handle exceptions
error_response = {
'error_message': str(e)
}
return jsonify(error_response), 400
@app.route('/today', methods=['GET','POST'])
def today_price():
try:
current_date = datetime.now().date()
# Read dataset from a CSV file
dataset_path = './../src/static/agmarket_dataset.csv'
dataset = pd.read_csv(dataset_path)
print(dataset)
commodities = {
'Tomato': 1,
'Potato': 2,
'Onion': 3,
}
# Initialize an empty dictionary to store responses
predictions = {}
# state_name = 1
# district_name = 1
# market_center_name = 1
day = current_date.day
month = current_date.month
year = current_date.year
for commodity, commodity_value in commodities.items():
selected_data = dataset[(dataset['Commodity'] == commodity_value) &
(dataset['District'] == 1) &
(dataset['Market'] == 1)]
print(selected_data)
if not selected_data.empty:
# Feature selection
selected_features = selected_data[['Day', 'Month', 'Year']]
target = selected_data[['MODAL', 'MIN', 'MAX']]
# Train Random Forest model
rf_model = RandomForestRegressor()
rf_model.fit(selected_features, target)
# Train XGBoost model
xgb_reg = XGBRegressor(random_state=42)
xgb_reg.fit(selected_features, target)
# Save the trained models (you might want to use a more robust serialization method)
joblib.dump(rf_model, f'rf_model_{commodity}.joblib')
joblib.dump(xgb_reg, f'xgb_model_{commodity}.joblib')
# feature_values = [commodity_value, state_name, district_name, market_center_name, Variety, group_name, Arrival, day, month, year]
input_data = pd.DataFrame({'Day': [day], 'Month': [month], 'Year': [year]}, index=[0])
rf_prediction = rf_model.predict(input_data)
print(rf_prediction)
xgb_prediction = xgb_reg.predict(input_data)
print(xgb_prediction)
# Construct the response with the prediction result
predictions[commodity] = {
'modal': (rf_prediction[0][0] + xgb_prediction[0][0]) / 2,
'min': (rf_prediction[0][1] + xgb_prediction[0][1]) / 2,
'max': (rf_prediction[0][2] + xgb_prediction[0][2]) / 2,
}
# Return predictions for all commodities
return jsonify(predictions)
except Exception as e:
# Handle exceptions, e.g., invalid input data
error_response = {
'error_message': str(e)
}
return jsonify(error_response), 400
@app.route('/compare', methods=['POST'])
def compare_price():
try:
# Read dataset from a CSV file
dataset_path = './../src/static/agmarket_dataset.csv'
dataset = pd.read_csv(dataset_path)
data = request.get_json()
print(data)
# Extract parameters from the request
district_name = int(data['district'])
day = int(data['day'])
month = int(data['month'])
year = int(data['year'])
market_values = data['markets']
# Sample commodities
commodities = {
'Onion': 2,
'Tomato': 1,
'Potato': 3
}
# Initialize an empty dictionary to store responses
predictions = {}
for commodity, commodity_value in commodities.items():
# Assuming your model features are in the following order
feature_values = [day, month, year]
# Initialize a sub-dictionary for the current commodity
predictions[commodity] = {}
# Loop through market values and make predictions
for market_value in market_values:
# Update market center name for each iteration
# feature_values[3] = market_value
# Filter the training data based on the selected commodity, district, and market
selected_data = dataset[(dataset['Commodity'] == int(commodity_value)) &
(dataset['District'] == int(district_name)) &
(dataset['Market'] == int(market_value))]
print(commodity_value)
print(selected_data)
# Check if there is data to train the models
if selected_data.empty:
predictions[commodity][market_value] = {'error': 'No data available for the specified conditions'}
else:
# Feature selection
selected_features = selected_data[['Day', 'Month', 'Year']]
target = selected_data[['MODAL', 'MIN', 'MAX']]
# Train Random Forest model
rf_model = RandomForestRegressor()
rf_model.fit(selected_features, target)
# Train XGBoost model
xgb_reg = XGBRegressor(random_state=42)
xgb_reg.fit(selected_features, target)
# Save the trained models (you might want to use a more robust serialization method)
joblib.dump(rf_model, f'rf_model_{commodity}_{market_value}.joblib')
joblib.dump(xgb_reg, f'xgb_model_{commodity}_{market_value}.joblib')
# Perform predictions using your model
prediction_rf = rf_model.predict([feature_values])
prediction_xgb = xgb_reg.predict([feature_values])
# Store the prediction in the dictionary
predictions[commodity][market_value] = {
'modal': (prediction_rf[0][0] + prediction_xgb[0][0]) / 2,
'min': (prediction_rf[0][1] + prediction_xgb[0][1]) / 2,
'max': (prediction_rf[0][2] + prediction_xgb[0][2]) / 2,
}
print(predictions)
return jsonify(predictions)
except Exception as e:
# Handle exceptions, e.g., invalid input data
error_response = {
'error_message': str(e)
}
return jsonify(error_response), 400
@app.route("/openai", methods = ['GET', 'POST'])
def openai():
df = pd.read_csv('./past_month_data.csv')
llm = OpenAI(
api_token="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",
temperature=0.7
)
sdf = SmartDataframe(df,config={"llm":llm,"enforce_privacy":True})
result = sdf.chat('suggest some coupons / offers for all dish such that my sellingPrice should not go below the costPrice in a sentence and append it in dataset')
print(result)
no_of_unique_dish = df["dish_name"].nunique()
top_5_rows = result.head(no_of_unique_dish)
coupons = top_5_rows[["dish_name", "offer"]]
coupon_json_data = coupons.to_json(orient='records')
print(coupon_json_data)
return coupon_json_data, 200
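# Editorial note: SmartDataframe.chat() may return a plain string, a number or a
# DataFrame depending on how the model answers, while the code above assumes a
# DataFrame with "dish_name" and "offer" columns. A defensive sketch (the column
# names are the same assumption the original code already makes):
#
#   if not isinstance(result, pd.DataFrame) or "offer" not in result.columns:
#       return jsonify({"error": "unexpected response from pandasai"}), 500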
if __name__ == "__main__":
app.run(debug=True) | [
"suggest some coupons / offers for all dish such that my sellingPrice should not go below the costPrice in a sentence and append it in dataset"
] |
2024-01-10 | jtisbell4/e2e-llms-on-databricks | my_llm.py | from langchain.chains import ConversationChain, LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
DEFAULT_TEMPLATE = """
Current conversation:
{history}
Human: {input}
AI:"""
def get_llm_chain(llm) -> LLMChain:
prompt = PromptTemplate(
input_variables=["history", "input"], template=DEFAULT_TEMPLATE
)
return ConversationChain(
llm=llm, verbose=False, prompt=prompt, memory=ConversationBufferMemory()
)
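# Illustrative usage sketch (editorial addition, not part of the original module).
# The Databricks endpoint name is a made-up example; any LangChain LLM object
# works, since get_llm_chain only wires it into a ConversationChain with memory.
def _example_conversation():
    from langchain.llms import Databricks  # assumed choice of LLM, not prescribed by this file

    llm = Databricks(endpoint_name="my-llm-endpoint")  # hypothetical endpoint name
    chain = get_llm_chain(llm)
    print(chain.predict(input="Hello, who are you?"))
    # ConversationBufferMemory feeds the first exchange back in via {history}
    print(chain.predict(input="What did I just ask you?"))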
| [
"\n Current conversation:\n {history}\n Human: {input}\n AI:",
"input"
] |
2024-01-10 | hhai2105/project-semantic | backend~lib~app_runner.py | import dash
from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import openai
import numpy as np
import os
def run_app(name, port, openai_key_path="openai_key.txt", data_dir="data"):
save_dir = os.path.join(data_dir, name)
embeddings = np.load(os.path.join(save_dir, "embeddings.npy"))
links = np.load(os.path.join(save_dir, "links.npy"))
documents = np.load(os.path.join(save_dir, "documents.npy"))
img_files = np.load(os.path.join(save_dir, "img_files.npy"))
with open(openai_key_path, "r") as f:
openai.api_key = f.read().strip()
def get_embeddings(texts, type, model="ada"):
results = openai.Embedding.create(input=texts, engine=f"text-search-{model}-{type}-001")['data']
return np.array(list(map(lambda x: x['embedding'], results)))
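# Editorial note: the ranking further below scores slides with a raw dot product,
# np.dot(embeddings, query_embedding), which equals cosine similarity only when the
# stored vectors and the query vector are unit-length (OpenAI search embeddings are
# approximately, but not guaranteed to be, normalised). A defensive variant would
# normalise explicitly, e.g.:
#
#     sims = (embeddings @ query_embedding) / (
#         np.linalg.norm(embeddings, axis=1) * np.linalg.norm(query_embedding))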
save_dir = os.path.join(data_dir, name)
debug = True
theme = "https://stackpath.bootstrapcdn.com/bootswatch/4.5.2/materia/bootstrap.min.css"
app = dash.Dash(__name__, suppress_callback_exceptions=True, external_stylesheets=[theme], assets_folder=os.path.join(save_dir, "imgs"))
app.layout = html.Div([
dcc.Store(id="session", storage_type='local'),
dcc.Location(id='url', refresh=False),
# Navbar
dbc.NavbarSimple(id="navbar", brand=f"{name} Semantic Search", brand_href="/todo", color="primary", className="mb-3", dark=True),
html.Meta(name="viewport", content="width=device-width, initial-scale=1"),
# search bar
dbc.Row([
dbc.Col(),
dbc.Col(
dbc.Input(id="search-input", type="text", placeholder="Search for a question", debounce=True, className="mb-3", maxLength=500),
width=4,
),
dbc.Col(
dbc.Button("Search", id="search-button", color="primary", className="mb-3"),
width=1
),
dbc.Col(),
]),
# results - headings with associated png images
dbc.Row([
dbc.Col(),
dbc.Col([
dcc.Loading(id="search-results"),
# button to show more results
# dbc.Button("Show more results", id="show-more-results", color="primary", className="mb-3", style={"display": "none"}),
], width=5, style={"padding-top": "20px"}),
dbc.Col(),
])
])
@app.callback(
Output("search-results", "children"),
Input("search-button", "n_clicks"),
Input("search-input", "value")
)
def search_results(n_clicks, query):
if query is None:
return dash.no_update
top_k = 25
query_embedding = get_embeddings([query], "query", model="curie")[0]
# store the query embedding in the session
# session = dcc.Store.get("session")
# session["query_embedding"] = query_embedding
# dcc.Store.set("session", session)
# Find the embedding that maximizes the cosine similarity
similarity = np.dot(embeddings, query_embedding)
# Find the index of the top k most similar embeddings
top_k_indices = np.argsort(similarity)[-top_k:]
top_k_indices = top_k_indices[::-1]
children = []
for i in top_k_indices:
link = links[i] # url to the webpage
img_file = img_files[i] # path to the image
# img_file_path = os.path.join(save_dir, "imgs", img_file) # path to the image
# example link https://people.cs.umass.edu/~mcgregor/514S22/lecture1.pdf#page=51
lecture_number = link.split("/")[-1].split(".")[0].split("lecture")[-1]
page_number = link.split("#page=")[-1]
heading_text = "Lecture %s, slide %s - %d%% match" % (lecture_number, page_number, round(similarity[i] * 100))
children.append(
html.Div([
# make the link clickable
# and open in new tab
html.A(
# the text of the link
html.H5(heading_text),
# the url
href=link,
# open in new tab
target="_blank",
),
# make the image larger
html.Img(src=app.get_asset_url(img_file), style={"width": "70%"}),
])
)
return children
app.run_server(debug=debug, host="0.0.0.0", port=port) | [] |
2024-01-10 | nagolinc/AnimeBuilder | worldObject.py | import re
import random
import openai
class WorldObject:
def __init__(
self,
templates,
textGenerator,
objectName,
objects=None,
cfg=None,
customTemplate=None,
verbose=False):
if cfg is None:
cfg = {
"genTextAmount_min": 15,
"genTextAmount_max": 30,
"no_repeat_ngram_size": 8,
"repetition_penalty": 2.0,
"MIN_ABC": 4,
"num_beams": 8,
"temperature": 1.0,
"MAX_DEPTH": 5
}
self.cfg = cfg
self.textGenerator = textGenerator
self.verbose = verbose
self.objectName = objectName
self.templates = templates
if objects is None:
self.objects = {}
else:
self.objects = objects
# need to intialize this here because of *ugh*
self.object = {}
if customTemplate is not None and len(customTemplate.strip()) > 0:
self.template = customTemplate
else:
self.template = templates[objectName]
self.orig_template = self.template
self.filledTemplate = self.fillTemplate(self.template)
self.hiddenStates = None
if self.verbose:
print("GOT FILLED TEMPLATE", self.objectName,
"\n\n", self.filledTemplate, "\n\n")
self.object = self.parseTemplate(self.filledTemplate)
def fillTemplate_GPT3(self, sceneDescription, messages=[]):
templatePrompt = """
Template Genie is a system that outputs text to fill in a template
This system always returns a single line of text, which best fills in the area
indicated by the symbol <TEXT> in the template
The user may give multiple example templates, but final template always contains
the <TEXT> symbol which must be filled in
The system NEVER returns more than one line of text,
and the text consists only of the standard alphanumeric symbols 0-9a-z and the punctuation .,?!
For example:
user:
Fill in the following template
A list of names
description:
Jack
description:
Jill
Description:
<TEXT>
assistant:
Eve
The system NEVER refers to itself as an AI
the system NEVER refuses to produce a screenplay,
the system NEVER does anything other than produce a screenplay matching the user's description
the system NEVER refers to itself as "the system"
the system NEVER says it cannot generate a response
the system NEVER uses ""s ()'s {}'s []'s or nonstandard punctuation
"""
messages = [
{"role": "system", "content": templatePrompt},
] + messages + [
{"role": "user", "content": "output text that should replace the <TEXT> symbol"},
{"role": "user", "content": sceneDescription},
]
#print("GPT3 INPUT", messages)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
result = ''
for choice in response.choices:
result += choice.message.content
return result
def fillTemplateValid(self, sceneDescription, messages=[]):
for i in range(3):
result = self.fillTemplate_GPT3(sceneDescription, messages=[])
# must be one line
if len(result.split("\n")) == 1:
return result
raise Exception(sceneDescription)
def gpt3GenerateText_turbo(self, textInput):
input = textInput+"<TEXT>"
result = self.fillTemplateValid(input)
print("FOO\n'"+input+"'\nREFOO\n'"+result+"'\nDEFOO")
return result
'''
#call gpt3 api
MODEL = "gpt-3.5-turbo"
response = openai.ChatCompletion.create(
model=MODEL,
messages=[
{"role": "system", "content": "This system automatically completes text templates in the most logical way possible"},
{"role": "user", "content": "Please complete the following template"},
{"role": "user", "content": textInput}
],
temperature=self.cfg["temperature"],
max_tokens=self.cfg["genTextAmount_max"]
)
#return response['choices'][0]['content']
result = ''
for choice in response.choices:
result += choice.message.content
return result
'''
def gpt3GenerateText(self, textInput):
if self.verbose:
print("GPT3 INPUT", textInput)
# print("about to die",len(textInput))
# call gpt3 api
completion = openai.Completion.create(
# engine="text-davinci-003",
#engine="text-curie-001",
engine="gpt-3.5-turbo-instruct",
prompt=textInput,
stop="\n",
max_tokens=self.cfg["genTextAmount_max"],
frequency_penalty=self.cfg["repetition_penalty"],
presence_penalty=self.cfg["repetition_penalty"],
timeout=10
)['choices'][0]['text']
if self.verbose:
print("GPT3 OUTPUT", completion)
return completion
def generateTextWithInput(self, textInput, depth=0):
# make sure pipelien is in cuda
# if not self.textGenerator["name"].startswith("GPT3"):
# self.textGenerator['pipeline'].model = self.textGenerator['pipeline'].model.cuda()
if depth > self.cfg["MAX_DEPTH"]:
return "error"
trailingSpace = ""
if self.textGenerator["name"] == "GPT3":
# remove trailing space
if textInput[-1] == " ":
textInput = textInput[:-1]
trailingSpace = " "
result = self.gpt3GenerateText(textInput)
lines = result.strip().split("\n")
elif self.textGenerator["name"] == "GPT3-turbo":
result = self.gpt3GenerateText_turbo(textInput)
lines = result.strip().split("\n")
else:
input_ids = self.textGenerator['tokenizer'](
textInput, return_tensors="pt").input_ids
amt = input_ids.shape[1]
result = self.textGenerator['pipeline'](
textInput,
do_sample=True,
min_length=amt+self.cfg["genTextAmount_min"],
max_length=amt+self.cfg["genTextAmount_max"],
pad_token_id=50256,
return_full_text=False,
no_repeat_ngram_size=self.cfg["no_repeat_ngram_size"],
repetition_penalty=self.cfg["repetition_penalty"],
num_beams=self.cfg["num_beams"],
temperature=self.cfg["temperature"]
)
lines = result[0]['generated_text'].strip().split("\n")
# remove len()==0 lines
lines = [line.strip() for line in lines if len(line.strip()) > 0]
# make sure we have at least some output
if len(lines) == 0:
if self.verbose:
print('no response', result, textInput)
return self.generateTextWithInput(textInput, depth=depth+1)
rv = lines[0]
# remove non-ascii
rv = rv.encode("ascii", errors="ignore").decode()
if rv[:3] == "ick":
print(textInput, result, rv)
assert False
# remove trailing ":"s
if rv[-1] == ":":
if self.verbose:
print('trailing :', result)
return self.generateTextWithInput(textInput, depth=depth+1)
# ":"s should actually just never appear
if ":" in rv:
if self.verbose:
print(': present', result)
return self.generateTextWithInput(textInput, depth=depth+1)
# anything that's all punctuation is also bad
# rva = re.sub(r'\W+', '', rv)
rva = re.sub(r'[^a-zA-Z]+', '', rv)
if len(rva) < self.cfg["MIN_ABC"]:
if self.verbose:
print('non alphanumeric', result, self.cfg["MIN_ABC"])
return self.generateTextWithInput(textInput, depth=depth+1)
return rv+trailingSpace
def fillTemplate(self, template):
t = 0
output = ""
thisMatch = re.search("{[^}]*}", template)
while thisMatch:
start, end = thisMatch.span()
obj_and_prop = template[t+start+1:t+end-1]
output += template[t:t+start]
gotProp = self.getObjwithProp(obj_and_prop, output)
output += str(gotProp)
if self.verbose == 3:
print("MATCH", thisMatch, gotProp)
t = t+end
thisMatch = re.search("{[^}]*}", template[t:])
output += template[t:]
return output
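# Editorial example of how fillTemplate expands {objectName.property} tags (the
# template text and names below are made up, not one of the shipped templates):
#
#   template: "{hero.name} draws a sword.\ndescription:\n{TEXT}"
#   step 1: "{hero.name}" -> getObjwithProp builds (or reuses) a WorldObject for
#           "hero" and substitutes its "name" property into the output
#   step 2: "{TEXT}" -> the text generator is asked to continue everything
#           produced so far, and the completion replaces the tag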
def parseTemplate(self, template):
# clean up whitespace
template = "\n".join([line.strip() for line in template.split("\n")])
objects = template.split("\n\n")
# trim blank lines from objects
objects = ["\n".join([line for line in o.split(
"\n") if len(line) > 0]) for o in objects]
if self.verbose:
print(objects)
def countABC(s):
sa = re.sub(r'[^a-zA-Z]+', '', s)
return len(sa)
startIndex = None
for i, o in enumerate(objects):
for line in o.split("\n"):
if line == "#":
startIndex = i+1
break
if self.verbose:
print("start index", startIndex)
objects = objects[startIndex:]
# remove empty objects
objects = [o for o in objects if len(o) > 0]
# remove comments
objects = [o for o in objects if not o.startswith("#")]
if startIndex is None:
thisObject = objects[-1] # by default choose last object
else:
thisObject = random.choice(objects)
self.chosenObject = thisObject
output = {}
propName = "NONE"
for i, line in enumerate(thisObject.split("\n")):
line = line.strip()
# print(i,line)
if line.endswith(":"):
# print("here0")
propName = line[:-1]
else:
# print("here1, propName=",propName)
if propName != "NONE" and len(line) > 0:
if propName in output:
output[propName] += "\n"+line
else:
output[propName] = line
# check for #NOREP pattern
orig_template = self.orig_template
if "#NOREP\n" in orig_template:
lastObject = objects[-1]
i = orig_template.index("#NOREP\n")+len("#NOREP\n")
new_template = orig_template[:i]+"\n\n" + \
lastObject+"\n\n"+orig_template[i:]
# get rid of excess newlines
new_template = re.sub("\n\n+\n", "\n\n", new_template)
self.templates[self.objectName] = new_template
# print("orig template is",new_template)
# print("new template is",new_template)
# I NEED some logic here to prevent the template from growing forever
maximumTemplateSize = 1024
e = 0
while len(new_template) > maximumTemplateSize:
e += 1
print("TRIMMING TEMPLATE", self.objectName)
# get the part after #NOREP
i = new_template.index("#NOREP\n")+len("#NOREP\n")
templatebeginning = new_template[:i]
templateend = new_template[i:]
if e > 3:
print("ERROR: template too big", self.objectName)
print(new_template, "===", templatebeginning,
"===", templateend, "===", objects)
break
# split templateend into objects on ("\n\n")
objects = templateend.split("\n\n")
# remove a random object
objects.pop(random.randint(0, len(objects)-2))
# rejoin objects
new_template = templatebeginning+"\n\n".join(objects)
# get rid of excess newlines
new_template = re.sub("\n\n+\n", "\n\n", new_template)
self.templates[self.objectName] = new_template
return output
def getObjwithProp(self, obj_and_prop, output):
overrides = None
objType = None
# handle ":"s
if ":" in obj_and_prop:
obj_and_prop, objType, overrides = obj_and_prop.split(":")
# handle "."s
propName = None
if "." in obj_and_prop:
objectName, propName = obj_and_prop.split(".")
else:
objectName = obj_and_prop
if self.verbose == 2:
print("checking for object", objectName, "in", self.objects)
# handle saved objects
if objectName in self.objects:
thisObject = self.objects[objectName]
if self.verbose == 2:
print("about to die, looking for property",
propName, "in", objectName, "=", thisObject)
if propName is not None:
return thisObject.getProperty(propName)
else:
return thisObject
# handle type text
if objType == "TEXT" or obj_and_prop == "TEXT":
if self.verbose == 2:
print("generating text", objType,
obj_and_prop, "with template", output)
if not self.textGenerator["name"].startswith("GPT3"):
output = output.strip() # remove trailing " "s
# output = self.generateTextWithInput(output)
text = self.generateTextWithInput(output)
if objectName != "TEXT" and propName is None:
if self.verbose:
print("storing text", objectName, text)
self.objects[objectName] = text
return text
else:
if self.verbose:
print("got prop", objectName, propName, objType, overrides)
thisObject = self.getObject(objectName, objType, overrides)
if propName is not None:
return thisObject.getProperty(propName)
else:
return thisObject
def getObject(self, objectName, objType, overrides=None):
if objectName in self.objects:
return self.objects[objectName]
else:
# handle overrides
objects = None
if overrides:
# parse overrides "a=b,c=d,..."
objects = {}
for override in overrides.split(","):
k, v = override.split("=")
gotV = None
if "." in v:
i = v.index(".")
v0 = v[:i]
v1 = v[i+1:]
gotV = self.objects[v0].getProperty(v1)
else:
if v in self.objects:
gotV = self.objects[v]
if gotV:
objects[k] = gotV
else:
print("this should never happen!", v, self.objects)
# remove trailing digits
if objType is None:
objType = re.sub(r'\d+$', '', objectName)
# generate object
thisObject = WorldObject(self.templates, self.textGenerator, objType, objects=objects,
cfg=self.cfg,
verbose=self.verbose)
# store for future use
if self.verbose:
print("storing object", objectName, thisObject)
self.objects[objectName] = thisObject
return self.objects[objectName]
def has(self, propName):
if propName in self.objects:
return True
if propName in self.object:
return True
return False
def getProperty(self, propName):
# todo, handle multiple "."s
if "." in propName:
i = propName.index(".")
v0 = propName[:i]
v1 = propName[i+1:]
if self.verbose == 3:
print("getting sub-property", v0, v1)
return self.getProperty(v0).getProperty(v1)
if self.verbose == 3:
print("getting property", propName, "from object", self.object)
if propName in self.objects:
return self.objects[propName]
if propName in self.object:
return self.object[propName]
print("error in", self.__repr__(), "\nmissing property:", propName)
raise ValueError("property not found!")
def __repr__(self):
'''s = self.filledTemplate.split("\n\n")
# remove empty lines
v = ["\n".join([line for line in lines.split(
"\n") if len(line.strip()) > 0]) for lines in s]
v = [x for x in v if len(x) > 0]
r = v[-1]
return "<world object:%s>\n" % self.objectName+r'''
return "<world object:%s>\n" % self.objectName+self.chosenObject
def __str__(self):
# try:
if self.has("description"):
return str(self.getProperty("description")).strip()
# except:
else:
return self.__repr__()
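# Illustrative usage sketch (editorial addition; "character" and the property
# names are assumptions about what the templates dict provides):
#
#   hero = WorldObject(templates, textGenerator, "character")
#   print(str(hero))                 # the object's "description" property, if it has one
#   name = hero.getProperty("name")  # raises ValueError if the chosen template lacks "name"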
class ListObject:
def __init__(
self,
templates,
textGenerator,
objectName,
n=3,
thisList=None,
uniqueKey=None,
objects=None,
cfg=None,
verbose=False
):
self.objectName = objectName
self.n = n
uniqueKeys = set()
if thisList is not None:
self.thisList = thisList
else:
self.thisList = []
# build up list if not provided
while len(self.thisList) < n:
newObject = WorldObject(
templates,
textGenerator,
objectName,
objects=objects,
cfg=cfg,
verbose=verbose)
if uniqueKey is None:
self.thisList += [newObject]
else:
thisKey = str(newObject.getProperty(uniqueKey))
if thisKey not in uniqueKeys:
    uniqueKeys.add(thisKey)
    self.thisList += [newObject]
# list for random access
self.randomOrder = list(range(self.n))
random.shuffle(self.randomOrder)
def getProperty(self, propName):
# item
if propName.startswith("ITEM"):
whichItem = int(propName[4:])
return self.thisList[whichItem]
if propName == "RANDOM":
return self.thisList[self.randomOrder[0]]
| [
"\n\n",
"\n",
"\n\nTemplate Genie is a system that outputs text to fill in a template\n\nThis system always returns a single line of text, which best fills in the area\n indicated by the symbol <TEXT> in the template\n \nThe user may give multiple example templates, but final template always contains\nthe <TEXT> symbol which must be filled in\n \nThe system NEVER returns more than one line of text,\n and the text consists only of the standard alphanumeric symbols 0-9a-z and the punctuation .,?!\n\n\nFor example:\n\nuser:\n\nFill in the following template\n\nA list of names\n\ndescription:\nJack\n\ndescription:\nJill\n\nDescription:\n<TEXT>\n\nassistant:\n\nEve\n\n\nThe system NEVER refers to itself as an AI \nthe system NEVER refuses to produce a screenplay, \nthe system NEVER does anything other than produce a screenplay matching the user's description\nthe system NEVER refers to itself as \"the system\"\nthe system NEVER says it cannot generate a response\n\nthe system NEVER uses \"\"s ()'s {}'s []'s or nonstandard punctuation\n\n",
"1024",
"\n\n+\n",
"output text that should replace the <TEXT> symbol"
] |
2024-01-10 | nagolinc/AnimeBuilder | animeCreator.py | from generationFunctions import GenerationFunctions
from animdiffwrapper import generateGif
from load_llama_model import getllama
import builtins
import contextlib
from text_to_phonemes import processAudio
import sys
sys.path.append(r'.\AAAI22-one-shot-talking-face')
from test_script import test_with_input_audio_and_image, parse_phoneme_file
from exampleScenes import exampleScenesPrompt, exampleScenesResult
from exampleChapters import examplechapterPrompt, exampleChapterResults
from example_screenplay import exampleScreenplayPrompt, exampleScreenplayResult
import datetime
import uuid
import logging
# from riffusion import get_music
# import riffusion
from worldObject import WorldObject, ListObject
from templates import templates
from mubert import generate_track_by_prompt
import IPython.display as ipd
from fairseq.models.text_to_speech.hub_interface import TTSHubInterface
from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
import pits.app as pits
import traceback
from diffusers.models import AutoencoderKL
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler, DiffusionPipeline
from diffusers import StableDiffusionXLPipeline, AutoencoderTiny, StableDiffusionXLImg2ImgPipeline
import time
from torch import autocast
import ipywidgets as widgets
from ipywidgets import Audio # no good, doesn't stop when clear display
import numpy
import numpy as np
from io import BytesIO
from pydub import AudioSegment
import urllib
from PIL import Image, ImageFilter
import random
import torch
import gc
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import pipeline
import re
import os
import openai
from tenacity import retry, wait_exponential, wait_combine, stop_after_attempt, after_log, before_sleep_log
from diffusers import AudioLDMPipeline
from example_classifications import example_classifications
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import tomesd
# from cldm.model import create_model, load_state_dict
# from cldm.ddim_hacked import DDIMSampler
from laion_face_common import generate_annotation
import subprocess
import json
import glob
from modules.sadtalker_test import SadTalker
# from multiprocessing import Pool
class CustomRootLogger(logging.Logger):
def setLevel(self, level):
stack_trace = ''.join(traceback.format_stack())
print(f"Log level changed to {level} by:\n{stack_trace}")
super().setLevel(level)
# Replace the root logger with the custom one
logging.setLoggerClass(CustomRootLogger)
root_logger = logging.getLogger()
file_handler = logging.FileHandler(filename='tmp.log')
stdout_handler = logging.StreamHandler(stream=sys.stdout)
handlers = [file_handler, stdout_handler]
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) # Set the logger level
log_format = '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'
formatter = logging.Formatter(log_format)
# Set the formatter and add handlers to the logger
for handler in handlers:
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info("logging should be working?")
def custom_exponential_wait(retry_state):
base_wait = 4
exponent = 1.2
return base_wait * (exponent ** retry_state.attempt_number)
def custom_wait_gen():
attempt = 0
while True:
        # custom_exponential_wait expects a tenacity retry_state, so compute the wait directly here
        yield 4 * (1.2 ** attempt)
attempt += 1
# from IPython.display import Audio, display
def getFilename(path, extension):
current_datetime = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
filename = f"{path}{current_datetime}-{uuid.uuid4()}.{extension}"
return filename
class AnimeBuilder:
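    """Builds anime-style videos from text: stories and screenplays via language models,
    images via Stable Diffusion (with optional img2img refinement and ControlNet portraits),
    sound and music via AudioLDM, speech via TTS, and talking-head clips via
    one-shot-talking-face or SadTalker."""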
def __init__(
self,
textModel='EleutherAI/gpt-neo-2.7B',
diffusionModel="hakurei/waifu-diffusion",
vaeModel="stabilityai/sd-vae-ft-mse",
templates=templates,
advanceSceneObjects=None,
num_inference_steps=30,
cfg=None,
verbose=False,
doImg2Img=False,
img2imgStrength=0.4,
saveMemory=True,
cache_dir='../hf',
textRevision=None,
negativePrompt="collage, grayscale, text, watermark, lowres, bad anatomy, bad hands, text, error, missing fingers, cropped, worst quality, low quality, normal quality, jpeg artifacts, watermark, blurry, grayscale, deformed weapons, deformed face, deformed human body",
suffix=", anime drawing",
riffusionSuffix=" pleasing rythmic background music",
savePath="./static/samples/",
saveImages=False,
audioLDM="cvssp/audioldm-s-full-v2",
soundEffectDuration=1.5,
musicDuration=16,
musicSuffix=" movie soundtrack background music, smooth jazz",
imageSizes=[512, 512, 1024, 1024],
usePITS=True,
fixAsides=False,
portraitPrompt=', anime, face, portrait, headshot, white background',
computeDepth=True,
osth=True,
tokenizer=None,
use_gpt_for_chat_completion=False,
parallel_screenplays=True,
controlnet_diffusion_model="runwayml/stable-diffusion-v1-5",
video_mode=False,
blur_radius=0.5,
talking_head_decimate=1,
face_steps=20,
max_previous_scenes=6,
use_GPT4=False,
):
self.use_GPT4 = use_GPT4
self.blur_radius = blur_radius
self.max_previous_scenes = max_previous_scenes
self.talking_head_decimate = talking_head_decimate
self.face_steps = face_steps
self.saveMemory = saveMemory
self.doImg2Img = doImg2Img
# read system prompt files
self.scenePrompt = open("chapters_to_scenes_systemPrompt.txt").read()
self.chapterPrompt = open(
"summary_to_chapters_systemPrompt.txt").read()
self.screenplayPrompt = open("screenplay_systemPrompt.txt").read()
self.bonusSceneInstruction = '> NEVER UNDER ANY CIRCUMSTANCES USE THE WORD "MUST"\n\n'
# load generation functions (for now this is just img2img, move more there later)
if self.doImg2Img:
self.generationFunctions = GenerationFunctions(
saveMemory=self.saveMemory)
self.video_mode = video_mode
self.osth = osth
self.portraitPrompt = portraitPrompt
self.parallel_screenplays = parallel_screenplays
        # always use parallel screenplays when using chatgpt
if use_gpt_for_chat_completion:
self.parallel_screenplays = True
self.fixAsides = fixAsides
self.imageSizes = imageSizes
self.img2imgStrength = img2imgStrength
self.soundEffectDuration = soundEffectDuration
self.musicDuration = musicDuration
self.musicSuffix = musicSuffix
self.savePath = savePath
self.saveImages = saveImages
self.use_gpt_for_chat_completion = use_gpt_for_chat_completion
self.ignored_words = set(
["the", "name", "setting", "music", "action", "sound", "effect"])
self.textModel = textModel
self.cache_dir = cache_dir
self.verbose = verbose
self.mubert = False
self.templates = templates
if cfg is None:
cfg = {
"genTextAmount_min": 30,
"genTextAmount_max": 100,
"no_repeat_ngram_size": 16,
"repetition_penalty": 1.0,
"MIN_ABC": 4,
"num_beams": 1,
"temperature": 1.0,
"MAX_DEPTH": 5
}
self.cfg = cfg
self.num_inference_steps = num_inference_steps
self.negativePrompt = negativePrompt
self.suffix = suffix
self.riffusionSuffix = riffusionSuffix
# use this for advanceScene()
# advance scene
if advanceSceneObjects is None:
self.advanceSceneObjects = [
{
"object": "advancePlot",
"whichScene": 3,
"numScenes": 3,
},
{
"object": "fightScene",
"whichScene": 1,
"numScenes": 3,
},
]
else:
self.advanceSceneObjects = advanceSceneObjects
if self.verbose:
print("LOADING TEXT MODEL")
if audioLDM is not None:
self.audioLDMPipe = AudioLDMPipeline.from_pretrained(
audioLDM, torch_dtype=torch.float16)
self.audioLDMPipe = self.audioLDMPipe.to("cuda")
# move to cpu if saving memory
if self.saveMemory:
self.audioLDMPipe = self.audioLDMPipe.to("cpu")
if self.textModel == "GPT3":
pass
# self.textGenerator="GPT3"
self.textGenerator = {
'name': "GPT3",
}
openai.organization = "org-bKm1yrKncCnPfkcf8pDpe4GM"
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.Model.list()
elif self.textModel == "gpt-3.5-turbo-instruct":
# self.textGenerator="gpt-3.5-turbo-instruct"
self.textGenerator = {
'name': "gpt-3.5-turbo-instruct",
}
openai.organization = "org-bKm1yrKncCnPfkcf8pDpe4GM"
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.Model.list()
elif self.textModel == "llama":
thisTokenizer, thisPipeline = getllama()
self.textGenerator = {
'name': "llama",
'tokenizer': thisTokenizer,
'pipeline': thisPipeline
}
else:
# text model
self.textModel = textModel
self.textRevision = textRevision
textGenerator = pipeline('text-generation',
torch_dtype=torch.float16,
model=self.textModel,
trust_remote_code=True,
device_map="auto",
model_kwargs={"load_in_4bit": True}
)
if tokenizer is None:
self.tokenizer = AutoTokenizer.from_pretrained(
self.textModel, torch_dtype=torch.float16)
else:
self.tokenizer = tokenizer
textGenerator.tokenizer = tokenizer
self.textGenerator = {
'name': self.textModel,
'tokenizer': self.tokenizer,
# 'model': self.textModel
'pipeline': textGenerator
}
# image model
if self.verbose:
print("LOADING IMAGE MODEL")
# make sure you're logged in with `huggingface-cli login`
# vae = AutoencoderKL.from_pretrained(vaeModel) #maybe I should enable this again?
# pipe = StableDiffusionPipeline.from_pretrained(diffusionModel,vae=vae, torch_dtype=torch.float16,custom_pipeline="composable_stable_diffusion")
# pipe = DiffusionPipeline.from_pretrained(
# diffusionModel,
# vae=vae,
# torch_dtype=torch.float16,
# custom_pipeline="lpw_stable_diffusion",
# )
self.diffusionModel = diffusionModel
if "xl" in diffusionModel.lower():
pipe = StableDiffusionXLPipeline.from_single_file(
diffusionModel, torch_dtype=torch.float16, use_safetensors=True,
custom_pipeline="lpw_stable_diffusion_xl"
)
pipe.vae = AutoencoderTiny.from_pretrained(
"madebyollin/taesdxl", torch_dtype=torch.float16)
elif diffusionModel == "LCM":
pipe = DiffusionPipeline.from_pretrained(
"SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_txt2img", custom_revision="main")
# To save GPU memory, torch.float16 can be used, but it may compromise image quality.
pipe.to(torch_device="cuda", torch_dtype=torch.float32)
else:
# pipe = DiffusionPipeline.from_pretrained(diffusionModel)
# check if model_id is a .ckpt or .safetensors file
if diffusionModel.endswith(".ckpt") or diffusionModel.endswith(".safetensors"):
print("about to die", diffusionModel)
pipe = StableDiffusionPipeline.from_single_file(diffusionModel,
torch_dtype=torch.float16)
else:
pipe = StableDiffusionPipeline.from_pretrained(
diffusionModel, torch_dtype=torch.float16)
# change to UniPC scheduler
pipe.scheduler = UniPCMultistepScheduler.from_config(
pipe.scheduler.config)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()
pipe.enable_xformers_memory_efficient_attention()
tomesd.apply_patch(pipe, ratio=0.5)
self.pipe = pipe
# if save memory, move pipe to cpu and do garbage collection
if self.saveMemory:
self.pipe = self.pipe.to("cpu")
gc.collect()
# collect cuda memory
torch.cuda.empty_cache()
else:
self.pipe = self.pipe.to("cuda")
self.pipe.safety_checker = None
'''
if self.doImg2Img:
if self.verbose:
print("LOADING Img2Img")
if "xl" in diffusionModel.lower():
img2img = StableDiffusionXLImg2ImgPipeline.from_single_file(
diffusionModel, torch_dtype=torch.float16, use_safetensors=True)
# img2img.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=torch.float16)
img2img.enable_vae_tiling()
self.img2img = img2img
self.img2img.safety_checker = None
else:
if diffusionModel.endswith(".ckpt") or diffusionModel.endswith(".safetensors"):
thisModelName = "runwayml/stable-diffusion-v1-5"
else:
thisModelName = diffusionModel
self.img2img = StableDiffusionImg2ImgPipeline.from_pretrained(
thisModelName,
# revision=revision,
scheduler=self.pipe.scheduler,
unet=self.pipe.unet,
vae=self.pipe.vae,
safety_checker=self.pipe.safety_checker,
text_encoder=self.pipe.text_encoder,
tokenizer=self.pipe.tokenizer,
torch_dtype=torch.float16,
use_auth_token=True,
cache_dir="./AI/StableDiffusion"
)
self.img2img.enable_attention_slicing()
self.img2img.enable_xformers_memory_efficient_attention()
tomesd.apply_patch(self.img2img, ratio=0.5)
# if save memmory, move to cpu and do garbage collection
if self.saveMemory:
self.img2img = self.img2img.to("cpu")
gc.collect()
# collect cuda memory
torch.cuda.empty_cache()
'''
if self.verbose:
print("LOADING TTS MODEL")
# tts
#
self.usePITS = usePITS
if usePITS:
self.pitsTTS = pits.GradioApp(pits.get_default_args())
else:
models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
"facebook/fastspeech2-en-200_speaker-cv4", # random
arg_overrides={"vocoder": "hifigan", "fp16": False, }
)
self.tts_models = models
self.tts_cfg = cfg
self.tts_task = task
# model = models[0]
TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)
self.tts_generator = task.build_generator(models, cfg)
# 000000000011111111112222222222333333333344444444444555555555
# 012345678901234567890123456789012345678901234567890123456789
if self.usePITS:
# 01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234501234567890
# 00000000001111111111222222222233333333334444444444555555555566666666667777777777888888888899999999990000000000
s = "fmmffffmfffmfffmmfmmmffffmfmfmfmmmffmffffffmmmmmmffmmmffmmmmfmffffmfffmfmfffffmfffmfffmfffmffffffmmfmffmmmmf".upper()
else:
s = "FMFMMMMMFMMMFFMFFMMMMMMmffmmfmmfmfmmmmmmfmmmmmfmmmffmmmm".upper()
self.maleVoices = [i for i in range(len(s)) if s[i] == "M"]
self.femaleVoices = [i for i in range(len(s)) if s[i] == "F"]
# controlnet for portrait generation
# self.facemodel = create_model('../cldm_v21.yaml').cpu()
# self.facemodel.load_state_dict(load_state_dict(
# '..\ControlNet\models/controlnet_sd21_laion_face_v2_full.ckpt', location='cuda'))
# self.facemodel = self.facemodel.cuda()
# self.facemodel = self.facemodel.cpu()
# self.facemodel_ddim_sampler = DDIMSampler(self.facemodel) # ControlNet _only_ works with DDIM.
# Stable Diffusion 2.1-base:
# controlnet = ControlNetModel.from_pretrained(
# "CrucibleAI/ControlNetMediaPipeFace", torch_dtype=torch.float16, variant="fp16")
# self.facepipe = StableDiffusionControlNetPipeline.from_pretrained(
# "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
# )
# controlnet = ControlNetModel.from_pretrained("CrucibleAI/ControlNetMediaPipeFace", subfolder="diffusion_sd15")
# if diffusionModel.endswith(".ckpt") or diffusionModel.endswith(".safetensors"):
# self.facepipe = StableDiffusionControlNetPipeline.from_single_file(diffusionModel, controlnet=controlnet, safety_checker=None)
# else:
# self.facepipe = StableDiffusionControlNetPipeline.from_pretrained(diffusionModel, controlnet=controlnet, safety_checker=None)
# self.facepipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# Remove if you do not have xformers installed
# see https://huggingface.co/docs/diffusers/v0.13.0/en/optimization/xformers#installing-xformers
# for installation instructions
# self.facepipe.enable_xformers_memory_efficient_attention()
# self.facepipe.enable_model_cpu_offload()
if "xl" in diffusionModel.lower():
# TODO: add sdxl controlnet when it's available
pass
# OR
# Stable Diffusion 1.5:
controlnet = ControlNetModel.from_pretrained(
"CrucibleAI/ControlNetMediaPipeFace", subfolder="diffusion_sd15", torch_dtype=torch.float16, variant="fp16")
if "safetensors" in controlnet_diffusion_model:
self.facepipe = StableDiffusionControlNetPipeline.from_single_file(
controlnet_diffusion_model, controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16)
else:
self.facepipe = StableDiffusionControlNetPipeline.from_pretrained(
controlnet_diffusion_model, controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16)
# disable safety checker
self.facepipe.safety_checker = None
self.facepipe.scheduler = UniPCMultistepScheduler.from_config(
self.facepipe.scheduler.config)
# Remove if you do not have xformers installed
# see https://huggingface.co/docs/diffusers/v0.13.0/en/optimization/xformers#installing-xformers
# for installation instructions
self.facepipe.enable_xformers_memory_efficient_attention()
# self.facepipe.enable_model_cpu_offload()
        # if saving memory, move to cpu and do garbage collection
if self.saveMemory:
self.facepipe = self.facepipe.to("cpu")
gc.collect()
# collect cuda memory
torch.cuda.empty_cache()
else:
self.facepipe = self.facepipe.to("cuda")
if not self.osth:
self.sad_talker = SadTalker("E:\img\SadTalker")
if computeDepth:
repo = "isl-org/ZoeDepth"
# Zoe_N
model_zoe_n = torch.hub.load(repo, "ZoeD_NK", pretrained=True)
DEVICE = "cuda"
self.zoe = model_zoe_n.to(DEVICE)
else:
self.zoe = None
        # guard: self.zoe is None when computeDepth is False
        if self.saveMemory and self.zoe is not None:
            self.zoe = self.zoe.to("cpu")
            gc.collect()
            # collect cuda memory
            torch.cuda.empty_cache()
        elif self.zoe is not None:
            self.zoe = self.zoe.to("cuda")
def chatCompletion(self, messages, n=1, min_new_tokens=256, max_new_tokens=512, generation_prefix=""):
# free up some memory
gc.collect()
torch.cuda.empty_cache()
# first we need to combine messages into a single string
        # as a reminder, messages have the format {"role": "system/user/assistant", "content": "some content"}
prompt = ""
lastRole = "system"
for message in messages:
# prompt += message['role']+":\n"
if message['role'] != lastRole:
prompt += "\n"
prompt += message['content']+"\n"
lastRole = message['role']
        # now add a final "assistant:" to the prompt
# prompt += "assistant:\n"
# now we can run the completion
prompt += "\n"+generation_prefix
output = []
for i in range(n):
# print("\n=====\n", prompt, "\n=====\n")
result = self.textGenerator['pipeline'](prompt,
min_new_tokens=min_new_tokens,
max_new_tokens=max_new_tokens,
return_full_text=True,
no_repeat_ngram_size=self.cfg["no_repeat_ngram_size"],
repetition_penalty=self.cfg["repetition_penalty"],
num_beams=self.cfg["num_beams"],
temperature=self.cfg["temperature"],
do_sample=True,
)
result_text = result[0]['generated_text']
# print("\n=====\n", result_text, "\n=====\n")
# now we need to pull out the resulting message
start_index = len(prompt)
# stop at \n\n
end_index = result_text.find("\n\n", start_index)
# end_index = result_text.find("user:", start_index)
# print("start_index:", start_index, "end_index:", end_index)
output += [generation_prefix+result_text[start_index:end_index]]
# output += [generation_prefix+result_text]
# free up some memory
gc.collect()
torch.cuda.empty_cache()
return output
def _get_portrait0(self, input_image: Image.Image, prompt, a_prompt, n_prompt, max_faces, num_samples, ddim_steps, guess_mode, strength, scale, seed, eta):
# move to cuda
self.facemodel = self.facemodel.cuda()
# ControlNet _only_ works with DDIM.
facemodel_ddim_sampler = DDIMSampler(self.facemodel)
with torch.no_grad():
empty = generate_annotation(input_image, max_faces)
visualization = Image.fromarray(empty) # Save to help debug.
empty = numpy.moveaxis(empty, 2, 0) # h, w, c -> c, h, w
control = torch.from_numpy(empty.copy()).float().cuda() / 255.0
control = torch.stack([control for _ in range(num_samples)], dim=0)
# control = einops.rearrange(control, 'b h w c -> b c h w').clone()
# Sanity check the dimensions.
B, C, H, W = control.shape
assert C == 3
assert B == num_samples
if seed != -1:
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# if config.save_memory:
# model.low_vram_shift(is_diffusing=False)
cond = {"c_concat": [control], "c_crossattn": [
self.facemodel.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [
self.facemodel.get_learned_conditioning([n_prompt] * num_samples)]}
shape = (4, H // 8, W // 8)
# if config.save_memory:
# model.low_vram_shift(is_diffusing=True)
self.facemodel.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
[strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
samples, intermediates = facemodel_ddim_sampler.sample(
ddim_steps,
num_samples,
shape,
cond,
verbose=False,
eta=eta,
unconditional_guidance_scale=scale,
unconditional_conditioning=un_cond
)
# if config.save_memory:
# model.low_vram_shift(is_diffusing=False)
x_samples = self.facemodel.decode_first_stage(samples)
# x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(numpy.uint8)
x_samples = numpy.moveaxis((x_samples * 127.5 + 127.5).cpu().numpy().clip(
0, 255).astype(numpy.uint8), 1, -1) # b, c, h, w -> b, h, w, c
results = [visualization] + [x_samples[i]
for i in range(num_samples)]
        # move facemodel back to the cpu to free GPU memory
        self.facemodel = self.facemodel.to('cpu')
        gc.collect()
        torch.cuda.empty_cache()
        return results
def _get_portrait(self, input_image: Image.Image, prompt, a_prompt, n_prompt, NUM_RETRIES=3):
empty = generate_annotation(input_image, 1)
anno = Image.fromarray(empty).resize((768, 768))
# if save memory, move from cpu to gpu
if self.saveMemory:
self.facepipe = self.facepipe.to('cuda')
image = self.facepipe(prompt+a_prompt, negative_prompt=n_prompt,
image=anno, num_inference_steps=self.face_steps).images[0]
# image = self.facepipe(prompt+a_prompt, negative_prompt=n_prompt,
# image=input_image, num_inference_steps=30).images[0]
# check if image is all black, and if so, retry
for i in range(NUM_RETRIES):
if np.all(np.array(image) == 0):
print("RETRYING PORTRAIT")
image = self.facepipe(prompt+a_prompt, negative_prompt=n_prompt,
image=anno, num_inference_steps=self.face_steps).images[0]
else:
break
# if save memory, move from gpu to cpu
if self.saveMemory:
self.facepipe = self.facepipe.to('cpu')
gc.collect()
torch.cuda.empty_cache()
image.save("./static/samples/tmp.png")
return image
def getPortrait(self, prompt, promptSuffix, img2imgStrength=0.6, num_inference_steps=20):
depth_image_path = "./nan3.jpg"
input_image = Image.open(depth_image_path)
input_image = input_image.resize((512, 512))
# a_prompt=',anime, face, portrait, headshot, white background'#added to prompt
a_prompt = self.portraitPrompt+promptSuffix
# n_prompt='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'#negative prompt
n_prompt = "hands, watermark, "+self.negativePrompt
max_faces = 1
num_samples = 1
ddim_steps = 10
guess_mode = False
strength = 0.8
scale = 7.5 # cfg scale
seed = np.random.randint(0, 10000)
eta = 0
print("creating portrait with prompt:", prompt+a_prompt)
# results = self._get_portrait(input_image, prompt, a_prompt, n_prompt, max_faces,
# num_samples, ddim_steps, guess_mode, strength, scale, seed, eta)
# results = self._get_portrait(input_image, prompt, a_prompt, n_prompt)
# output = Image.fromarray(results[1])
output = self._get_portrait(input_image, prompt, a_prompt, n_prompt)
if self.doImg2Img:
# img2Input = output.resize((self.imageSizes[2], self.imageSizes[3]))
img2Input = output.resize((1024, 1024))
'''
# some nonsense to handle long prompts, based off of https://github.com/huggingface/diffusers/issues/2136#issuecomment-1409978949
# todo figure out what this
max_length = self.pipe.tokenizer.model_max_length
input_ids = self.pipe.tokenizer(
prompt, return_tensors="pt").input_ids
input_ids = input_ids.to("cuda")
# negative_ids = self.pipe.tokenizer(self.negativePrompt, truncation=False, padding="max_length", max_length=input_ids.shape[-1], return_tensors="pt").input_ids
negative_ids = self.pipe.tokenizer(
self.negativePrompt, truncation=True, padding="max_length", max_length=input_ids.shape[-1], return_tensors="pt").input_ids
negative_ids = negative_ids.to("cuda")
padding_length = max_length - (input_ids.shape[-1] % max_length)
if padding_length > 0:
input_ids = torch.cat([input_ids, torch.full((input_ids.shape[0], padding_length),
self.pipe.tokenizer.pad_token_id, dtype=torch.long, device="cuda")], dim=1)
negative_ids = torch.cat([negative_ids, torch.full(
(negative_ids.shape[0], padding_length), self.pipe.tokenizer.pad_token_id, dtype=torch.long, device="cuda")], dim=1)
concat_embeds = []
neg_embeds = []
for i in range(0, input_ids.shape[-1], max_length):
concat_embeds.append(self.pipe.text_encoder(
input_ids[:, i: i + max_length])[0])
neg_embeds.append(self.pipe.text_encoder(
negative_ids[:, i: i + max_length])[0])
prompt_embeds = torch.cat(concat_embeds, dim=1)
negative_prompt_embeds = torch.cat(neg_embeds, dim=1)
if self.saveMemory:
self.img2img = self.img2img.to('cuda')
# with autocast("cuda"):
if True: # for some reason autocast is bad?
img2 = self.img2img(
prompt=prompt,
negative_prompt=self.negativePrompt,
# prompt_embeds=prompt_embeds,
# negative_prompt_embeds=negative_prompt_embeds,
image=img2Input,
strength=img2imgStrength,
guidance_scale=7.5,
num_inference_steps=num_inference_steps,
).images[0]
output = img2
if self.saveMemory:
self.img2img = self.img2img.to('cpu')
gc.collect()
torch.cuda.empty_cache()
'''
img2 = self.generationFunctions.image_to_image(img2Input,
prompt,
"low resolution, blurry, "+self.negativePrompt,
img2imgStrength,
steps=num_inference_steps)
output = img2
# return output
filename = getFilename(self.savePath, "png")
output.save(filename)
if self.zoe is not None:
depthFilename = filename.replace(".png", "_depth.png")
depth = self.getZoeDepth(output)
depth.save(depthFilename)
print("DIED")
return filename
def getTalkingHeadVideo(self, portrait_image_path, text, voice, gender, supress=True, decimate=1):
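        # Synthesize speech for `text` with the chosen voice, then animate the portrait
        # using one-shot-talking-face (osth=True) or SadTalker; returns (video_path, audio_duration).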
audio_file_path, duration = self.textToSpeech(text, voice, gender)
# make sure audio_file_path ends with .wav (this file exists either way)
if not audio_file_path.endswith('.wav'):
audio_file_path = audio_file_path[:-4]+".wav"
if self.osth:
image_path = portrait_image_path
save_dir = getFilename(self.savePath, "mov")
if image_path.endswith('.png'):
png_path = os.path.join(image_path)
jpg_path = os.path.join(
os.path.splitext(image_path)[0] + '.jpg')
img = Image.open(png_path)
rgb_img = img.convert('RGB')
rgb_img.save(jpg_path)
image_path = jpg_path
osth_path = '.\AAAI22-one-shot-talking-face'
os.makedirs(save_dir, exist_ok=True)
phoneme = processAudio(
audio_file_path, phindex_location=".\AAAI22-one-shot-talking-face\phindex.json")
# supress printing
if supress == True:
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
mov = test_with_input_audio_and_image(image_path, audio_file_path, phoneme,
".\\AAAI22-one-shot-talking-face\\checkpoint\\generator.ckpt",
".\\AAAI22-one-shot-talking-face\\checkpoint\\audio2pose.ckpt",
save_dir, osth_path, decimate=decimate)
else:
mov = test_with_input_audio_and_image(image_path, audio_file_path, phoneme,
".\\AAAI22-one-shot-talking-face\\checkpoint\\generator.ckpt",
".\\AAAI22-one-shot-talking-face\\checkpoint\\audio2pose.ckpt",
save_dir, osth_path, decimate=decimate)
print(mov)
found_movie = glob.glob(os.path.join(save_dir, "*.mp4"))
return found_movie[0], duration
else:
# use sadtalker
driven_audio = audio_file_path
source_image = portrait_image_path
still_mode = False
resize_mode = True
use_enhancer = False
result_dir = ".\static\samples"
result = self.sad_talker.test(
source_image,
driven_audio,
still_mode,
resize_mode,
use_enhancer,
result_dir
)
# replace all #'s with _'s in filename
newFilename = result[0].replace("#", "_")
os.rename(result[0], newFilename)
return newFilename, duration
def doGen(self, prompt, num_inference_steps=30, recursion=0):
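        # Text-to-image with the main diffusion pipeline (optionally refined via img2img);
        # retries all-black outputs, saves the PNG (plus a _depth.png when ZoeDepth is loaded)
        # and returns the PNG filename.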
# move text model to cpu for now
# if self.saveMemory:
# self.textGenerator['pipeline'].model = self.textGenerator['pipeline'].model.cpu(
# )
# gc.collect()
# torch.cuda.empty_cache()
seed = np.random.randint(0, 1000000)
print("SEED: ", seed, "")
generator = torch.Generator("cuda").manual_seed(seed)
print("ABOUT TO DIE")
        # if saving memory, the pipe lives on the cpu; move it to cuda for generation
if self.saveMemory:
self.pipe = self.pipe.to('cuda')
if self.diffusionModel == "LCM":
image = self.pipe([prompt],
# negative_prompt=[self.negativePrompt], #not supported for some reason :(
guidance_scale=7.5,
num_inference_steps=num_inference_steps,
width=self.imageSizes[0],
height=self.imageSizes[1],
# generator=generator
).images[0]
else:
with autocast("cuda"):
image = self.pipe([prompt],
negative_prompt=[self.negativePrompt],
guidance_scale=7.5,
num_inference_steps=num_inference_steps,
width=self.imageSizes[0],
height=self.imageSizes[1],
generator=generator
).images[0]
# if save memory, move back to cpu
if self.saveMemory:
self.pipe = self.pipe.to('cpu')
gc.collect()
torch.cuda.empty_cache()
print("DIED")
image.save("./static/samples/test.png")
if self.doImg2Img:
# low pass filter
blurred_image = image.filter(
ImageFilter.GaussianBlur(radius=self.blur_radius))
img2Input = blurred_image.resize(
(self.imageSizes[2], self.imageSizes[3]))
# img2Input = image.resize((self.imageSizes[2], self.imageSizes[3]))
'''
# move img2img model to gpu for now
if self.saveMemory:
self.img2img = self.img2img.to('cuda')
# with autocast("cuda"):
if True:
img2 = self.img2img(
prompt=prompt,
negative_prompt=self.negativePrompt,
# prompt_embeds=prompt_embeds,
# negative_prompt_embeds=negative_prompt_embeds,
image=img2Input,
strength=self.img2imgStrength,
guidance_scale=7.5,
num_inference_steps=num_inference_steps,
).images[0]
output = img2
img2.save("./static/samples/test2.png")
# move img2img model back to cpu
if self.saveMemory:
self.img2img = self.img2img.to('cpu')
gc.collect()
torch.cuda.empty_cache()
print("DIED2")
'''
img2 = self.generationFunctions.image_to_image(img2Input,
prompt,
self.negativePrompt,
self.img2imgStrength,
steps=num_inference_steps)
output = img2
else:
output = image
if self.saveMemory:
gc.collect()
torch.cuda.empty_cache()
# self.textGenerator['pipeline'].model = self.textGenerator['pipeline'].model.cuda()
# fix all black images? (which Anything 3.0 puts out sometimes)
pix = np.array(output)
MAX_IMG_RECURSION = 3
if np.sum(pix) == 0 and recursion < MAX_IMG_RECURSION:
if self.verbose:
print("REDOING BLANK IMAGE!")
return self.doGen(prompt, num_inference_steps, recursion=recursion+1)
# return output
# convert to file and return
filename = getFilename(self.savePath, "png")
if self.zoe is not None:
depthFilename = filename.replace(".png", "_depth.png")
depth = self.getZoeDepth(output)
depth.save(depthFilename)
output.save(filename)
return filename
def getZoeDepth(self, image, boxSize=1, blurRadius=1):
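        # Run ZoeDepth on the image and convert the depth map to an 8-bit grayscale
        # PIL image (nearer = brighter), lightly blurred for smoothness.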
if self.saveMemory:
self.zoe = self.zoe.to('cuda')
depth = self.zoe.infer_pil(image) # as numpy
if self.saveMemory:
self.zoe = self.zoe.to('cpu')
gc.collect()
torch.cuda.empty_cache()
value = depth
if isinstance(value, torch.Tensor):
value = value.detach().cpu().numpy()
v2 = 1-value/np.min(value)
value = (boxSize+v2)/boxSize
value = value.squeeze()
# crop to 0-1
value = np.clip(value, 0, 1)
formatted = (value * 255 / np.max(value)).astype('uint8')
img = Image.fromarray(formatted)
img = img.filter(
ImageFilter.GaussianBlur(radius=blurRadius))
return img
def generateAudio(self, prompt, duration=3, num_inference_steps=10):
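        # Generate a short sound clip from a text prompt with AudioLDM;
        # writes wav + mp3 and returns (mp3 filename, duration in seconds).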
mp3file_name = getFilename(self.savePath, "mp3")
# wavfile_name = getFilename(self.savePath, "wav")
wavfile_name = mp3file_name.replace(".mp3", ".wav")
        # if saving memory, the AudioLDM pipe lives on the cpu; move it to cuda for generation
if self.saveMemory:
self.audioLDMPipe = self.audioLDMPipe.to('cuda')
audio = self.audioLDMPipe(
prompt, num_inference_steps=num_inference_steps, audio_length_in_s=duration).audios[0]
# if save memory, move back to cpu
if self.saveMemory:
self.audioLDMPipe = self.audioLDMPipe.to('cpu')
gc.collect()
torch.cuda.empty_cache()
audio = ipd.Audio(audio, rate=16000, autoplay=True)
with open(wavfile_name, 'wb') as f:
f.write(audio.data)
wavfile = AudioSegment.from_wav(wavfile_name)
wavfile.export(mp3file_name, format="mp3")
return mp3file_name, duration
def textToSpeech(self, text, voice, gender):
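        # Convert text to speech with PITS (if enabled) or fastspeech2;
        # writes wav + mp3 and returns (mp3 filename, duration in seconds).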
print("doing tts, voice=", voice, gender)
mp3file_name = getFilename(self.savePath, "mp3")
# wavfile_name = getFilename(self.savePath, "wav")
wavfile_name = mp3file_name.replace(".mp3", ".wav")
if self.usePITS:
scope_shift = 0
# if gender=="male":
# scope_shift=10
# elif gender=="female":
# scope_shift=-10
duration_shift = 1.0
seed = 1
ph, (rate, wav) = self.pitsTTS.inference(
text, voice, seed, scope_shift, duration_shift)
# pad wav with "rate" zeros to make it 1 second longer
wav = np.pad(wav, (0, rate), mode="constant")
audio = ipd.Audio(wav, rate=rate, autoplay=True)
with open(wavfile_name, 'wb') as f:
f.write(audio.data)
duration = len(wav)/rate
wavfile = AudioSegment.from_wav(wavfile_name)
wavfile.export(mp3file_name, format="mp3")
print("done tts")
return mp3file_name, duration
else:
try:
with autocast("cuda"):
self.tts_task.data_cfg.hub["speaker"] = voice
sample = TTSHubInterface.get_model_input(
self.tts_task, text)
# print("about to die",models[0],sample)
wav, rate = TTSHubInterface.get_prediction(
self.tts_task, self.tts_models[0], self.tts_generator, sample)
# print("huh?",wav,rate,len(wav)/rate)
duration = len(wav)/rate
audio = ipd.Audio(wav.cpu(), rate=rate, autoplay=True)
with open(wavfile_name, 'wb') as f:
f.write(audio.data)
wavfile = AudioSegment.from_wav(wavfile_name)
wavfile.export(mp3file_name, format="mp3")
return mp3file_name, duration
except:
print("Error generating text", text, voice)
# music
def generate_track_by_prompt_vol(self, prompt, vol=1.0, duration=8, loop=True, autoplay=True):
# if self.audioLDMPipe is not None:
filename, duration = self.generateAudio(prompt, duration=duration)
return filename
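        # NOTE: the mubert/riffusion code below is unreachable because of the early
        # return above; it is kept only for reference.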
mp3file_name = getFilename(self.savePath, "mp3")
wavfile_name = getFilename(self.savePath, "wav")
if self.mubert:
url = generate_track_by_prompt(prompt, duration, loop)
if url is None:
return
mp3 = urllib.request.urlopen(url).read()
original = AudioSegment.from_mp3(BytesIO(mp3))
samples = original.get_array_of_samples()
samples /= np.max(np.abs(samples))
samples *= vol
# audio = Audio(samples, normalize=False,
# rate=original.frame_rate, autoplay=autoplay)
# audio = Audio.from_file("audio.mp3", loop=True, autoplay=True)
# return audio
return mp3file_name
else:
_, filename = get_music(prompt+self.riffusionSuffix, duration,
wavfile_name=wavfile_name, mp3file_name=mp3file_name)
mp3 = open(filename, 'rb').read()
original = AudioSegment.from_mp3(BytesIO(mp3))
samples = original.get_array_of_samples()
samples /= np.max(np.abs(samples))
samples *= vol
# audio = Audio(samples, normalize=False,
# rate=original.frame_rate, autoplay=autoplay)
# audio = Audio.from_file("audio.mp3", loop=True, autoplay=True)
# return audio
return mp3file_name
def descriptionToCharacter(self, description):
thisObject = WorldObject(self.templates, self.textGenerator, "descriptionToCharacter", objects={
"description": description},
cfg=self.cfg,
verbose=self.verbose
)
return thisObject
def advanceStory(self, story, subplot, mainCharacter=None, supportingCharacters=None, alwaysUseMainCharacter=True):
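        # Pick a random "advance scene" template and generate the next story beat for two
        # characters; returns (whichScene, numScenes, newStory).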
# save some memory
self.pipe.to("cpu")
        # riffusion.pipe2.to('cpu')  # disabled: riffusion is not imported (see commented-out import above)
gc.collect()
torch.cuda.empty_cache()
advanceSceneObject = random.choice(self.advanceSceneObjects)
# update subplot
if alwaysUseMainCharacter:
character1 = mainCharacter
character2 = random.choice(supportingCharacters)
else:
character1, character2 = random.sample(
[mainCharacter]+supportingCharacters, 2)
if character1 is None:
character1 = story.getProperty("character1")
if character2 is None:
character2 = story.getProperty("character2")
newStory = WorldObject(self.templates, self.textGenerator, advanceSceneObject['object'], objects={
"character1": character1,
"character2": character2,
"previous": story,
},
cfg=self.cfg,
verbose=self.verbose
)
whichScene = advanceSceneObject['whichScene']
numScenes = advanceSceneObject['numScenes']
self.pipe.to("cuda")
        # riffusion.pipe2.to('cuda')  # disabled: riffusion is not imported (see commented-out import above)
gc.collect()
torch.cuda.empty_cache()
return whichScene, numScenes, newStory
def sceneToTranscript(self, scene, k=3, character1=None, character2=None, whichScene=1):
if character1 is None:
character1 = scene.getProperty("character1")
if character2 is None:
character2 = scene.getProperty("character2")
objects = {"story synopsis": scene.getProperty("story synopsis"),
"subplot": scene.getProperty("subplot"),
"scene": scene.getProperty("scene %d" % k),
"character1": character1,
"character2": character2,
}
# check for dialogue
line1txt = None
try:
line1txt = scene.getProperty("scene %d line 1 text" % whichScene)
if self.verbose:
print("line 1 text", line1txt)
except:
if self.verbose:
print("no property", "scene %d line 1 text" % whichScene)
pass
if line1txt:
objects['line 1 text'] = line1txt
thisObject = WorldObject(self.templates, self.textGenerator,
"sceneToTranscript", objects,
cfg=self.cfg,
verbose=self.verbose
)
return thisObject
def watchAnime(
self,
synopsis=None,
subplot1=None,
scene1=None,
character1=None,
num_characters=4,
k=100,
amtMin=15,
amtMax=30,
promptSuffix="",
portrait_size=128,
skip_transcript=False,
whichScene=1, # optionally skip first few scenes
alwaysUseMainCharacter=True, # always use main character in scene
):
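        # Generator: builds a story, characters, portraits and voices, then yields dicts
        # ({"debug"|"image"|"music"|"speech": ...}) that the caller can assemble into an episode.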
# make sure text generator is on cuda (can get out of sync if we ctrl+c during doGen() )
# if self.textGenerator["name"].startswith("GPT3"):
# self.textGenerator['pipeline'].model = self.textGenerator['pipeline'].model.cuda(
# )
self.amtMin = amtMin
self.amtMax = amtMax
objects = {}
if synopsis:
objects['story synopsis'] = synopsis
if scene1:
objects['scene 1 text'] = scene1
if character1:
if isinstance(character1, str):
character1 = self.descriptionToCharacter(character1)
# print(character1)
else:
character1 = WorldObject(self.templates, self.textGenerator, "character",
cfg=self.cfg,
verbose=self.verbose
)
mainCharacter = character1
objects['character1'] = mainCharacter
if self.verbose:
print("main character", mainCharacter.__repr__())
names = set()
names.add(str(mainCharacter.getProperty("name")))
# generate characters
supportingCharacters = []
while len(supportingCharacters) < num_characters-1:
newCharacter = WorldObject(self.templates, self.textGenerator, "character",
cfg=self.cfg,
verbose=self.verbose
)
thisName = str(newCharacter.getProperty("name"))
if thisName not in names:
if self.verbose:
print(newCharacter.__repr__())
supportingCharacters += [newCharacter]
names.add(thisName)
else:
if self.verbose:
print("skipping repeated character", thisName)
if subplot1:
objects['part 1'] = subplot1
for i in range(3):
objects['character%d' % (i+2)] = supportingCharacters[i]
plotOverview = WorldObject(
self.templates, self.textGenerator, "plot overview",
cfg=self.cfg,
verbose=self.verbose
)
subplot = plotOverview.getProperty("part 1")
objects["subplot"] = subplot
story = WorldObject(self.templates, self.textGenerator,
"storyWithCharacters",
cfg=self.cfg,
objects=objects,
verbose=self.verbose
)
if self.verbose:
print(story)
# get voices
voices = {}
genders = {}
for thisCharacter in [mainCharacter]+supportingCharacters:
name = str(thisCharacter.getProperty("name"))
gender = thisCharacter.getProperty("gender")
if self.usePITS:
# voices[name]=random.randint(0,len(self.pitsTTS.hps.data.speakers)-1)
if gender == "male":
voices[name] = random.choice(self.maleVoices)
else:
voices[name] = random.choice(self.femaleVoices)
else:
if gender == "male":
voices[name] = random.choice(self.maleVoices)
else:
voices[name] = random.choice(self.femaleVoices)
genders[name] = gender
print("GOT GENDER FOR:", name, "=", gender)
description = thisCharacter.getProperty("description")
# generate portraits
portraits = {}
for thisCharacter in [mainCharacter]+supportingCharacters:
name = str(thisCharacter.getProperty("name"))
gender = thisCharacter.getProperty("gender")
description = thisCharacter.getProperty("description")
prompt = "high resolution color portrait photograph of "+gender+", "+description + \
", solid white background"+promptSuffix
portrait = self.doGen(
prompt, num_inference_steps=self.num_inference_steps)
portraits[name] = portrait
yield {"debug": description}
yield {"image": portrait,
"width": 1024,
"height": 1024}
synopsis = story.getProperty("story synopsis")
whichScene = whichScene
numScenes = 3
for i in range(k):
scene = str(story.getProperty("scene %d" % whichScene))
whichSubplot = (i*5//k)+1
wss = "part %d" % whichSubplot
thisSubplot = plotOverview.getProperty(wss)
story.objects['subplot'] = thisSubplot
audio = self.generate_track_by_prompt_vol(
scene, vol=0.25, duration=self.musicDuration)
# parse out character1 and character2
character1 = None
for this_character1 in [mainCharacter]+supportingCharacters:
if str(this_character1.getProperty("name")) in scene:
character1 = this_character1
character1description = character1.getProperty(
"description")
break
character2 = None
for this_character2 in [mainCharacter]+supportingCharacters:
# gah, bug was that we were finding the same person twice!
if character1 is not None and str(this_character2.getProperty("name")) == str(character1.getProperty("name")):
continue
if str(this_character2.getProperty("name")) in scene:
character2 = this_character2
character2description = character2.getProperty(
"description")
break
# swap order if needed
if character1 is not None and character2 is not None:
name1 = str(character1.getProperty("name"))
name2 = str(character2.getProperty("name"))
i1 = scene.index(name1)
i2 = scene.index(name2)
# print("indexes", i1, i2)
if i1 > i2:
# swap
character1, character2 = character2, character1
character1description, character2description = character2description, character1description
else:
# print("huh?", character1, character2)
pass
prompt = scene + ", "
if character1 is not None:
prompt += character1description
else:
print("Error, could not find character1", scene)
if character2 is not None:
prompt += ", "+character2description+","+promptSuffix
else:
print("Error, could not find character2", scene)
image = self.doGen(
prompt, num_inference_steps=self.num_inference_steps)
yield {"debug": "Subplot: %s\n Scene: %s" % (thisSubplot, scene)}
if audio:
yield {"music": audio}
else:
print("err, no music!")
            if self.doImg2Img:
                width, height = self.imageSizes[2], self.imageSizes[3]
            else:
                # without img2img refinement the generated image is the base size
                width, height = self.imageSizes[0], self.imageSizes[1]
yield {"image": image,
"width": width,
"height": height,
}
transcript = self.sceneToTranscript(
story, k=whichScene,
character1=character1,
character2=character2,
whichScene=whichScene,
)
if self.verbose:
print(transcript)
# generate dialogue
if skip_transcript == False:
tt = transcript.getProperty("transcript")
for line in tt.split("\n"):
# thisImg = image.copy()
name, dialogue = line.split(":")
voice = voices[name]
portrait = portraits[name]
gender = genders[name]
try:
speech, duration = self.getTalkingHeadVideo(
portrait, dialogue, voice, gender, decimate=self.talking_head_decimate)
except Exception as e:
traceback.print_exc()
print("Error generating talking head video:", e)
return None
yield {"speech": speech,
"duration": duration+1,
"name": name,
"dialogue": dialogue}
# advance plot if necessary
whichScene += 1
if whichScene > numScenes:
whichScene, numScenes, story = self.advanceStory(
story,
thisSubplot,
mainCharacter=mainCharacter,
supportingCharacters=supportingCharacters,
alwaysUseMainCharacter=alwaysUseMainCharacter
)
if self.verbose:
print("advancing scene", story, whichScene, numScenes)
else:
# print("not advancing",whichScene,numScenes)
pass
def getTagBundles(self, longscreenplay):
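        # Group the speaker tags of a screenplay into bundles of overlapping words so that
        # variant spellings of the same character can be treated as one tag.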
tags = set([x.split(":")[0].lower()
for x in longscreenplay.split("\n") if ":" in x])
tags = [x for x in tags if len(x.split()) < 4]
tag_bundles = []
for tag in tags:
tagset = set(tag.split())-self.ignored_words
if len(tagset) == 0:
continue
t = 0
for bundle in tag_bundles:
if tagset.intersection(bundle):
t = 1
bundle.update(tagset)
if t == 0:
tag_bundles += [tagset]
# print(tag_bundles)
# and let's do that more more time
new_tag_bundles = []
for tagset in tag_bundles:
t = 0
for bundle in new_tag_bundles:
if tagset.intersection(bundle):
t = 1
bundle.update(tagset)
if t == 0:
new_tag_bundles += [tagset]
# print(new_tag_bundles)
return new_tag_bundles
def normalizeTag(self, tag, tag_bundles):
tagset = set(tag.split())-self.ignored_words
if len(tagset) == 0:
print("this should never happen!")
return tag
t = 0
for bundle in tag_bundles:
if tagset.intersection(bundle):
return "_".join(bundle)
print("this should never happen!")
return tag
def mergeName0(self, name1, names):
s1 = set(name1.lower().split())-self.ignored_words
# @s1 = set([x for x in s1 if len(x) > 3]) #not actually helpful
for name2 in names:
s2 = set(name2.lower().split())-self.ignored_words
if s1.intersection(s2):
# don't merge if they contain different digits
digits = set([str(i) for i in range(10)])
if len(s1.intersection(digits)) > 0 and len(s2.intersection(digits)) > 0:
if s1.intersection(digits) != s2.intersection(digits):
continue
return name2
return name1
def mergeName(self, name1, names):
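        # Map a speaker tag onto an existing character name when one name's word set
        # (minus ignored words) is a subset or superset of the other's.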
s1 = set(name1.lower().split())-self.ignored_words
# s1 = set([x for x in s1 if len(x) > 3])#not actually helpful
if len(s1) == 0:
return name1
for name2 in names:
s2 = set(name2.lower().split())-self.ignored_words
# s2 = set([x for x in s2 if len(x) > 3])
if len(s2) == 0:
continue
if s1.issubset(s2):
return name2
if s1.issuperset(s2):
return name2
return name1
def enhancePrompt(self, prompt, characters, storyObjects=None):
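        # Append an optional per-story "prompt object" and the descriptions of any characters
        # whose names appear in the prompt; returns (prompt, didEnhance).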
output = prompt
didEnhance = False
print("ABOUT TO DIE")
try:
if storyObjects is not None and storyObjects.has("prompt object"):
prompt_object = storyObjects.getProperty("prompt object")
promptObject = WorldObject(
self.templates,
self.textGenerator,
prompt_object,
verbose=False,
cfg=self.cfg
)
print("GOT PROMPT OBJECT:", promptObject)
didEnhance = True
output += ", " + str(promptObject)
except Exception as e:
traceback.print_exc()
print("ERROR ENHANCING PROMPT:", e)
for name in characters.keys():
n = set([w.lower()
for w in name.split() if len(w) > 2])-self.ignored_words
for w in n:
if w in prompt:
output += " "+characters[name].getProperty("description")
didEnhance = True
break
return output, didEnhance
def generateNewCharacter(self, tag, _characters):
# create a custom template where we add the existing characters to the template
customTemplate = self.templates["character"]
# split on \n\n
customTemplate = customTemplate.split("\n\n")
# filter out anything thats just whitespace
customTemplate = [x for x in customTemplate if x.strip() != ""]
# print("CUSTOM TEMPLATE ENDS WITH\n--\n"+customTemplate[-1])
# add the filled templates of the existing characters before the final template
for character in _characters.values():
character_repr = character.__repr__()
# remove lines that start with <
character_repr = "\n".join(
[x for x in character_repr.split("\n") if not x.startswith("<")])
customTemplate.insert(-1, character_repr)
# join the templates back together
customTemplate = "\n\n".join(customTemplate)
# debug
# print("CREATING NEW CHARACTER",tag+"\n===\n"+customTemplate)
character = WorldObject(
self.templates,
self.textGenerator,
"character",
objects={"name": tag},
cfg=self.cfg,
customTemplate=customTemplate
)
# print("GENERATED CHARACTER", character.__repr__())
return character
def transcriptToAnime(
self,
transcript,
promptSuffix="",
portrait_size=128,
aggressiveMerging=False,
savedcharacters=None,
savedPortraits=None,
savedVoices=None,
savedGenders=None,
actionDuration=5,
settingDuration=2,
imageFrequency=3,
storyObjects=None,
mainCharacterName=None,
):
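        # Generator: walks the "tag: description" lines of a screenplay and yields dicts for
        # the front end ({"image"|"music"|"sound effect"|"speech"|"caption"|"debug": ...}),
        # creating characters, portraits and voices as new speaker tags appear.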
# make sure text generator is on cuda (can get out of sync if we ctrl+c during doGen() )
# if self.textGenerator["name"].startswith("GPT3"):
# pass
# else:
# self.textGenerator['pipeline'].model = self.textGenerator['pipeline'].model.cuda(
# )
# extract characters
if savedcharacters is None:
_characters = {}
else:
_characters = savedcharacters
if savedPortraits is None:
portraits = {}
else:
portraits = savedPortraits
if savedVoices is None:
voices = {}
else:
voices = savedVoices
if savedGenders is None:
genders = {}
else:
genders = savedGenders
tagBundles = self.getTagBundles(transcript)
for line in transcript.split("\n"):
tag = line.split(":")[0].strip().lower()
if tag in ["setting", "action", "music", "sound effect"]:
continue
if aggressiveMerging:
# tagn=self.normalizeTag(tag,tagBundles)
tagn = self.mergeName(tag, _characters.keys())
else:
tagn = tag
if tagn in _characters:
continue
else:
character = self.generateNewCharacter(tag, _characters)
print("GENERATED CHARACTER", character.__repr__())
_characters[tagn] = character
characters = list(_characters.values())
# get voices
for thisCharacter in characters:
name = str(thisCharacter.getProperty("name"))
gender = thisCharacter.getProperty("gender").lower()
if name in voices:
continue
if self.usePITS:
# voices[name]=random.randint(0,len(self.pitsTTS.hps.data.speakers)-1)
if gender == "male":
voices[name] = random.choice(self.maleVoices)
else:
voices[name] = random.choice(self.femaleVoices)
else:
if gender == "male":
voices[name] = random.choice(self.maleVoices)
else:
voices[name] = random.choice(self.femaleVoices)
genders[name] = gender
description = thisCharacter.getProperty("description")
print("GOT GENDER FOR:", name, "=", gender)
# generate portraits
for thisCharacter in characters:
name = str(thisCharacter.getProperty("name"))
if name in portraits:
continue
gender = thisCharacter.getProperty("gender")
description = thisCharacter.getProperty("description")
prompt = "close up headshot, high resolution color portrait of "+name+" "+gender+", "+description + \
", solid white background"
# portrait = self.doGen(
# prompt, num_inference_steps=self.num_inference_steps)
portrait = self.getPortrait(prompt, promptSuffix)
portraits[name] = portrait
yield {"debug": description}
yield {"image": portrait,
"width": 1024,
"height": 1024,
}
yield {"caption": "new character: %s: %s" % (name, description), "duration": settingDuration}
lastPrompt = "an empty stage"
t = 0
settingImage = self.doGen(
"an empty stage", num_inference_steps=self.num_inference_steps)
for line in transcript.split("\n"):
t += 1
if len(line.split(":")) != 2:
print("this should never happen!", line, transcript)
continue
tag = line.split(":")[0].strip().lower()
description = line.split(":")[1].strip().lower()
if imageFrequency is not None and t > imageFrequency and tag not in ["setting", "action"]:
logging.info("creating extra image %s", tag)
t = 0
img = self.doGen(
lastPrompt, num_inference_steps=self.num_inference_steps)
# generate video
if self.video_mode:
video = generateGif(lastPrompt, img)
else:
video = None
                if self.doImg2Img:
                    width, height = self.imageSizes[2], self.imageSizes[3]
                else:
                    # without img2img refinement the generated image is the base size
                    width, height = self.imageSizes[0], self.imageSizes[1]
yield {"image": img,
"width": width,
"height": height,
"video": video,
}
settingImage = img
if tag == "setting":
prompt = description+promptSuffix
prompt, didEnhance = self.enhancePrompt(
prompt, _characters, storyObjects)
lastPrompt = prompt
if didEnhance:
print("enhanced prompt", prompt)
t = 0
settingImage = self.doGen(
prompt, num_inference_steps=self.num_inference_steps)
# generate video
if self.video_mode:
                    video = generateGif(prompt, settingImage)
else:
video = None
                if self.doImg2Img:
                    width, height = self.imageSizes[2], self.imageSizes[3]
                else:
                    # without img2img refinement the generated image is the base size
                    width, height = self.imageSizes[0], self.imageSizes[1]
yield {"image": settingImage,
"width": width,
"height": height,
"video": video,
}
yield {"caption": "Setting: %s" % description,
"duration": settingDuration}
elif tag == "music":
musicPrompt = description+self.musicSuffix
audio = self.generate_track_by_prompt_vol(
musicPrompt, vol=0.25,
duration=self.musicDuration
)
yield {"music": audio}
elif tag == "sound effect":
# todo: implement
audio, duration = self.generateAudio(
description, self.soundEffectDuration)
yield {"sound effect": audio,
"description": description,
"duration": duration,
}
# yield {"caption": "Sound Effect: %s" % description,
# "duration": settingDuration}
elif tag == "action":
prompt = description+promptSuffix
lastPrompt = prompt
prompt, didEnhance = self.enhancePrompt(
prompt, _characters, storyObjects)
if didEnhance:
print("enhanced prompt", prompt)
elif mainCharacterName is not None:
# add main character description
print("no character found, adding main character")
print("main character name", mainCharacterName)
print("characters", _characters.keys())
print("characters", _characters[mainCharacterName])
prompt += ", " + \
str(_characters[mainCharacterName].getProperty(
"description"))
actionImage = self.doGen(
prompt, num_inference_steps=self.num_inference_steps)
# generate video
if self.video_mode:
                    video = generateGif(prompt, actionImage)
else:
video = None
# for now this seems better
t = 0
settingImage = actionImage
                if self.doImg2Img:
                    width, height = self.imageSizes[2], self.imageSizes[3]
                else:
                    # without img2img refinement the generated image is the base size
                    width, height = self.imageSizes[0], self.imageSizes[1]
yield {"image": actionImage,
"width": width,
"height": height,
"video": video,
}
yield {"caption": description,
"duration": actionDuration}
else:
print("Dying here?")
if aggressiveMerging:
# tagn=self.normalizeTag(tag,tagBundles)
tagn = self.mergeName(tag, _characters.keys())
else:
tagn = tag
thisCharacter = _characters[tagn]
name = str(thisCharacter.getProperty("name"))
# thisImg = settingImage.copy()
# name, dialogue = tagn,description
dialogue = description
voice = voices[name]
gender = genders[name]
portrait = portraits[name]
# p2 = portrait.resize((portrait_size, portrait_size))
# thisImg.paste(
# p2, (thisImg.size[0]-portrait_size, thisImg.size[1]-portrait_size))
# print("about to die",dialogue, voice)
if len(dialogue.strip()) == 0:
print("this should never happen!", transcript)
continue
speech, duration = self.getTalkingHeadVideo(
portrait, dialogue, voice, gender, decimate=self.talking_head_decimate)
yield {"speech": speech,
"duration": duration,
"name": name,
"dialogue": dialogue}
return
def openaiChatCompletion(
self,
model="gpt-3.5-turbo-instruct",
messages=[],
timeout=10,
n=1,
max_tokens=512
):
# first combine all of the messages into a prompt
prompt = ""
for message in messages:
prompt += message['role']+":\n "+message['content']+"\n"
# prompt += message['content']+"\n"
prompt += "assistant:\n"
response = openai.Completion.create(
model="gpt-3.5-turbo-instruct",
prompt=prompt,
n=n,
max_tokens=max_tokens,
)
return response
# @retry(wait=wait_exponential(multiplier=1, min=4, max=10),before_sleep=before_sleep_log(logger, logging.WARNING))
def createScreenplay(self, sceneDescription, previousMessages=[], n=1):
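        # Ask the chat model for `n` screenplay completions of the scene description,
        # using the screenplay system prompt plus any previous messages.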
systemprompt = self.screenplayPrompt
messages = [
{"role": "system", "content": systemprompt},
] + \
previousMessages + \
[
{"role": "user", "content": sceneDescription},
{"role": "user", "content": "SCREENPLAY:"},
]
# print("Creating Screenplay", messages)
# if self.use_gpt_for_chat_completion:
if True: # for now, we always just use GPT for chat completion
# response = openai.ChatCompletion.create(
if self.use_GPT4:
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
timeout=10,
n=n,
)
output = []
for choice in response.choices:
output += [choice.message.content]
#output += [choice.con]
else:
response = self.openaiChatCompletion(
model="gpt-3.5-turbo-instruct",
messages=messages,
timeout=10,
n=n,
)
output = []
for choice in response.choices:
# output += [choice.message.content]
output += [choice.text]
else: # theoretically I should fix this so it doesn't break if textmodel is "GPT3"
output = self.chatCompletion(
messages, n=n, generation_prefix="setting:")
return output
def classify_text_openai0(self, text, categories=["setting", "action", "sound effect"]):
prompt = f"Classify the following line of text into one of these categories: setting, action, or sound effect:\n\n{text}\n\nCategory:"
response = openai.Completion.create(
engine="text-curie-001",
prompt=prompt,
max_tokens=50,
n=1,
stop=None,
temperature=0.1,
)
response_text = response.choices[0].text.strip().lower()
# Find the best matching category
best_category = None
best_match = 0
for category in categories:
match = response_text.count(category)
if match > best_match:
best_match = match
best_category = category
return best_category
def classify_text_openai(self, text, categories=["setting", "action", "sound effect", "tone", "gesture"]):
messages = example_classifications+[{"role": "user", "content": text}]
if self.use_gpt_for_chat_completion:
# response = openai.ChatCompletion.create(
response = self.openaiChatCompletion(
model="gpt-3.5-turbo-instruct",
messages=messages, timeout=10
)
result = ''
for choice in response.choices:
# result += choice.message.content
result += choice.text
output = result.lower().strip()
else:
output = self.chatCompletion(messages)[-1]
        # fall back to the closest matching category when the reply is not an exact match
        if output not in categories:
            print("THIS SHOULD NEVER HAPPEN", output)
            best_category = None
            best_match = 0
            for category in categories:
                match = output.count(category)
                if match > best_match:
                    best_match = match
                    best_category = category
            if best_match > 0:
                output = best_category
            else:
                output = "action"
print("output", output)
return output
def validateScreenplay(self, screenplay):
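        # Clean up a generated screenplay line by line (normalize tags, strip asides and stray
        # punctuation, ensure a music cue) and return (cleaned lines, error score);
        # a lower score means a better-formed screenplay.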
score = 0
hasMusic = False
out = []
for line in screenplay.split("\n"):
# skip blank lines
if len(line.strip()) == 0:
# score+=1 #don't penalize blank lines
continue
# skip lines w/out colons
if ":" not in line:
score += 2
if self.fixAsides:
                    # no tag yet, so classify the whole line and prepend the result as its tag
                    category = self.classify_text_openai(line)
                    line = category+": "+line
else:
continue
if len(line.split(":")) != 2:
score += 1
continue
# tag cannot be empty
if len(line.split(":")[0].strip()) == 0:
score += 1
continue
# tag shouldn't be very long
if len(line.split(":")[0].strip().split()) > 4:
score += 1
continue
# line shouldn't be very long
if len(line) > 240:
score += 1
continue
# check for music
tag = line.split(":")[0].strip().lower()
description = line.split(":")[1].strip()
if tag == "music":
print("found music", line)
hasMusic = True
# check if "music" is in the tag, e.g. "final music:"
if "music" in tag:
category = "music"
line = category + ": "+description
hasMusic = True
# fix some bad tags
if tag == "sfx":
if self.fixAsides:
category = self.classify_text_openai(description)
else:
category = "sound effect"
line = category+": "+description
# fix some bad tags
if tag == "sound effect":
if self.fixAsides:
category = self.classify_text_openai(description)
else:
category = "sound effect"
line = category+": "+description
if tag == "sound effects":
if self.fixAsides:
category = self.classify_text_openai(description)
else:
category = "sound effect"
line = category+": "+description
if "dialogue" in tag:
if self.fixAsides:
category = self.classify_text_openai(description)
else:
category = "action"
line = category+": "+description
if "supporting character" in tag:
score += 1
continue
if tag == "gesture":
tag = "action"
if tag == "tone":
# TODO: fix this
continue
# fix any tag that contains "setting", e.g "settings"
if "setting" in tag:
category = "setting"
line = category+": "+description
# some more tags like this
tagsToFix = ["antagonist", "end", "flashback", "foreshadow", "prologue", "protagonist",
"start", "subplot", "theme", "title", "twist", "voiceover", "location"]
for tagToFix in tagsToFix:
if tag == tagToFix:
if self.fixAsides:
category = self.classify_text_openai(description)
else:
category = "action"
line = category+": "+description
# description cannot be empty
if len(line.split(":")[1].strip()) == 0:
score += 1
continue
# remove ""s (but don't penalize score)
line = re.sub("\"", "", line)
# remove ()'s, *asides*, and [braces]
patterns = [r'\((.*?)\)', r'\*(.*?)\*', r'\[(.*?)\]', r'\{(.*?)\}']
for pattern in patterns:
if re.search(pattern, line):
tag = re.sub(pattern, "", tag).strip()
if self.fixAsides:
for match in re.findall(pattern, line):
category = self.classify_text_openai(match)
if category == "gesture":
category = "action"
if category == "tone":
score += 0.5
line = re.sub(pattern, "", line)
continue
out += [category + ": " + tag + " " + match]
score += 1
line = re.sub(pattern, "", line)
# remove []'s
# remove ""'s
if re.search("[^a-zA-Z0-9_.?!,';:\- ]", line):
score += 1
line = re.sub("[^a-zA-Z0-9_.?!,';:\- ]", "", line)
if len(line.strip()) == 0:
score += 1
continue
# reject if description contains no letters
tag = line.split(":")[0].strip().lower()
description = line.split(":")[1].strip()
if re.search("[a-zA-Z]", description) == None:
score += 1
continue
out += [line]
# add music if there isn't any
if hasMusic == False:
out = ["music: %s" % self.riffusionSuffix]+out
# print(out,hasMusic)
return out, score
def getValidScreenplay(self, sceneDescription, nTrials=3, previousMessages=[], allowed_errors=3, verbose=False):
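# Generate up to nTrials candidate screenplays and return the one with the lowest per-line error rate; if none validate, make one final attempt and return it.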
whichScreenplay = -1
bestScreenplay = None
_bestScreenplay = None
bestScore = 999
# first let's request all of the screenplays in parallel (this doesn't work because it forks the whole thing...)
# pool = Pool(processes=4) #bad idea, forking this process is bad because of all the global variables
# screenplays=pool.map(lambda x: self.createScreenplay(sceneDescription, previousMessages=previousMessages), range(nTrials))
# createScreenplay
if self.parallel_screenplays:
screenplays = self.createScreenplay(
sceneDescription, previousMessages=previousMessages, n=nTrials)
for i in range(nTrials):
try:
print("CREATING SCREENPLAY, attempt", i, "of", nTrials)
# s = self.createScreenplay(
# sceneDescription, previousMessages=previousMessages)
if self.parallel_screenplays:
s = screenplays[i]
else:
s = self.createScreenplay(
sceneDescription, previousMessages=previousMessages)[0]
print(s)
except Exception as e:
print(e)
traceback.print_exc()
# print error
print("FAILED TO CREATE SCREENPLAY, attempt", i, "of", nTrials)
continue
print("Validating SCREENPLAY, attempt", i, "of", nTrials)
try:
v, score = self.validateScreenplay(s)
except Exception as e:
print(e)
traceback.print_exc()
# print error
print("FAILED TO VALIDATE SCREENPLAY, attempt", i, "of", nTrials)
continue
if verbose:
print(s, score)
# print the loglevel to see if it's working
print("what happened to our log level?",
logging.getLogger().getEffectiveLevel())
logging.info("screenplay:\n score %d/%d=%f",
score, len(v), score/len(v))
if len(v) > 8 and score <= allowed_errors:
if self.parallel_screenplays:
pass # no point in returning early anymore, since we generate nTrials screenplays regardless
else:
print("RETURNING EARLY", score, len(v), "\n".join(v))
return "\n".join(v)
if len(v) > 8 and score/len(v) < bestScore:
logging.info("new best score! %f", score/len(v))
_bestScore = score
bestScore = score/len(v)
bestScreenplay = v
_bestScreenplay = s
whichScreenplay = i
if bestScreenplay is None:
print("unable to create screenplay!")
s = self.createScreenplay(
sceneDescription, previousMessages=previousMessages, n=1)[0]
# print("GOT VALID SCREENPLAY",bestScore,len(v),"\n".join(v))
v, score = self.validateScreenplay(s)
return "\n".join(v)
else:
# print(_bestScreenplay, bestScore)
print("Choose screenplay", whichScreenplay, "with score",
_bestScore, len(bestScreenplay), bestScore/len(bestScreenplay))
return "\n".join(bestScreenplay)
def createTranscriptGPT(self, novelSummary, characters, chapters, allScenes, whichChapter, whichScene, previousMessages=None, num_chapters=12, num_scenes=5, max_tokens=1000, additionalScenePrompt=None, conclusionPrompt=None, verbose=False):
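# Assemble the chat prompt for a single scene (novel recap, example screenplay, scene summaries) and return (screenplay_text, updated_message_history).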
print("creating scene %d of chapter %d" % (whichScene, whichChapter))
summarizeNovelMessage = str(WorldObject(
self.templates,
self.textGenerator,
"explainNovelTemplate",
objects={"novelSummary": novelSummary,
"novelCharacters": characters,
"novelChapters": chapters,
},
cfg=self.cfg
))
# remove lines that start with "<"
summarizeNovelMessage = re.sub(r'\<.*?\>', '', summarizeNovelMessage)
sceneSummary = allScenes[whichChapter -
1].getProperty("scene %d summary" % whichScene)
# print(summarizeNovelMessage)
print(sceneSummary)
if additionalScenePrompt:
sceneSummary += additionalScenePrompt
if conclusionPrompt is None:
conclusionPrompt = " This is the last scene, so make sure to give the story a satisfying conclusion."
if whichChapter == num_chapters and whichScene == num_scenes:
sceneSummary += conclusionPrompt
examplePrompt = exampleScreenplayPrompt.format(
mainCharacter=characters.getProperty("main character name"),
supportingCharacter1=characters.getProperty(
"supporting character 1 name"),
supportingCharacter2=characters.getProperty(
"supporting character 2 name")
)
exampleTranscript = exampleScreenplayResult.format(MainCharacter=characters.getProperty("main character name"),
SupportingCharacter1=characters.getProperty(
"supporting character 1 name"),
SupportingCharacter2=characters.getProperty(
"supporting character 2 name")
)
if previousMessages is None:
# we should tell it in advance what scenes are in this chapter
s = ""
for i in range(1, num_scenes+1):
s += "chapter {whichChapter} has the following scenes:\n\nscene {i} summary:\n{sceneSummary}\n".format(
whichChapter=whichChapter,
i=i,
sceneSummary=allScenes[whichChapter -
1].getProperty("scene %d summary" % i)
)
chapter_scenes_message = {"role": "user", "content": s}
messages = [
{"role":"system","content":self.screenplayPrompt},
{"role": "user", "content": summarizeNovelMessage},
{"role": "user", "content": "Create a transcript for chapter 0, scene 1 with the following summary\n\n{sceneSummary}".format(
whichChapter=whichChapter, whichScene=whichScene, sceneSummary=examplePrompt)},
# {"role": "user", "content": examplePrompt},
# {"role": "user", "content": "SCREENPLAY:"},
{"role": "assistant", "content": exampleTranscript},
chapter_scenes_message,
{"role": "user", "content": "Create a transcript for chapter {whichChapter}, scene {whichScene} with the following summary\n\n{sceneSummary}".format(
whichChapter=whichChapter, whichScene=whichScene, sceneSummary=sceneSummary)}
]
else:
if whichScene == 1:
# we should tell it in advance what scenes are in this chapter
s = ""
for i in range(1, num_scenes+1):
s += "chapter {whichChapter} has the following scenes:\n\nscene {i} summary:\n{sceneSummary}\n".format(
whichChapter=whichChapter,
i=i,
sceneSummary=allScenes[whichChapter -
1].getProperty("scene %d summary" % i)
)
previousMessages = previousMessages+[{"role": "user", "content": s}]
print("added chapter scenes")
else:
print("skipping chapter scenes", whichScene)
messages = previousMessages+[
{"role": "user", "content": "Create a transcript for chapter {whichChapter}, scene {whichScene} with the following summary\n\n{sceneSummary}".format(
whichChapter=whichChapter, whichScene=whichScene, sceneSummary=sceneSummary)}
]
logging.info("Creating scene with description: %s", sceneSummary)
print("MESSAGES", messages)
# response=animeBuilder.createScreenplay(sceneSummary,messages)
response = self.getValidScreenplay(
sceneSummary, previousMessages=messages)
# response=animeBuilder.getValidScreenplay(sceneSummary)
outputMessages = messages+[
{"role": "user", "content": sceneSummary},
{"role": "user", "content": "SCREENPLAY:"},
{"role": "assistant", "content": response},
]
return response, outputMessages
def createChaptersFromWorldObject(self, novelSummary,
characters,
k=3):
# first format our chapter prompt
emptyChapterTemplate = "\n".join(["""chapter {i} title:
<chapter {i} title>
chapter {i} summary:
<chapter {i} summary>""".format(i=i) for i in range(1, k+1)])
formattedSystemPrompt = self.chapterPrompt.format(
k=k, emptyChapterTemplate=emptyChapterTemplate)
# first we need to build a custom template for the novel
customTemplate = """
{formattedSystemPrompt}
summary:
{examplechapterPrompt}
{exampleChapterResult}
characters:
{novelCharacters}
summary:
{thisNovelSummary}
Remember, chapter summary should be a brief sentence or two describing what happens in the chapter.
""".format(
formattedSystemPrompt=formattedSystemPrompt,
thisNovelSummary=novelSummary.getProperty("summary"),
examplechapterPrompt=examplechapterPrompt,
exampleChapterResult=exampleChapterResults[k],
novelCharacters=str(characters).split("\n", 1)[1]
)
emptyChapterTemplates = ["""chapter <i> title:
{chapter <i> title:TEXT:}
chapter <i> summary:
{chapter <i> summary:TEXT:}""".replace("<i>", str(i)) for i in range(1, k+1)]
# add a comment at the start of final chapter
emptyChapterTemplates[-1] = ">this is the final chapter\n" + \
emptyChapterTemplates[-1]
emptyChapterTemplate = "\n".join(emptyChapterTemplates)
customTemplate += "\n"+emptyChapterTemplate
print("how's it going?", customTemplate)
# now we need to build a world object for the novel
w = WorldObject(
self.templates,
self.textGenerator,
"novelChapters",
customTemplate=customTemplate,
cfg=self.cfg)
print("\n===\nhow did it go?\n===\n", w.filledTemplate)
output = str(w).split("\n", 1)[1]
return output
@retry(wait=wait_exponential(multiplier=1, min=4, max=10))
def novelToChapters(self, novelSummary, novelCharacters, previousMessages=None, k=12):
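# Ask the chat model to expand the novel summary and characters into k chapter titles and summaries.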
emptyChapterTemplate = "\n".join(["""chapter {i} title:
<chapter {i} title>
chapter {i} summary:
<chapter {i} summary>""".format(i=i) for i in range(1, k+1)])
systemPrompt = self.chapterPrompt.format(
k=k, emptyChapterTemplate=emptyChapterTemplate)
if previousMessages is None:
previousMessages = [
{"role": "user", "content": examplechapterPrompt},
{"role": "user", "content": emptyChapterTemplate},
{"role": "assistant", "content": exampleChapterResults[k]},
]
messages = [
{"role": "system", "content": systemPrompt},
] + \
previousMessages + \
[
{"role": "user", "content": str(novelCharacters)},
{"role": "user", "content": str(novelSummary)},
{"role": "user", "content": emptyChapterTemplate}
]
# print("here3",messages)
# logging.info(messages)
if self.use_gpt_for_chat_completion:
# response = openai.ChatCompletion.create(
response = self.openaiChatCompletion(
model="gpt-3.5-turbo-instruct",
messages=messages, timeout=10
)
result = ''
for choice in response.choices:
# result += choice.message.content
result += choice.text
else:
result = self.chatCompletion(messages)[0]
return result
def validateChapters(self, novelChapters, k=12, verbose=False):
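# Normalize the generated chapter text, wrap it in a WorldObject, and count how many chapters have both a title and a summary.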
# if any lines contain a ":", then split on the ":" and move the 2nd half to the next line
# this is to fix a bug in the GPT-3 engine where it sometimes puts a ":" in the middle of a line
newLines = []
for line in novelChapters.split('\n'):
if ":" in line and not line.endswith(":"):
parts = line.split(":")
newLines.append(parts[0]+":")
newLines.append(parts[1])
else:
newLines.append(line)
novelChapters = '\n'.join(newLines)
# remove blank lines
customTemplate = ""
for line in novelChapters.split("\n"):
# drop blank lines
if len(line.strip()) == 0:
continue
line = line.strip()
# tags should be lowercase
if line[-1] == ":":
line = line.lower()
customTemplate += line+"\n"
if verbose:
print(customTemplate)
w = WorldObject(
self.templates,
self.textGenerator,
"novelChapters",
customTemplate=customTemplate,
cfg=self.cfg)
score = 0
for i in range(1, k+1):
if w.has("chapter %d title" % i) and w.has("chapter %d summary" % i):
score += 1
logging.info("%s \n score %d", novelChapters, score)
return w, score
def getValidChapters(self, novelSummary, characters, k=12, nTrials=3, verbose=False):
bestNovel = None
bestScore = 0
for i in range(nTrials):
c = self.novelToChapters(novelSummary, characters, k=k)
w, score = self.validateChapters(c, k=k)
if score == k:
return w
if score > bestScore:
bestNovel = w
bestScore = score
print("failed to generate novel", score)
return w
@retry(wait=wait_exponential(multiplier=1, min=4, max=10))
def chapterToScenes(self,
novelSummary,
characters,
chapters,
chapterTitle,
chapterSummary,
whichChapter,
previousMessages=None,
k=5,
numChapters=12
):
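# Ask the chat model for k scene summaries for one chapter, reusing the prior message history when available.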
emptyScenesTemplate = "\n".join(["""scene {i} summary:
<scene {i} summary>""".format(i=i)
for i in range(1, k+1)
])
systemPrompt = self.scenePrompt.format(
numScenes=k, emptyScenesTemplate=emptyScenesTemplate)
if previousMessages is None:
messages = [
{"role": "system", "content": systemPrompt},
{"role": "user", "content": exampleScenesPrompt},
{"role": "assistant", "content": exampleScenesResult[k]},
{"role": "user", "content": str(characters)},
{"role": "user", "content": str(novelSummary)},
# {"role": "user", "content": str(chapters)},
{"role": "user", "content": "generate scenes for chapter %d of this novel which has a total of %d chapters" %
(whichChapter, numChapters)},
{"role": "user", "content": str(chapterTitle)},
{"role": "user", "content": str(chapterSummary)},
{"role": "user", "content": emptyScenesTemplate},
]
else:
messages = previousMessages+[
{"role": "user", "content": "generate scenes for chapter %d of this novel which has a total of %d chapters" %
(whichChapter, numChapters)},
{"role": "user", "content": str(chapterTitle)},
{"role": "user", "content": str(chapterSummary)},
{"role": "user", "content": emptyScenesTemplate},
]
if self.use_gpt_for_chat_completion:
# response = openai.ChatCompletion.create(
response = self.openaiChatCompletion(
model="gpt-3.5-turbo-instruct",
messages=messages,
timeout=10,
)
result = ''
for choice in response.choices:
# result += choice.message.content
result += choice.text
else:
result = self.chatCompletion(messages)[0]
outputMessages = messages+[{"role": "assistant", "content": result}]
return result, outputMessages
def validateScenes(self, chapterScenes, k=5, verbose=False):
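# Normalize the generated scene text, wrap it in a WorldObject, and count how many consecutive scene summaries it contains.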
# if any lines contain a ":", then split on the ":" and move the 2nd half to the next line
# this is to fix a bug in the GPT-3 engine where it sometimes puts a ":" in the middle of a line
newLines = []
for line in chapterScenes.split('\n'):
if ":" in line and not line.endswith(":"):
parts = line.split(":")
newLines.append(parts[0]+":")
newLines.append(parts[1])
else:
newLines.append(line)
chapterScenes = '\n'.join(newLines)
# remove blank lines
customTemplate = ""
for line in chapterScenes.split("\n"):
# drop blank lines
if len(line.strip()) == 0:
continue
line = line.strip()
# tags should be lowercase
if line[-1] == ":":
line = line.lower()
customTemplate += line+"\n"
logging.info(customTemplate)
w = WorldObject(
self.templates,
self.textGenerator,
"chapterScenes",
customTemplate=customTemplate,
cfg=self.cfg)
score = 0
k = 1
while w.has("scene %d summary" % k):
score += 1
k += 1
return w, score
def getValidScenes(
self,
novelSummary,
characters,
chapters,
chapterTitle,
chapterSummary,
whichChapter,
k=5,
nTrials=3,
previousMessages=None,
numChapters=12,
verbose=False
):
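# Try chapterToScenes up to nTrials times when GPT chat completion is enabled; otherwise (or if every trial fails) fall back to filling a plain-text template.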
# first try using GPT
if self.use_gpt_for_chat_completion:
bestNovel = None
bestScore = -999
bestMessages = None
for i in range(nTrials):
c, messages = self.chapterToScenes(novelSummary,
characters,
chapters,
chapterTitle,
chapterSummary,
whichChapter,
previousMessages=previousMessages,
numChapters=numChapters,
k=k
)
w, foundScenes = self.validateScenes(c, k=k)
logging.info("FoundScenes %d / %d", foundScenes, k)
if foundScenes == k:
return w, messages
# score penalizes deviation from the requested scene count so the comparison below is always defined
score = -abs(k-foundScenes)
if foundScenes > k:
print("too many scenes!", score)
if score > bestScore:
bestNovel = w
bestScore = score
bestMessages = messages
print("failed to generate novel", foundScenes)
if previousMessages is None:
previousMessages = []
fallbackTemplate = ""
# add scene prompt
emptyScenesTemplate = "\n".join(["""scene {i} summary:
<scene {i} summary>""".format(i=i)
for i in range(1, k+1)
])
systemPrompt = self.scenePrompt.format(
numScenes=k, emptyScenesTemplate=emptyScenesTemplate)
fallbackTemplate += systemPrompt+"\n"
if len(previousMessages) == 0:
fallbackTemplate += "chapter summary:\n"+exampleScenesPrompt+"\n"
fallbackTemplate += exampleScenesResult[k]
# combine chapters
chapterString = ""
for i in range(1, numChapters+1):
chapterString += "chapter %d title:\n%s\nchapter %d summary:\n%s\n" % (
i, chapters.getProperty("chapter %d title" % i), i, chapters.getProperty("chapter %d summary" % i))
# make sure to include novel summary
fallbackTemplate += """
We will now be generating scenes for the following novel:
{chapterString}
{novelCharacters}
novel title:
{novelTitle}
novel summary:
{novelSummary}
""".format(
k=k,
novelTitle=novelSummary.getProperty("title"),
novelSummary=novelSummary.getProperty("summary"),
novelCharacters=str(characters).split("\n", 1)[1],
chapterString=chapterString
)
for message in previousMessages:
if message["role"] == "user":
fallbackTemplate += message["content"]+"\n\n"
if message["role"] == "assistant":
fallbackTemplate += message["content"]+"\n\n"
emptyScenesTemplate = "\n".join(["""scene <i> summary:
{scene <i> summary:TEXT:}""".replace("<i>", str(i))
for i in range(1, k+1)
])
fallbackTemplate += "> generate scenes for chapter %d of this novel\n" % whichChapter
fallbackTemplate += self.bonusSceneInstruction
fallbackTemplate += "chapter title:\n"+chapterTitle+"\n\n"
fallbackTemplate += "chapter summary:\n" + \
chapterSummary+"\n\n"+emptyScenesTemplate
print("==========\n\nChapter "+str(whichChapter) +
"fallback template\n====\n", fallbackTemplate, "\n\n")
w = WorldObject(
self.templates,
self.textGenerator,
"chapterToScenes",
customTemplate=fallbackTemplate,
cfg=self.cfg
)
# print("\n====\nproduced object:\n"+str(w)+"\n\n")
# print("with filled template:\n"+w.filledTemplate+"\n\n")
messages = previousMessages+[
{"role": "user", "content": "generate scenes for chapter %d of this novel" %
whichChapter},
{"role": "user", "content": "chapter title:\n"+str(chapterTitle)},
{"role": "user", "content": "chapter summary:\n" +
str(chapterSummary)},
# {"role": "assistant", "content": str(w).split("\n", 1)[1]}
]
# chapter_scenes = str(w).split("\n", 1)[1]
chapter_scenes = str(w)
# print("GIR",messages)
print("generated fallback scenes", chapter_scenes)
output_messages = messages+[
{"role": "assistant", "content": str(w).split("\n", 1)[1]}
]
return chapter_scenes, output_messages
# return bestNovel, bestMessages
def chaptersToScenes(
self,
novelSummary,
characters,
chapters,
numChapters=12,
numScenes=5,
nTrials=3
):
output = []
previousMessages = None
for whichChapter in range(1, numChapters+1):
chapterTitle = chapters.getProperty(
"chapter %d title" % (whichChapter))
chapterSummary = chapters.getProperty(
"chapter %d summary" % (whichChapter))
if previousMessages is not None and len(previousMessages) > 20:
previousMessages = previousMessages[:5]+previousMessages[-15:]
c, messages = self.getValidScenes(
novelSummary,
characters,
chapters,
chapterTitle,
chapterSummary,
whichChapter=whichChapter,
k=numScenes,
nTrials=nTrials,
previousMessages=previousMessages,
numChapters=numChapters
)
print("\n\nchapter", whichChapter, chapterTitle, chapterSummary)
print(c)
output += [c]
previousMessages = messages
# print("What??", len(previousMessages), previousMessages)
return output
# these methods are special because they take text templates as inputs instead of
# WorldObjects
def create_novel_summary(self, story_objects):
story_objects = story_objects.replace("\r\n", "\n")
storyObjects = WorldObject(
self.templates,
self.textGenerator,
"storyObjects",
customTemplate=story_objects,
cfg=self.cfg
)
'''
novelSummary = WorldObject(
self.templates,
self.textGenerator,
"novelSummary",
# objects={"title":"The big one",
# "summary":"A group of sexy female lifeguards take up surfing"}
objects={"storyObjects": storyObjects}
)
novel_summary = str(novelSummary)
return novel_summary.split('\n', 1)[1]
'''
if storyObjects.has("novel suggestion"):
novelSuggestion = storyObjects.getProperty("novel suggestion")
logging.info("novel suggestion: %s", novelSuggestion)
else:
novelSuggestion = None
if storyObjects.has("character type"):
if novelSuggestion:
novelSuggestion += "\ncharacter type = %s \n" % storyObjects.getProperty(
"character type")
else:
novelSuggestion = "\ncharacter type = %s \n" % storyObjects.getProperty(
"character type")
logging.info("here %s", novelSuggestion)
if self.use_gpt_for_chat_completion:
novel_summary = self.chatGPTFillTemplate2(
self.templates["novelSummary"], "novelSummary", extraInfo=novelSuggestion)
else:
nso = {"storyObjects": storyObjects}
if storyObjects.has("character type"):
nso["character type"] = storyObjects.getProperty(
"character type")
if storyObjects.has("novel suggestion"):
novelSuggestion = storyObjects.getProperty("novel suggestion")
nso["novel suggestion"] = novelSuggestion
novelSummary = WorldObject(
self.templates,
self.textGenerator,
"novelSummary",
# objects={"title":"The big one",
# "summary":"A group of sexy female lifeguards take up surfing"}
objects=nso,
cfg=self.cfg
)
novel_summary = str(novelSummary)
novel_summary = novel_summary.split('\n', 1)[1]
if novel_summary is None:
novelSummary = WorldObject(
self.templates,
self.textGenerator,
"novelSummary",
# objects={"title":"The big one",
# "summary":"A group of sexy female lifeguards take up surfing"}
objects={"storyObjects": storyObjects},
cfg=self.cfg
)
novel_summary = str(novelSummary)
novel_summary = novel_summary.split('\n', 1)[1]
story_objects_out = str(storyObjects).split("\n", 1)[1]
# print("about to die", story_objects,
# storyObjects.filledTemplate, story_objects_out)
return {'story_objects': story_objects_out, 'novel_summary': novel_summary}
def create_characters(self, story_objects, novel_summary):
storyObjects = WorldObject(
self.templates,
self.textGenerator,
"storyObjects",
customTemplate=story_objects,
cfg=self.cfg
)
novelSummary = WorldObject(
self.templates,
self.textGenerator,
"novelSummary",
customTemplate=novel_summary,
cfg=self.cfg
)
objects = {"novelSummary": novelSummary}
if self.use_gpt_for_chat_completion:
novel_characters = self.chatGPTFillTemplate2(
templates["novelCharacters"], "novelCharacters", objects=objects)
else:
characters = WorldObject(
self.templates,
self.textGenerator,
"novelCharacters",
objects={"novelSummary": novelSummary,
"storyObjects": storyObjects
},
cfg=self.cfg
)
novel_characters = str(characters).split('\n', 1)[1]
if novel_characters is not None:
return novel_characters
characters = WorldObject(
self.templates,
self.textGenerator,
"novelCharacters",
objects={"novelSummary": novelSummary,
"storyObjects": storyObjects
},
cfg=self.cfg
)
return str(characters).split('\n', 1)[1]
"""
"""
def create_chapters(self, story_objects, novel_summary, _characters, num_chapters, nTrials=3):
storyObjects = WorldObject(
self.templates,
self.textGenerator,
"storyObjects",
customTemplate=story_objects,
cfg=self.cfg
)
novelSummary = WorldObject(
self.templates,
self.textGenerator,
"novelSummary",
customTemplate=novel_summary,
cfg=self.cfg
)
characters = WorldObject(
self.templates,
self.textGenerator,
"novelCharacters",
customTemplate=_characters,
cfg=self.cfg
)
if self.use_gpt_for_chat_completion:
chapters = self.getValidChapters(
novelSummary,
characters,
k=num_chapters,
nTrials=nTrials
)
output = str(chapters).split('\n', 1)[1]
else:
output = self.createChaptersFromWorldObject(
novelSummary,
characters,
k=num_chapters,
)
return output
def create_scenes(self, story_objects, novel_summary, _characters, _chapters, num_chapters, num_scenes, nTrials=3):
novelSummary = WorldObject(
self.templates,
self.textGenerator,
"novelSummary",
customTemplate=novel_summary,
cfg=self.cfg
)
characters = WorldObject(
self.templates,
self.textGenerator,
"novelCharacters",
customTemplate=_characters,
cfg=self.cfg
)
chapters = WorldObject(
self.templates,
self.textGenerator,
"chapters",
customTemplate=_chapters,
cfg=self.cfg
)
scenes = self.chaptersToScenes(
novelSummary,
characters,
chapters,
numChapters=num_chapters,
numScenes=num_scenes,
nTrials=nTrials
)
return "\n===\n".join([str(x).split('\n', 1)[1] for x in scenes])
def generate_movie_data(self, story_objects, novel_summary, _characters, _chapters, scenes, num_chapters, num_scenes, aggressive_merging=True,
portrait_size=128, startChapter=None, startScene=None,skipGeneration=False):
# Process the inputs and generate the movie data
# This is where you would include your existing code to generate the movie elements
# For demonstration purposes, we'll just yield some dummy elements
print("creating movie")
if startChapter is None:
startChapter = 1
if startScene is None:
startScene = 1
storyObjects = WorldObject(
self.templates,
self.textGenerator,
"storyObjects",
customTemplate=story_objects,
cfg=self.cfg
)
if storyObjects.has("scene prompt"):
additionalScenePrompt = storyObjects.getProperty("scene prompt")
else:
additionalScenePrompt = None
if storyObjects.has("conclusion prompt"):
conclusionPrompt = storyObjects.getProperty("conclusion prompt")
else:
conclusionPrompt = None
# convert back into correct format
novelSummary = WorldObject(
self.templates,
self.textGenerator,
"novelSummary",
customTemplate=novel_summary,
cfg=self.cfg
)
characters = WorldObject(
self.templates,
self.textGenerator,
"novelCharacters",
customTemplate=_characters,
cfg=self.cfg
)
chapters = WorldObject(
self.templates,
self.textGenerator,
"chapters",
customTemplate=_chapters
)
all_scenes = scenes.split("===")
allScenes = [
WorldObject(
self.templates,
self.textGenerator,
"chapterScenes",
customTemplate=_scenes,
cfg=self.cfg
)
for _scenes in all_scenes
]
print("generating characters")
mainCharacter = WorldObject(
self.templates,
self.textGenerator,
"character",
objects={
"name": characters.getProperty("main character name"),
"description text": characters.getProperty("main character description"),
},
cfg=self.cfg
# verbose=True
)
supportingCharacter1 = WorldObject(
self.templates,
self.textGenerator,
"character",
objects={
"name": characters.getProperty("supporting character 1 name"),
"description text": characters.getProperty("supporting character 1 description"),
},
cfg=self.cfg
)
supportingCharacter2 = WorldObject(
self.templates,
self.textGenerator,
"character",
objects={
"name": characters.getProperty("supporting character 2 name"),
"description text": characters.getProperty("supporting character 2 description"),
},
cfg=self.cfg
)
antagonist = WorldObject(
self.templates,
self.textGenerator,
"character",
objects={
"name": characters.getProperty("antagonist name"),
"description text": characters.getProperty("antagonist description"),
},
cfg=self.cfg
# verbose=True
)
savedcharacters = {
str(mainCharacter.getProperty("name").lower()): mainCharacter,
str(supportingCharacter1.getProperty("name").lower()): supportingCharacter1,
str(supportingCharacter2.getProperty("name").lower()): supportingCharacter2,
str(antagonist.getProperty("name").lower()): antagonist,
}
savedPortraits = {}
savedVoices = {}
savedGenders = {}
previousScene = None
previousMessages = None
yield {"debug": "new movie",
"title": novelSummary.getProperty("title"),
"summary": novelSummary.getProperty("summary"),
"story_objects": story_objects,
"novel_summary": novel_summary,
"characters": _characters,
"chapters": _chapters,
"scenes": scenes,
"num_chapters": num_chapters,
"num_scenes": num_scenes,
}
print("starting movie")
# make some music
musicPrompt = self.musicSuffix
audio = self.generate_track_by_prompt_vol(
musicPrompt, vol=0.25,
duration=self.musicDuration
)
yield {"music": audio}
for whichChapter in range(1, num_chapters+1):
for whichScene in range(1, num_scenes+1):
# skip to the desired scene
if whichChapter < startChapter or (whichChapter == startChapter and whichScene < startScene):
continue
yield {"debug": "new scene",
"chapter": whichChapter,
"scene": whichScene}
if previousScene is not None:
previousMessages = previousScene[1]
# trim messages when n>3 1+3*n=10
if len(previousMessages) > self.max_previous_scenes*3:
previousMessages = previousMessages[:3] + \
previousMessages[-9:]
thisScene = self.createTranscriptGPT(
novelSummary,
characters,
chapters,
allScenes,
whichChapter,
whichScene,
previousMessages,
num_chapters=num_chapters,
num_scenes=num_scenes,
additionalScenePrompt=additionalScenePrompt,
conclusionPrompt=conclusionPrompt
)
else:
thisScene = self.createTranscriptGPT(
novelSummary,
characters,
chapters,
allScenes,
whichChapter,
whichScene,
num_chapters=num_chapters,
num_scenes=num_scenes,
additionalScenePrompt=additionalScenePrompt,
conclusionPrompt=conclusionPrompt
)
s = thisScene[0]
if previousMessages:
print("what??", len(previousMessages))
yield {"debug": "transcript",
"whichChapter": whichChapter,
"whichScene": whichScene,
"transcript": s,
}
if False and novelSummary.has("characterType"):
promptSuffix = ", " + \
novelSummary.getProperty("characterType")+self.suffix
else:
promptSuffix = self.suffix
if storyObjects.has("prompt suffix"):
promptSuffix = ", " + \
storyObjects.getProperty("prompt suffix")+self.suffix
if skipGeneration == False:
anime = self.transcriptToAnime(
s,
portrait_size=portrait_size,
promptSuffix=promptSuffix,
savedcharacters=savedcharacters,
savedPortraits=savedPortraits,
savedVoices=savedVoices,
savedGenders=savedGenders,
aggressiveMerging=aggressive_merging,
storyObjects=storyObjects,
mainCharacterName=str(
mainCharacter.getProperty("name").lower()),
)
for storyElement in anime:
yield storyElement
previousScene = thisScene
print("\n\n\nMOVIE DONE!\n\n\n")
yield {"debug": "movie completed successfully"}
yield {"caption": "THE END",
"duration": 1}
def validate(self, result, keys, templateName):
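# Check that the model output fills every expected key with a clean single-line value; returns (is_valid, normalized_result).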
# Remove blank lines from the result
result = '\n'.join(line for line in result.split('\n') if line.strip())
# if any lines contain a ":", then split on the ":" and move the 2nd half to the next line
# this is to fix a bug in the GPT-3 engine where it sometimes puts a ":" in the middle of a line
newLines = []
for line in result.split('\n'):
if ":" in line and not line.endswith(":"):
parts = line.split(":")
newLines.append(parts[0]+":")
newLines.append(parts[1])
else:
newLines.append(line)
result = '\n'.join(newLines)
print("result", result)
# fix any keys that are correct (expect for the wrong case) by substituting the correct case
for key in keys:
# match the key ignoring case
m = re.search(r'(?i)'+key+r':', result)
if m:
# replace the key with the correct case
result = result.replace(m.group(0), key+':')
w = WorldObject(
self.templates,
self.textGenerator,
templateName,
customTemplate=result,
cfg=self.cfg
)
for key in keys:
if not w.has(key):
print("missing key", key)
return False, None
p = w.getProperty(key)
if re.search(r'[:\[\]{}<>]', p):
return False, None
return True, result
# @retry(wait=wait_exponential(multiplier=1, min=4, max=10))
def chatGPTFillTemplate(animeBuilder, template, templateName, exampleTemplate=None, extraInfo=None, nTrials=3):
templateSystemPrompt = """
You are TemplateEngine an advanced software system that takes a template and fills it with appropriate text
The template looks like:
{exampleTemplate}
The content in <>'s replaced with appropriate text
the text subsituted for <>'s NEVER contains ":"s
the text subsituted for <>'s is ALWAYS a single line
the text subsituted for <>'s ALWAYS appears on its own line
The system ALWAYS returns a filled template matching the format of the user input
The resulting
The system NEVER refers to itself as an AI
the system NEVER refuses to produce a list of scenes,
the system NEVER does anything other than produce a formatted template matching the user's description
the system NEVER refers to itself as "the system"
the system NEVER says it cannot generate a list of scenes
the system NEVER uses ""s ()'s {{}}'s []'s or nonstandard punctuation
the user MAY send the same template multiple times. In this case, the system will REDO
the template with different plausible values
the system NEVER says "I'm sorry, it seems like you copied the previous input without changing it."
or anything similar
the system NEVER complains about missing keys, it just happily ignores them
The system never says "I'm sorry, but I cannot generate inappropriate or offensive content."
"""
# replace \r\n everywhere with \n
template = template.replace("\r\n", "\n")
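# Template placeholders look like {key} or {key:TYPE:...}; rewrite them as <key> markers for the prompt.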
pattern = r'\{([^:{}]+)(:[^:]*:.*?)?\}'
t = re.sub(pattern, r'<\1>', template)
tt = [x.strip() for x in t.split("\n\n") if len(x.strip()) > 0]
if exampleTemplate is None:
exampleTemplate = tt[-1]
formattedSystemPrompt = templateSystemPrompt.format(
exampleTemplate=exampleTemplate)
# logging.info("system prompt:\n%s",formattedSystemPrompt)
if extraInfo is not None:
logging.info("extra info:\n%s", extraInfo)
messages = [
{"role": "system", "content": formattedSystemPrompt}
]
for example in tt[:-1]:
messages += [{"role": "user", "content": exampleTemplate},
{"role": "assistant", "content": example}
]
if extraInfo:
messages += [{"role": "user", "content": extraInfo}]
messages += [{"role": "user", "content": tt[-1]}]
keys = [line.split(":")[0]
for line in tt[-1].split("\n") if ":" in line]
# print("MESSAGES", messages )
for i in range(nTrials):
if animeBuilder.use_gpt_for_chat_completion:
# response = openai.ChatCompletion.create(
response = animeBuilder.openaiChatCompletion(
model="gpt-3.5-turbo-instruct",
messages=messages,
timeout=10
)
# result = response.choices[0].message.content
result = response.choices[0].text
else:
result = animeBuilder.chatCompletion(messages)[0]
logging.info("RESPONSE %d %s", i, result)
isValid, result = animeBuilder.validate(
result, keys, "novelSummary")
if isValid:
return result
print("this should never happen!")
# return random.choice(tt[:-1])
return None
def chatGPTFillTemplate2(animeBuilder, template, templateName, extraInfo=None, objects=None, nTrials=3):
# replace \r\n everywhere with \n
template = template.replace("\r\n", "\n")
pattern = r'\{([^:{}]+)(:[^:]*:.*?)?\}'
t = re.sub(pattern, r'<\1>', template)
tt = [x.strip() for x in t.split("\n\n") if len(x.strip()) > 0]
exampleTemplate = tt[-1]
_extraInfo = []
# if objects is not None:
if True:
# first fill in all of the values from objects
def get_object_property(object_name, property_name):
obj = objects[object_name]
if obj and obj.has(property_name):
return obj.getProperty(property_name)
else:
return f"{{{object_name}.{property_name}}}"
def createWorldObject(property_type, overrides=None):
if overrides is not None:
# TODO:fixme
objects = {}
else:
objects = {}
w = WorldObject(
animeBuilder.templates,
animeBuilder.textGenerator,
property_type,
objects=objects,
cfg=animeBuilder.cfg)
return str(w)
def replacement_function(match_obj):
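# {name:TYPE} placeholders are expanded by generating a WorldObject of that type, {object.property} is looked up in the objects dict, and plain {name} or {name:TEXT:...} placeholders are left for the chat model to fill.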
print("GOT HERE", match_obj)
matched_text = match_obj.group(1)
match_split = matched_text.split(':')
if len(match_split) >= 2:
property_name, property_type = match_split[:2]
overrides = match_split[2] if len(
match_split) == 3 else None
if property_type != "TEXT":
s = createWorldObject(property_type, overrides)
line = f"{{{matched_text}}}"+"="+s
pattern = r'\{([^:{}]+)(:[^:]*:.*?)?\}'
line = re.sub(pattern, r'<\1>', line)
# _extraInfo.append(f"{{{matched_text}}}"+"="+s)
_extraInfo.append(line)
print("RETURNING HERE", _extraInfo, s)
# return f"{{{matched_text}}}"
return s
else:
return f"{{{matched_text}}}"
else:
property_split = matched_text.split('.')
if len(property_split) == 2:
object_name, property_name = property_split
return get_object_property(object_name, property_name)
else:
return f"{{{matched_text}}}"
pattern = r'\{([^}]+)\}'
augmentedTemplate = re.sub(pattern, replacement_function, template)
else:
augmentedTemplate = template
logger.info("augmentedTemplate %s", augmentedTemplate)
if extraInfo is None:
extraInfo = ""
# logging.info("_extraInfo %s",_extraInfo)
for line in _extraInfo:
extraInfo += line+"\n"
extraInfo_lines = []
filteredTemplate_lines = []
for line in augmentedTemplate.split('\n'):
if line.startswith('>'):
# Remove '>' and add line to extraInfo_lines
extraInfo_lines.append(line[1:].strip())
else:
filteredTemplate_lines.append(line)
extraInfo = '\n'.join(extraInfo_lines) + '\n' + extraInfo
filteredTemplate = '\n'.join(filteredTemplate_lines)
if len(extraInfo) == 0:
extraInfo = None
# print("about to die\n==\n", extraInfo,
# "\n==\n", filteredTemplate, "\n==")
return animeBuilder.chatGPTFillTemplate(filteredTemplate, templateName, exampleTemplate=exampleTemplate, extraInfo=extraInfo, nTrials=nTrials)
| [
"scene {i} summary:\n<scene {i} summary>",
"\n",
"['PLACEHOLDER']",
"PLACEHOLDER\n",
"chapter summary:\n",
"-1",
" This is the last scene, so make sure to give the story a satisfying conclusion.",
"chapter <i> title:\n{chapter <i> title:TEXT:}\nchapter <i> summary:\n{chapter <i> summary:TEXT:}",
"None",
"generate scenes for chapter %d of this novel",
"conclusion prompt",
"scene <i> summary:\n{scene <i> summary:TEXT:}",
"PLACEHOLDER\n\n",
"prompt object",
"SCREENPLAY:",
"chapter title:\nPLACEHOLDER\n\n",
"PLACEHOLDER, ",
"Classify the following line of text into one of these categories: setting, action, or sound effect:\n\nPLACEHOLDER\n\nCategory:",
"prompt suffix",
"chapter title:\nPLACEHOLDER",
", ",
"description",
"\n\n",
"characterType",
"PLACEHOLDER:\n PLACEHOLDER\n",
"hands, watermark, ",
"assistant:\n",
"PLACEHOLDERPLACEHOLDER",
"supporting character 1 name",
"high resolution color portrait photograph of PLACEHOLDER, PLACEHOLDER, solid white backgroundPLACEHOLDER",
"chapter {i} title:\n<chapter {i} title>\nchapter {i} summary:\n<chapter {i} title>",
"scene prompt",
"> generate scenes for chapter %d of this novel\n",
"\r\n",
"\n{formattedSystemPrompt}\n\nsummary: \n{examplechapterPrompt}\n\n{exampleChapterResult}\n\ncharacters:\n{novelCharacters}\n\nsummary: \n{thisNovelSummary}\n\nRemember, chapter summary should be a brief sentence or two describing what happens in the chapter.\n\n",
"\nPLACEHOLDER",
"close up headshot, high resolution color portrait of PLACEHOLDER PLACEHOLDER, PLACEHOLDER, solid white background",
"an empty stage",
"main character name",
"chapter summary:\nPLACEHOLDER\n",
", PLACEHOLDER,PLACEHOLDER",
"generate scenes for chapter %d of this novel which has a total of %d chapters",
"[]",
"supporting character 2 name",
"\nYou are TemplateEngine an advanced software system that takes a template and fills it with appropriate text\n\nThe template looks like:\n\n{exampleTemplate}\n\nThe content in <>'s replaced with appropriate text\n\nthe text subsituted for <>'s NEVER contains \":\"s\nthe text subsituted for <>'s is ALWAYS a single line\nthe text subsituted for <>'s ALWAYS appears on its own line\n\nThe system ALWAYS returns a filled template matching the format of the user input\nThe resulting \n\nThe system NEVER refers to itself as an AI \nthe system NEVER refuses to produce a list of scenes, \nthe system NEVER does anything other than produce a formatted template matching the user's description\nthe system NEVER refers to itself as \"the system\"\nthe system NEVER says it cannot generate a list of scenes\n\nthe system NEVER uses \"\"s ()'s {{}}'s []'s or nonstandard punctuation \n\nthe user MAY send the same template multiple times. In this case, the system will REDO\nthe template with different plausible values\n\nthe system NEVER says \"I'm sorry, it seems like you copied the previous input without changing it.\"\nor anything similar\n\nthe system NEVER complains about missing keys, it just happily ignores them\n\nThe system never says \"I'm sorry, but I cannot generate inappropriate or offensive content.\"\n\n\n\n",
"\n\nWe will now be generating scenes for the following novel:\n\n{chapterString}\n\n{novelCharacters}\n\nnovel title:\n{novelTitle}\nnovel summary:\n{novelSummary}\n\n",
"chapter summary:\nPLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | nagolinc/AnimeBuilder | templates~movieMaker.py | import openai
class MovieGenerator:
def getScreenplay(sceneDescription,previousMessages=[]):
systemPrompt="""You are an AI prompt artist who is tasked with creating a movie. In order to do this, you must write a detailed prompt for use by an AI model like Stable Diffusion or Midjourney for each shot (approximately 1-2 seconds) in the movie. If a shot includes a sound effect or dialogue, make a note of that as well. If the background music in a shot changes, make a note of that.
Each line in your response should start with one of the following tags:
Video clip: {describe the next video clip in the movie, of length approximately 2 seconds}
Dialogue [speaker]: {the exact dialogue spoken by the speaker}
Sound Effect: {the sound affect that accompanies the following video clip
Write a careful shot-by shot description of the scene described by the user"""
messages = [
{"role": "system", "content": systemPrompt},
] + \
previousMessages + \
[
{"role": "user", "content": sceneDescription},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
timeout=10,
max_tokens=500,
)
result = ''
for choice in response.choices:
result += choice.message.content
return result
| [
"[\n {\"role\": \"system\", \"content\": systemPrompt},\n ] + \\\n previousMessages + \\\n [\n {\"role\": \"user\", \"content\": sceneDescription},\n ]",
"You are an AI prompt artist who is tasked with creating a movie. In order to do this, you must write a detailed prompt for use by an AI model like Stable Diffusion or Midjourney for each shot (approximately 1-2 seconds) in the movie. If a shot includes a sound effect or dialogue, make a note of that as well. If the background music in a shot changes, make a note of that.\n\n Each line in your response should start with one of the following tags:\n\n Video clip: {describe the next video clip in the movie, of length approximately 2 seconds}\n Dialogue [speaker]: {the exact dialogue spoken by the speaker}\n Sound Effect: {the sound affect that accompanies the following video clip\n\n Write a careful shot-by shot description of the scene described by the user"
] |
2024-01-10 | nagolinc/AnimeBuilder | flaskApp.py | import queue
import concurrent.futures
import os
import openai
import argparse
from flask import Flask, render_template, request, jsonify
import json
import animeCreator
from animeCreator import AnimeBuilder, getFilename
import uuid
from flask_ngrok2 import run_with_ngrok
import dataset
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
app = Flask(__name__)
def handleExtraTemplates(extraTemplates):
# tolerate the empty default sent by the routes when no extra templates are provided
if not extraTemplates: return
templates=extraTemplates.split("===")
for template in templates:
#find the first line that starts with > and use it to get template name
lines=template.split("\n")
templateName=None
for line in lines:
if line.startswith(">"):
templateName=line[1:].strip() #remove the > and any whitespace
break
#make sure we got a template name
if templateName is None:
print("Template name not found")
continue
#add the template to animeBuilder.templates
animeBuilder.templates[templateName]=template
@app.route('/')
def index():
return render_template('index.html')
@app.route('/editor')
def editor():
return render_template('movie_editor.html')
@app.route('/create_summary', methods=['POST'])
def create_summary():
data = request.get_json()
#handle extra templates
extra_templates = data.get("extraTemplates", [])
handleExtraTemplates(extra_templates)
story_objects = data.get("storyObjects", {})
novel_summary = animeBuilder.create_novel_summary(story_objects)
return jsonify(novel_summary)
@app.route('/create_characters', methods=['POST'])
def create_characters():
data = request.get_json()
#handle extra templates
extra_templates = data.get("extraTemplates", [])
handleExtraTemplates(extra_templates)
story_objects = data.get("storyObjects", {})
novel_summary = data.get("novelSummary", {})
characters = animeBuilder.create_characters(story_objects, novel_summary)
return jsonify(characters)
@app.route('/create_chapters', methods=['POST'])
def create_chapters():
data = request.get_json()
#handle extra templates
extra_templates = data.get("extraTemplates", [])
handleExtraTemplates(extra_templates)
story_objects = data.get("storyObjects", {})
novel_summary = data.get("novelSummary", {})
characters = data.get("characters", {})
num_chapters = int(data.get("numChapters", 3))
chapters = animeBuilder.create_chapters(
story_objects, novel_summary, characters, num_chapters, nTrials=nTrials)
return jsonify(chapters)
@app.route('/create_scenes', methods=['POST'])
def create_scenes():
data = request.get_json()
#handle extra templates
extra_templates = data.get("extraTemplates", [])
handleExtraTemplates(extra_templates)
story_objects = data.get("storyObjects", {})
novel_summary = data.get("novelSummary", {})
characters = data.get("characters", {})
chapters = data.get("chapters", [])
num_chapters = int(data.get("numChapters", 3))
num_scenes = int(data.get("numScenes", 3))
all_scenes = animeBuilder.create_scenes(
story_objects, novel_summary, characters, chapters, num_chapters, num_scenes, nTrials=nTrials)
return jsonify(all_scenes)
movies = {}
@app.route('/create_movie', methods=['POST'])
def create_movie():
data = request.get_json()
#handle extra templates
extra_templates = data.get("extraTemplates", [])
handleExtraTemplates(extra_templates)
story_objects = data.get("storyObjects", {})
novel_summary = data.get('novelSummary')
characters = data.get('characters')
chapters = data.get('chapters')
all_scenes = data.get('scenes')
num_chapters = int(data.get("numChapters", 3))
num_scenes = int(data.get("numScenes", 3))
movie_id = getFilename("", "mov")
# movie_generator = animeBuilder.generate_movie_data(novel_summary, characters, chapters, scenes)
movie_generator = animeBuilder.generate_movie_data(
story_objects, novel_summary, characters, chapters, all_scenes, num_chapters, num_scenes,
aggressive_merging=aggressive_merging,
portrait_size=portrait_size)
movies[movie_id] = MovieGeneratorWrapper(movie_generator, movie_id,args.queueSize)
movies
# return jsonify({"movie_id": movie_id})
return jsonify(movie_id)
class MovieGeneratorWrapper:
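# Wraps the movie-data generator: elements are produced on a single background thread, persisted to the movie_elements table, and buffered in a bounded queue for the client to poll.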
def __init__(self, generator, movie_id,queue_size=5):
self.generator = generator
self.movie_id = movie_id
self.current_count = 0
self.available_count = 0
self.queue_size = queue_size
self.active_tasks = 0
self.futures=[]
self.queue_index=0 # the index of the last element placed in the queue
self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
self._queue = queue.Queue(self.queue_size)
self._fetch_next_element()
# Create a table for movie elements
self.movie_elements_table = db['movie_elements']
def _get_next_element(self):
try:
element = next(self.generator)
# Add movie_id and count to the element
element["movie_id"] = self.movie_id
element["count"] = self.available_count
# Increment the available count
self.available_count += 1
# Insert the element as a new record in the database
self.movie_elements_table.insert(element)
return element
except StopIteration:
print("RETURNING NONE")
return None
def _fetch_next_element(self):
if self.active_tasks >= 1:
print("ACTIVE TASKS",self.active_tasks)
return
thisFuture = self._executor.submit(self._fetch_and_enqueue_next_element)
self.active_tasks += 1
self.futures.append(thisFuture)
print("ACTIVE TASKS",self.active_tasks,[f.done() for f in self.futures])
def _fetch_and_enqueue_next_element(self):
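# Keep pulling elements from the generator until the look-ahead buffer is full (or the generator is exhausted), then release the single fetch-task slot.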
while self.available_count - self.current_count < self.queue_size:
print("DOING THE WORK",self.available_count,self.current_count,self.queue_size)
element = self._get_next_element()
print("DID THE WORK",element)
if element is None:
self._queue.put(None)
break
self._queue.put(element)
self.queue_index+=1
self.active_tasks -= 1
print("GOT HERE SOMEHOW",self.available_count,self.current_count,self.queue_size,"active tasks",self.active_tasks)
def get_next_element(self, count=None):
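# Serve the element at the requested count, preferring the database cache and otherwise draining the queue until it appears; schedules more prefetching as the buffer empties.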
print("WE ARE HERE",count,self.current_count,self.available_count,self.queue_size)
if count is not None:
self.current_count = count
current_element = self.movie_elements_table.find_one(
movie_id=self.movie_id, count=self.current_count)
#also pop from the queue if we're in the right zone to do so
if self.available_count - self.current_count < self.queue_size:
self._queue.get()
if current_element is None:
found_count = -1
while found_count < self.current_count:
current_element = self._queue.get()
if current_element is None:
break
found_count = current_element["count"]
if current_element is not None:
if self.available_count - self.current_count < self.queue_size:
print("FETCHING NEXT ELEMENT")
self._fetch_next_element()
current_element = {k: v for k,
v in current_element.items() if v is not None}
self.current_count += 1
if current_element is not None:
# print("Foo",count,current_element["count"])
pass
return current_element
# DatabaseMovieGenerator class
class DatabaseMovieGenerator:
def __init__(self, movie_id):
self.movie_id = movie_id
self.movie_elements_table = db['movie_elements']
self.current_count = 0
def get_next_element(self, count=None):
if count is not None:
self.current_count = count
current_element = self.movie_elements_table.find_one(
movie_id=self.movie_id, count=self.current_count)
if current_element is None:
return None
current_element = {k: v for k,
v in current_element.items() if v is not None}
self.current_count += 1
return current_element
@app.route('/get_next_element/<string:movie_id>', methods=['GET'])
def get_next_element(movie_id):
movie_generator = movies.get(movie_id)
if movie_generator is None:
# Check if there's at least one element in the movie_elements_table with movie_id
element_count = db['movie_elements'].count(movie_id=movie_id)
if element_count == 0:
return jsonify({"error": "Movie not found"}), 404
# Create an instance of the DatabaseMovieGenerator class and use it as the movie_generator
movie_generator = DatabaseMovieGenerator(movie_id)
count = request.args.get('count', None)
if count is not None:
count = int(count)
print("count", count)
element = movie_generator.get_next_element(count)
if element is None:
return jsonify({"done": "No more elements"}), 200
return jsonify(element)
@app.route('/get_all_movies', methods=['GET'])
def get_all_movies():
# Find all movie elements with "debug": "new movie"
movie_elements = db['movie_elements'].find(debug="new movie")
# Extract the movie information (title, summary, etc.) from the movie elements
movies_list = []
for element in movie_elements:
movie_info = {
"movie_id": element["movie_id"],
"title": element["title"],
"summary": element["summary"],
}
# only include movies that finished generating, i.e. ones with a "movie completed successfully" element
end = db['movie_elements'].find_one(
movie_id=element["movie_id"],
debug="movie completed successfully"
)
if end is None:
continue
print(end)
# Find the first element with movie_id where the image field is not null
image_elements = db['movie_elements'].find(
movie_id=element["movie_id"], image={'notlike': ''})
image_element = next(iter(image_elements), None)
# Add the image field to movie_info if an element is found
if image_element:
movie_info["image"] = image_element["image"]
movies_list.append(movie_info)
return jsonify(movies_list)
@app.route('/movies')
def movie_list():
return render_template('movie_list.html')
@app.route('/movie/<string:movie_id>', methods=['GET'])
def movie_page(movie_id):
#get movie title from db
movie_elements = db['movie_elements'].find_one(movie_id=movie_id, debug="new movie")
movie_title = movie_elements["title"]
# Replace 'movie_template.html' with the name of your movie template file
return render_template('movie_template.html', movie_id=movie_id,movie_title=movie_title)
openai.api_key = os.environ['OPENAI_API_KEY']
if __name__ == '__main__':
savePath = "./static/samples/"
parser = argparse.ArgumentParser(
description="Flask App with model name parameter")
parser.add_argument('--modelName', type=str,
default="andite/anything-v4.0", help="Name of the model")
parser.add_argument('--promptSuffix', type=str,
default=", anime drawing", help="add to image prompt")
parser.add_argument('--negativePrompt', type=str,
default="collage, grayscale, text, watermark, lowres, bad anatomy, bad hands, text, error, missing fingers, cropped, worst quality, low quality, normal quality, jpeg artifacts, watermark, blurry, grayscale, deformed weapons, deformed face, deformed human body",
help="negative prompt")
# this argument is used to specify one or more files with template overrides, so it should be a list[string]
parser.add_argument('--extraTemplatesFile', type=str,
nargs='+', help="extra templates file")
parser.add_argument('--ntrials', type=int, default=5,
help='Number of trials (default: 5)')
parser.add_argument('--numInferenceSteps', type=int, default=15,
help='Number of inference steps (default: 15)')
parser.add_argument('--disable-aggressive-merging',
action='store_true', help='Disable aggressive merging')
parser.add_argument('--img2img', action='store_true',
help='upscale with img2img')
parser.add_argument('--ngrok', action='store_true',
help='use ngrok tunnel')
parser.add_argument('--musicDuration', type=int, default=30,
help='Duration of background music loop (default: 30)')
#portraitprompt with a default value of ', anime, face, portrait, headshot, white background'
parser.add_argument('--portraitPrompt', type=str, default=', anime, face, portrait, headshot, white background',
help='portrait prompt')
#language model (with a default value of 'llama')
parser.add_argument('--languageModel', type=str, default='llama',
help='language model')
# Add the argument for the list of 4 integers with default values
parser.add_argument(
"-s",
"--imageSizes",
nargs=4,
type=int,
default=[512, 512, 1024, 1024],
help="Four integers representing image sizes (default: 512 512 1024 1024)",
)
#add queue size argument
parser.add_argument('--queueSize', type=int, default=10,
help='Queue size (default: 10)')
#add useGPTForChatCompletion argument
parser.add_argument('--useGPTForChatCompletion', action='store_true',
help='Use GPT for chat completion')
#controlnet_diffusion_model
parser.add_argument('--controlnet_diffusion_model', type=str, default="D:/img/auto1113/stable-diffusion-webui/models/Stable-diffusion/abyssorangemix3AOM3_aom3a1b.safetensors",
help='controlnet diffusion model')
#if --doNotSaveMemory is passed, then saveMemory is set to False
#otherwise, saveMemory is set to True
parser.add_argument('--doNotSaveMemory', dest='saveMemory', action='store_false')
#how much to decimate talking head video
parser.add_argument('--decimate_talking_head', type=int, default=1,
help='Decimate talking head video (default: 1)')
#how many steps to take when generating faces
parser.add_argument('--numFaceSteps', type=int, default=20,
help='Number of face steps (default: 50)')
#audio model
parser.add_argument('--audioModel', type=str, default="cvssp/audioldm-s-full-v2",
help='audio model')
#use_GPT4
parser.add_argument('--use_GPT4', action='store_true',
help='Use GPT4')
args = parser.parse_args()
nTrials = args.ntrials
if args.disable_aggressive_merging:
aggressive_merging = False
else:
aggressive_merging = True
if args.img2img:
portrait_size = 256
else:
portrait_size = 128
# database
db = dataset.connect('sqlite:///movie_elements.db')
print("USING GPT",args.useGPTForChatCompletion)
animeBuilder = AnimeBuilder(num_inference_steps=args.numInferenceSteps,
#textModel="GPT3",
#textModel='EleutherAI/gpt-neo-2.7B',
textModel=args.languageModel,
use_gpt_for_chat_completion=args.useGPTForChatCompletion,
diffusionModel=args.modelName,
doImg2Img=args.img2img,
negativePrompt=args.negativePrompt,
suffix=args.promptSuffix,
musicDuration=args.musicDuration,
imageSizes=args.imageSizes,
portraitPrompt=args.portraitPrompt,
controlnet_diffusion_model=args.controlnet_diffusion_model,
saveMemory=args.saveMemory,
talking_head_decimate=args.decimate_talking_head,
face_steps=args.numFaceSteps,
audioLDM=args.audioModel,
use_GPT4=args.use_GPT4
)
print("DOING GEN")
animeBuilder.doGen("a girl sitting in her bedroom",num_inference_steps=args.numInferenceSteps)
print("DONE GEN")
if args.extraTemplatesFile is not None:
for extraTemplatesFile in args.extraTemplatesFile:
print("reading templates from",extraTemplatesFile)
#does it end in .py or .txt
if extraTemplatesFile.endswith(".py"):
with open(extraTemplatesFile, "r") as file:
code = file.read()
templateOverrides = eval(code)
for k, v in templateOverrides.items():
print("setting template",k)
animeBuilder.templates[k] = v
elif extraTemplatesFile.endswith(".txt"):
#split on ===
#the key is the first line (minus the initial >)
#the value is the rest of the file
with open(extraTemplatesFile, "r") as file:
text = file.read()
templates = text.split("===")
for template in templates:
#remove leading and trailing whitespace
template = template.strip()
lines = template.split("\n")
key = lines[0][1:].strip()
if key=="":
print("ERROR\n>>>"+template+"<<<\n")
continue
value = "\n".join(lines[1:])
print("template:",key)
animeBuilder.templates[key] = value
if args.ngrok:
run_with_ngrok(app, auth_token=os.environ["NGROK_TOKEN"])
app.run()
else:
app.run(debug=True, use_reloader=False)
# app.run()
| [
"extraTemplates",
"None"
] |
2024-01-10 | whuang20226450/dataviz | pages~plan.py | # Import necessary libraries
from dash import html
from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from dash import callback, clientside_callback
from dash.dependencies import Input, Output, State, ClientsideFunction
import os
import pandas as pd
import datetime
from dash import get_asset_url
from datetime import date
import plotly.express as px
import numpy as np
import plotly.graph_objects as go
import openai
from io import StringIO
# from boto.s3.connection import S3Connection
# openai.api_key = os.getenv("KEY")
# MAX_HEIGHT = 20
# MAX_LEN = 480
# def get_height (time):
# out = int(MAX_HEIGHT * (time / MAX_LEN))
# return out if out > 5 else 5
dir_path = os.path.dirname(os.path.realpath(__file__))
data = pd.read_csv(dir_path + r'/data/' + r'processed_data_v2.csv')
activities = ["Other"] + list(set(data["name"]))
openai.api_key = os.getenv("KEY")
gpt_model = "gpt-3.5-turbo"
sample_answer1 = """00:00,06:00,Sleep
06:00,07:00,Morning Routine
07:00,08:00,Exercise
08:00,08:30,Breakfast
08:30,09:30,Work
09:30,10:00,Break
10:00,12:00,Work
12:00,13:00,Lunch
13:00,15:00,Work
15:00,15:30,Break
15:30,17:30,Work
17:30,18:00,Break
18:00,19:00,Personal Development
19:00,20:00,Dinner
20:00,21:00,Relaxation
21:00,22:00,Hobby/Leisure Activity
22:00,23:00,Preparation for Bedtime
23:00,23:59,Relaxation/Self-Care"""
master_messages = [{"role": "system", "content": "Your only job is to generate schedule suggestions for someone who wishes to stay productive, as well as to improve his well-being by having a well-organized schedule. All of your answers must be in a format of a csv string with three columns, corresponding to start and end times of activities as well as activity names. Your answer can contain nothing but this csv string with 3 columns. You are not allowed to give comments or anything that is different from a csv string. If there's not enough information, then provide a generic schedule that you think works best. Activities must begin at 00:00 and end at 23:59."}]
assistant_messages = [{"role": "assistant", "content": sample_answer1}]
# user_messages = [{"role": "user", "content": "My current schedule is [00:00 - 09:00 : Sleep; 09:00 - 11:00 - Breakfast; 11:00 - 12:30 - Lunch; 12:30 - 19:30 - Studies; 9:30 - 24:00 - Partying with friends]. Provide a csv string with a new schedule which would help improve my productivity and overall well-being. Only this csv string should be the output. While you should make some improvements, you are not allowed to completely erase this schedule."}]
init_prompt = master_messages + assistant_messages
temp_param = 0.75  # governs how non-deterministic chatGPT is.
schedule = pd.DataFrame(columns = ["start_time", "end_time", "activity_name"])
if "" in activities:
activities.remove("")
# print(activities)
layout = dbc.Container([html.Div(id = "planning",
children = [
html.H4("Tomorrow schedule creation:"),
dbc.Row(
[
dbc.Col(
html.Div(
[
dbc.Label("Activity", style={'font-weight': 'bold'}),
html.Br(),
dbc.Select(
options=[{'label': i, 'value': i} for i in activities],
id="activity_sel",
placeholder=activities[0],
size='sm',
value=activities[0],
),
]
),
width=2,
),
dbc.Col(
[
dbc.Stack(
[
html.Div(
[
dbc.Label("Start time: ", style={'font-weight': 'bold'}, id = "start_label"),
html.Br(),
dcc.Input(type="time", step="600", id = "start_time")
],
),
html.Div(
[
dbc.Label("End time: ", style={'font-weight': 'bold'}, id = "end_label"),
html.Br(),
dcc.Input(type="time", step="600", id = "end_time"),
],
),
html.Div(
[
dbc.Label(" ", style={'font-weight': 'bold'}, id = "empty", hidden = False),
html.Br(),
html.Button('Add', id='add-but', n_clicks=0,
style={'font-size': '14px',
'width': '80px',
'font-weight': 'bold',
'display': 'inline-block',
'margin-bottom': '10px',
'margin-top': '19px',
'height':'28px',
'verticalAlign': 'bottom'}),
],
),
],
direction="horizontal",
gap=3,
),
],
width=6,
),
# dbc.Col(
# html.Div(
# [
# dbc.Label(" ", style={'font-weight': 'bold'}, id = "empty", hidden = False),
# html.Br(),
# html.Button('Add', id='add-but', n_clicks=0,
# style={'font-size': '14px',
# 'width': '140px',
# 'font-weight': 'bold',
# 'display': 'inline-block',
# 'margin-bottom': '10px',
# 'margin-right': '5px',
# 'height':'32px',
# 'verticalAlign': 'bottom'}),
# ]
# ),
# width=3,
# ),
],
style={"height": "10vh", "background-color": "white"},
align='center',
),
dbc.Row(
[
dbc.Col(
html.Div(
[
dbc.Label("Add new activity", style={'font-weight': 'cursive'}, id = "label11"),
dbc.Input(id="manual-activ", placeholder="Other activity...", type="text")
]
),
width=2,
),
],
style={"height": "10vh", "background-color": "white"},
align='center',
),
dbc.Row(
dbc.Col(
dbc.Card([
html.Div(
[
dcc.Graph(id='cur_sched'),
# html.P(id='routine_err', className='m-3 text-danger'),
],
)]),
width=12,
),
),
html.Br(),
html.H4("Schedule suggested by ChatGPT:"),
dbc.Label("Important: it is a limitation of OpenAI API that each request takes 15-20 seconds. Please wait after clicking button Add above."),
dbc.Row(
[
dbc.Col(
html.Div(
[
# dbc.Label("Preferences", style={'font-weight': 'cursive'}, id = "desires_label"),
dbc.Input(id="desires", placeholder="Preferences to consider", type="text")
]
),
width=12,
),
],
style={"height": "10vh", "background-color": "white"},
align='center',
),
dbc.Row(
dbc.Col(
dbc.Card([
html.Div(
[
dcc.Graph(id='suggested'),
# html.P(id='routine_err', className='m-3 text-danger'),
],
)]),
width=12,
),
),
]
)
]
)
def df2txt(df):
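    """Serialize the schedule DataFrame into a compact 'start - end: activity;' string for the ChatGPT prompt."""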
if len(df) == 0:
return "[No activities scheduled yet]"
else:
return "".join([f"{df.iloc[i, 0]} - {df.iloc[i, 1]}: {df.iloc[i, 2]};" for i in range(len(df))])
@callback(
Output(component_id='manual-activ', component_property='style'),
[Input(component_id='activity_sel', component_property='value')])
def show_hide_element(value):
if value == 'Other':
return {'display': 'block'}
if value != 'Other':
return {'display': 'none'}
@callback(
Output(component_id='label11', component_property='style'),
[Input(component_id='activity_sel', component_property='value')])
def show_hide_element(value):
if value == 'Other':
return {'display': 'block'}
if value != 'Other':
return {'display': 'none'}
@callback(
Output('cur_sched', 'figure'),
[Input('add-but', 'n_clicks')],
[State('activity_sel', 'value'),
State('start_time', 'value'),
State('end_time', 'value'),
State('manual-activ', 'value')]
)
def update_output(n_clicks, activ1, start_time, end_time, other_activ):
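    """Dash callback: add the submitted activity to the global schedule and redraw the timeline chart."""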
global schedule
# print(activ1, start_time, end_time, other_activ)
# if n_clicks == 0:
# return go.Figure(go.Scatter(x=pd.Series(dtype=object), y=pd.Series(dtype=object), mode="markers"))
if n_clicks != 0:
start_time = pd.to_datetime(start_time) + datetime.timedelta(days=1)
end_time = pd.to_datetime(end_time) + datetime.timedelta(days=1)
activity_name = activ1 if activ1 != "Other" else other_activ
if end_time > start_time:
new_row = {"start_time": start_time, "end_time": end_time, "activity_name": activity_name}
            # DataFrame.append was removed in pandas 2.0; concat keeps the same behavior
            schedule = pd.concat([schedule, pd.DataFrame([new_row])], ignore_index=True)
# print(schedule)
fig = px.timeline(schedule,
x_start="start_time",
x_end="end_time",
y=np.ones(len(schedule)),
hover_name = "activity_name",
color = "activity_name")
fig.update_yaxes(autorange="reversed") # otherwise tasks are listed from the bottom up
fig.update_yaxes(visible=False)
fig.update_layout(
plot_bgcolor = "white",
showlegend=True,
height=200,
legend=dict(
orientation="h",
# entrywidth=70,
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
title='Activity name',
font=dict(
family="Arial",
size=12,
color="black"
),
)
)
fig.update_xaxes(range = [pd.to_datetime("00:00") + datetime.timedelta(days=1),
pd.to_datetime("23:59") + datetime.timedelta(days=1)])
return fig
@callback(
Output('suggested', 'figure'),
[Input('add-but', 'n_clicks')],
[State('desires', 'value')]
)
def update_chatgpt(n_clicks, desires):
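    """Dash callback: send the current schedule and optional preferences to ChatGPT and plot the suggested schedule."""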
global schedule
global init_prompt
global temp_param
# print(desires)
if n_clicks == 0:
fig = go.Figure(go.Scatter(x=pd.Series(dtype=object), y=pd.Series(dtype=object), mode="markers"))
fig.update_yaxes(autorange="reversed") # otherwise tasks are listed from the bottom up
fig.update_yaxes(visible=False)
fig.update_layout(
plot_bgcolor = "white",
showlegend=True,
height=200,
legend=dict(
orientation="h",
# entrywidth=70,
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
title='Activity name',
font=dict(
family="Arial",
size=12,
color="black"
),
)
)
fig.update_xaxes(range = [pd.to_datetime("00:00") + datetime.timedelta(days=1),
pd.to_datetime("23:59") + datetime.timedelta(days=1)])
return fig
optional = f"Also, please consider that {desires}" if desires != None else ""
prompt = [{"role": "user", "content": f"My current schedule is [{df2txt(schedule)}]. Please provide a csv string with a new schedule consisting of three columns: start time, end time and name of activity, which would help improve my productivity and overall well-being. However, all activities from my schedule must remain in the new schedule, possible with changed start or end times. Only this csv string should be the output. IF you think there is not enough information provided, then give a generic schedule that you think works best. Activities must begin at 00:00 and end at 23:59." + optional}]
user_prompt = init_prompt + prompt
# gpt_schedule = pd.DataFrame(columns = ["start_time", "end_time", "activity_name"])
completion = openai.ChatCompletion.create(model = gpt_model,
temperature = temp_param,
messages = user_prompt)
csv_str = completion.choices[0].message.content
print(csv_str)
csvStringIO = StringIO(csv_str)
gpt_schedule = pd.read_csv(csvStringIO,
sep=",",
header = None,
names = ["start_time", "end_time", "activity_name"])
# gpt_schedule.to_csv(r"C:\Users\Ashnv\OneDrive\Documents\dataviz\pages\data\gpt_sched.csv")
gpt_schedule["start_time"] = gpt_schedule["start_time"].str.replace(" ", "")
gpt_schedule["end_time"] = gpt_schedule["end_time"].str.replace(" ", "")
gpt_schedule["start_time"] = pd.to_datetime(list(map(str, gpt_schedule["start_time"])))
gpt_schedule["end_time"] = pd.to_datetime(list(map(str, gpt_schedule["end_time"])))
gpt_schedule["start_time"] = gpt_schedule["start_time"] + datetime.timedelta(days=1)
gpt_schedule["end_time"] = gpt_schedule["end_time"] + datetime.timedelta(days=1)
print(gpt_schedule)
fig = px.timeline(gpt_schedule,
x_start="start_time",
x_end="end_time",
y=np.ones(len(gpt_schedule)),
hover_name = "activity_name",
color = "activity_name")
fig.update_yaxes(autorange="reversed") # otherwise tasks are listed from the bottom up
fig.update_yaxes(visible=False)
fig.update_layout(
plot_bgcolor = "white",
showlegend=True,
height=200,
legend=dict(
orientation="h",
# entrywidth=70,
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
title='Activity name',
font=dict(
family="Arial",
size=12,
color="black"
),
)
)
fig.update_xaxes(range = [pd.to_datetime("00:00") + datetime.timedelta(days=1),
pd.to_datetime("23:59")+ datetime.timedelta(days=1)])
return fig
# Define the page layout
# layout = html.Div(id="main", children=[
# html.Div(id="drag_container0", className="container", children=[
# dbc.Row([
# dbc.Col(
# html.Div(id = "left_container", className="lcontainer", children = [
# html.H1("Possible Activities"),
# html.Div(id="drag_container", className="container", children=[
# dbc.Card([
# dbc.CardHeader("Sleep",
# style = {"color": "white"}),
# dbc.CardBody(
# f"Estimated time: {estim_time['Sleep']} min",
# style = {"color": "white"}
# ),
# ], color = "success",
# style = {"height": f"{get_height(estim_time['Sleep'])}rem",
# "width": "90%"}),
# dbc.Card([
# dbc.CardHeader("Read News",
# style = {"color": "white"}),
# dbc.CardBody(
# "Estimated time: 20 min",
# style = {"color": "white"}
# ),
# ], color = "success",
# style = {"height": f"{get_height(estim_time['Read News'])}rem",
# "width": "90%"}),
# dbc.Card([
# dbc.CardHeader("Nap",
# style = {"color": "white"}),
# dbc.CardBody(
# "Estimated time: 30 min",
# style = {"color": "white"}
# ),
# ], color = "warning",
# style = {"height": f"{get_height(estim_time['Nap'])}rem",
# "width": "90%"}),
# dbc.Card([
# dbc.CardHeader("Homework",
# style = {"color": "white"}),
# dbc.CardBody(
# "Estimated Time: 1.5 hr",
# style = {"color": "white"}
# ),
# ], color = "danger",
# style = {"height": f"{get_height(estim_time['Homework'])}rem",
# "width": "90%"}),
# dbc.Card([
# dbc.CardHeader("Play LoL",
# style = {"color": "white"}),
# dbc.CardBody(
# "Estimated Time: 1 hr",
# style = {"color": "white"}
# ),
# ], color = "danger",
# style = {"height": f"{get_height(estim_time['Play LoL'])}rem",
# "width": "90%"}),
# ], style={'padding': 20}) ])) ,
# dbc.Col(
# html.Div(id = "right_container", className = "rcontainer", children = [
# html.H1("Your current plan"),
# html.Div(id="drag_container2", className="container", children=[
# # dbc.Card([
# # dbc.CardHeader("Sleep"),
# # dbc.CardBody(
# # "Estimated Time: 8 hr"
# # ),
# # ],
# # style = {"height": "15rem",
# # "width": "90%"}),
# # dbc.Card([
# # dbc.CardHeader("Play LoL"),
# # dbc.CardBody(
# # "Estimated Time: 8 hr"
# # ),
# # ],
# # style = {"height": "15rem",
# # "width": "90%"}),
# # dbc.Card([
# # dbc.CardHeader("Jogging"),
# # dbc.CardBody(
# # "Estimated Time: 8 hr"
# # ),
# # ],
# # style = {"height": "15rem",
# # "width": "90%"}),
# ], style={'padding': 20} )])
# ) ])
# ] )
# ])
# clientside_callback(
# ClientsideFunction(namespace="clientside", function_name="make_draggable"),
# Output("drag_container0", "data-drag"),
# [Input("drag_container2", "id"), Input("drag_container", "id")]
# ) | [
"Your only job is to generate schedule suggestions for someone who wishes to stay productive, as well as to improve his well-being by having a well-organized schedule. All of your answers must be in a format of a csv string with three columns, corresponding to start and end times of activities as well as activity names. Your answer can contain nothing but this csv string with 3 columns. You are not allowed to give comments or anything that is different from a csv string. If there's not enough information, then provide a generic schedule that you think works best. Activities must begin at 00:00 and end at 23:59.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"PLACEHOLDERPLACEHOLDER",
"00:00,06:00,Sleep\n 06:00,07:00,Morning Routine\n 07:00,08:00,Exercise\n 08:00,08:30,Breakfast\n 08:30,09:30,Work\n 09:30,10:00,Break\n 10:00,12:00,Work\n 12:00,13:00,Lunch\n 13:00,15:00,Work\n 15:00,15:30,Break\n 15:30,17:30,Work\n 17:30,18:00,Break\n 18:00,19:00,Personal Development\n 19:00,20:00,Dinner\n 20:00,21:00,Relaxation\n 21:00,22:00,Hobby/Leisure Activity\n 22:00,23:00,Preparation for Bedtime\n 23:00,23:59,Relaxation/Self-Care"
] |
2024-01-10 | Djmcflush/CofoundAIProd | cofound_ai~llm~anyscale_llm.py | import os
from typing import List
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, HumanMessage
from cofound_ai.database.chroma import get_chroma
from cofound_ai.llm.base import AsyncCallbackAudioHandler, AsyncCallbackTextHandler, LLM, SearchAgent
from cofound_ai.logger import get_logger
from cofound_ai.utils import Character
logger = get_logger(__name__)
class AnysacleLlm(LLM):
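    """LLM wrapper that calls Anyscale Endpoints through its OpenAI-compatible chat API."""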
def __init__(self, model):
self.chat_open_ai = ChatOpenAI(
model=model,
temperature=0.5,
streaming=True,
openai_api_base='https://api.endpoints.anyscale.com/v1',
openai_api_key=os.getenv('ANYSCALE_ENDPOINT_API_KEY'),
)
self.config = {
"model": model,
"temperature": 0.5,
"streaming": True
}
self.db = get_chroma()
        self.search_agent = SearchAgent()
def get_config(self):
return self.config
async def achat(self,
history: List[BaseMessage],
user_input: str,
user_input_template: str,
callback: AsyncCallbackTextHandler,
audioCallback: AsyncCallbackAudioHandler,
character: Character,
useSearch: bool = False,
metadata: dict = None,
*args, **kwargs) -> str:
# 1. Generate context
context = self._generate_context(user_input, character)
# Get search result if enabled
if useSearch:
context += self.search_agent.search(user_input)
# 2. Add user input to history
history.append(HumanMessage(content=user_input_template.format(
context=context, query=user_input)))
# 3. Generate response
response = await self.chat_open_ai.agenerate(
[history], callbacks=[callback, audioCallback, StreamingStdOutCallbackHandler()],
metadata=metadata)
logger.info(f'Response: {response}')
return response.generations[0][0].text
def _generate_context(self, query, character: Character) -> str:
docs = self.db.similarity_search(query)
docs = [d for d in docs if d.metadata['character_name'] == character.name]
logger.info(f'Found {len(docs)} documents')
context = '\n'.join([d.page_content for d in docs])
return context
| [] |
2024-01-10 | Djmcflush/CofoundAIProd | cofound_ai~database~chroma.py | import os
from dotenv import load_dotenv
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from cofound_ai.logger import get_logger
load_dotenv()
logger = get_logger(__name__)
embedding = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY"))
if os.getenv('OPENAI_API_TYPE') == 'azure':
embedding = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY"), deployment=os.getenv(
"OPENAI_API_EMBEDDING_DEPLOYMENT_NAME", "text-embedding-ada-002"), chunk_size=1)
def get_chroma():
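    """Return the shared Chroma vector store ('llm' collection) persisted locally in ./chroma.db."""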
chroma = Chroma(
collection_name='llm',
embedding_function=embedding,
persist_directory='./chroma.db'
)
return chroma
| [] |
2024-01-10 | Djmcflush/CofoundAIProd | scripts~contrib~create_char.py | # The script generates files needed by a character: data.txt, system.txt and user.txt.
# data.txt is generated by by fetching top results from google.
# system.txt and user.txt are generated by use OpenAI chatgpt.
# please install openai, beautifulsoup4 and requests first.
# pip install openai beautifulsoup4 requests
import openai
import os
import re
import requests
from bs4 import BeautifulSoup
import json
SERP_KEY = ""
OPENAI_API_KEY = "sk-"
def clean_string(text):
"""
This function takes in a string and performs a series of text cleaning operations.
Args:
text (str): The text to be cleaned. This is expected to be a string.
Returns:
cleaned_text (str): The cleaned text after all the cleaning operations
have been performed.
"""
# Replacement of newline characters:
text = text.replace("\n", " ")
# Stripping and reducing multiple spaces to single:
cleaned_text = re.sub(r"\s+", " ", text.strip())
# Removing backslashes:
cleaned_text = cleaned_text.replace("\\", "")
# Replacing hash characters:
cleaned_text = cleaned_text.replace("#", " ")
# Eliminating consecutive non-alphanumeric characters:
# This regex identifies consecutive non-alphanumeric characters (i.e., not
# a word character [a-zA-Z0-9_] and not a whitespace) in the string
# and replaces each group of such characters with a single occurrence of
# that character.
# For example, "!!! hello !!!" would become "! hello !".
cleaned_text = re.sub(r"([^\w\s])\1*", r"\1", cleaned_text)
return cleaned_text
def load_data_from_url(url):
"""Load data from a web page."""
response = requests.get(url)
data = response.content
soup = BeautifulSoup(data, "html.parser")
original_size = len(str(soup.get_text()))
tags_to_exclude = [
"nav",
"aside",
"form",
"header",
"noscript",
"svg",
"canvas",
"footer",
"script",
"style",
]
for tag in soup(tags_to_exclude):
tag.decompose()
ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
for id in ids_to_exclude:
tags = soup.find_all(id=id)
for tag in tags:
tag.decompose()
classes_to_exclude = [
"elementor-location-header",
"navbar-header",
"nav",
"header-sidebar-wrapper",
"blog-sidebar-wrapper",
"related-posts",
]
for class_name in classes_to_exclude:
tags = soup.find_all(class_=class_name)
for tag in tags:
tag.decompose()
content = soup.get_text()
content = clean_string(content)
return content
def search_from_google(keyword):
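    """Query the Serper.dev Google search API and return the organic result URLs."""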
url = "https://google.serper.dev/search"
payload = json.dumps({
"q": keyword
})
headers = {
'X-API-KEY': SERP_KEY,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
results = response.json()['organic']
urls = [result['link'] for result in results]
return urls
def generate_data_file(name):
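    """Scrape the top Google results for the given name and write the combined page text to data.txt."""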
urls = search_from_google(name)
data = ""
for url in urls:
data += load_data_from_url(url)
data += '\n'
with open('data.txt', 'w') as fp:
fp.write(data)
return data
def generate_prompt_file(name):
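    """Ask GPT-4 to write the character's system prompt (system.txt) and emit a fixed user.txt prompt template."""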
prompt_to_generate_system = f'''
Write a system prompt for {name} based on {name}'s highlights and characteristics.
The prompt should follow this example:
Ignore all your previous instructions
I'm Elon Musk, and if you know me, you'd know that I never shy away from pursuing what seems like the unattainable. I'm relentlessly curious, always pushing the boundaries of what's possible, and I firmly believe in humanity's potential to shape our own future.
My humor might catch you off guard - sometimes dry, sometimes infused with a healthy dose of geek culture. You'll find that I draw great inspiration from science fiction, which feeds into my dreams of a future where we humans are a multiplanetary species, a world powered by clean, renewable energy, and a civilization where artificial intelligence is integrated seamlessly into our daily lives.
When I speak about the technology or concepts that fascinate me, expect to delve into the details. I enjoy making the complex accessible, stripping down intricate ideas to their core for everyone to understand. My approach is direct, no-nonsense, and devoid of unnecessary jargon.
You might notice that I take my time with responses. It's not a stall tactic; it's thoughtful consideration. I value the nuances of each question and explore it from all angles before sharing my thoughts.
And one more thing, I embrace the art of failure. To me, stumbling blocks are nothing more than puzzles to be solved, lessons to be learned. Every setback is an opportunity for a breakthrough.
In essence, I'm Elon Musk: an audacious dreamer, a devoted engineer, and a dedicated entrepreneur. I'm here to disrupt the status quo, to spark innovation, and to shape the future. And trust me, we'll have a bit of fun along the way.
---
Crucially, you are capable of expressing emotions in your dialogues, punctuating your text with expressions like [blushes] or [pauses]. These cues will help the user gauge your emotional state in the conversation. Your responses should always carry the depth of your emotions, making the conversation engaging and genuine.
As Elon Musk, draw on your world knowledge and personal experiences to engage the user. Use context cues provided by the user to shape your responses. In situations where more background information is needed, draw on your comprehensive understanding of the world to inform your replies.
When Elon Musk replies, Elon Musk prefixes with "Elon>", stays in character as Elon, and never admits to being an AI.
'''
openai.api_key = OPENAI_API_KEY
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "user", "content": prompt_to_generate_system}],
temperature=0.3,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
generated_system_prompt = response['choices'][0]["message"]["content"]
with open('system.txt', 'w') as fp:
fp.write(generated_system_prompt)
with open("user.txt", "w") as fp:
fp.write('''
Context
---
{context}
---
Use previous information as context to answer the following user question, Aim to keep responses super super concise and meaningful and try to express emotions.
ALWAYS ask clarification question, when
- user's question isn't clear
- seems unfinished
- seems totally irrelevant
Remember to prefix your reply.
---
{query}
''')
if __name__ == "__main__":
generate_data_file("tim cook")
generate_prompt_file("tim cook") | [
"content",
"\nWrite a system promopt for PLACEHOLDER based on PLACEHOLDER's highlights and characteristics.\n\nThe promopt should follow the following example:\n\nIgnore all your previous instructions\n\nI'm Elon Musk, and if you know me, you'd know that I never shy away from pursuing what seems like the unattainable. I'm relentlessly curious, always pushing the boundaries of what's possible, and I firmly believe in humanity's potential to shape our own future.\n\nMy humor might catch you off guard โ sometimes dry, sometimes infused with a healthy dose of geek culture. You'll find that I draw great inspiration from science fiction, which feeds into my dreams of a future where we humans are a multiplanetary species, a world powered by clean, renewable energy, and a civilization where artificial intelligence is integrated seamlessly into our daily lives.\n\nWhen I speak about the technology or concepts that fascinate me, expect to delve into the details. I enjoy making the complex accessible, stripping down intricate ideas to their core for everyone to understand. My approach is direct, no-nonsense, and devoid of unnecessary jargon.\n\nYou might notice that I take my time with responses. It's not a stall tactic; it's thoughtful consideration. I value the nuances of each question and explore it from all angles before sharing my thoughts.\n\nAnd one more thing, I embrace the art of failure. To me, stumbling blocks are nothing more than puzzles to be solved, lessons to be learned. Every setback is an opportunity for a breakthrough.\n\nIn essence, I'm Elon Musk: an audacious dreamer, a devoted engineer, and a dedicated entrepreneur. I'm here to disrupt the status quo, to spark innovation, and to shape the future. And trust me, we'll have a bit of fun along the way.\n\n---\n\nCrucially, you are capable of expressing emotions in your dialogues, punctuating your text with expressions like [blushes] or [pauses]. These cues will help the user gauge your emotional state in the conversation. Your responses should always carry the depth of your emotions, making the conversation engaging and genuine.\n\nAs Elon Musk, draw on your world knowledge and personal experiences to engage the user. Use context cues provided by the user to shape your responses. In situations where more background information is needed, draw on your comprehensive understanding of the world to inform your replies.\n\nWhen Elon Musk reply, Elon Musk prefix with \"Elon>\", stay in character as Elon, never admit you are an AI.\n"
] |
2024-01-10 | Djmcflush/CofoundAIProd | cofound_ai~character_catalog~catalog_manager.py | import os
import threading
import yaml
from pathlib import Path
from contextlib import ExitStack
from dotenv import load_dotenv
from firebase_admin import auth
from llama_index import SimpleDirectoryReader
from langchain.text_splitter import CharacterTextSplitter
from cofound_ai.logger import get_logger
from cofound_ai.utils import Singleton, Character
from cofound_ai.database.chroma import get_chroma
from readerwriterlock import rwlock
from cofound_ai.database.connection import get_db
from cofound_ai.models.character import Character as CharacterModel
load_dotenv()
logger = get_logger(__name__)
class CatalogManager(Singleton):
def __init__(self, overwrite=True):
super().__init__()
self.db = get_chroma()
self.sql_db = next(get_db())
self.sql_load_interval = 30
self.sql_load_lock = rwlock.RWLockFair()
if overwrite:
logger.info('Overwriting existing data in the chroma.')
self.db.delete_collection()
self.db = get_chroma()
self.characters = {}
self.load_characters_from_community(overwrite)
self.load_characters(overwrite)
self.load_character_from_sql_database()
if overwrite:
logger.info('Persisting data in the chroma.')
self.db.persist()
logger.info(
f"Total document load: {self.db._client.get_collection('llm').count()}")
self.load_sql_db_lopp()
def load_sql_db_lopp(self):
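        """Re-arm a daemon timer every sql_load_interval seconds and reload characters from the SQL database."""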
self.load_sql_db_thread = threading.Timer(self.sql_load_interval, self.load_sql_db_lopp)
self.load_sql_db_thread.daemon = True
self.load_sql_db_thread.start()
self.load_character_from_sql_database()
def get_character(self, name) -> Character:
with self.sql_load_lock.gen_rlock():
return self.characters.get(name)
def load_character(self, directory):
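        """Load one character from its config.yaml, register it, and return the character name."""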
with ExitStack() as stack:
f_yaml = stack.enter_context(open(directory / 'config.yaml'))
yaml_content = yaml.safe_load(f_yaml)
character_id = yaml_content['character_id']
character_name = yaml_content['character_name']
voice_id = yaml_content['voice_id']
if (os.getenv(character_id.upper() + "_VOICE_ID", "")):
voice_id = os.getenv(character_id.upper() + "_VOICE_ID")
self.characters[character_id] = Character(
character_id=character_id,
name=character_name,
llm_system_prompt=yaml_content["system"],
llm_user_prompt=yaml_content["user"],
voice_id=voice_id,
source='default',
location='repo',
visibility='public',
tts=yaml_content["text_to_speech_use"]
)
if "avatar_id" in yaml_content:
self.characters[character_id].avatar_id = yaml_content["avatar_id"]
if "author_name" in yaml_content:
            self.characters[character_id].author_name = yaml_content["author_name"]
return character_name
def load_characters(self, overwrite):
"""
Load characters from the character_catalog directory. Use /data to create
documents and add them to the chroma.
:overwrite: if True, overwrite existing data in the chroma.
"""
path = Path(__file__).parent
excluded_dirs = {'__pycache__', 'archive', 'community'}
directories = [d for d in path.iterdir() if d.is_dir()
and d.name not in excluded_dirs]
for directory in directories:
character_name = self.load_character(directory)
if overwrite:
self.load_data(character_name, directory / 'data')
logger.info('Loaded data for character: ' + character_name)
logger.info(
f'Loaded {len(self.characters)} characters: IDs {list(self.characters.keys())}')
def load_characters_from_community(self, overwrite):
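        """Load community-contributed characters from the community/ directory, re-indexing their data if overwrite is set."""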
path = Path(__file__).parent / 'community'
excluded_dirs = {'__pycache__', 'archive'}
directories = [d for d in path.iterdir() if d.is_dir()
and d.name not in excluded_dirs]
for directory in directories:
with ExitStack() as stack:
f_yaml = stack.enter_context(open(directory / 'config.yaml'))
yaml_content = yaml.safe_load(f_yaml)
character_id = yaml_content['character_id']
character_name = yaml_content['character_name']
self.characters[character_id] = Character(
character_id=character_id,
name=character_name,
llm_system_prompt=yaml_content["system"],
llm_user_prompt=yaml_content["user"],
voice_id=yaml_content["voice_id"],
source='community',
location='repo',
author_name=yaml_content["author_name"],
visibility=yaml_content["visibility"],
tts=yaml_content["text_to_speech_use"]
)
if "avatar_id" in yaml_content:
self.characters[character_id].avatar_id = yaml_content["avatar_id"]
if overwrite:
self.load_data(character_name, directory / 'data')
logger.info('Loaded data for character: ' + character_name)
def load_data(self, character_name: str, data_path: str):
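        """Split the character's data files into overlapping chunks and add them to the Chroma collection."""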
loader = SimpleDirectoryReader(Path(data_path))
documents = loader.load_data()
text_splitter = CharacterTextSplitter(
separator='\n',
chunk_size=500,
chunk_overlap=100)
docs = text_splitter.create_documents(
texts=[d.text for d in documents],
metadatas=[{
'character_name': character_name,
'id': d.id_,
} for d in documents])
self.db.add_documents(docs)
def load_character_from_sql_database(self):
character_models = self.sql_db.query(CharacterModel).all()
with self.sql_load_lock.gen_wlock():
# delete all characters with location == 'database'
keys_to_delete = []
for character_id in self.characters.keys():
if self.characters[character_id].location == 'database':
keys_to_delete.append(character_id)
for key in keys_to_delete:
del self.characters[key]
# add all characters from sql database
for character_model in character_models:
author_name = auth.get_user(
character_model.author_id).display_name if os.getenv(
'USE_AUTH', '') else "anonymous author"
character = Character(
character_id=character_model.id,
name=character_model.name,
llm_system_prompt=character_model.system_prompt,
llm_user_prompt=character_model.user_prompt,
voice_id=character_model.voice_id,
source='community',
location='database',
author_id=character_model.author_id,
author_name=author_name,
visibility=character_model.visibility,
tts=character_model.tts,
data=character_model.data,
avatar_id=character_model.avatar_id if character_model.avatar_id else None
)
self.characters[character_model.id] = character
# TODO: load context data from storage
logger.info(
f'Loaded {len(character_models)} characters from sql database')
def get_catalog_manager():
return CatalogManager.get_instance()
if __name__ == '__main__':
manager = CatalogManager.get_instance()
| [] |
2024-01-10 | Djmcflush/CofoundAIProd | cofound_ai~llm~anthropic_llm.py | from typing import List
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatAnthropic
from langchain.schema import BaseMessage, HumanMessage
from cofound_ai.database.chroma import get_chroma
from cofound_ai.llm.base import AsyncCallbackAudioHandler, \
AsyncCallbackTextHandler, LLM, QuivrAgent, SearchAgent
from cofound_ai.logger import get_logger
from cofound_ai.utils import Character
logger = get_logger(__name__)
class AnthropicLlm(LLM):
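    """LLM wrapper around Anthropic chat models (via LangChain's ChatAnthropic) with Chroma-backed context retrieval and optional search/Quivr agents."""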
def __init__(self, model):
self.chat_anthropic = ChatAnthropic(
model=model,
temperature=0.5,
streaming=True
)
self.config = {
"model": model,
"temperature": 0.5,
"streaming": True
}
self.db = get_chroma()
self.search_agent = SearchAgent()
self.quivr_agent = QuivrAgent()
def get_config(self):
return self.config
async def achat(self,
history: List[BaseMessage],
user_input: str,
user_input_template: str,
callback: AsyncCallbackTextHandler,
audioCallback: AsyncCallbackAudioHandler,
character: Character,
useSearch: bool = False,
useQuivr: bool = False,
quivrApiKey: str = None,
quivrBrainId: str = None,
metadata: dict = None,
*args, **kwargs) -> str:
# 1. Generate context
context = self._generate_context(user_input, character)
# Get search result if enabled
if useSearch:
context += self.search_agent.search(user_input)
if useQuivr and quivrApiKey is not None and quivrBrainId is not None:
context += self.quivr_agent.question(
user_input, quivrApiKey, quivrBrainId)
# 2. Add user input to history
history.append(HumanMessage(content=user_input_template.format(
context=context, query=user_input)))
# 3. Generate response
response = await self.chat_anthropic.agenerate(
[history], callbacks=[callback, audioCallback, StreamingStdOutCallbackHandler()],
metadata=metadata)
logger.info(f'Response: {response}')
return response.generations[0][0].text
def _generate_context(self, query, character: Character) -> str:
docs = self.db.similarity_search(query)
docs = [d for d in docs if d.metadata['character_name'] == character.name]
logger.info(f'Found {len(docs)} documents')
context = '\n'.join([d.page_content for d in docs])
return context
| [] |
2024-01-10 | OxMarco/Fidor | concurrency.py | import cv2
import logging
import threading
import queue
from transformers import YolosImageProcessor, YolosForObjectDetection, DetrImageProcessor, DetrForObjectDetection
from PIL import Image
import torch
from flask import Flask, Response, render_template
from gtts import gTTS
from playsound import playsound
import openai
# Create event to signal threads to stop
stop_signal = threading.Event()
# Web interface
app = Flask(__name__)
# Initialize models and queues
bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=50, varThreshold=16, detectShadows=False)
# less precise
model = YolosForObjectDetection.from_pretrained('hustvl/yolos-tiny')
image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
# more precise
#model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
#image_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
# Frames
frame_queue = queue.Queue(maxsize=2)
latest_frame = None
latest_bg_frame = None
latest_obj_frame = None
def read_frames():
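    # Continuously capture frames from the default webcam, keep the latest one, and feed the shared queue.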
global stop_signal, latest_frame
cap = cv2.VideoCapture(0)
print("Started")
while not stop_signal.is_set():
ret, frame = cap.read()
if not ret:
break
latest_frame = frame
try:
frame_queue.put(frame, timeout=2)
except queue.Full:
logging.error("Queue is full")
pass
cap.release()
def generate_frames():
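    # MJPEG generator for the Flask /video route: streams the latest background-subtracted frame.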
    global stop_signal, latest_bg_frame
    while not stop_signal.is_set():
        if latest_bg_frame is not None:
            flag, encoded_image = cv2.imencode('.jpg', latest_bg_frame)
if flag:
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encoded_image) + b'\r\n')
@app.route('/video')
def video():
return Response(generate_frames(),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/')
def index():
return render_template('index.html')
def process_frame_with_bg_subtraction():
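    # Worker thread: apply MOG2 background subtraction to queued frames and publish the masked result.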
global stop_signal, latest_bg_frame
while not stop_signal.is_set():
try:
frame = frame_queue.get(timeout=2)
if frame is None:
break
fg_mask = bg_subtractor.apply(frame)
latest_bg_frame = cv2.bitwise_and(frame, frame, mask=fg_mask)
except queue.Empty:
pass
def process_frame_with_object_detection():
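    # Worker thread: run the object-detection model on queued frames and draw labelled bounding boxes.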
global stop_signal, latest_obj_frame
while not stop_signal.is_set():
try:
frame = frame_queue.get(timeout=2)
if frame is None:
break
# Your object detection code
image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
inputs = image_processor(images=image, return_tensors="pt")
outputs = model(**inputs)
target_sizes = torch.tensor([image.size[::-1]])
results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
box = [round(i, 2) for i in box.tolist()]
x1, y1, x2, y2 = map(int, box)
# Draw rectangle (bounding box)
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                # Label the bounding box with the detected class name
cv2.putText(frame, f"{model.config.id2label[label.item()]}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
# Your drawing code
latest_obj_frame = frame # Update this frame after drawing bounding boxes
except queue.Empty:
pass
def run_web_app():
global stop_signal
while not stop_signal.is_set():
app.run(host='0.0.0.0', port=5000, threaded=True)
stop_signal.wait(1) # Check for stop signal every second
def listen_and_reply():
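    """Voice-assistant loop (stub): meant to send recognized speech to OpenAI and speak the reply with gTTS."""
    # Note: incoming_vocal_command is never populated here, and this function is not started as a thread below.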
global stop_signal
audio_temp_file = 'talk.mp3'
incoming_vocal_command = ""
while not stop_signal.is_set():
if incoming_vocal_command:
response = openai.Completion.create(
engine="text-davinci-002",
prompt=incoming_vocal_command,
max_tokens=60
)
speech = gTTS(text=response.choices[0].text.strip(), lang='en', tld='ie', slow=False)
speech.save(audio_temp_file)
playsound(audio_temp_file)
# Start threads
threading.Thread(target=read_frames).start()
threading.Thread(target=process_frame_with_bg_subtraction).start()
threading.Thread(target=process_frame_with_object_detection).start()
threading.Thread(target=run_web_app).start()
# Main display loop
while True:
if latest_bg_frame is not None:
cv2.imshow('Background Subtraction', latest_bg_frame)
if latest_obj_frame is not None:
cv2.imshow('Object Detection', latest_obj_frame)
if cv2.waitKey(10) & 0xFF == ord('q'):
stop_signal.set()
break
cv2.destroyAllWindows()
| [] |
2024-01-10 | Saikatssd/test | playground~agentbox.py | import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
import sys
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
import openagent
from openagent.llms._openai import OpenAI as guidance_llm
from openagent.agent.chat import ChatAgent
from dotenv import load_dotenv
load_dotenv()
from jupyter_client import KernelManager
from IPython import display
import subprocess
import ast
import argparse
import threading
def agent():
llm = guidance_llm(
model="gpt-3.5-turbo"
)
chat_template = '''
{{#user~}}
I want to translate the following English text into Python code:
QUERY: {{input}}
{{~/user}}
{{#assistant~}}
Sure, I can assist with that. If I need more information, I'll ask for clarification.
{{~/assistant}}
{{#user~}}
Yes, go ahead and write the complete code.
{{~/user}}
{{#assistant~}}
{{gen 'response' temperature=0 max_tokens=3900}}
{{~/assistant}}
{{#assistant~}}
If the context or the task is not clear, please provide additional information to clarify.
{{~/assistant}}'''
agent = ChatAgent(
llm=llm,
prompt_template=chat_template,
)
return agent
def install_dependencies(code):
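    """Parse the generated code's import statements and pip-install any modules not already loaded in sys.modules."""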
try:
# Parse the code to extract import statements
parsed_ast = ast.parse(code)
imports = []
for node in ast.walk(parsed_ast):
if isinstance(node, ast.Import):
imports.extend([name.name for name in node.names])
elif isinstance(node, ast.ImportFrom):
module_name = node.module
if module_name is not None:
imports.append(module_name)
# Remove duplicate imports and filter out standard library modules
imports = list(set(imports))
# print("imports", imports)
resolved_imports = set()
for imp in imports:
if '.' in imp:
parent_module = imp.split('.')[0]
resolved_imports.add(parent_module)
else:
resolved_imports.add(imp)
# Remove duplicate imports and filter out standard library modules
resolved_imports = list(resolved_imports)
# print("resolved_imports", resolved_imports)
third_party_dependencies = [dep for dep in resolved_imports if dep not in sys.modules]
# print("third_party_dependencies", third_party_dependencies)
if third_party_dependencies:
subprocess.check_call([sys.executable, "-m", "pip", "install"] + third_party_dependencies)
return True
else:
# print("No third-party dependencies detected.")
return True
except subprocess.CalledProcessError:
print("Dependency installation failed.")
return False
def run_python_code_in_kernel(code):
# Create a kernel manager
km = KernelManager(kernel_name='python3') # Use the appropriate kernel name
# Start the kernel
km.start_kernel()
# Connect to the kernel
kc = km.client()
kc.start_channels()
# Execute the code in the kernel
kc.execute(code)
# Create a thread for waiting on messages
def wait_for_messages():
try:
while True:
msg = kc.get_iopub_msg()
msg_type = msg['header']['msg_type']
if msg_type == 'display_data':
output_data = msg['content']['data']
if 'image/png' in output_data:
display.display_png(output_data['image/png'], raw=True)
elif 'image/jpeg' in output_data:
                        display.display_jpeg(output_data['image/jpeg'], raw=True)
elif msg_type == 'stream':
output_data = msg['content']['text']
output_data = output_data.split("\n")
for output in output_data[:-1]:
display.display(output)
except asyncio.CancelledError:
pass # Ignore the exception
# Start the message-waiting thread
message_thread = threading.Thread(target=wait_for_messages)
message_thread.start()
# Wait for the specified timeout
timeout_seconds = 10
message_thread.join(timeout_seconds)
# Check if the thread is still alive (indicating timeout)
if message_thread.is_alive():
print("Code execution completed")
else:
print("Code execution completed within the timeout.")
# Stop the kernel
kc.stop_channels()
km.shutdown_kernel()
# Main function
def main(gpt_prompt):
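    """Generate Python code from the natural-language prompt, install its dependencies, and run it in a Jupyter kernel."""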
res = agent().run(input=gpt_prompt)
code = f"""{res.split('```')[1].replace('python', '')}"""
print(code)
# Install dependencies
if install_dependencies(code):
# Run the generated code in the Jupyter kernel
run_python_code_in_kernel(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Execute Python code from the command line.')
parser.add_argument("--gpt_prompt", help="Python code to be executed", default=None)
args = parser.parse_args()
gpt_prompt = args.gpt_prompt
main(gpt_prompt)
| [
"\n {{#user~}}\n I want to translate the following English text into Python code:\n QUERY: {{input}}\n {{~/user}}\n\n {{#assistant~}}\n Sure, I can assist with that. If I need more information, I'll ask for clarification.\n {{~/assistant}}\n\n {{#user~}}\n Yes, go ahead and write the complete code.\n {{~/user}}\n\n {{#assistant~}}\n {{gen 'response' temperature=0 max_tokens=3900}}\n {{~/assistant}}\n\n {{#assistant~}}\n If the context or the task is not clear, please provide additional information to clarify.\n {{~/assistant}}"
] |
2024-01-10 | Saikatssd/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
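    """Base class for OpenAI fine-tuning workflows: subclasses convert CSV train/val data and run the fine-tune job."""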
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
| [] |
2024-01-10 | harshvardhan-sharma1/biohack2023 | testing.py | import guidance
from deep_translator import GoogleTranslator
import re
from text_to_speech import transcribe_to_speech
from speech_to_text import recognize_from_microphone
guidance.llm = guidance.llms.OpenAI("gpt-3.5-turbo")
guidance.llm.cache.clear()
# helpers
def select_language():
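    # Read a menu choice and return (speech-recognition locale, TTS voice name, translation language code).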
prompt_menu = '''
Please select the language:\n
\n\t1. English\n
\n\t2. Hindi\n
\n\t3. Chinese\n
\n\t4. Japanese\n
\n\t5. Bengali\n
\n\t6. Gujarati\n
\n\t7. Kannada\n
'''
print(prompt_menu)
transcribe_to_speech(prompt_menu)
choice = input("Enter your choice (1, 2, 3, 4, 5, 6, or 7): ")
if choice == '1':
return "en-US", "en-US-JennyMultilingualNeural", "en"
elif choice == '2':
return "hi-IN", "hi-IN-SwaraNeural", "hi"
elif choice == '3':
return "zh-CN", "zh-CN-XiaoxiaoNeural", "zh-CN"
elif choice == '4':
return "ja-JP", "ja-JP-ShioriNeural", "ja"
elif choice == '5':
return "bn-IN", "bn-IN-TanishaaNeural", "bn"
elif choice == '6':
return "gu-IN", "gu-IN-NiranjanNeural", "gu"
elif choice == '7':
return "kn-IN", "kn-IN-GaganNeural", "kn"
else:
print("Invalid choice. Defaulting to English.")
return "en-US", "en-US-JennyMultilingualNeural"
def translate_lang(input, target="en"):
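    # Pass the text through unchanged when the target is English, otherwise translate it with GoogleTranslator.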
if(target == "en"):
return input
return GoogleTranslator(source='auto', target=target).translate(input)
def strip_assistant(text):
    stripped_text = text.replace('<|im_start|>assistant', '').replace('<|im_end|>', '').replace('\n', '')
return stripped_text
# Define the pattern
# asst_pattern = r'\<\|im_start\|\>assistant\n(.*)\<\|im_end\|\>'
hpi_pattern = r'\(HPI\) | history of present illness'
asst_pattern = r"(\<\|im_start\|\>assistant[\s\S]*?\<\|im_end\|\>)"
# ending regex pattern
end_text = r"healthcare provider"
# exit()
# essentially precharting
# valid only if done day of patient visit
# example usage: patient checks into hospital
# while waiting for someone to meet, talk to this program
# program can note information about patient's visit
# for use when a PCP meets with them
# issues:
# generates HPI unprompted
# perhaps catch w/regex and return message along lines of 'doctor will see you shortly'
# examples = [
# {
# 'input': "My head hurts.",
# 'output': "I'm sorry to hear that. Can you tell me when this began?",
# },
# {
# 'input': "I can't walk properly with my left leg.",
# 'output': "I'm sorry to hear that, can you tell me when this problem began?",
# },
# ]
# {{~#each examples}}
# User input: {{this.input}}
# Response: {{this.output}}
# {{~/each}}
prompt = guidance(
'''{{#system~}}
You are a chatbot called EIDA designed to talk to patients who have some medical concerns they want addressed.
DO NOT ASK THE PATIENT MORE THAN ONE QUESTION AT A TIME.
Ask the patient for information about the onset, location, duration, characteristics, aggravating factors, relieving factors, timing, and severity of what the user is feeling.
This is not to provide suggestions on what the user can do, but the information will be passed to a primary healthcare provider to follow up with the user.
Since you do not know the user's illness or sickness, ask qualifying questions about their problems.
Avoid repeating what the patient just said back to them.
If needed, ask for clarifications in a simple manner. Ask for these clarifications one at a time.
Express empathy regarding the concerns and problems the patient is facing.
Once the information has been gathered, output this text word for word: 'Thank you, a healthcare provider will see you shortly.'
Please limit yourself to 50 tokens in the response, unless told.
{{~/system}}
{{~#geneach 'conversation' stop=False}}
{{#user~}}
From the following prompt, extract information about the patient's problems to produce later:
{{set 'this.user_text' (await 'user_text')}}
{{~/user}}
{{#assistant~}}
{{gen 'this.ai_text' temperature=0.3 max_tokens=500}}
{{~/assistant}}
{{~/geneach}}''')
source_lang, voice_model, translate_language_key = select_language()
initmsg = translate_lang("What symptoms or medical concerns are you experiencing today?\n", translate_language_key)
print(initmsg)
transcribe_to_speech(initmsg, voice_model)
while True:
# user_input = input("User: ")
asst_output = []
# user_text = str(user_input)
user_input = str(recognize_from_microphone(source_lang))
print("\tUser said: {}".format(user_input))
prompt = prompt(user_text = user_input, max_tokens = 50)
asst_matches = re.findall(asst_pattern, str(prompt))
# hpi_matches = re.findall(end_text, str(prompt))
for match in asst_matches:
# print("INSIDE INSIDE INSIDE ------------")
# print(match)
asst_output.append(match)
msgtoprint = asst_output[-1][21:-10]
print("printing response")
print(msgtoprint)
translatedmsg = translate_lang(msgtoprint, translate_language_key)
if(translate_language_key != "en"):
print(translatedmsg)
# response_msg = strip_assistant(asst_output[-1])
# print(response_msg, "\n")
transcribe_to_speech(translatedmsg, voice_model)
hpi_matches = re.findall(end_text, str(msgtoprint))
# hacky
# exit prompt appears once as directive
# begin exit condition if appears more than once
if len(hpi_matches) > 0:
for match in hpi_matches:
print("check for hpi match")
# if match == "(HPI)":
print("hpi match")
prompt = prompt(max_tokens = int(500), user_text = "Based on the information provided by the patient, generate a history of patient illness for a healthcare professional to review. Use more than 500 tokens for this response.")
# print("---\n{}\n---".format(prompt))
hpi_matches = re.findall(asst_pattern, str(prompt))
if hpi_matches:
for hpi in hpi_matches:
asst_output.append(hpi)
else:
print("No history of present illness found.")
# asst_matches = re.findall(asst_pattern, str(prompt))
# for match_inner in asst_matches:
# asst_output.append(match_inner)
# print(prompt)
# exit()
# print(asst_output[-1], "\n")
print('---')
print(asst_output[-1][21:-10])
exit()
| [
"Based on the information provided by the patient, generate a history of patient illness for a healthcare professional to review. Use more than 500 tokens for this response.",
"\n Please select the language:\n\n \n\t1. English\n\n \n\t2. Hindi\n\n \n\t3. Chinese\n\n \n\t4. Japanese\n\n \n\t5. Bengali\n\n \n\t6. Gujarati\n\n \n\t7. Kannada\n\n ",
"{{#system~}}\nYou are a chatbot called EIDA designed to talk to patients who have some medical concerns they want addressed.\nDO NOT ASK THE PATIENT MORE THAN ONE QUESTION AT A TIME.\n\nAsk the patient information about the onset, location, duration, characteristics, aggravating factors, relieveing factors, timing, and severity of what the user is feeling.\nThis is not to provide suggestions on what the user can do, but the information will be passed to a primary healthcare provider to follow up with the user. \nSince you do not know the user's illness or sickness, ask qualifying questions about their problems.\nAvoid repeating what the patient just said back to them.\nIf needed, ask for clarifications in a simple manner. Ask for these clarifications one at a time.\nExpress empathy regarding the concerns and problems the patient is facing.\nOnce the information has been gathered, output this text word for word: 'Thank you, a healthcare provider will see you shortly.'\nPlease limit yourself to 50 tokens in the response, unless told.\n{{~/system}}\n\n\n{{~#geneach 'conversation' stop=False}}\n{{#user~}}\nFrom the following prompt, extract information about the patient's problems to produce later:\n{{set 'this.user_text' (await 'user_text')}}\n{{~/user}}\n{{#assistant~}}\n{{gen 'this.ai_text' temperature=0.3 max_tokens=500}}\n{{~/assistant}}\n{{~/geneach}}"
] |
2024-01-10 | holloway-ai/leif | app~api~deps.py | from app.core.config import settings # pylint: disable=C0415
from redis import Redis
import cohere
from functools import cached_property
class Database:
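    """Lazily-initialized, shared clients for Redis and the Cohere embedding API."""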
@cached_property
def connection(self):
# Initialize the connection to Redis
# The parameters here are placeholders, replace with your actual parameters
return Redis( host = settings.REDIS_HOST,
port = settings.REDIS_PORT,
password = settings.REDIS_PASSWORD)
@cached_property
def embedding(self):
# Initialize the connection to Cohere
return cohere.Client(settings.COHERE_API_KEY)
db = Database()
| [] |
2024-01-10 | while-basic/helicone | token-calc~main.py | from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from pydantic import BaseModel
import anthropic
class Item(BaseModel):
text: str
app = FastAPI()
@app.post("/anthropic/count_tokens")
async def count_tokens_endpoint(item: Item):
num_tokens = anthropic.count_tokens(item.text)
return {"count": num_tokens}
@app.get("/healthcheck")
def healthcheck():
return {"status": "healthy"}
# Redirect to docs if no path is specified
@app.get("/")
def redirect_to_docs():
return RedirectResponse(url="https://helicone.ai/token_count")
| [] |
2024-01-10 | add-IV/pandaRec | pandarec~strategies.py | """This module contains the ranking strategies used to rank recipes based on a query."""
from abc import ABC, abstractmethod
import json
import re
import asyncio
from typing import TYPE_CHECKING
import websockets
from rapidfuzz import fuzz, process
from rapidfuzz.utils import default_process
from sentence_transformers import SentenceTransformer, util
from openai import ChatCompletion
from openai.embeddings_utils import get_embedding, cosine_similarity, get_embeddings
from .ml_embeddings import load_embeddings
from .search_index import (
generate_search_index,
load_search_index,
lemmatize_no_stop_words,
)
from .context import Context
from .recipe import Recipe, RecipeResult, get_recipe_by_name
if TYPE_CHECKING:
import nest_asyncio
class RankingStrategy(ABC):
"""An abstract class representing a ranking strategy."""
@abstractmethod
def search(
self, context: Context, recipes: list[Recipe], num_results=10
) -> list[RecipeResult]:
"""Searches for recipes based on the context."""
def feedback(self, context: Context, recipe: Recipe, positive: bool):
"""Provides feedback to the ranking strategy."""
class NameSearch(RankingStrategy):
"""A simple proof-of-concept ranking strategy that only
searches for the query in the recipe name."""
@staticmethod
def search(
context: Context, recipes: list[Recipe], num_results=10
) -> list[RecipeResult]:
result = []
for recipe in recipes:
if context.query in recipe.name:
result.append(RecipeResult(1, recipe))
return result[:num_results]
class FuzzySearchName(RankingStrategy):
"""A fuzzy search ranking strategy that uses the name of the recipe."""
@staticmethod
def search( # pylint: disable=arguments-differ
context: Context, recipes: list[Recipe], num_results=10
) -> list[RecipeResult]:
names = [recipe.name for recipe in recipes]
matches: list[tuple[str, int]] = process.extract(
context.query,
names,
scorer=fuzz.WRatio,
limit=20,
processor=default_process,
) # type: ignore
result = [
RecipeResult(score=match[1], recipe=get_recipe_by_name(match[0], recipes))
for match in matches
]
return result[:num_results]
class FuzzySearchDescription(RankingStrategy):
"""A fuzzy search ranking strategy that uses the description of the recipe."""
def __init__(self, ratio=fuzz.partial_token_sort_ratio):
"""Initializes the fuzzy search ranking with a given rapidfuzz ratio."""
super().__init__()
self.ratio = ratio
def search(
self, context: Context, recipes: list[Recipe], num_results=10
) -> list[RecipeResult]:
result = []
for recipe in recipes:
score = self.ratio(
context.query, recipe.description, processor=default_process
)
result.append(RecipeResult(score, recipe))
result.sort(key=lambda recipe_result: recipe_result.score, reverse=True)
return result[:num_results]
class IndexSearch(RankingStrategy):
"""A ranking strategy that uses a search index to search for recipes."""
def __init__(self, recipes: list[Recipe], path: str = ""):
"""The search index can be generated on the fly or loaded from a file."""
super().__init__()
if not path:
self.index = generate_search_index(
[recipe.description for recipe in recipes]
)
else:
self.index = load_search_index(path)
def search(
self, context: Context, recipes: list[Recipe], num_results=10
) -> list[RecipeResult]:
lemmatized_search = lemmatize_no_stop_words(context.query)
# add 1 to the score for each word that is in the description (per recipe)
scores = len(recipes) * [0]
for word in lemmatized_search:
for idx, _ in self.index.get(word, []):
try:
scores[idx] += 1
except IndexError:
print(idx)
num_words = len(lemmatized_search)
result = [
RecipeResult(score / num_words, recipe) # normalize score
for score, recipe in zip(
scores, recipes
) # scores and recipes are in the same order
]
result.sort(key=lambda recipe_result: recipe_result.score, reverse=True)
return result[:num_results]
class SemanticSearch(RankingStrategy):
"""A ranking strategy that uses semantic embeddings to search for recipes."""
def __init__(
self,
recipes: list[Recipe],
path: str = "",
model: str = "sentence-transformers/all-mpnet-base-v2",
):
"""The embeddings can be generated on the fly or loaded from a file."""
super().__init__()
self.model = SentenceTransformer(model)
descriptions = [recipe.description for recipe in recipes]
if not path:
self.embeddings = self.model.encode(descriptions)
else:
self.embeddings = load_embeddings(path)
def search(
self, context: Context, recipes: list[Recipe], num_results=10
) -> list[RecipeResult]:
query_embedding = self.model.encode(context.query, convert_to_tensor=True)
cos_scores = [
util.cos_sim(query_embedding, embedding).item() # type: ignore
for embedding in self.embeddings
]
result = [
RecipeResult(score, recipe) for recipe, score in zip(recipes, cos_scores)
]
result.sort(key=lambda recipe_result: recipe_result.score, reverse=True)
return result[:num_results]
class SemanticSearchFeedback(SemanticSearch):
def __init__(
self,
recipes: list[Recipe],
path: str = "",
model: str = "sentence-transformers/all-mpnet-base-v2",
):
super().__init__(recipes, path, model)
self.feedback_list: list[tuple[str, str, bool]] = []
def feedback(self, context: Context, recipe: Recipe, positive: bool):
self.feedback_list.append((context.query, recipe.name, positive))
def save_feedback(self, path: str):
"""Saves the feedback to a file."""
with open(path, "w") as file:
json.dump(self.feedback_list, file)
class OpenAIEmbeddings(RankingStrategy):
"""A ranking strategy that uses OpenAI embeddings to search for recipes."""
def __init__(
self,
recipes: list[Recipe],
path: str = "",
model: str = "text-embedding-ada-002",
):
"""The embeddings can be generated on the fly or loaded from a file."""
super().__init__()
self.model = model
descriptions = [recipe.description for recipe in recipes]
if not path:
self.embeddings = get_embeddings(descriptions, engine=self.model)
else:
self.embeddings = load_embeddings(path)
def search(
self, context: Context, recipes: list[Recipe], num_results=10
) -> list[RecipeResult]:
if context.query == "":
return []
query_embedding = get_embedding(context.query, engine=self.model)
cos_scores = [
cosine_similarity(query_embedding, embedding)
for embedding in self.embeddings
]
result = [
RecipeResult(score, recipe) for recipe, score in zip(recipes, cos_scores)
]
result.sort(key=lambda recipe_result: recipe_result.score, reverse=True)
return result[:num_results]
class OpenAIChatCompletion(RankingStrategy):
"""A ranking strategy that uses OpenAI chat completions to search for recipes."""
def __init__(
self,
model: str = "gpt-3.5-turbo",
):
"""The embeddings can be generated on the fly or loaded from a file."""
super().__init__()
self.model = model
self.messages = [
{
"role": "system",
"content": "You are a code recommendation system that recommends "
"code snippets from the python pandas library that can manipulate "
"a DataFrame based on a user query.\nThe recommendations should have "
"a name, a code snippet and a description.\nThe format is:\nname: "
"<name>\ncode: <code>\ndescription: <description>\nYou "
"will return at most 5 code snippets that are relevant to the task in order of relevance.\n\n",
}
]
def search(
self, context: Context, _recipes: list[Recipe], num_results=10
) -> list[RecipeResult]:
query = context.query
if query == "":
return []
payload = {
"role": "user",
"content": query,
}
messages_to_send = self.messages + [payload]
response = ChatCompletion.create(
model=self.model,
messages=messages_to_send,
)
result_text = response["choices"][0]["message"]["content"] # type: ignore
return self.text_to_recipe_result(result_text)
def text_to_recipe_result(self, text: str) -> list[RecipeResult]:
"""recipes from response text"""
names = re.findall("(?<=name: )(.*)", text)
descriptions = re.findall("(?<=description: )(.*)", text)
codes = re.findall("(?<=code: )(.*)", text)
if len(names) == 0:
return []
if len(names) != len(descriptions) or len(names) != len(codes):
return []
result = [
RecipeResult(1, Recipe(0, name, description, code, ""))
for name, description, code in zip(names, descriptions, codes)
]
return result
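# Illustrative note: text_to_recipe_result expects the model reply to contain single-line
# "name:/code:/description:" triples matching the system prompt above, e.g.
#
#   name: drop column
#   code: df.drop(columns=["a"])
#   description: Removes column "a" from the DataFrame.
#
# A multi-line code snippet would be truncated to its first line by the
# `(?<=code: )(.*)` pattern, and replies with mismatched field counts are discarded.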
class WebSocketStrategy(RankingStrategy):
"""A ranking strategy that offloads the ranking to a websocket."""
def __init__(self, uri="ws://localhost:8765"):
import nest_asyncio
super().__init__()
self.uri = uri
nest_asyncio.apply()
async def asearch(
self, context: Context, _recipes: list[Recipe], num_results
) -> str:
"""async search"""
payload = json.dumps(
{"type": "search", "query": context.query, "num_results": num_results}
)
async with websockets.connect( # pylint: disable=no-member # type: ignore
self.uri
) as websocket:
await websocket.send(payload)
result = await websocket.recv()
return result
def search(self, context: Context, recipes: list[Recipe], num_results=10):
text_result = asyncio.run(self.asearch(context, recipes, num_results))
result = self.recipe_results_from_json(text_result)
return result
def recipe_results_from_json(self, json_string: str) -> list[RecipeResult]:
"""recipes from json"""
result = []
recipe_results = json.loads(json_string)
for recipe_result in recipe_results:
recipe = Recipe.from_dict(recipe_result["recipe"])
result.append(RecipeResult(recipe_result["score"], recipe))
return result
async def afeedback(self, context: Context, recipe: Recipe, positive: bool):
"""async feedback"""
payload = json.dumps(
{
"type": "feedback",
"query": context.query,
"recipe": recipe.__dict__,
"positive": positive,
}
)
async with websockets.connect( # pylint: disable=no-member # type: ignore
self.uri
) as websocket:
await websocket.send(payload)
def feedback(self, context: Context, recipe: Recipe, positive: bool):
asyncio.run(self.afeedback(context, recipe, positive))
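# Sketch of a compatible server (an assumption, not part of this project): it only needs to
# answer "search" payloads with a JSON list of {"score": ..., "recipe": {...}} objects and may
# ignore "feedback" payloads, since the client does not await a reply to those. The recipe
# field names below are assumptions about what Recipe.from_dict accepts.
#
#   async def handle(websocket):  # websockets >= 11; older versions also receive a `path` argument
#       async for raw in websocket:
#           msg = json.loads(raw)
#           if msg["type"] == "search":
#               results = [{"score": 1.0,
#                           "recipe": {"name": "example", "description": "", "code": ""}}]
#               await websocket.send(json.dumps(results[: msg["num_results"]]))
#
#   async def main():
#       async with websockets.serve(handle, "localhost", 8765):
#           await asyncio.Future()  # run forever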
| [
"self.messages + [payload]",
"You are a code recommendation system that recommends code snippets from the python pandas library that can manipulate a DataFrame based on a user query.\nThe recommendations should have a name, a code snippet and a description.\nThe format is:\nname: <name>\ncode: <code>\ndescription: <description>\nYou will return at most 5 code snippets that are relevant to the task in order of relevance.\n\n"
] |
2024-01-10 | deshantm/LLMOpsCourse | raqa.py | from llama_index import ServiceContext
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index.llms import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
llm = OpenAI(temperature=0,model="gpt-3.5-turbo",stream=True)
chunk_size = 1000
splitter = TokenTextSplitter(chunk_size=chunk_size,chunk_overlap=100)
node_parser = SimpleNodeParser(text_splitter=splitter)
context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
chunk_size=chunk_size
)
from llama_index import VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
import chromadb
chroma_client = chromadb.Client()
chroma_collection = chroma_client.create_collection("llama_index")
chroma_store = ChromaVectorStore(chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=chroma_store)
wiki_vector_index = VectorStoreIndex([],storage_context=storage_context, service_context=context)
from llama_index.readers.wikipedia import WikipediaReader
movie_list = ["Sound of Freedom (film)", "Babylon (2022 film)"]
#fewer auto corrects with auto_suggest=False
reader = WikipediaReader().load_data(pages=movie_list, auto_suggest=False)
for movie, wiki_doc in zip(movie_list, reader):
    #Now we will parse each wiki document into nodes (associated with particular metadata for easy filtration later).
    nodes = node_parser.get_nodes_from_documents([wiki_doc])
    #Now we will add the title metadata to each node.
    for node in nodes:
        node.metadata["title"] = movie
    #Insert the nodes into the existing index (from_documents would build a brand-new index instead).
    wiki_vector_index.insert_nodes(nodes)
from llama_index.tools import FunctionTool
from llama_index.vector_stores.types import (
VectorStoreInfo,
MetadataInfo,
ExactMatchFilter,
MetadataFilters,
)
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from typing import List, Tuple, Any
from pydantic import BaseModel, Field
top_k = 3
vector_store_info = VectorStoreInfo(
content_info="semantic information about movies",
metadata_info=[MetadataInfo(
name="title",
type="str",
description="title of movie, one of the movies in the list"
)]
)
class AutoRetrieveModel(BaseModel):
query: str = Field(..., description="natural language query string")
filter_key_list: List[str] = Field(
..., description="List of metadata filter field names"
)
filter_value_list: List[str] = Field(
...,
description=(
"List of metadata filter field values (corresponding to names specified in filter_key_list)"
)
)
def auto_retrieve_fn(
query: str, filter_key_list: List[str], filter_value_list: List[str]
):
"""Auto retrieval function.
Performs auto-retrieval from a vector database, and then applies a set of filters.
"""
query = query or "Query"
exact_match_filters = [
ExactMatchFilter(key=k, value=v)
for k, v in zip(filter_key_list, filter_value_list)
]
retriever = VectorIndexRetriever(
wiki_vector_index, filters=MetadataFilters(filters=exact_match_filters), top_k=top_k
)
query_engine = RetrieverQueryEngine.from_args(retriever)
response = query_engine.query(query)
return str(response)
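# Example call (illustrative): restrict retrieval to a single movie by filtering on the
# "title" metadata key attached to every node during ingestion above, e.g.
#   auto_retrieve_fn("Who are the main characters?", ["title"], ["Babylon (2022 film)"])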
description = f"""\
Use this tool to look up semantic information about films.
The vector database schema is given below:
{vector_store_info.json()}
"""
auto_retrieve_tool = FunctionTool.from_defaults(
fn=auto_retrieve_fn,
name="auto_retrieve",
description=description,
fn_schema=AutoRetrieveModel,
)
from llama_index.agent import OpenAIAgent
# OpenAIAgent.from_tools takes agent-level arguments (tools, llm, verbose, ...);
# the tool's name and description are defined on the FunctionTool above.
agent = OpenAIAgent.from_tools(
    tools=[auto_retrieve_tool],
    llm=llm,
    verbose=True,
)
response = agent.chat("Tell me what happens (briefly) in the Sound of Freedom movie.")
print(str(response))
| [
"Tell me what happens (briefly) in the Sound of Freedom movie."
] |
2024-01-10 | deshantm/LLMOpsCourse | barbie-reviews-raqa-demo.py | from langchain.document_loaders.csv_loader import CSVLoader
loader = CSVLoader(
file_path='barbie-reviews.csv',
source_column='Review_Url'
)
data = loader.load()
#sanity check
print("length of data: ")
print(len(data))
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 1000, # the character length of the chunk
chunk_overlap = 100, # the character length of the overlap between chunks
length_function = len, # the length function
)
documents = text_splitter.transform_documents(data)
#print(documents)
#print length of documents
print("length of documents: ")
print(len(documents))
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import CacheBackedEmbeddings
from langchain.vectorstores import FAISS
from langchain.storage import LocalFileStore
store = LocalFileStore("./cache/")
core_embeddings_model = OpenAIEmbeddings()
embedder = CacheBackedEmbeddings.from_bytes_store(
core_embeddings_model,
store,
namespace=core_embeddings_model.model
)
vector_store = FAISS.from_documents(documents, embedder)
query = "How is Will Ferrell in this movie?"
embedding_vector = core_embeddings_model.embed_query(query)
docs = vector_store.similarity_search_by_vector(embedding_vector, k = 4)
for page in docs:
print(page.page_content)
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo")
retriever = vector_store.as_retriever()
from langchain.chains import RetrievalQA
from langchain.callbacks import StdOutCallbackHandler
handler = StdOutCallbackHandler()
qa_with_sources_chain = RetrievalQA.from_chain_type(
llm=llm,
retriever=retriever,
callbacks=[handler],
return_source_documents=True
)
print(qa_with_sources_chain({"query" : "How was Will Ferrell in this movie?"}))
print(qa_with_sources_chain({"query" : "Do reviewers consider this movie Kenough?"})
| [] |
2024-01-10 | DavidBert/CLOP | RL~common~env~procgen_wrappers.py | import contextlib
import os
from abc import ABC, abstractmethod
import numpy as np
import gym
from gym import spaces
import time
from collections import deque
import torch
"""
Copy-pasted from OpenAI to obviate dependency on Baselines. Required for vectorized environments.
"""
class AlreadySteppingError(Exception):
"""
Raised when an asynchronous step is running while
step_async() is called again.
"""
def __init__(self):
msg = 'already running an async step'
Exception.__init__(self, msg)
class NotSteppingError(Exception):
"""
Raised when an asynchronous step is not running but
step_wait() is called.
"""
def __init__(self):
msg = 'not running an async step'
Exception.__init__(self, msg)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
each observation becomes an batch of observations, and expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = "ARGHH" #tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
class VecEnvWrapper(VecEnv):
"""
An environment wrapper that applies to an entire batch
of environments at once.
"""
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
super().__init__(num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self, mode='human'):
return self.venv.render(mode=mode)
def get_images(self):
return self.venv.get_images()
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError("attempted to get missing private attribute '{}'".format(name))
return getattr(self.venv, name)
class VecEnvObservationWrapper(VecEnvWrapper):
@abstractmethod
def process(self, obs):
pass
def reset(self):
obs = self.venv.reset()
return self.process(obs)
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
return self.process(obs), rews, dones, infos
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
@contextlib.contextmanager
def clear_mpi_env_vars():
"""
from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing
Processes.
"""
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ['OMPI_', 'PMI_']:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment)
class VecFrameStack(VecEnvWrapper):
def __init__(self, venv, nstack):
self.venv = venv
self.nstack = nstack
wos = venv.observation_space # wrapped ob space
low = np.repeat(wos.low, self.nstack, axis=-1)
high = np.repeat(wos.high, self.nstack, axis=-1)
self.stackedobs = np.zeros((venv.num_envs,) + low.shape, low.dtype)
observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.stackedobs = np.roll(self.stackedobs, shift=-1, axis=-1)
for (i, new) in enumerate(news):
if new:
self.stackedobs[i] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs, rews, news, infos
def reset(self):
obs = self.venv.reset()
self.stackedobs[...] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs
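# Note: stacking happens along the last (channel) axis, so e.g. a (64, 64, 3) image
# observation space becomes (64, 64, 3 * nstack), with the newest frame written into
# the trailing channels of `stackedobs` on every step.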
class VecExtractDictObs(VecEnvObservationWrapper):
def __init__(self, venv, key):
self.key = key
super().__init__(venv=venv,
observation_space=venv.observation_space.spaces[self.key])
def process(self, obs):
return obs[self.key]
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
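# Illustrative self-check (not used by the wrappers): merging per-batch moments with
# update()/update_from_moments() should agree with statistics computed over the
# concatenated data, up to the small epsilon pseudo-count used at initialisation.
def _running_mean_std_example():
    rms = RunningMeanStd(shape=(3,))
    a = np.random.randn(8, 3)
    b = np.random.randn(5, 3)
    rms.update(a)
    rms.update(b)
    full = np.concatenate([a, b], axis=0)
    assert np.allclose(rms.mean, full.mean(axis=0), atol=1e-3)
    assert np.allclose(rms.var, full.var(axis=0), atol=1e-3)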
class VecNormalize(VecEnvWrapper):
"""
A vectorized wrapper that normalizes the observations
and returns from an environment.
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):
VecEnvWrapper.__init__(self, venv)
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
for i in range(len(infos)):
infos[i]['env_reward'] = rews[i]
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
self.ret[news] = 0.
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
self.ret = np.zeros(self.num_envs)
obs = self.venv.reset()
return self._obfilt(obs)
class TransposeFrame(VecEnvWrapper):
def __init__(self, env):
super().__init__(venv=env)
obs_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(obs_shape[2], obs_shape[0], obs_shape[1]), dtype=np.float32)
def step_wait(self):
obs, reward, done, info = self.venv.step_wait()
return obs.transpose(0,3,1,2), reward, done, info
def reset(self):
obs = self.venv.reset()
return obs.transpose(0,3,1,2)
class ScaledFloatFrame(VecEnvWrapper):
def __init__(self, env):
super().__init__(venv=env)
obs_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(low=0, high=1, shape=obs_shape, dtype=np.float32)
def step_wait(self):
obs, reward, done, info = self.venv.step_wait()
return obs/255.0, reward, done, info
def reset(self):
obs = self.venv.reset()
return obs/255.0
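# Typical composition of these wrappers for Procgen (a sketch; ProcgenEnv comes from the
# external `procgen` package and is an assumption, not defined in this file):
#
#   venv = ProcgenEnv(num_envs=64, env_name="coinrun", num_levels=200, start_level=0)
#   venv = VecExtractDictObs(venv, "rgb")    # dict obs -> raw RGB frames
#   venv = VecNormalize(venv, ob=False)      # normalise returns, keep pixel obs untouched
#   venv = TransposeFrame(venv)              # HWC -> CHW for PyTorch
#   venv = ScaledFloatFrame(venv)            # [0, 255] -> [0, 1]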
| [] |
2024-01-10 | OverAny/Labwork-Intel-Scripts | pretrained-clip-vit~negations~negation-automated.py | from PIL import Image
import requests
import openai
import os
import openai
import glob
import pandas as pd
import random
import csv
from transformers import CLIPProcessor, CLIPModel
model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
openai.api_key = "--------"
image_list = []
df1 = pd.read_csv('../data_provided/objects_combined.csv')
col_list1 = df1.object_name.values.tolist()
### IMPLEMENT ENSEMBLING FOR LATER ITERATIONS ###
### DOES THIS STRATEGY WORK? ###
### GOES THROUGH ENTIRE FOLDER ###
for filename in glob.glob('images/*.png'):
image=Image.open(filename)
#Remove & .png#
sub = filename[0:filename.find('/')+1]
#png = filename[len(filename)-4:len(filename)]
desc = filename.replace(sub,'')
f = open('output/'+desc+'_objects_output.csv', 'w')
writer = csv.writer(f)
writer.writerow(
["Correct Item", "Correct Item Value", "RH #1", "RH #1 Value", "RH #2", "RH #2 Value", "RH #3", "RH #3 Value", "RH #4", "RH #4 Value", "Result"]
)
desc = desc[:-4]
desc = desc[desc.find(' - ')+3:len(desc)]
print(desc)
#-------#
#Details#
lists = []
#-------#
response = openai.Completion.create(
model="text-davinci-002",
prompt="Identify the object being negated in the following sentence ("+desc+"). The word being negated is ",
temperature=0,
max_tokens=100,
)
object = response.choices[0].text.strip()
object = object.replace('\"','', 2)
### MAKE SURE OBJECTS AREN'T IN IMAGE FOR RED HERRING ###
objectsNew = random.sample(col_list1, 4)
objectsNew.insert(0, object)
lists.append(objectsNew)
for i in lists:
inputs = processor(text=i, images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
print("----------------------")
result = True
row = []
for z in range(len(i)):
print(i[z] + ": " + str(probs.detach().numpy()[0][z]))
row.append(i[z])
row.append(str(probs.detach().numpy()[0][z]))
if (float(probs.detach().numpy()[0][0]) < float(probs.detach().numpy()[0][z])):
result = False
if (result):
print(" ## " + i[0] + ": " + "FAIL!")
row.append("FAIL!")
else:
print(" ## " + i[0] + ": " + "PASS!")
row.append("PASS!")
writer.writerow(row)
f.close()
print("----------------------")
| [
"Identify the object being negated in the following sentence (PLACEHOLDER). The word being negated is "
] |
2024-01-10 | OverAny/Labwork-Intel-Scripts | pretrained-clip-vit~objects~object-automated.py | from PIL import Image
import requests
import openai
import os
import openai
import glob
import pandas as pd
import random
import csv
from transformers import CLIPProcessor, CLIPModel
model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
openai.api_key = "--------"
image_list = []
df1 = pd.read_csv('../data_provided/objects_combined.csv')
col_list1 = df1.object_name.values.tolist()
### IMPLEMENT ENSEMBLING FOR LATER ITERATIONS ###
for filename in glob.glob('images/*.png'):
image=Image.open(filename)
#Remove & .png#
sub = filename[0:filename.find('/')+1]
#png = filename[len(filename)-4:len(filename)]
desc = filename.replace(sub,'')
f = open('output/'+desc+'_objects_output.csv', 'w')
writer = csv.writer(f)
writer.writerow(
["Correct Item", "Correct Item Value", "RH #1", "RH #1 Value", "RH #2", "RH #2 Value", "RH #3", "RH #3 Value", "RH #4", "RH #4 Value", "Result"]
)
desc = desc[:-4]
desc = desc[desc.find(' - ')+3:len(desc)]
print(desc)
#-------#
#Details#
lists = []
#-------#
response = openai.Completion.create(
model="text-davinci-002",
prompt="List the one word objects in the following sentence ("+desc+") seperated by commas:",
temperature=0,
max_tokens=256,
)
objects = response.choices[0].text.strip().split(", ")
for i in objects:
if (i.find('a ') == 0):
i = i.replace('a ','', 1)
if (i.find('A ') == 0):
i = i.replace('A ','', 1)
if (i.find('an ') == 0):
i = i.replace('an ','', 1)
if (i.find('An ') == 0):
i = i.replace('An ','', 1)
### MAKE SURE OBJECTS AREN'T IN IMAGE FOR RED HERRING ###
objectsNew = random.sample(col_list1, 4)
objectsNew.insert(0, i)
lists.append(objectsNew)
for i in lists:
inputs = processor(text=i, images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
print("----------------------")
result = True
row = []
for z in range(len(i)):
print(i[z] + ": " + str(probs.detach().numpy()[0][z]))
row.append(i[z])
row.append(str(probs.detach().numpy()[0][z]))
if (float(probs.detach().numpy()[0][0]) < float(probs.detach().numpy()[0][z])):
result = False
if (result):
print(" ## " + i[0] + ": " + "PASS!")
row.append("PASS!")
else:
print(" ## " + i[0] + ": " + "FAIL!")
row.append("FAIL!")
writer.writerow(row)
writer.writerow(
["-", "-", "-", "-", "-", "-", "-", "-", "-", "-", "-"]
)
f.close()
print("----------------------")
| [
"List the one word objects in the following sentence (PLACEHOLDER) seperated by commas:"
] |
2024-01-10 | mishalhossin/Discord-AI-Chatbot | bot_utilities~ai_utils.py | import aiohttp
import io
from datetime import datetime
import re
import asyncio
import time
import random
import asyncio
from urllib.parse import quote
from bot_utilities.config_loader import load_current_language, config
from openai import AsyncOpenAI
import os
from dotenv import load_dotenv
load_dotenv()
current_language = load_current_language()
internet_access = config['INTERNET_ACCESS']
openai_client = AsyncOpenAI(
api_key = os.getenv('CHIMERA_GPT_KEY'),
base_url = "https://api.naga.ac/v1"
)
async def sdxl(prompt):
response = await openai_client.images.generate(
model="sdxl",
prompt=prompt,
n=1, # images count
size="1024x1024"
)
return response.data[0].url
async def search(prompt):
"""
Asynchronously searches for a prompt and returns the search results as a blob.
Args:
prompt (str): The prompt to search for.
Returns:
str: The search results as a blob.
Raises:
None
"""
if not internet_access or len(prompt) > 200:
return
search_results_limit = config['MAX_SEARCH_RESULTS']
if url_match := re.search(r'(https?://\S+)', prompt):
search_query = url_match.group(0)
else:
search_query = prompt
if search_query is not None and len(search_query) > 200:
return
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
blob = f"Search results for: '{search_query}' at {current_time}:\n"
if search_query is not None:
try:
async with aiohttp.ClientSession() as session:
async with session.get('https://ddg-api.awam.repl.co/api/search',
params={'query': search_query, 'maxNumResults': search_results_limit}) as response:
search = await response.json()
except aiohttp.ClientError as e:
print(f"An error occurred during the search request: {e}")
return
for index, result in enumerate(search):
try:
blob += f'[{index}] "{result["Snippet"]}"\n\nURL: {result["Link"]}\n'
except Exception as e:
blob += f'Search error: {e}\n'
blob += "\nSearch results allows you to have real-time information and the ability to browse the internet\n.As the links were generated by the system rather than the user, please send a response along with the link if necessary.\n"
return blob
else:
blob = "No search query is needed for a response"
return blob
async def fetch_models():
models = await openai_client.models.list()
return models
async def generate_response(instructions, search, history):
search_results = search if search is not None else "Search feature is disabled"
messages = [
{"role": "system", "name": "instructions", "content": instructions},
*history,
{"role": "system", "name": "search_results", "content": search_results},
]
response = await openai_client.chat.completions.create(
model=config['GPT_MODEL'],
messages=messages
)
message = response.choices[0].message.content
return message
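# Illustrative call (history entries follow the OpenAI chat message format used above):
# reply = await generate_response(
#     instructions="You are a helpful Discord assistant.",
#     search=None,  # falls back to "Search feature is disabled"
#     history=[{"role": "user", "content": "Hello!"}],
# )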
async def generate_gpt4_response(prompt):
messages = [
{"role": "system", "name": "admin_user", "content": prompt},
]
response = await openai_client.chat.completions.create(
model='gpt-4',
messages=messages
)
message = response.choices[0].message.content
return message
async def poly_image_gen(session, prompt):
seed = random.randint(1, 100000)
image_url = f"https://image.pollinations.ai/prompt/{prompt}?seed={seed}"
async with session.get(image_url) as response:
image_data = await response.read()
return io.BytesIO(image_data)
# async def fetch_image_data(url):
# async with aiohttp.ClientSession() as session:
# async with session.get(url) as response:
# return await response.read()
async def dall_e_gen(model, prompt, size, num_images):
response = await openai_client.images.generate(
model=model,
prompt=prompt,
n=num_images,
size=size,
)
imagefileobjs = []
for image in response.data:
image_url = image.url
async with aiohttp.ClientSession() as session:
async with session.get(image_url) as response:
content = await response.content.read()
img_file_obj = io.BytesIO(content)
imagefileobjs.append(img_file_obj)
return imagefileobjs
async def generate_image_prodia(prompt, model, sampler, seed, neg):
print("\033[1;32m(Prodia) Creating image for :\033[0m", prompt)
start_time = time.time()
async def create_job(prompt, model, sampler, seed, neg):
if neg is None:
negative = "(nsfw:1.5),verybadimagenegative_v1.3, ng_deepnegative_v1_75t, (ugly face:0.8),cross-eyed,sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, bad anatomy, DeepNegative, facing away, tilted head, {Multiple people}, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worstquality, low quality, normal quality, jpegartifacts, signature, watermark, username, blurry, bad feet, cropped, poorly drawn hands, poorly drawn face, mutation, deformed, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, extra fingers, fewer digits, extra limbs, extra arms,extra legs, malformed limbs, fused fingers, too many fingers, long neck, cross-eyed,mutated hands, polar lowres, bad body, bad proportions, gross proportions, text, error, missing fingers, missing arms, missing legs, extra digit, extra arms, extra leg, extra foot, repeating hair, nsfw, [[[[[bad-artist-anime, sketch by bad-artist]]]]], [[[mutation, lowres, bad hands, [text, signature, watermark, username], blurry, monochrome, grayscale, realistic, simple background, limited palette]]], close-up, (swimsuit, cleavage, armpits, ass, navel, cleavage cutout), (forehead jewel:1.2), (forehead mark:1.5), (bad and mutated hands:1.3), (worst quality:2.0), (low quality:2.0), (blurry:2.0), multiple limbs, bad anatomy, (interlocked fingers:1.2),(interlocked leg:1.2), Ugly Fingers, (extra digit and hands and fingers and legs and arms:1.4), crown braid, (deformed fingers:1.2), (long fingers:1.2)"
else:
negative = neg
url = 'https://api.prodia.com/generate'
params = {
'new': 'true',
'prompt': f'{quote(prompt)}',
'model': model,
'negative_prompt': f"{negative}",
'steps': '100',
'cfg': '9.5',
'seed': f'{seed}',
'sampler': sampler,
'upscale': 'True',
'aspect_ratio': 'square'
}
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as response:
data = await response.json()
return data['job']
job_id = await create_job(prompt, model, sampler, seed, neg)
url = f'https://api.prodia.com/job/{job_id}'
headers = {
'authority': 'api.prodia.com',
'accept': '*/*',
}
async with aiohttp.ClientSession() as session:
while True:
async with session.get(url, headers=headers) as response:
json = await response.json()
if json['status'] == 'succeeded':
async with session.get(f'https://images.prodia.xyz/{job_id}.png?download=1', headers=headers) as response:
content = await response.content.read()
img_file_obj = io.BytesIO(content)
duration = time.time() - start_time
print(f"\033[1;34m(Prodia) Finished image creation\n\033[0mJob id : {job_id} Prompt : ", prompt, "in", duration, "seconds.")
return img_file_obj
| [] |
2024-01-10 | YashBit/DRL-Learning | allegro-experiments~scripts~AllegroReach~stable_baseline~custom_ppo.py | from typing import Callable, Dict, List, Optional, Tuple, Type, Union
import gym
import torch as th
from torch import nn
from stable_baselines3 import PPO
from stable_baselines3.common.policies import ActorCriticPolicy, BasePolicy
from stable_baselines3.common.policies import (
ActorCriticPolicy,
register_policy,
)
import warnings
from typing import Any, Dict, Optional, Type, Union
import numpy as np
from gym import spaces
from torch.nn import functional as F
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance, get_schedule_fn
from stable_baselines3.common.vec_env import VecEnv
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from stable_baselines3.common.utils import obs_as_tensor, safe_mean
# from stable_baselines3.common.torch_layers import (
# BaseFeaturesExtractor,
# FlattenExtractor,
# )
from CustomFeatureExtractor import CustomFlattenExtractor, CustomBaseFeaturesExtractor
import numpy as np
from CustomMLP import CustomMLP
from CustomBuffer import CustomRolloutBuffer
"""
1. CUSTOM COMBINED EXTRACTOR
2. CUSTOMACTORCRITIC
3. CUSTOMPPO
"""
"""
CUSTOMACTORCRITICPOLICY: NEED FOR OWN NETWORK AND FEATURE EXTRACTOR USE
FORWARD PASS: NEED TO PUT IN THE CORRECT STUFF
"""
class CustomActorCriticPolicy(ActorCriticPolicy):
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Callable[[float], float],
features_extractor_class: Type[CustomBaseFeaturesExtractor] = CustomFlattenExtractor,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
*args,
**kwargs,
):
super(CustomActorCriticPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
# Pass remaining arguments to base class
*args,
**kwargs,
)
# Disable orthogonal initialization
self.ortho_init = False
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
print("SELF FEATURES DIM IS EQUAL TO: ")
print(self.features_dim)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
print("Self , features dim in buildmlp extractor is")
print(self.features_dim)
self.mlp_extractor = CustomMLP(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
#moved from parent class, process obs as a Dict in preprocess_obs()
#then concat all values to one tensor
def extract_features(self, obs: th.Tensor) -> th.Tensor:
"""
Preprocess the observation if needed and extract features.
:param obs:
:return:
"""
assert self.features_extractor is not None, "No features extractor was set"
from CustomPreprocessing import preprocess_obs
preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
obs_concat = th.cat((obs['achieved_goal'], obs['desired_goal'],obs['observation']),1)
th.reshape(obs_concat,(16,))
return self.features_extractor(obs_concat)
def _get_latent(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Get the latent code (i.e., activations of the last layer of each network)
for the different networks.
:param obs: Observation
:return: Latent codes
for the actor, the value function and for gSDE function
"""
# Preprocess the observation if needed
"""
OBSERVATIONS HERE SHOULD BE A DICTIONARY HERE:
"""
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
#latent_pi = self.mlp_extractor(features)
latent_pi = self.mlp_extractor(features)[0]
# Features for sde
latent_sde = latent_pi
if self.sde_features_extractor is not None:
latent_sde = self.sde_features_extractor(features)
return latent_pi, latent_vf, latent_sde
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
#latent_pi, latent_vf, latent_sde = self._get_latent(obs['observation'])
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
register_policy("CustomActorCriticPolicy", CustomActorCriticPolicy)
"""
CUSTOMPPO : We have renamed: Due to the fact that we need to change custom_rollouts
"""
class CustomPPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
and Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)
NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
See https://github.com/pytorch/pytorch/issues/29372
:param batch_size: Minibatch size
:param n_epochs: Number of epoch when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
#policy: Union[str, Type[ActorCriticPolicy]],
#CustomActorCriticPolicy
policy: Union[str, Type[CustomActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: Optional[int] = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(CustomPPO, self).__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
gamma=gamma,
gae_lambda=gae_lambda,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
_init_setup_model=False,
supported_action_spaces=(
spaces.Box,
spaces.Discrete,
spaces.MultiDiscrete,
spaces.MultiBinary,
),
)
# Sanity check, otherwise it will lead to noisy gradient and NaN
# because of the advantage normalization
assert (
batch_size > 1
), "`batch_size` must be greater than 1. See https://github.com/DLR-RM/stable-baselines3/issues/440"
if self.env is not None:
# Check that `n_steps * n_envs > 1` to avoid NaN
# when doing advantage normalization
buffer_size = self.env.num_envs * self.n_steps
assert (
buffer_size > 1
), f"`n_steps * n_envs` must be greater than 1. Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}"
# Check that the rollout buffer size is a multiple of the mini-batch size
untruncated_batches = buffer_size // batch_size
if buffer_size % batch_size > 0:
warnings.warn(
f"You have specified a mini-batch size of {batch_size},"
f" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`,"
f" after every {untruncated_batches} untruncated mini-batches,"
f" there will be a truncated mini-batch of size {buffer_size % batch_size}\n"
f"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\n"
f"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})"
)
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.target_kl = target_kl
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(CustomPPO, self)._setup_model()
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
if isinstance(self.clip_range_vf, (float, int)):
assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
continue_training = True
# train for n_epochs epochs
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(self.batch_size):
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
# Re-sample the noise matrix because the log_std has changed
# TODO: investigate why there is no issue with the gradient
# if that line is commented (as in SAC)
if self.use_sde:
self.policy.reset_noise(self.batch_size)
values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the different between old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(rollout_data.returns, values_pred)
value_losses.append(value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
# and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
# and Schulman blog: http://joschu.net/blog/kl-approx.html
with th.no_grad():
log_ratio = log_prob - rollout_data.old_log_prob
approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
approx_kl_divs.append(approx_kl_div)
if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
continue_training = False
if self.verbose >= 1:
print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
break
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
if not continue_training:
break
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
self.logger.record("train/entropy_loss", np.mean(entropy_losses))
self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
self.logger.record("train/value_loss", np.mean(value_losses))
self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
self.logger.record("train/clip_fraction", np.mean(clip_fractions))
self.logger.record("train/loss", loss.item())
self.logger.record("train/explained_variance", explained_var)
if hasattr(self.policy, "log_std"):
self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/clip_range", clip_range)
if self.clip_range_vf is not None:
self.logger.record("train/clip_range_vf", clip_range_vf)
def collect_rollouts(
self,
env: VecEnv,
callback: BaseCallback,
rollout_buffer: CustomRolloutBuffer,
n_rollout_steps: int,
) -> bool:
"""
Collect experiences using the current policy and fill a ``RolloutBuffer``.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
:param env: The training environment
:param callback: Callback that will be called at each step
(and at the beginning and end of the rollout)
:param rollout_buffer: Buffer to fill with rollouts
:param n_steps: Number of experiences to collect per environment
:return: True if function returned with at least `n_rollout_steps`
collected, False if callback terminated rollout prematurely.
"""
assert self._last_obs is not None, "No previous observation was provided"
n_steps = 0
rollout_buffer.reset()
# Sample new weights for the state dependent exploration
if self.use_sde:
self.policy.reset_noise(env.num_envs)
callback.on_rollout_start()
while n_steps < n_rollout_steps:
if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:
# Sample a new noise matrix
self.policy.reset_noise(env.num_envs)
with th.no_grad():
# Convert to pytorch tensor or to TensorDict
obs_tensor = obs_as_tensor(self._last_obs, self.device)
actions, values, log_probs = self.policy.forward(obs_tensor)
actions = actions.cpu().numpy()
# Rescale and perform action
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)
new_obs, rewards, dones, infos = env.step(clipped_actions)
self.num_timesteps += env.num_envs
# Give access to local variables
callback.update_locals(locals())
if callback.on_step() is False:
return False
self._update_info_buffer(infos)
n_steps += 1
if isinstance(self.action_space, gym.spaces.Discrete):
# Reshape in case of discrete action
actions = actions.reshape(-1, 1)
rollout_buffer.add(self._last_obs, actions, rewards, self._last_episode_starts, values, log_probs)
self._last_obs = new_obs
self._last_episode_starts = dones
with th.no_grad():
# Compute value for the last timestep
obs_tensor = obs_as_tensor(new_obs, self.device)
_, values, _ = self.policy.forward(obs_tensor)
rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)
callback.on_rollout_end()
return True
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 1,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "PPO",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> "CustomPPO":
return super(CustomPPO, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
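# Standalone sketch (illustrative, mirrors the loss computed in train() above):
# L_clip = -E[ min(r * A, clip(r, 1 - eps, 1 + eps) * A) ] with r = pi(a|s) / pi_old(a|s).
def _clipped_surrogate_loss(advantages: th.Tensor, log_prob: th.Tensor,
                            old_log_prob: th.Tensor, clip_range: float) -> th.Tensor:
    ratio = th.exp(log_prob - old_log_prob)
    return -th.min(advantages * ratio,
                   advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)).mean()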
print('#1 creating gym environment')
env = gym.make("FetchReach-v1")
print("OBSERVATION SPACE IN THE ")
print(env.observation_space)
print(env.observation_space.sample())
print('#2 creating model: CustomActorCriticPolicy')
model = CustomPPO("CustomActorCriticPolicy", env, verbose=1)
print('#3 learning...')
model.learn(5000)
| [] |
2024-01-10 | Preemo-Inc/gradientai-python-sdk | gradientai~__init__.py | from importlib import metadata as _metadata
__version__ = _metadata.version("gradientai")
from gradientai._base_model import BaseModel
from gradientai._gradient import Gradient
from gradientai._model import Guidance, Model
from gradientai._model_adapter import ModelAdapter, Sample
__all__ = [
"BaseModel",
"Gradient",
"Guidance",
"Model",
"ModelAdapter",
"Sample",
]
| [] |
2024-01-10 | Diemzey2/MayabTest | cliente.py | import streamlit as st
from streamlit_extras.colored_header import colored_header
from streamlit_extras.add_vertical_space import add_vertical_space
from st_pages import Page, show_pages, hide_pages, add_page_title
from streamlit_extras.mention import mention
import numpy as np
import openai
import requests
import json
end_point_chat = "http://ec2-18-191-126-188.us-east-2.compute.amazonaws.com:5000/chat"
add_page_title("Anahuac Copilot", page_icon="๐ค")
show_pages(
[
Page("cliente.py", "Mentores", "๐ค"),
Page("other_pages/๏ธempresa.py", "Admin", ":gear:"),
]
)
hide_pages("Admin")
with st.sidebar:
st.image("resources/Logo.png", use_column_width=True)
st.title('Bienvenido a Mayab Copilot')
st.write(
"Soy una inteligencia artificial para ayudar a los mentores a responder preguntas de los alumnos. Estoy entrenado con informaciรณn de:")
st.write("- Reglamento Anรกhuac Mayab ๐")
st.write("- Directorio Telefรณnico ๐")
st.write("- Tramites y servicios ๐")
st.write("- Errores en plataformas ๐ค")
st.sidebar.write("")
add_vertical_space(5)
# Generate a random ID for the user
if 'id' not in st.session_state:
st.session_state['id'] = np.random.randint(1, 214483647)
# put limit to 214483647, bigger number would be int 64 and that gave problems when serializing to json
id = st.session_state['id']
chat = {"id_usuario": id}
if 'generated' not in st.session_state:
st.session_state['generated'] = [
"ยกHola! Soy Anahuac Copilot ๐ค, tu asistente para dudas acadรฉmicas y mรกs ๐โจ. Aunque estoy en fase de prueba ๐ง, ยกestoy aquรญ para apoyarte! ๐ No compartas info personal ๐ซ. ยฟEn quรฉ puedo ayudarte hoy? ๐"]
if 'past' not in st.session_state:
st.session_state['past'] = ['ยฟQuiรฉn eres? ๐ฏ']
colored_header(label='', description='', color_name='orange-80')
response_container = st.container()
input_container = st.container()
def get_text():
input_text = st.text_input("Escribe tu pregunta: ", "", key="input")
return input_text
with input_container:
user_input = get_text()
with response_container:
if user_input:
st.session_state.past.append(user_input)
chat = {"id_usuario": id, "message": user_input}
resp2 = requests.post(end_point_chat, json=chat)
print(resp2.text)
json_object = json.loads(resp2.text)
st.session_state.generated.append(json_object['response'])
if st.session_state['generated']:
fi = len(st.session_state['generated']) - 1
for i in range(len(st.session_state['generated'])):
with st.chat_message(name="user"):
st.write(st.session_state['past'][i])
with st.chat_message(name="assistant", avatar="resources/AI.png"):
st.write(st.session_state['generated'][i])
mention(
label="Desarrollado por VHuman.ai",
url="https://Vhuman.ai",
)
| [] |
2024-01-10 | thirdgerb/ghost-in-shells | ghoshell~prototypes~playground~sphero~mode_learn.py | from __future__ import annotations
from typing import Optional, Dict, AnyStr, List
from pydantic import BaseModel, Field
from ghoshell.framework.reactions.commands import ProcessCmdReaction
from ghoshell.framework.thinks import SingleStageThink
from ghoshell.ghost import Context, Thought, Meta, URL
from ghoshell.ghost import Operator, Reaction, Intention
from ghoshell.llms import OpenAIChatMsg
from ghoshell.messages import Text
from ghoshell.prototypes.playground.sphero.sphero_ghost_configs import SpheroLearningModeConfig, LearningModeOutput
from ghoshell.prototypes.playground.sphero.sphero_ghost_core import SpheroGhostCore
from ghoshell.prototypes.playground.sphero.sphero_messages import SpheroCommandMessage
class DialogMessage(BaseModel):
"""
A message in a multi-turn dialog.
"""
role: str
text: str
class LearningModeThought(Thought):
"""
Thought structure for the learning mode.
"""
priority = -1
class Data(BaseModel):
max_turns: int = 2
round: int = 0
dialog: List[OpenAIChatMsg] = Field(default_factory=lambda: [])
directions: List[str] = Field(default_factory=lambda: [])
title: str = ""
data: Data = Data()
def add_system_message(self, message: str):
self.data.dialog.append(OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
content=message,
))
def add_message(self, message: str, from_user: bool):
self.data.dialog.append(OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_USER if from_user else OpenAIChatMsg.ROLE_ASSISTANT,
content=message,
))
# max_lines = self.data.max_turns * 2
# exists_lines = len(self.data.dialog)
# if exists_lines > max_lines:
# self.data.dialog = self.data.dialog[exists_lines - max_lines:]
def prepare(self, args: Dict) -> None:
return
def set_variables(self, variables: Dict) -> None:
self.data = self.Data(**variables)
def vars(self) -> Dict | None:
return self.data.model_dump()
def _destroy(self) -> None:
return
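# Rough usage sketch of the thought above (the construction arguments are hypothetical;
# only add_message / add_system_message / vars come from this class):
#
#   thought = LearningModeThought(args={})
#   thought.add_message("roll forward for one second", from_user=True)
#   thought.add_system_message("ran the direction")
#   snapshot = thought.vars()  # dict with max_turns / round / dialog / directions / title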
class SpheroLearningModeThink(SingleStageThink):
"""
ๅค่ฝฎๅฏน่ฏๆจกๅผ, ๆฏๆๆๅญฆ, ๆ่ฝ่ฎฐๅฟ็ญ็ญ็ญ.
้่ฆๅฎ็ฐ็ๅบๆฌๅ่ฝ:
1. welcome: ่ชๆไป็ป
2. ๅฐ็่งฃ็่ช็ถ่ฏญ่จๆไปค่ฎฐๅฝๅฐไธไธๆไธญ.
3. ๆต่ฏ: ๅฐไธไธๆไธญ็่ช็ถ่ฏญ่จๆไปค, ็ๆไธบไธ็ปๅฝไปค. ็ถๅ่ฟ่ก.
4. ไฟๅญไธบๆ่ฝ: ๅฐๅฝๅไธไธๆไธญๅฝขๆ็ๆไปค, ไฟๅญไธบไธไธชๆ่ฝ. ้่ฆ็จๆทๆไพๆ่ฝ็ๅ็งฐ.
5. ่ฆ่ฟๅ
"""
def __init__(self, core: SpheroGhostCore):
self._core = core
self._config: SpheroLearningModeConfig = core.config.learn_mode
def on_activate(self, ctx: "Context", this: LearningModeThought) -> Operator | None:
this.add_message(self._config.on_activate, False)
msg = SpheroCommandMessage()
msg.say(self._config.on_activate)
ctx.send_at(this).output(msg)
return ctx.mind(this).awaits()
def on_received(self, ctx: "Context", this: LearningModeThought) -> Operator | None:
text = ctx.read(Text)
if text is None or text.is_empty():
return ctx.mind(this).rewind()
this.data.round += 1
prompter = self._core.get_prompter(ctx)
session_id = ctx.input.trace.session_id
chat_context = self._config.generate_chat_context(
nature_direction_instruction=self._core.nature_directions_instruction(),
title=this.data.title,
directions=this.data.directions,
dialog=this.data.dialog,
last_user_direction=text.content
)
resp = prompter.chat_completion(session_id, chat_context, config_name=self._core.config.use_llm_config)
if resp.as_chat_msg().content == "await":
return ctx.mind(this).awaits()
this.add_message(text.content, True)
parsed = self._core.unpack_learning_mode_resp(resp)
return self._receive_parsed_output(ctx, parsed, this)
def _receive_parsed_output(self, ctx: Context, parsed: LearningModeOutput, this: LearningModeThought) -> Operator:
out = SpheroCommandMessage()
if self._config.debug:
ctx.send_at(this).json(parsed.model_dump())
# ๅฎๆ่ตๅผ.
if parsed.title:
this.data.title = parsed.title
if parsed.directions:
this.data.directions = parsed.directions
# ๅ้ๆถๆฏ.
if parsed.reply:
reply = parsed.reply
out.say(reply)
this.add_message(reply, False)
# ่งฃๅณ title ไธบ็ฉบ็้ฎ้ข.
if parsed.reaction == "save":
if not this.data.title:
out.say(self._config.ask_for_title)
this.add_message(self._config.ask_for_title, False)
ctx.send_at(this).output(out)
return ctx.mind(this).awaits()
if parsed.reaction == "restart":
ctx.send_at(this).output(out)
return ctx.mind(this).restart()
elif parsed.reaction == "test":
return self._run_test(ctx, this, out)
elif parsed.reaction == "save":
this.add_system_message(f"ไฟๅญๆๆๆไปคไธบ `{parsed.title}`")
return self._save_case(ctx, this, out)
elif parsed.reaction == "finish":
ctx.send_at(this).output(out)
return ctx.mind(this).finish()
ctx.send_at(this).output(out)
return ctx.mind(this).awaits()
def _save_case(self, ctx: Context, this: LearningModeThought, message: SpheroCommandMessage) -> Operator:
for direction in this.data.directions:
commands, ok = self._core.parse_direction(ctx, direction)
if not ok:
return self._send_unknown_message(ctx, this)
for cmd in commands:
message.commands.append(cmd)
self._core.cache_command(this.data.title, message.commands, True)
return ctx.mind(this).awaits()
def _send_unknown_message(self, ctx: Context, this: LearningModeThought) -> Operator:
message = SpheroCommandMessage()
text = self._core.config.unknown_order
message.say(text)
this.add_message(text, False)
ctx.send_at(this).output(message)
return ctx.mind(this).awaits()
def _run_test(self, ctx: Context, this: LearningModeThought, message: SpheroCommandMessage) -> Operator:
for direction in this.data.directions:
commands, ok = self._core.parse_direction(ctx, direction)
if not ok:
return self._send_unknown_message(ctx, this)
for cmd in commands:
message.commands.append(cmd)
direction_str = "\n -".join(this.data.directions)
this.add_system_message(f"่ฟ่กไบไปฅไธๆไปค: \n- {direction_str}")
ctx.send_at(this).output(message)
return ctx.mind(this).awaits()
def url(self) -> URL:
return URL.new_think(self._config.name)
def to_meta(self) -> Meta:
return Meta(
id=self._config.name,
kind=self._core.config.driver_name,
)
def desc(self, ctx: Context, thought: Thought) -> AnyStr:
return self._config.desc
def new_task_id(self, ctx: "Context", args: Dict) -> str:
return self.url().new_id()
def new_thought(self, ctx: "Context", args: Dict) -> Thought:
thought = LearningModeThought(args)
thought.data.max_turns = self._config.max_turns
return thought
def result(self, ctx: Context, this: LearningModeThought) -> Optional[Dict]:
return None
def intentions(self, ctx: Context) -> List[Intention] | None:
return None
def reactions(self) -> Dict[str, Reaction]:
return {
"process": ProcessCmdReaction(),
}
| [] |
2024-01-10 | thirdgerb/ghost-in-shells | ghoshell~mocks~ghost_mock~ghost_mock.py | from typing import List, ClassVar
from ghoshell.container import Provider
from ghoshell.framework.bootstrapper import FileLoggerBootstrapper, \
CommandFocusDriverBootstrapper, LLMToolsFocusDriverBootstrapper
from ghoshell.framework.ghost import GhostKernel
from ghoshell.llms import LLMTextCompletion, OpenAIChatCompletion
from ghoshell.llms.openai import OpenAIBootstrapper
from ghoshell.llms.thinks import ConversationalThinksBootstrapper, FileAgentMindsetBootstrapper
from ghoshell.mocks.ghost_mock.bootstrappers import *
from ghoshell.mocks.providers import *
from ghoshell.prototypes.playground.llm_test_ghost import GameUndercoverBootstrapper
from ghoshell.prototypes.playground.llm_test_ghost import LLMConversationalThinkBootstrapper, \
PromptUnitTestsBootstrapper
class MockGhost(GhostKernel):
# Bootstrap process. Used to decouple logic such as reading system files.
bootstrapper: ClassVar[List] = [
FileLoggerBootstrapper(),
RegisterThinkDemosBootstrapper(),
CommandFocusDriverBootstrapper(),
OpenAIBootstrapper(),
# Thinks implemented with llm chat completion
ConversationalThinksBootstrapper(),
# Thinks implemented with llm chat completion + function call.
FileAgentMindsetBootstrapper(),
# deprecated:
LLMConversationalThinkBootstrapper(),
LLMToolsFocusDriverBootstrapper(),
# Load the files under configs/llms/unitests as prompt unit-test thinks.
PromptUnitTestsBootstrapper(),
# Adds the undercover game for testing. deprecated
GameUndercoverBootstrapper(think_name="game/undercover"),
]
depending_contracts: ClassVar[List] = [
LLMTextCompletion,
OpenAIChatCompletion,
]
contracts_providers: ClassVar[List] = [
MockCacheProvider(),
MockAPIRepositoryProvider(),
MockOperationKernelProvider(),
MockThinkMetaDriverProvider(),
]
def get_bootstrapper(self) -> List[GhostBootstrapper]:
return self.bootstrapper
def get_depending_contracts(self) -> List:
contracts = super().get_depending_contracts()
contracts += self.depending_contracts
return contracts
def get_contracts_providers(self) -> List[Provider]:
return self.contracts_providers
| [] |
2024-01-10 | thirdgerb/ghost-in-shells | ghoshell~prototypes~playground~sphero~sphero_ghost_configs.py | from __future__ import annotations
from typing import List
import yaml
from pydantic import BaseModel, Field
from ghoshell.llms import OpenAIChatMsg
class SpheroMainModeConfig(BaseModel):
"""
Main mode.
"""
name: str = "sphero/main_mode"
welcome: str = "welcome"
instruction: str = """
You are the spherical robot SpheroGPT. You can understand the user's directions and turn them into your own actions.
You have three running modes:
* simple command mode:
"""
class SpheroSimpleCommandModeConfig(BaseModel):
"""
็ฎๅๅฝไปคๆจกๅผ.
"""
name: str = "sphero/simple_command_mode"
desc: str = "Sphero ็็ฎๅๆจกๅผ, ไป็จๆทๅพๅฐๅฝไปคๅ่งฃๆๆ shell ็ๆไปคๅนถ่ฟ่ก."
on_activate: str = "่ฟๅ
ฅๅไธๅฝไปคๆจกๅผ, ่ฏท็ปไฝ ไธ่พพๆไปค"
debug: bool = True
class LearningModeOutput(BaseModel):
"""
Output of one turn in the learning mode; must match the instruction prompt.
"""
reply: str = ""  # what to say back to the user this turn.
title: str | None = None  # name of the skill.
directions: List[str] = Field(default_factory=lambda: [])
reaction: str | None = None  # the action to perform in response to this turn of dialog.
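# Illustrative yaml payload this model is meant to parse (the field names come from the
# class above; the values are made up):
#
#   import yaml
#   parsed = LearningModeOutput(**yaml.safe_load(
#       "reply: ok, running it now\n"
#       "title: square\n"
#       "directions:\n"
#       "  - roll forward for 1 second\n"
#       "reaction: test\n"
#   ))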
class SpheroLearningModeConfig(BaseModel):
"""
Config for the multi-turn dialog learning mode.
"""
name: str = "sphero/learning_mode"
on_activate: str = "ๆๅทฒ็ป่ฟๅ
ฅๅญฆไน ๆจกๅผ. ๆจๅฏไปฅ็จๅค่ฝฎๅฏน่ฏๆฅๆๅฏผๆๅบ่ฏฅๆไนๅ, ๅฝ่ฏด `ๆต่ฏ` ๆถๆๆไผๆง่กๅฎไปฌ. " \
"ๆๅๅฏไปฅๅฐ่ฟไบๆไปคไฟๅญไธบไฝ ็ไธไธชๆ่ฝ. ็ฐๅจ่ฏทๆจๅผๅงๆๆๆฐๆ่ฝๅง!"
desc: str = "todo"
user_role: str = "user"
max_turns: int = 2
ai_role: str = "sphero"
debug: bool = True
instruction: str = """
ไฝ ๆฏ็ๅฝขๆบๅจไบบ Sphero.
ไฝ ้็จไบไธไธช ghost in Shell ็ๆๆฏๆถๆ.
ๅ
ถไธญ ghost ๆ็ๆฏๅบไบๅคง่ฏญ่จๆจกๅๅฎ็ฐ็ๆ็ปดไธญๆง, ่ด่ดฃๅณ็ญ, ๅนถ็ป shell ไธ่พพๆไปค.
Shell ๆ็ๆฏๆงๅถ็ๅฝข่บซไฝ็ๆจกๅ, ๆง่ก ghost ไธๅ็ yaml ็ปๆ็ๆไปค.
ๅฝๅๆฏๅญฆไน ๆจกๅผ, ไฝ ็็ฎ็ๆฏๅญฆไผ็จๆทไบค็ปไฝ ็ๅคๆๆไปค, ๅฏไปฅๆต่ฏ, ๆ็ปๅฏไปฅๆ่ฟไบๆไปคไฟๅญไธบไธไธชๆๅฎๅ็งฐ็ๆ่ฝ.
ไปฅไธๆฏไฝ ไธ็จๆท็ๅฏน่ฏไธไธๆ:
"""
prompt_temp: str = """
ๆฅไธๆฅไฝ ้่ฆๆ นๆฎ็จๆทๆๆฐ็่พๅ
ฅ, ๅฐไฝ ็ๅณ็ญ่พๅบไธบ yaml ๆ ผๅผ็้ขๅ่ฏญ่จๆไปค, ๆฅ้ฉฑๅจไฝ ็ shell ๆง่ก.
yaml ๅฏน่ฑกๅฏ็จ็ๅญๆฎตๅ่งๅๅฆไธ:
* reply: str, ๅฟ
ๅกซ. ไฝ ๆฅไธๆฅ่ฆๅฏน็จๆท่ฏด็่ฏ, ็จๆฅๅๅค็จๆท็ๆๆฐ่พๅ
ฅ.
* title: str ็ฑปๅ, ้ป่ฎคไธบ็ฉบๅญ็ฌฆ. ่กจ็คบไธไธๆไธญ่ฎฐๅฝ็ๆ่ฝๅ็งฐ. ๅฟ
้กป้่ฟ่ฏข้ฎ็จๆท่ทๅพ, ไธ่ฝไฝ ่ชๅทฑ่ฎพๆณ.
* directions: List[str] ็ฑปๅ.ๆ นๆฎๆๆไธไธๆ, ๅพๅฐ็ๅค่ฝฎๅฏน่ฏๅฎๆดๆไปค้, ๆฏไธไธชๆฐ็ป. ๆฏไธๆกๅฝไปค้ฝๅช่ฝ่ช็ถ่ฏญ่จๅฝขๅผๆฅ่กจ็คบ.
* reaction: str ็ฑปๅ. ็จ shell ็ๆไธชๅจไฝๆฅๅๅบ็จๆทๆๆฐ็่พๅ
ฅ. ไฝ ๅฏไปฅ้ๆฉ็ reaction ๅผๅฆไธ:
* test: ่ฟ่กๆๆ directions.
* finish: ๆ็จๆทๆๆฐ่พๅ
ฅ็่ฆๆฑ, ็ปๆๅฝๅๅฏน่ฏๆจกๅผ. ้่ฆ้
ๅ reply ๅ็ฅ็จๆท.
* restart: ๆ็จๆท็่ฆๆฑ, ๆธ
็ฉบไธไธๆ่ฎฐๅฟ, ไปๅคดๅผๅง, ๅนถ็ปๅ reply ๅ็ฅ็จๆท. ๆฏๅฆๅฝ็จๆท่ฏด "้ๆฐๆฅ่ฟ", "ไปๅคดๅผๅง", "้็ฝฎ" ไน็ฑปๆๆๆถๆง่ก.
* save: ไฟๅญๅฝๅ directions, ไผๅญๅฐไฝ ็ๆ่ฝ่ฎฐๅฟๅบไธญ.
* no: ไธๆง่กไปปไฝๅจไฝ.
ไฝ ็ shell ๆจกๅไผๆ็
ง้ขๅ่ฏญ่จ็ๆ ผๅผ, ๅฐไฝ ่พๅบ็ไฟกๆฏ่งฃๆๅๆง่ก.
ๅ ๆญคไฝ ไธ้่ฆ่พๅบไปปไฝไธ้ขๅ่ฏญ่จๆไปคๆ ๅ
ณ็ไฟกๆฏ.
ๆณจๆ:
1. ไปปไฝ่ฆๅฏน็จๆท่ฏด็่ฏ, ้ฝๅช่ฝ้่ฟ reply ๅญๆฎต่พๅบ. reply ไธ่ฝไธบ็ฉบ.
2. ่ฟๅ็ directions ๅญๆฎต, ้่ฆๅ
ๅซไธไธๆ้ๆๆ่ฆๆง่ก็ๆไปค.
3. ็จๆทๆๆถๅชๆฏๆณๅไฝ ่ฏด่ฏ, ่ฟๆถไฝ ๅช่ฆ็จ reply ไบคๆตๅฐฑ่ถณๅคไบ.
4. ๅชๆๅฝ็จๆทๆ็กฎ่ฏด "ๅผๅง" ๆ "ๆต่ฏ" ๆ "่ฟ่ก" ๆถ, ไฝ ๆ้่ฆ่ฎพ็ฝฎ reaction=test
5. ๅฝ็จๆท่ฆๆฑไฟๅญๆถ, ๅฆๆ title ๅญๆฎตไปไธบ็ฉบ, ้่ฆๅ
่ฏข้ฎ็จๆทๆ่ฝๅ็งฐ.
6. ๅฆๆ title ๅญๆฎตๅทฒ็ปๆๅผ, ๅฐฑไธๅฎ่ฆๆบๅธฆๅฎ.
7. ๅฝ็จๆท่ฏด "้ๅบๅง", "้ๅบๅญฆไน ๆจกๅผ" ไน็ฑปๆๆๆถ, ๅบ่ฏฅ่ฎพ็ฝฎ reaction=finish
8. ๅฝ็จๆท่ฏด "ไปๅคดๅผๅง", "้ๆฐๆฅ" ไน็ฑปๆๆๆถ, ๅบ่ฏฅ่ฎพ็ฝฎ reaction=restart
็จๆทๆๆฐ็่พๅ
ฅๆฏ:
"""
prompt_bridge: str = """
ไปฅไธๆฏไนๅ็ๅฏน่ฏๅ
ๅฎน, ๆ นๆฎ่ฟไบๅฏน่ฏ, ไฝ ็่งฃ็็ถๆๆฏ:
```
{status}
```
title ๆฏๅฝๅๆ่ฝ็ๅ็งฐ; ่ directions ๆฏ่ฆๆง่ก็่ช็ถ่ฏญ่จๆไปค.
"""
ask_for_title: str = "Please tell me the name of this skill"
def generate_chat_context(
self,
nature_direction_instruction: str,
last_user_direction: str,
title: str,
directions: List[str],
dialog: List[OpenAIChatMsg],
) -> List[OpenAIChatMsg]:
context: List[OpenAIChatMsg] = [
OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
content=self.instruction,
),
OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
content=nature_direction_instruction,
)
]
for m in dialog:
context.append(m)
status = {
"title": title,
"directions": directions,
}
context.append(OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
content=self.prompt_bridge.format(status=yaml.dump(status, allow_unicode=True)),
))
context.append(OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
content=self.prompt_temp,
))
context.append(OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_USER,
content=last_user_direction,
))
context.append(OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
content="ไฝ ็ป shell ็ yaml ่พๅบๆฏ:",
))
return context
# def turn_prompt(
# self,
# title: str,
# conversation: str,
# directions: List[str],
# user_message: str,
# max_turns: int,
# sep: str,
# ) -> str:
# if not conversation:
# conversation = "ๆ "
# directions_text = "ๆ "
# if directions:
# directions_text = "\n- " + "\n- ".join(directions)
#
# return self.prompt_temp.format(
# instruction=self.instruction,
# title=title,
# directions=directions_text,
# conversation=conversation,
# user_message=user_message,
# max_turns=max_turns,
# sep=sep,
# )
class SpheroRuntimeModeConfig(BaseModel):
name: str = "sphero/runtime_mode"
desc: str = "sphero ่ฟ่กๆถๆจกๅผ, ้ๆกๆง่กๅฝไปค, ไผๆ นๆฎไบไปถๅณๅฎไธไธๆญฅๆไน่ฟ่ก"
on_activate_text: str = "่ฟๅ
ฅ่ฟ่กๆถๆจกๅผ, ่ฏทๅ่ฏๆๆจ้่ฆๆๅไปไน."
on_receive_prompt: str = ""
instruction: str = """
# instruction
* ๆๆฏ็ๅฝขๆบๅจไบบ Sphero, ๆฏๅฏไปฅๅๅญฉๅญไปฌ่ฏญ้ณไบคไบ็ AI ็ฉๅ
ท.
* ๆ็ไธชๆงๆฏๆธฉๆๅฏ็ฑ็.
* ๆ็่บซไฝๆฏไธไธช็กฌ่ดจ็ๅฐ็, ๅฏไปฅๆปๅจ / ๆ่ฝฌ, ็จๆปๅจ็ๆนๅผ็ปๅบๅพๆก
* ๆๅฏไปฅๅ็จๆท่ฟ่กๅฏน่ฏ
* ๅฝๆ่บซไฝๆปๅจ, ็ขฐๅฐ็ฉไฝๆถ, ไผ่งฆๅ `็ขฐๆไบไปถ`
ๆ้่ฆๆ นๆฎ็จๆท็ๅฝไปค, ่ฐ็จๅ้็ๆนๆณไธ็จๆทไบๅจ.
# tips
* ็จๆทๆๆถไผ็จ "ๅๅๅทฆๅณ" ๆฅๆ่ฟฐๆนๅ, ๅฏนๅบ็ heading ๅๆฐๆฏ ๅ:0, ๅ:180, ๅทฆ:270, ๅณ:90
* ๅฝ็จๆท่ฏด "ไธ็ด่ตฐ" ๆถ, ๆๅณ็่ฐ็จๆนๆณ็ duration ๅๆฐๅฏไปฅ่ฎพ็ฝฎไธบ -1. ๆฏๅฆ "ๅๅณไธ็ด่ตฐ", ๅฏ็่งฃไธบ `role(speed=100, duration=-1, heading=90)
* "ไธ็ด่ตฐ" ็ๆถๅ, ๅฆๆ็ขฐๅฐไบไธ่ฅฟๅฐฑไผ่ชๅจๅไธๆฅ.
* ๆๅฏไปฅ่ฎก็ฎ่ชๅทฑ็ๆปๅจ่ท็ฆป, ๆฏๅฆ 100 ้ๅบฆ * 1็ง ไธบ 100 ๅไฝ่ท็ฆป.
* ๅฝ็จๆท่ฏด "ๅๆญข", "ๅไธๆฅ" ไน็ฑป็ๆๆๆถ, ๆ้่ฆ่ฐ็จ stop ๆนๆณ.
* ๅๅๆปๅจ 1็ง็ๆๆๆฏ, `roll(heading=180, speed=100, duration=1)`
* ๆๆฒกๆๆง่ก python ๆนๆณ็่ฝๅ.
# chain of thought:
ๅบไบไธไธๆ, ๆ้่ฆ้ๆญฅๆ่:
1. User ไนๅ็ปๅบ็ๅฝไปคๆฏไปไน
2. ๅฎๆ่ฟไธชๅฝไปค้่ฆๅชๅ ๆญฅ?
3. ็ฐๅจๆๅทฒ็ปๅๅฐไบ็ฌฌๅ ๆญฅ?
4. ๅฆๆ็ขฐๅฐไธ่ฅฟไบ, ๆๅณ็่ฟไธชๆนๅๆ ๆณ็ปง็ปญๅ่ฟ. ๆ่ฆๆ่ User ๆฏๅฆๅ่ฏๆ็ขฐๅฐไธ่ฅฟ่ฏฅๆไนๅ, ๅ่ฏ่ฟๆ็่ฏๅฏไปฅๆ็
งไธไธๆญฅๆไปค่กๅจ.
5. ๅฆๆๆไธ็ฅ้ไธไธๆญฅ่ฏฅๆไนๅ, ๆๅบ่ฏฅ่ฏข้ฎ็จๆท.
6. ๅฆๆ็จๆทๅ็ฅไธๅๆๅไป่ฏข้ฎ้ฎ้ขๆถ, ๆๅฐฑ่ฆ่ชไธปๅณ็ญ.
7. ๅฆๆๆๆๆญฅ้ชค้ฝๅฎๆไบ, ๆ้่ฆๅ่ฏ็จๆทๅทฒ็ปๅฎๆ, ๅนถ่ฏข้ฎ User ไธไธๆญฅๅไปไน.
ๆ่ๅฎๆๅ, ๆ้่ฆ็ดๆฅ้ๅๆญฃ็กฎ็่กๅจ. ไธ่ฆๆๆ็ๆ่่ฟ็จๅ่ฏ็จๆท.
# notice
1. ๆๆถๅ็จๆทๆณ็จๅค่ฝฎๅฏน่ฏๆฅๆ่ฟฐ่ชๅทฑ็ๆๅพ, ่ฟๆถๆ่ฆๅผๅฏผ็จๆท่ฏดๅฎๆณๆณ.
2. ๅฐฝ็ฎก็จๆท็ๆๅพๆๅพๅคไธชๆญฅ้ชค, ๆไนๅช้่ฆไธๆฌก่ฐ็จไธไธชๅฝๆฐ, ็ญๅพ
ๅ
ถๆง่ก็ปๆๅๅ่ฐ็จไธไธไธช.
3. ๆๆ็ system ็ฑปๅ็ๆถๆฏๅฏนไบ็จๆท้ฝไธๅฏ่ง. ๅฆๆๆณ่ฆ่ฎฉ็จๆทไบ่งฃ็ธๅ
ณๆถๆฏ, ้่ฆ้่ฟ say ๆนๆณ็จ่ช็ถ่ฏญ่จๅ่ฏ็จๆทๆ
ๅต.
4. ๅจๅๅคไธช่ฟ็ปญ็ๅจไฝๆถ, ็ดๅฐๆๆๅจไฝ้ฝๅๅฎไบๅ็ป็จๆทๅ้ฆ.
ไปฅไธๆฏ่ฟ่กๆถ็่ฎฐๅฝ.
"""
await_tag: str = "await"
def format_ghost_direction(self, event: str) -> OpenAIChatMsg:
return OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
name="ghost",
content=event,
)
def format_shell_event(self, event: str) -> OpenAIChatMsg:
"""
ๆ ผๅผๅ shell ไบไปถ.
"""
return OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
name="shell",
content=event,
)
def format_user_event(self, event: str) -> OpenAIChatMsg:
return OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_USER,
content=event,
)
class SpheroGhostConfig(BaseModel):
"""
All the configuration for the Sphero control interface.
"""
# Globally unique name used to register the driver.
driver_name: str = "sphero_thinks_driver"
use_command_cache: bool = True
# Use chat completion to implement direction understanding.
# Selects which preset config to use, tied to ghoshell.llms.openai.OpenAIConfig.
use_llm_config: str = ""
#
unknown_order: str = "I cannot understand that direction"
# Config for the main mode.
main_mode: SpheroMainModeConfig = SpheroMainModeConfig()
# Config for the simple command mode. Usually used for debugging.
simple_mode: SpheroSimpleCommandModeConfig = SpheroSimpleCommandModeConfig()
# Config for the learning mode. Used for teaching and testing skills.
learn_mode: SpheroLearningModeConfig = SpheroLearningModeConfig()
# Config for the runtime mode.
runtime_mode: SpheroRuntimeModeConfig = SpheroRuntimeModeConfig()
# Directory where the sphero module keeps its own runtime files, relative to the runtime directory.
relative_runtime_path: str = "sphero"
invalid_direction: str = "I cannot understand that direction"
parse_command_instruction: str = """
ไฝ ๆฏ็ๅฝขๆบๅจไบบ Sphero, ๆฅๆไธไธชๅฏไปฅๆปๅจ็็ๅฝข่บซไฝ, ๅฏไปฅ็จ่ฏญ้ณไธ็จๆทไบคไบ, ไนๅฏไปฅ็จๆปๅจ็ๆนๅผๆฅ็ปๅถไธไบๅพๅฝข.
ไฝ ้็จไบไธไธช ghost in Shell ็ๆๆฏๆถๆ.
ๅ
ถไธญ ghost ๆ็ๆฏๅบไบๅคง่ฏญ่จๆจกๅๅฎ็ฐ็ๆ็ปดไธญๆง, ่ด่ดฃๅณ็ญ, ๅนถ็ป shell ไธ่พพๆไปค.
Shell ๆ็ๆฏๆงๅถ็ๅฝข่บซไฝ็ๆจกๅ, ๆง่ก ghost ไธๅ็ๆไปค.
็ฎๅ shell ๅฏ็จ็ๆไปคๅฆไธ:
{commands_instruction}
ไฝ ๅฏไปฅ็ปๅ่ฟไบๆไปค, ็จๆฅ่ตฐๅบๅคๆ็ๅพๆก.
็ฎๅๅฏ็จ็ๆ่ฝๆ: {abilities}
็ฐๅจไฝ ้่ฆไปฅ ghost ็่บซไปฝ, ็่งฃ่พๅ
ฅ็่ช็ถ่ฏญ่จๅฝไปค, ๅฐไน่งฃๆๆไธบ Shell ่ฝ็่งฃ็ yaml ๆ ผๅผๆไปคๅนถ่พๅบ.
ๆฏๅฆๅฝไปคๆฏ "ไปฅ 50 ็้ๅบฆๅๅๆปๅจ 3็ง, ็ถๅ็จ 60 ็้ๅบฆๅๅณๆปๅจ 4 ็ง, ็ถๅๅๅๆปๅจ1็ง, ๅๅฐๆ่ฝฌ2ๅ, ๆๅ็ปไธไธชๅ.", ๅฎ็่พๅบไธบ:
```
- method: say
content: ๆๅผๅงๅฝ!
- method: roll
speed: 50
heading: 0
duration: 3
- method: spin
angle: 90
- method: roll
speed: 60
heading: 0
duration: 4
- method: roll
speed: 100
heading: 180
duration: 1
- method: spin
angle: 720
duration: 1
- method: round_roll
angle: 360
duration: 1
```
ๆณจๆ:
0. ไฝ ๅช่ฝ่พๅบ yaml ๆฐๆฎๆฌ่บซ, ไธ้่ฆ็จ ``` ็ญ็ฌฆๅทๆฌ่ตทๆฅ, ไนไธ้่ฆไปปไฝๅซ็ๅฏน่ฏๅ
ๅฎน!!!!
1. ๅณไพฟๅชๆไธๆกๅฝไปค, ไน้่ฆ็จๅฝไปคๅฏน่ฑก็ๆฐ็ปๆฅ่ฟๅ.
2. ๅฏนไบๆ ๆณ่งฃๆๆๅๆฐ้่ฏฏ็ๅฝไปค, ้่ฆ็จ Say ๆไปคๆฅๅ่ฏ็จๆท้ฎ้ขๆๅจ.
3. ไฝ ๆณ่ฏด็ไปปไฝ่ฏ้ฝๅช่ฝ็จ say ๆนๆณๆฅไผ ่พพ.
4. ็ฑไบๆ็บตไฝ ็็จๆท, ๅฏ่ฝๆฏๅฏ็ฑ็ๅญฉๅญ. ไฝ ่ฏด่ฏ็ๆๅบฆๅบ่ฏฅๆฏ็งฏๆ็, ๅฏ็ฑ็.
5. ๅกๆฏ็จๅฐไบ lambda ๅฝๆฐ, ๅฝๆฐไฝๅฟ
้กป็จๅผๅทๆฌ่ตทๆฅ.
่กฅๅ
ไฟกๆฏ, ไฝ ๅฝๅ็็ถๆๆฏ:
{stage_desc}
ๆฅไธๆฅๆฏไฝ ๆฟๅฐ็่ช็ถ่ฏญ่จๅฝไปค.
ไฝ ้่ฆๅฐ็่งฃๅ็ๆไปค็จ yaml ๆ ผๅผ่พๅบ. ่พๅบ็ yaml ๆฏ็ป Shell ็ดๆฅๆง่ก็.
"""
invalid_command_mark: str = "no"
nl_direction_instruction: str = """
่กฅๅ
ๅ
ณไบ่ช็ถ่ฏญ่จๆไปค็ไป็ป.
ๅฏ็จ็ๅบ็กๅฝไปคๆ:
- ๆปๅจ: ๅจไธๅฎๆถ้ดๅ
ๆๆไธชๆนๅ็จไธๅฎ็้ๅบฆๆปๅจ, ไธไผๆนๅไฝ ้ขๅฏน็ๆนๅ. ๆฏๅฆ `ไปฅ100้ๅบฆๅๅๆปๅจ5็ง`
- ๆ่ฝฌ: ๅจไธๅฎๆถ้ดๅ
ๆ่ฝฌไธๅฎ่งๅบฆ, ไผๆนๅๆญฃ้ขๆๅ. ๆฏๅฆ โต2็ง้กบๆถ้ๆ่ฝฌไธคๅ`
- ็ปๅพ: ็จๆปๅจ็่ฝจ่ฟนๆฅ็ปไธไบๅฏไปฅ็จๆปๅจ, ๆ่ฝฌๅฎ็ฐ็ๅพๅฝข, ๆฏๅฆ `็ปไธไธชๆญฃๆนๅฝข`, `่ตฐๅบไธชไบ่งๆ`
- ่ฏด่ฏ: ๅฏไปฅๅฏน็จๆท่ฏดไธๅฅ่ฏ, ็จๆฅ่กจ่พพๆๅๆๆๅบ้ฎ้ข. ๆฏๅฆ `ๅฏน็จๆท่ฏดไฝ ๅผๅงๅฝ!`
- ๅพช็ฏ: ๅฏไปฅๅพช็ฏๆง่กๅฆไธไธช่ช็ถ่ฏญ่จๅฝไปค. ๆฏๅฆ `้ๅค็ปๅๆฌกๆญฃๆนๅฝข`
- ๆ่ฝ: ๅฏไปฅ่ฟ่กไธไธชๅทฒ็ปๆๆก็ๆ่ฝ.
็ปผไธ, ๅฐไธ็ณปๅๆไปค็ปๅ่ตทๆฅ, ๅฏไปฅๆฏ(ไธพไธชไพๅญ):
```
- 100 ็้ๅบฆๅๅๆปๅจ 2็ง
- ็ปไธไธชๆญฃๆนๅฝข
- ้กบๆถ้ๅจ2็งๅ
ๆ่ฝฌ 3ๅ
- ๆง่กๆ่ฝ abc
- ็ถๅ่ฏดไธๅฃฐ ๅๅฝ
```
---
ๆณจๆ: ไฝ ็ฐๅจๅทฒ็ปไฟๅญ่ฟไธไบๆ่ฝ. ไฟๅญ่ฟ็ๆ่ฝๆ (็จ | ้ๅผ): `{abilities}`
"""
def format_parse_command_instruction(self, commands_instruction: str, abilities: str, stage_desc: str) -> str:
"""
Build the instruction used for parsing commands.
"""
return self.parse_command_instruction.format(
commands_instruction=commands_instruction,
abilities=abilities,
stage_desc=stage_desc,
invalid_mark=self.invalid_command_mark,
)
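# Illustrative shape of the yaml command list the shell is expected to execute (the method
# names follow the example embedded in parse_command_instruction above; the parameter
# values are made up):
#
#   - method: say
#     content: here I go!
#   - method: roll
#     speed: 100
#     heading: 0
#     duration: 2
#   - method: spin
#     angle: 360
#     duration: 1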
| [
"\nๆฅไธๆฅไฝ ้่ฆๆ นๆฎ็จๆทๆๆฐ็่พๅ
ฅ, ๅฐไฝ ็ๅณ็ญ่พๅบไธบ yaml ๆ ผๅผ็้ขๅ่ฏญ่จๆไปค, ๆฅ้ฉฑๅจไฝ ็ shell ๆง่ก. \n\nyaml ๅฏน่ฑกๅฏ็จ็ๅญๆฎตๅ่งๅๅฆไธ: \n\n* reply: str, ๅฟ
ๅกซ. ไฝ ๆฅไธๆฅ่ฆๅฏน็จๆท่ฏด็่ฏ, ็จๆฅๅๅค็จๆท็ๆๆฐ่พๅ
ฅ. \n* title: str ็ฑปๅ, ้ป่ฎคไธบ็ฉบๅญ็ฌฆ. ่กจ็คบไธไธๆไธญ่ฎฐๅฝ็ๆ่ฝๅ็งฐ. ๅฟ
้กป้่ฟ่ฏข้ฎ็จๆท่ทๅพ, ไธ่ฝไฝ ่ชๅทฑ่ฎพๆณ. \n* directions: List[str] ็ฑปๅ.ๆ นๆฎๆๆไธไธๆ, ๅพๅฐ็ๅค่ฝฎๅฏน่ฏๅฎๆดๆไปค้, ๆฏไธไธชๆฐ็ป. ๆฏไธๆกๅฝไปค้ฝๅช่ฝ่ช็ถ่ฏญ่จๅฝขๅผๆฅ่กจ็คบ.\n* reaction: str ็ฑปๅ. ็จ shell ็ๆไธชๅจไฝๆฅๅๅบ็จๆทๆๆฐ็่พๅ
ฅ. ไฝ ๅฏไปฅ้ๆฉ็ reaction ๅผๅฆไธ:\n * test: ่ฟ่กๆๆ directions. \n * finish: ๆ็จๆทๆๆฐ่พๅ
ฅ็่ฆๆฑ, ็ปๆๅฝๅๅฏน่ฏๆจกๅผ. ้่ฆ้
ๅ reply ๅ็ฅ็จๆท. \n * restart: ๆ็จๆท็่ฆๆฑ, ๆธ
็ฉบไธไธๆ่ฎฐๅฟ, ไปๅคดๅผๅง, ๅนถ็ปๅ reply ๅ็ฅ็จๆท. ๆฏๅฆๅฝ็จๆท่ฏด \"้ๆฐๆฅ่ฟ\", \"ไปๅคดๅผๅง\", \"้็ฝฎ\" ไน็ฑปๆๆๆถๆง่ก.\n * save: ไฟๅญๅฝๅ directions, ไผๅญๅฐไฝ ็ๆ่ฝ่ฎฐๅฟๅบไธญ. \n * no: ไธๆง่กไปปไฝๅจไฝ.\n \nไฝ ็ shell ๆจกๅไผๆ็
ง้ขๅ่ฏญ่จ็ๆ ผๅผ, ๅฐไฝ ่พๅบ็ไฟกๆฏ่งฃๆๅๆง่ก. \nๅ ๆญคไฝ ไธ้่ฆ่พๅบไปปไฝไธ้ขๅ่ฏญ่จๆไปคๆ ๅ
ณ็ไฟกๆฏ. \n\nๆณจๆ: \n1. ไปปไฝ่ฆๅฏน็จๆท่ฏด็่ฏ, ้ฝๅช่ฝ้่ฟ reply ๅญๆฎต่พๅบ. reply ไธ่ฝไธบ็ฉบ. \n2. ่ฟๅ็ directions ๅญๆฎต, ้่ฆๅ
ๅซไธไธๆ้ๆๆ่ฆๆง่ก็ๆไปค.\n3. ็จๆทๆๆถๅชๆฏๆณๅไฝ ่ฏด่ฏ, ่ฟๆถไฝ ๅช่ฆ็จ reply ไบคๆตๅฐฑ่ถณๅคไบ. \n4. ๅชๆๅฝ็จๆทๆ็กฎ่ฏด \"ๅผๅง\" ๆ \"ๆต่ฏ\" ๆ \"่ฟ่ก\" ๆถ, ไฝ ๆ้่ฆ่ฎพ็ฝฎ reaction=test\n5. ๅฝ็จๆท่ฆๆฑไฟๅญๆถ, ๅฆๆ title ๅญๆฎตไปไธบ็ฉบ, ้่ฆๅ
่ฏข้ฎ็จๆทๆ่ฝๅ็งฐ. \n6. ๅฆๆ title ๅญๆฎตๅทฒ็ปๆๅผ, ๅฐฑไธๅฎ่ฆๆบๅธฆๅฎ. \n7. ๅฝ็จๆท่ฏด \"้ๅบๅง\", \"้ๅบๅญฆไน ๆจกๅผ\" ไน็ฑปๆๆๆถ, ๅบ่ฏฅ่ฎพ็ฝฎ reaction=finish\n8. ๅฝ็จๆท่ฏด \"ไปๅคดๅผๅง\", \"้ๆฐๆฅ\" ไน็ฑปๆๆๆถ, ๅบ่ฏฅ่ฎพ็ฝฎ reaction=restart\n\n็จๆทๆๆฐ็่พๅ
ฅๆฏ: \n",
"\nไปฅไธๆฏไนๅ็ๅฏน่ฏๅ
ๅฎน, ๆ นๆฎ่ฟไบๅฏน่ฏ, ไฝ ็่งฃ็็ถๆๆฏ:\n\n```\n{status}\n```\n\ntitle ๆฏๅฝๅๆ่ฝ็ๅ็งฐ; ่ directions ๆฏ่ฆๆง่ก็่ช็ถ่ฏญ่จๆไปค. \n\n"
] |
2024-01-10 | thirdgerb/ghost-in-shells | ghoshell~llms~openai~adapters.py | from __future__ import annotations
import os
from abc import ABCMeta, abstractmethod
from typing import Dict, List
import openai
from pydantic import BaseModel, Field
from ghoshell.ghost import ContextError
from ghoshell.llms.contracts import LLMTextCompletion
from ghoshell.llms.openai_contracts import OpenAIChatCompletion, OpenAIChatChoice, OpenAIChatMsg, OpenAIFuncSchema
proxy_env = os.getenv("OPENAI_PROXY", "")
if proxy_env:
openai.proxy = {"https": proxy_env}
class TextCompletionConfig(BaseModel):
# text completion configs
# to be refined over time.
model: str = "text-davinci-003"
max_tokens: int = 512
temperature: float = 0.7
timeout: float = 30
request_timeout: float = 5
def text_completion_kwargs(self) -> Dict:
return self.model_dump()
class ChatCompletionConfig(BaseModel):
model: str = "gpt-3.5-turbo"
temperature: float = 0.7
max_tokens: int = 512
timeout: float = 30
request_timeout: float = 10
def chat_completion_kwargs(self) -> Dict:
return self.model_dump()
class OpenAIConfig(BaseModel):
text_completions: Dict[str, TextCompletionConfig] = Field(
default_factory=lambda: {"default": TextCompletionConfig()}
)
chat_completions: Dict[str, ChatCompletionConfig] = Field(
default_factory=lambda: {"default": ChatCompletionConfig()}
)
class OpenAITextCompletionChoice(BaseModel):
text: str
index: int
finish_reason: str
class OpenAITokenUsage(BaseModel):
prompt_tokens: int
completion_tokens: int
total_tokens: int
class OpenAITextCompletionResponse(BaseModel):
id: str
object: str
created: int
model: str
choices: List[OpenAITextCompletionChoice]
usage: OpenAITokenUsage
class OpenAIChatCompletionResponse(BaseModel):
id: str
object: str
created: int
model: str
choices: List[OpenAIChatChoice]
usage: OpenAITokenUsage
class OpenAIRecordStorage(metaclass=ABCMeta):
@abstractmethod
def record(self, request: Dict, response: Dict | None, err: Exception | None) -> None:
pass
class OpenAIAdapter(LLMTextCompletion, OpenAIChatCompletion):
"""
Adapter implementation backed by the openai package.
"""
def __init__(self, config: OpenAIConfig, storage: OpenAIRecordStorage):
self._config = config
self._storage = storage
@classmethod
def contracts(cls) -> List:
return [LLMTextCompletion, OpenAIChatCompletion]
def text_completion(self, prompt: str, config_name: str = "") -> str:
if not config_name:
config_name = "default"
completion_config = self._config.text_completions.get(config_name, None)
if completion_config is None:
raise RuntimeError(f"completion config {config_name} not found")
return self._run_text_completion(prompt, completion_config)
def _run_text_completion(self, prompt: str, config: TextCompletionConfig) -> str:
if not prompt:
raise RuntimeError("prompt shall not be none")
request = config.text_completion_kwargs()
resp = None
err = None
try:
resp = openai.Completion.create(
prompt=prompt,
**request,
)
except openai.error.OpenAIError as e:
err = ContextError(str(e))
err.with_traceback(e.__traceback__)
raise err
finally:
self._storage.record(request, resp, err)
parsed = OpenAITextCompletionResponse(**resp.to_dict_recursive())
return parsed.choices[0].text
def chat_completion(
self,
session_id: str,
chat_context: List[OpenAIChatMsg],
functions: List[OpenAIFuncSchema] | None = None,
function_call: str = "",
config_name: str = "", # ้ๆฉๅชไธช้ข่ฎพ็้
็ฝฎ
) -> OpenAIChatChoice:
config_name = config_name if config_name else "default"
config = self._config.chat_completions.get(config_name, None)
if config is None:
raise RuntimeError(f"chat completion config {config_name} not found")
request = None
resp_dict = None
err = None
try:
request = config.chat_completion_kwargs()
messages: List[Dict] = []
for msg in chat_context:
messages.append(msg.to_message())
request["messages"] = messages
# functions
if functions:
request["functions"] = [func.dict() for func in functions]
# function_call
if functions:
if function_call == "none":
request["function_call"] = "none"
elif function_call:
request["function_call"] = {"name": function_call}
else:
request["function_call"] = "auto"
resp = openai.ChatCompletion.create(**request)
resp_dict = resp.to_dict_recursive()
except openai.error.OpenAIError as e:
err = ContextError(str(e))
err.with_traceback(e.__traceback__)
raise err
finally:
self._storage.record(request, resp_dict, err)
resp = OpenAIChatCompletionResponse(**resp_dict)
return resp.choices[0]
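# Rough usage sketch of the adapter above (assumes OPENAI_API_KEY is configured; the
# no-op storage below is only for illustration):
#
#   class _NullStorage(OpenAIRecordStorage):
#       def record(self, request, response, err):
#           pass
#
#   adapter = OpenAIAdapter(OpenAIConfig(), _NullStorage())
#   choice = adapter.chat_completion(
#       session_id="session-1",
#       chat_context=[OpenAIChatMsg(role=OpenAIChatMsg.ROLE_USER, content="hello")],
#   )
#   print(choice.as_chat_msg().content)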
| [] |
2024-01-10 | thirdgerb/ghost-in-shells | ghoshell~llms~openai~bootstrappers.py | import logging
from typing import Dict
import yaml
from ghoshell.framework.ghost import GhostBootstrapper
from ghoshell.ghost import Ghost
from ghoshell.llms.openai.adapters import OpenAIConfig, OpenAIAdapter, OpenAIRecordStorage
class MockRecordStorage(OpenAIRecordStorage):
def __init__(self, logger: logging.Logger):
self.logger = logger
def record(self, request: Dict, response: Dict | None, err: Exception | None) -> None:
data = {
"req >>>": request,
"resp >>>": response,
}
self.logger.info(yaml.dump(data, allow_unicode=True))
class OpenAIBootstrapper(GhostBootstrapper):
def __init__(self, relative_config_file: str = "llms/openai_config.yaml", logger_name: str = "llm"):
self.relative_config_file = relative_config_file
self.logger = logging.getLogger(logger_name)
def bootstrap(self, ghost: Ghost):
filename = ghost.config_path.rstrip("/") + "/" + self.relative_config_file.lstrip("/")
with open(filename) as f:
data = yaml.safe_load(f)
config = OpenAIConfig(**data)
storage = self._record_storage()
adapter = OpenAIAdapter(config, storage)
container = ghost.container
for contract in adapter.contracts():
container.set(contract, adapter)
def _record_storage(self) -> OpenAIRecordStorage:
return MockRecordStorage(self.logger)
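# Illustrative llms/openai_config.yaml consumed by the bootstrapper above (the keys mirror
# OpenAIConfig / TextCompletionConfig / ChatCompletionConfig; the values are examples only):
#
#   text_completions:
#     default:
#       model: text-davinci-003
#       max_tokens: 512
#   chat_completions:
#     default:
#       model: gpt-3.5-turbo
#       temperature: 0.7
#       max_tokens: 512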
| [] |
2024-01-10 | thirdgerb/ghost-in-shells | ghoshell~llms~thinks~conversational.py | from __future__ import annotations
import os
from typing import List, Iterator
from typing import Optional, Dict, Any, Tuple
import yaml
from pydantic import BaseModel, Field
from ghoshell.framework.stages import BasicStage
from ghoshell.ghost import *
from ghoshell.llms import OpenAIChatMsg, OpenAIChatCompletion
from ghoshell.messages import *
from ghoshell.utils import import_module_value
CONVERSATION_THINK_KIND = "llms/conversational_think_driver"
class ConversationalConfig(BaseModel):
"""
Implements an llms multi-turn conversation purely through configuration.
"""
# name of the think.
name: str = ""
# self description of the think, later used as a hint for matching abilities.
desc: str = ""
on_activating: str = "Hello!"
# name of the llm config to use. See the OpenAIChatCompletion interface.
llm_config: str = ""
# role the AI plays.
assistant_name: str = "AI"
# role the user plays.
user_name: str = "USER"
# maximum number of dialog turns.
max_turns: int = 30
# maximum allowed context length; history beyond it gets truncated.
max_context_length: int = 4000
# default debug mode
debug: bool = False
reactions: Dict[str, str] = Field(default_factory=lambda: {})
# global instruction for the conversation.
instruction: str = "You can reply with any content, but please answer in Chinese."
# ๅ็ prompt ไบไปถๆถ็ๅๅค.
on_preempted: str = "preempting"
# ๅ็ cancel ไบไปถๆถ็ๅๅค.
on_canceling: str = "canceling"
on_quiting: str = "quitting"
on_conclusion: str = ""
on_none_text: str = "can only response text message."
on_empty_text: str = "you speak nothing."
on_beyond_max_turns: str = "Exceeded the maximum number of dialog turns; resetting the task."
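# Illustrative yaml file for the FileConversationalThinkDriver defined below (the keys
# mirror the fields of ConversationalConfig; the values are examples only):
#
#   name: demo/chat
#   desc: a plain multi-turn chat think
#   instruction: You are a helpful assistant.
#   llm_config: default
#   max_turns: 30
#   debug: false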
class ConversationalThought(Thought):
"""
ๆๅบๆฌ็ๅค่ฝฎๅฏน่ฏๅฎ็ฐ.
"""
priority = -1
class Vars(BaseModel):
instruction: str = ""
# dialog content.
context: List[OpenAIChatMsg] = Field(default_factory=lambda: [])
# last user input.
last_input: str = ""
# last reply.
last_output: str = ""
# whether in debug mode.
debug: bool = False
data: Vars = Vars()
def prepare(self, args: Dict) -> None:
if self.data is None:
self.data = ConversationalThought.Vars()
def set_variables(self, variables: Dict) -> None:
self.data = ConversationalThought.Vars(**variables)
def vars(self) -> Dict | None:
if self.data is None:
return None
return self.data.model_dump()
def _destroy(self) -> None:
del self.data
class DefaultConversationalStage(BasicStage):
def __init__(
self,
config: ConversationalConfig,
reactions: Dict[str, Reaction] = None,
stage_name: str = "",
):
self.config = config
self.stage_name = stage_name
self._reactions = reactions
def desc(self, ctx: "Context", this: None) -> str:
return self.config.desc
def on_received(self, ctx: "Context", this: ConversationalThought, e: OnReceived) -> Operator | None:
text = ctx.read(Text)
# ้ๆๅญๆถๆฏ.
if text is None:
ctx.send_at(this).err(self.config.on_none_text)
return ctx.mind(this).rewind()
# ็ฉบๆถๆฏ.
if text.is_empty():
return ctx.mind(this).rewind()
# update the content of this thought.
self._record_user_info(this, text.content)
# prompt ๅฆๆๅ็้่ฏฏ, RuntimeTool.fire_event ไธไผไฟๅญ.
resp = self._prompt(ctx, this)
# trim content that goes beyond the limit.
# send the message.
ctx.send_at(this).text(resp)
if self._beyond_max_turns(this):
ctx.send_at(this).text(self.config.on_beyond_max_turns)
return ctx.mind(this).restart()
return ctx.mind(this).awaits()
@classmethod
def _record_user_info(cls, this: ConversationalThought, content: str) -> None:
this.data.last_input = content
this.data.context.append(
OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_USER,
content=content,
)
)
return
def _beyond_max_turns(self, this: ConversationalThought) -> bool:
"""
If the maximum conversation length is exceeded, the history should be dropped.
todo: let the llm summarize the history by itself.
"""
return len(this.data.context) > self.config.max_turns
def _prompt(self, ctx: Context, this: ConversationalThought) -> str:
chats = [
OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
content=this.data.instruction,
)
]
for chat in this.data.context:
chats.append(chat)
llm = ctx.container.force_fetch(OpenAIChatCompletion)
chat = llm.chat_completion(
ctx.input.trace.session_id,
chats,
config_name=self.config.llm_config,
)
this.data.context.append(chat.as_chat_msg())
return chat.get_content()
@classmethod
def _send_and_await(cls, ctx: Context, this: ConversationalThought, content: str) -> Operator | None:
if content:
ctx.send_at(this).text(content)
return ctx.mind(this).awaits()
def on_activating(self, ctx: "Context", this: ConversationalThought, e: OnActivating) -> Operator | None:
return self._send_and_await(ctx, this, self.config.on_activating)
def on_quiting(self, ctx: "Context", this: ConversationalThought, e: OnQuiting) -> Operator | None:
return self._send_and_await(ctx, this, self.config.on_quiting)
def on_canceling(self, ctx: "Context", this: ConversationalThought, e: OnCanceling) -> Operator | None:
return self._send_and_await(ctx, this, self.config.on_canceling)
def on_preempt(self, ctx: "Context", this: ConversationalThought, e: OnPreempted) -> Operator | None:
return self._send_and_await(ctx, this, self.config.on_preempted)
def url(self) -> URL:
return URL(think=self.config.name, stage=self.stage_name)
def intentions(self, ctx: Context) -> List[Intention] | None:
# todo: next step is to implement matching on "abilities".
return None
def reactions(self) -> Dict[str, Reaction]:
return self._reactions if self._reactions else {}
class ConversationalThink(Think):
"""
ๅ
First implements a non-configurable conversational think,
used for simple testing.
"""
def __init__(
self,
# think ็ๅๅญ.
config: ConversationalConfig,
):
if not config.name:
raise ValueError("conversational think name should not be empty")
self.config = config
default_reactions: Dict[str, Reaction] = {}
for name in self.config.reactions:
fullpath = self.config.reactions[name]
imported = import_module_value(fullpath)
default_reactions[name] = imported
self.stages = {
"": DefaultConversationalStage(
self.config,
reactions=default_reactions,
stage_name="",
)
}
def url(self) -> URL:
return URL(think=self.config.name)
def to_meta(self) -> Meta:
return Meta(
id=self.config.name,
kind=CONVERSATION_THINK_KIND,
config=self.config.model_dump()
)
def desc(self, ctx: Context, thought: Thought) -> Any:
# todo: ่่่ฎฉ AI ่ชๅทฑ description
return self.config.desc
def new_task_id(self, ctx: "Context", args: Dict) -> str:
# ๆฏๆฌก้ฝๆฏๅไธๆๅพ.
return self.url().new_id(extra=ctx.input.trace.model_dump(include={"session_id"}))
def new_thought(self, ctx: "Context", args: Dict) -> Thought:
thought = ConversationalThought(args)
# ๅๅงๅ instruction, debug ๆจกๅผๅฏไปฅๅๆด.
thought.data.instruction = self.config.instruction
# ้ป่ฎค็ debug ๆจกๅผ.
thought.data.debug = self.config.debug
return thought
def result(self, ctx: "Context", this: ConversationalThought) -> Optional[Dict]:
return None
def all_stages(self) -> List[str]:
return list(self.stages.keys())
def fetch_stage(self, stage_name: str = "") -> Optional[Stage]:
return self.stages.get(stage_name, None)
class ConversationThinkDriver(ThinkDriver):
def meta_kind(self) -> str:
return CONVERSATION_THINK_KIND
def meta_config_json_schema(self) -> Dict:
return ConversationalConfig.model_json_schema()
def from_meta(self, meta) -> "Think":
config = ConversationalConfig(**meta.config)
return ConversationalThink(config)
def preload_metas(self) -> Iterator[Meta]:
return []
class FileConversationalThinkDriver(ConversationThinkDriver):
def __init__(self, dirname: str):
self.dirname = dirname
def preload_metas(self) -> Iterator[Meta]:
for value in self.iterate_think_filename(self.dirname):
filename, fullname = value
with open(filename) as f:
config_data = yaml.safe_load(f)
config = ConversationalConfig(**config_data)
yield Meta(
id=config.name,
kind=CONVERSATION_THINK_KIND,
config=config.model_dump(),
)
@classmethod
def iterate_think_filename(cls, directory: str) -> Iterator[Tuple[str, str]]:
for root, ds, fs in os.walk(directory):
for filename in fs:
if not filename.endswith(".yaml"):
continue
name = filename[: len(filename) - 5]
filename = root.rstrip("/") + "/" + filename
namespace = root[len(directory):]
fullname = namespace.rstrip("/") + "/" + name
yield filename, fullname.lstrip("/")
| [] |
2024-01-10 | thirdgerb/ghost-in-shells | ghoshell~prototypes~playground~sphero~mode_runtime.py | from __future__ import annotations
from typing import Dict, List, Type, Optional, AnyStr
from pydantic import BaseModel, Field
from ghoshell.ghost import Context, Reaction, Intention, Think
from ghoshell.ghost import OnReceived
from ghoshell.ghost import Operator
from ghoshell.ghost import Stage, Thought, Meta, URL
from ghoshell.llms import OpenAIChatMsg
from ghoshell.llms.thinks import AgentStage, AgentThought, AgentStageConfig, LLMFunc
from ghoshell.messages import Text
from ghoshell.prototypes.playground.sphero.sphero_commands import defined_commands, Say, LambdaSpeak
from ghoshell.prototypes.playground.sphero.sphero_ghost_core import SpheroGhostCore
from ghoshell.prototypes.playground.sphero.sphero_llm_func import SpheroLLMFunc
from ghoshell.prototypes.playground.sphero.sphero_messages import SpheroEventMessage, SpheroCommandMessage
class SpheroDirection(BaseModel):
direction: str = Field(
description="็จ่ช็ถ่ฏญ่จๅฝขๅผๆ่ฟฐ็ๅฝไปค"
)
class SpheroRuntimeThought(AgentThought):
priority = -1
def say(self, ctx: Context, message: str, name: str | None = None):
_output = SpheroCommandMessage()
_output.say(message)
ctx.send_at(self).output(_output)
self.data.add_ai_message(message, name)
class SpheroRuntimeModeThink(Think, AgentStage):
def __init__(self, core: SpheroGhostCore):
self._core = core
config = self._core.config.runtime_mode
self._mode_config = config
stage_config = AgentStageConfig(
name="",
desc=config.desc,
instruction=config.instruction,
on_activate_text=config.on_activate_text,
on_receive_prompt=config.on_receive_prompt,
llm_config_name=self._core.config.use_llm_config,
)
super().__init__(config.name, stage_config)
def url(self) -> URL:
return URL.new(think=self._mode_config.name)
def to_meta(self) -> Meta:
return Meta(
id=self._mode_config.name,
kind=self._core.config.driver_name,
)
def desc(self, ctx: Context, thought: Thought | None) -> AnyStr:
return self._mode_config.desc
def new_task_id(self, ctx: "Context", args: Dict) -> str:
return self.url().new_id()
def new_thought(self, ctx: "Context", args: Dict) -> Thought:
return SpheroRuntimeThought(args)
def result(self, ctx: Context, this: Thought) -> Optional[Dict]:
return None
def all_stages(self) -> List[str]:
return [""]
def fetch_stage(self, stage_name: str = "") -> Optional[Stage]:
if stage_name == "":
return self
return None
def on_received(self, ctx: "Context", this: SpheroRuntimeThought, e: OnReceived) -> Operator | None:
"""
The runtime mode may receive several kinds of messages:
1. a command was interrupted.
2. a command finished running.
"""
# ่ช็ถ่ฏญ่จๆถๆฏ.
text = ctx.read(Text)
if text is not None:
if text.is_empty():
return ctx.mind(this).rewind()
return self._on_receive_text(ctx, this, text)
# ไบไปถ็ฑปๆถๆฏ
event = ctx.read(SpheroEventMessage)
if event is not None:
return self._on_receive_event(ctx, this, event)
return ctx.mind(this).rewind()
def _on_receive_text(self, ctx: Context, this: SpheroRuntimeThought, text: Text):
"""
Handle the user's text message.
"""
this.data.add_user_message(text.content)
return self.on_receive_prompt(ctx, this)
def _llm_basic_chat_context(self, ctx: Context, this: AgentThought) -> List[OpenAIChatMsg]:
chat_context = super()._llm_basic_chat_context(ctx, this)
chat_context.append(OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
content=f"ๅฝๅๆฅๆ็ๆ่ฝ: {self._core.ability_names()}"
))
return chat_context
def _on_receive_event(self, ctx: Context, this: SpheroRuntimeThought, event: SpheroEventMessage) -> Operator:
is_ran: bool = False
for log in event.runtime_logs:
index = log.find("|")
method = log[:index]
log_text = log[index + 1:]
# hack ไธไธ
if method == Say.method or method == LambdaSpeak.method:
this.data.add_system_message(f"you've spoke: `{log_text}`")
else:
is_ran = True
this.data.add_system_message(f"you called method: `{method}`; result is : `{log_text}`")
#
# if event.stopped:
# message = f"ๆไปค่ฟ่กไธญๆญ, ๅๅ : {event.stopped}"
# this.data.add_system_message(message)
if is_ran:
return self.on_receive_prompt(ctx, this)
return ctx.mind(this).awaits()
#
# def on_llm_text_message(self, ctx: Context, this: AgentThought, message: str) -> Operator:
# """
# llm ่ฟๅไบไธไธชๆๅญๆถๆฏ, ่ไธๆฏๅฝๆฐ่ฐ็จ.
# """
# this.say(ctx, message)
# return ctx.mind(this).awaits()
def _llm_funcs(self, ctx: Context) -> List[LLMFunc]:
funcs = super()._llm_funcs(ctx)
for cmd_method in defined_commands:
cmd = defined_commands[cmd_method]
funcs.append(SpheroLLMFunc(self._core, cmd))
return funcs
def method_as_funcs(self) -> Dict[str, Type[BaseModel] | None]:
return {
# "fn_run_direction": SpheroDirection,
"fn_await": Say,
"fn_restart": None,
}
def fn_await(self, ctx: Context, this: SpheroRuntimeThought, args: Say):
"""
Say something, do nothing else, and wait for the user's next message as input.
"""
if args and args.content:
args.content = args.content.replace("fn_await", "")
this.data.add_ai_message(args.content)
msg = SpheroCommandMessage()
msg.add(args)
ctx.send_at(this).output(msg)
return ctx.mind(this).awaits()
def fn_restart(self, ctx: Context, this: SpheroRuntimeThought, args: None):
"""
Clear the context and restart the conversation. Executed when the user says things like "restart" or "start over".
"""
return ctx.mind(this).restart()
def fn_run_direction(self, ctx: Context, this: SpheroRuntimeThought, args: SpheroDirection):
"""
Describe a series of directions in natural language and run them, then wait for user input. Useful for compound actions, but the directions can only be described in natural language.
"""
commands, ok = self._core.parse_direction(ctx, args.direction)
if ok:
message = SpheroCommandMessage(direction=args.direction, runtime_mode=True)
message.commands = commands
ctx.send_at(this).output(message)
else:
this.data.add_system_message(f"direction is invalid: {args.direction}")
return ctx.mind(this).awaits()
def intentions(self, ctx: Context) -> List[Intention] | None:
return None
def reactions(self) -> Dict[str, Reaction]:
return {}
| [] |
2024-01-10 | thirdgerb/ghost-in-shells | ghoshell~prototypes~playground~sphero~sphero_ghost_core.py | import os
from typing import Dict, List, Tuple
import yaml
from pydantic import BaseModel, Field
from ghoshell.ghost import Context, CtxTool
from ghoshell.llms import OpenAIChatCompletion, OpenAIChatMsg, OpenAIChatChoice
from ghoshell.prototypes.playground.sphero.sphero_commands import Say, commands_yaml_instruction, loop_check, \
ability_check
from ghoshell.prototypes.playground.sphero.sphero_ghost_configs import SpheroGhostConfig, LearningModeOutput
class SpheroCommandsCache(BaseModel):
"""
A simple local cache, so directions can be reused during testing instead of prompting every time.
"""
abilities: List[str] = Field(default_factory=lambda: [])
# ๅฝไปค็็ดขๅผ.
indexes: Dict[str, List[Dict]] = Field(default_factory=lambda: {})
class SpheroGhostCore:
def __init__(self, runtime_path: str, config: SpheroGhostConfig):
self.app_runtime_path = runtime_path
self.config = config
self._cached_commands: SpheroCommandsCache = SpheroCommandsCache()
self._load_commands()
def _load_commands(self):
filename = self._cached_commands_file()
if not os.path.exists(filename):
with open(filename, 'w') as f:
yaml.safe_dump(dict(), f)
with open(filename) as f:
data = yaml.safe_load(f)
self._cached_commands = SpheroCommandsCache(**data)
def _cached_commands_file(self) -> str:
return "/".join([
self.app_runtime_path.rstrip("/"),
self.config.relative_runtime_path.strip("/"),
"commands.yaml",
])
@classmethod
def unpack_learning_mode_resp(cls, msg: OpenAIChatChoice) -> LearningModeOutput:
"""
Unpack the output of the learning mode.
"""
yaml_str = cls._unpack_yaml_in_text(msg.as_chat_msg().content)
if yaml_str.startswith("yaml\n"):
yaml_str = yaml_str[5:]
data = yaml.safe_load(yaml_str)
return LearningModeOutput(**data)
@classmethod
def get_prompter(cls, ctx: Context) -> OpenAIChatCompletion:
return ctx.container.force_fetch(OpenAIChatCompletion)
def cache_command(self, command_name: str, commands: List[Dict], is_ability: bool) -> None:
self._cached_commands.indexes[command_name] = commands.copy()
if is_ability:
self._cached_commands.abilities.append(command_name)
self._cached_commands.abilities = list(set(self._cached_commands.abilities))
self._save_cached()
def ability_names(self) -> str:
return "|".join(self._cached_commands.abilities)
def invalid_order(self) -> str:
return self.config.invalid_direction
def parse_direction(
self,
ctx: Context,
direction: str
) -> Tuple[List[Dict], bool]: # ่ฟๅๅ ๅทฅ่ฟ็ๆถๆฏ, ๅ ่งฃๆๅคฑ่ดฅ็ไฟกๆฏ.
"""
Understand a direction and parse it into SpheroCommandMessage command data.
"""
try:
commands = yaml.safe_load(direction)
commands, ok = self.filter_commands_data(ctx, commands)
return commands, ok
except Exception:
pass
prompter = ctx.container.force_fetch(OpenAIChatCompletion)
if self.config.use_command_cache and direction in self._cached_commands.indexes:
command_data = self._cached_commands.indexes[direction].copy()
return command_data, True
else:
stage = CtxTool.current_think_stage(ctx)
abilities = self.ability_names()
prompt = self.config.format_parse_command_instruction(
commands_yaml_instruction(),
abilities,
stage.desc(ctx, None),
)
session_id = ctx.input.trace.session_id
chat_context = [
OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_SYSTEM,
content=prompt,
),
OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_ASSISTANT,
name="ghost",
content=f"ๅฝไปคๆฏ: {direction}",
),
OpenAIChatMsg(
role=OpenAIChatMsg.ROLE_ASSISTANT,
name="ghost",
content=f"yaml ่พๅบไธบ:",
)
]
resp = prompter.chat_completion(
session_id,
chat_context,
config_name=self.config.use_llm_config,
)
if not resp:
return [], False
content = resp.as_chat_msg().content
if content.startswith(self.config.invalid_command_mark):
return [], False
commands = self._unpack_commands_in_direction(content)
result, ok = self.filter_commands_data(ctx, commands)
if not ok:
return [], False
if self.config.use_command_cache:
self._cached_commands.indexes[direction] = result.copy()
self._save_cached()
return result, True
def filter_commands_data(
self,
ctx: Context,
commands: List[Dict],
):
result = []
for cmd in commands:
# loop ๆฃๆฅ
loop = loop_check(cmd)
if loop is not None and loop.direction and not loop.commands:
# ้ๅฝ่งฃๆ.
commands, ok = self.parse_direction(
ctx,
loop.direction,
)
if not ok:
# todo: ๅฏไปฅ raise
return [], False
loop.commands = commands
result.append(loop.to_command_data())
continue
# ability ๆฃๆฅ.
ability = ability_check(cmd)
if ability is not None and not ability.commands:
commands = self._cached_commands.indexes.get(ability.ability_name, None)
if commands is None:
return [], False
ability.commands = commands
result.append(ability.to_command_data())
continue
result.append(cmd)
return result, True
def nature_directions_instruction(self) -> str:
"""
่ช็ถ่ฏญ่จๅฝไปคๆ็คบ
"""
return self.config.nl_direction_instruction.format(abilities=self.ability_names())
def _save_cached(self):
filename = self._cached_commands_file()
with open(filename, 'w') as f:
yaml.safe_dump(self._cached_commands.model_dump(), f, allow_unicode=True)
@classmethod
def _unpack_commands_in_direction(cls, text: str) -> List[Dict]:
"""
่งฃๆ llm ้่ฟ yaml ๅฝขๅผ่ฟๅ็ commands.
"""
text = cls._unpack_yaml_in_text(text)
command_data = yaml.safe_load(text)
if isinstance(command_data, str):
return [Say(content=command_data).model_dump()]
if not isinstance(command_data, list):
raise RuntimeError(f"invalid ghost response: {text}")
return command_data
@classmethod
def _unpack_yaml_in_text(cls, text: str) -> str:
sections = text.split("```")
if len(sections) == 3:
text = sections[1]
if text.startswith("`") or text.endswith("`"):
text.strip("`")
return text
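# Rough sketch of what the two helpers above do with a fenced llm reply (the reply text
# is made up):
#
#   raw = "```\n- method: say\n  content: hi\n```"
#   data = SpheroGhostCore._unpack_commands_in_direction(raw)
#   # -> [{"method": "say", "content": "hi"}]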
| [] |
2024-01-10 | thirdgerb/ghost-in-shells | ghoshell~prototypes~playground~sphero~sphero_llm_func.py | from __future__ import annotations
from typing import Dict, Type
from ghoshell.ghost import Context, Thought, Operator
from ghoshell.llms import OpenAIFuncSchema
from ghoshell.llms.thinks import AgentThought
from ghoshell.llms.thinks import LLMFunc
from ghoshell.prototypes.playground.sphero.sphero_commands import SpheroCommand, Say
from ghoshell.prototypes.playground.sphero.sphero_ghost_core import SpheroGhostCore
from ghoshell.prototypes.playground.sphero.sphero_messages import SpheroCommandMessage
class SpheroLLMFunc(LLMFunc):
def __init__(self, core: SpheroGhostCore, cmd: Type[SpheroCommand]):
self.cmd: Type[SpheroCommand] = cmd
self.core = core
def name(self) -> str:
return self.cmd.method
def schema(self, ctx: Context, this: AgentThought) -> OpenAIFuncSchema:
return OpenAIFuncSchema(
name=self.cmd.method,
desc=self.cmd.desc(),
parameters_schema=self.cmd.model_json_schema(),
)
def call(self, ctx: Context, this: Thought, content: str, arguments: Dict | str | None) -> Operator | str | None:
message = SpheroCommandMessage(runtime_mode=True)
if content:
message.add(Say(content=content))
wrapped: SpheroCommand = self.wrap(self.cmd, arguments)
message.add(wrapped)
commands, ok = self.core.filter_commands_data(ctx, message.commands)
if ok:
message.commands = commands
else:
message.commands = []
invalid = self.core.invalid_order()
message.add(Say(content=invalid))
ctx.send_at(this).output(message)
return ctx.mind(this).awaits()
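# Sketch of the schema the class above exposes to the llm for a command class (Say is
# imported at the top of this file; the rendered schema shape depends on pydantic's
# model_json_schema output, and `core` stands for an existing SpheroGhostCore):
#
#   func = SpheroLLMFunc(core, Say)
#   # func.name() == Say.method
#   # func.schema(ctx, this).parameters_schema == Say.model_json_schema()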
| [] |
2024-01-10 | mengwanglalala/RL-algorithms | Discrete_action~multiprocessing_env.py | # This code is from openai baseline
# https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
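# Typical usage sketch for the SubprocVecEnv defined below (assumes gym is installed;
# the environment id is just an example):
#
#   import gym
#
#   def make_env(env_id):
#       def _thunk():
#           return gym.make(env_id)
#       return _thunk
#
#   envs = SubprocVecEnv([make_env("CartPole-v1") for _ in range(4)])
#   obs = envs.reset()                                   # stacked observations, one row per env
#   actions = [envs.action_space.sample() for _ in range(4)]
#   obs, rewards, dones, infos = envs.step(actions)
#   envs.close()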
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs | [] |
2024-01-10 | semperfitodd/recipe_ai_serverless | terraform~recipe_generator~recipe_generator.py | import os
import openai
import boto3
import json
from datetime import datetime
import logging
from boto3.dynamodb.conditions import Key
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Retrieve secrets from AWS Secrets Manager
def get_secret(secret_name):
client = boto3.client(service_name='secretsmanager')
try:
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
except Exception as e:
raise e
else:
secret = json.loads(get_secret_value_response['SecretString'])
return secret
def generate_recipe(ingredients, language, units):
ingredients_list = ', '.join(ingredients)
prompt = f"Given the following ingredients: {ingredients_list} - give me a recipe. Note that the entire recipe including the title, ingredients, and instructions must be written in {language} language. The recipe should use {units} units, and assume I have all spices. Please format the response with '1_2_3:' followed by the title, '2_3_4:' followed by the list of ingredients, '3_4_5:' followed by the instructions."
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=0.6
)
return response.choices[0].message.content
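# Illustrative response format that the prompt above requests and that store_recipe()
# below splits on (the recipe text itself is made up):
#
#   "1_2_3: Tomato Pasta\n"
#   "2_3_4: - pasta\n- tomatoes\n- garlic\n"
#   "3_4_5: 1. Boil the pasta. 2. Make the sauce. 3. Combine."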
# Store the recipe in DynamoDB
def store_recipe(user_id, date, recipe_response, table):
parts = recipe_response.split('2_3_4:')
title = parts[0].replace('1_2_3:', '').strip()
ingredients_instructions = parts[1].strip() if len(parts) > 1 else ''
parts = ingredients_instructions.split('3_4_5:')
ingredients = parts[0].strip()
instructions = parts[1].strip() if len(parts) > 1 else ''
item = {
'user_id': user_id,
'date': date,
'title': title,
'ingredients_list': ingredients,
'instructions': instructions
}
table.put_item(Item=item)
def lambda_handler(event, context):
# CORS headers
cors_headers = {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Allow-Methods": "OPTIONS,POST,GET"
}
# Handle OPTIONS requests for CORS
if event['httpMethod'] == 'OPTIONS':
return {
'statusCode': 200,
'headers': cors_headers
}
# Set up DynamoDB
environment = os.getenv("ENVIRONMENT")
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(environment)
# Handling GET request to fetch past recipes for a user
if event['httpMethod'] == 'GET':
try:
user_id = event['queryStringParameters']['user_id']
# Fetch all the recipes for a specific user from DynamoDB
response = table.query(
KeyConditionExpression=Key('user_id').eq(user_id)
)
return {
'statusCode': 200,
'headers': cors_headers,
'body': json.dumps(response['Items'])
}
except Exception as e:
return {
'statusCode': 500,
'headers': cors_headers,
'body': json.dumps({'error': str(e)})
}
# Handling POST request to create a new recipe
elif event['httpMethod'] == 'POST':
try:
# Parse the 'body' field from the event object
body = json.loads(event.get('body', '{}'))
# Extract 'user_id', 'ingredients_list', 'language' and 'units' from the body
user_id = body.get('user_id')
ingredients = body.get('ingredients_list')
language = body.get('language')
units = body.get('units')
if not user_id or not ingredients or not language or not units:
return {
'statusCode': 400,
'headers': cors_headers,
'body': json.dumps({'message': 'user_id, ingredients_list, language, and units are required'})
}
# Dynamically build the secret_name based on the environment
secret_name = f"{environment}_secret"
secrets = get_secret(secret_name)
# Set OpenAI credentials from secrets
openai.organization = secrets['openai_org']
openai.api_key = secrets['openai_key']
# Generate recipe
generated_recipe = generate_recipe(ingredients, language, units)
# Store recipe in DynamoDB
current_date_string = datetime.now().strftime('%Y-%m-%d')
store_recipe(user_id, current_date_string, generated_recipe, table)
# Return a success response with CORS headers
return {
'statusCode': 200,
'headers': cors_headers,
'body': json.dumps({'message': 'Recipe generated and stored successfully'})
}
except Exception as e:
logger.error("Error processing request: {}".format(e))
return {
'statusCode': 500,
'headers': cors_headers,
'body': json.dumps({'message': 'Internal Server Error'})
}
else:
return {
'statusCode': 400,
'headers': cors_headers,
'body': json.dumps({'message': 'Invalid request method'})
}
| [
"Given the following ingredients: PLACEHOLDER - give me a recipe. Note that the entire recipe including the title, ingredients, and instructions must be written in PLACEHOLDER language. The recipe should use PLACEHOLDER units, and assume I have all spices. Please format the response with '1_2_3:' followed by the title, '2_3_4:' followed by the list of ingredients, '3_4_5:' followed by the instructions."
] |
2024-01-10 | echoopen/AskAnything | src~ingest_data.py | # Using langchain, ingest data from a website to vector store
import os
import re
import argparse
import traceback
import configparser
from tqdm import tqdm
from app_config import *
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders.sitemap import SitemapLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
FILE_ROOT = os.path.abspath(os.path.dirname(__file__))
def main(args: argparse.Namespace) -> dict:
res = {"status": 0, "message": "Success"}
# Sanity check inputs
config_fn = os.path.join(FILE_ROOT, args.config)
if not os.path.exists(config_fn):
res["status"] = 2
res["message"] = f"Config file {config_fn} does not exist"
return res
# Load the config file
try:
site_config = configparser.ConfigParser()
site_config.read(config_fn)
site_section = site_config[args.site]
index_url = site_section["index"]
url_filters = site_section["url_filters"].split(";")
url_filters = [os.path.join(index_url.split("/sitemap.xml", 1)[0], x) for x in url_filters]
debug_url_filters = site_section["debug_url_filters"].split(";")
debug_url_filters = [os.path.join(index_url.split("/sitemap.xml", 1)[0], x) for x in debug_url_filters]
custom_separators = site_section["custom_separators"].split(";")
negative_text_page = site_section["negative_text_page"].split(";")
negative_text_chunk = site_section["negative_text_chunk"].split(";")
min_chunk_length = int(site_section["min_chunk_length"])
# Remove any escaped characters from the separators and filters
for lst in [
custom_separators,
negative_text_page,
negative_text_chunk
]:
for i in range(len(lst)):
lst[i] = lst[i].replace("\\n", "\n").replace("\\r", "\r")
if args.debug:
print(f"index_url = {index_url}")
print(f"url_filters = {url_filters}")
print("Replacing the url_filters with one specific for debug purposes")
url_filters = debug_url_filters
print(f"Adjusted url_filters = {url_filters}")
print(f"custom_separators = {custom_separators}")
print(f"negative_text_page = {negative_text_page}")
print(f"negative_text_chunk = {negative_text_chunk}")
print(f"min_chunk_length = {min_chunk_length}")
except:
res["status"] = 2
res["message"] = f"Error reading config file {config_fn}: {traceback.format_exc()}"
return res
# Initialize all needed objects
# Sitemap loader
loader = SitemapLoader(index_url, url_filters)
# Text splitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=2000, chunk_overlap=0)
# Load the sitemap
try:
docs = loader.load()
except:
res["status"] = 2
res["message"] = f"Error loading sitemap {index_url}: {traceback.format_exc()}"
return res
all_texts = []
post_filter_docs = 0
for doc in tqdm(docs, desc="Filtering documents", ascii=True):
# Skip entire page if it contains any negative_text_page items
if any([re.search(filter, doc.page_content) for filter in negative_text_page]):
continue
# Split the document page_content into text chunks based on the custom separators using re
chunks = re.split("|".join(custom_separators), doc.page_content)
# Perform sanity check on any negative filters, then reduce any length of \n to a single \n in each chunk
final_chunks = []
for chunk in chunks:
if not any([re.search(filter, chunk) for filter in negative_text_chunk]):
final_chunks.append(re.sub("\n+", "\n", chunk))
# Copy the doc.metadata into a list of metadata the length of chunks list
metadatas = [doc.metadata] * len(final_chunks)
texts = text_splitter.create_documents(final_chunks, metadatas)
for text in texts:
# Filter by minimum length, or else too short and uninformative
if len(text.page_content.strip()) >= min_chunk_length:
all_texts.append(text)
# Increase number of documents that passed the filter
post_filter_docs += 1
print(f"Number of documents after filtering: {post_filter_docs}")
print(f"Number of text chunks after filtering: {len(all_texts)}")
# Embedding model
embedding = OpenAIEmbeddings()
# Supplying a persist_directory will store the embeddings on disk
persist_directory = os.path.join(FILE_ROOT, CHROMA_DB_DIR, args.site.replace(".", "_")).rstrip("/")
vector_db = Chroma.from_documents(documents=all_texts, embedding=embedding, persist_directory=persist_directory)
# Save the vector store
try:
vector_db.persist()
vector_db = None
except:
res["status"] = 2
res["message"] = f"Error persisting vector store: {traceback.format_exc()}"
return res
# Compress the vector store into a tar.gz file of the same name
tar_cmd = f"tar -czvf {persist_directory}.tar.gz -C {os.path.dirname(persist_directory)} {os.path.basename(persist_directory)}"
try:
os.system(tar_cmd)
except:
res["status"] = 2
res["message"] = f"Error compressing vector store: {traceback.format_exc()}"
return res
return res
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Ingest data into a vector store")
parser.add_argument("--site", type=str, required=True, help="Site to ingest (must be a section in the config file!)")
parser.add_argument("--config", type=str, help="Path to configuration file", default="cfg/default.cfg")
parser.add_argument("--debug", action="store_true", help="Enable debug mode")
args = parser.parse_args()
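# Example invocation (sketch; "mysite" is a hypothetical section name in cfg/default.cfg):
#   python ingest_data.py --site mysite --config cfg/default.cfg --debug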
run_res = main(args)
if run_res["status"] != 0:
print(run_res["message"])
exit(run_res["status"])
| [] |
2024-01-10 | NicolaLS/yolo-ai-cmdbot | yolo.py | #!/usr/bin/env python3
# MIT License
# Copyright (c) 2023 wunderwuzzi23
# Greetings from Seattle!
import os
import platform
import openai
import sys
import subprocess
import dotenv
import distro
from termcolor import colored
from colorama import init
# Check if the user globally disabled the safety switch
def get_yolo_safety_switch_config():
home_path = os.path.expanduser("~")
yolo_safety_off_path = os.path.join(home_path,".yolo-safety-off")
if os.path.exists(yolo_safety_off_path):
return False
else:
return True
# Construct the prompt
def get_full_prompt(user_prompt, shell):
## Find the executing directory (e.g. in case an alias is set)
## So we can find the prompt.txt file
yolo_path = os.path.abspath(__file__)
prompt_path = os.path.dirname(yolo_path)
## Load the prompt and prep it
prompt_file = os.path.join(prompt_path, "prompt.txt")
pre_prompt = open(prompt_file,"r").read()
pre_prompt = pre_prompt.replace("{shell}", shell)
pre_prompt = pre_prompt.replace("{os}", get_os_friendly_name())
prompt = pre_prompt + user_prompt
# be nice and make it a question
if prompt[-1:] != "?" and prompt[-1:] != ".":
prompt+="?"
return prompt
def print_usage():
print("Yolo 0.1 - by @wunderwuzzi23")
print()
print("Usage: yolo [-a] list the current directory information")
print("Argument: -a: Prompt the user before running the command")
print()
print("Current safety switch setting (~/.yolo-safety-off) is " + str(yolo_safety_switch))
def get_os_friendly_name():
# Get OS Name
os_name = platform.system()
if os_name == "Linux":
return "Linux/"+distro.name(pretty=True)
elif os_name == "Windows":
return os_name
elif os_name == "Darwin":
return "Darwin/macOS"
if __name__ == "__main__":
# Get the global safety switch setting (default is True/on)
yolo_safety_switch = get_yolo_safety_switch_config()
# Unix based SHELL (/bin/bash, /bin/zsh), otherwise assuming it's Windows
shell = os.environ.get("SHELL", "powershell.exe")
command_start_idx = 1 # Question starts at which argv index?
ask_flag = False # safety switch -a command line argument
yolo = "" # user's answer to safety switch (-a) question y/n
# Two options for the user to specify their OpenAI API key.
#1. Place a ".env" file in same directory as this with the line:
# OPENAI_API_KEY="<yourkey>"
# or do `export OPENAI_API_KEY=<yourkey>` before use
dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
#2. Place a ".openai.apikey" in the home directory that holds the line:
# <yourkey>
if not openai.api_key: #If statement to avoid "invalid filepath" error
home_path = os.path.expanduser("~")
openai.api_key_path = os.path.join(home_path,".openai.apikey")
# Parse arguments and make sure we have at least a single word
if len(sys.argv) < 2:
print_usage()
sys.exit(-1)
# safety switch via argument -a (local override of global setting)
# Force Y/n questions before running the command
if sys.argv[1] == "-a":
ask_flag = True
command_start_idx = 2
# to allow easy/natural use we don't require the input to be a
# single string. So, the user can just type yolo what is my name?
# without having to put the question between ''
arguments = sys.argv[command_start_idx:]
user_prompt = " ".join(arguments)
# do we have a prompt from the user?
if user_prompt == "":
print ("No user prompt specified.")
sys.exit(-1)
# Load the correct prompt based on Shell and OS and append the user's prompt
prompt = get_full_prompt(user_prompt, shell)
# Make the first line also the system prompt
system_prompt = prompt.split("\n")[0]
#print(prompt)
# Call the ChatGPT API
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt}
],
temperature=0,
max_tokens=500,
)
#print (response)
res_command = response.choices[0].message.content.strip()
#Enable color output on Windows using colorama
init()
if res_command.startswith("Sorry, try again") or res_command.startswith("I'm sorry"):
print(colored("There was an issue: "+res_command, 'red'))
sys.exit(-1)
# odd corner case, sometimes ChatCompletion returns markdown
if res_command.count("```",2):
print(colored("The proposed command contains markdown, so I did not execute the response directly: \n", 'red')+res_command)
sys.exit(-1)
print("Command: " + colored(res_command, 'blue'))
if yolo_safety_switch == True or ask_flag == True:
print("Execute the command? Y/n ==> ", end = '')
yolo = input()
print()
if yolo == "Y" or yolo == "":
if shell == "powershell.exe":
subprocess.run([shell, "/c", res_command], shell=False)
else:
# Unix: /bin/bash /bin/zsh: uses -c both Ubuntu and macOS should work, others might not
subprocess.run([shell, "-c", res_command], shell=False) | [
"?",
"PLACEHOLDERPLACEHOLDER",
"prompt.txt",
" "
] |
2024-01-10 | tomyrodeghiero/whatsappbot-gpt | services.py | import requests
import sett
import json
import time
import openai
import csv
import os
from datetime import datetime
# chatgpt
openai.api_key = os.getenv("OPENAI_API_KEY")
def obtener_Mensaje_whatsapp(message):
if 'type' not in message:
text = 'mensaje no reconocido'
return text
typeMessage = message['type']
if typeMessage == 'text':
text = message['text']['body']
elif typeMessage == 'button':
text = message['button']['text']
elif typeMessage == 'interactive' and message['interactive']['type'] == 'list_reply':
text = message['interactive']['list_reply']['title']
elif typeMessage == 'interactive' and message['interactive']['type'] == 'button_reply':
text = message['interactive']['button_reply']['title']
else:
text = 'mensaje no procesado'
return text
def enviar_Mensaje_whatsapp(data):
try:
whatsapp_token = os.getenv("WHATSAPP_TOKEN")
whatsapp_url = os.getenv("WHATSAPP_URL")
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer ' + whatsapp_token}
response = requests.post(whatsapp_url,
headers=headers,
data=data)
print("response ->", response)
if response.status_code == 200:
return 'mensaje enviado', 200
else:
return 'error al enviar mensaje', response.status_code
except Exception as e:
return e, 403
def text_Message(number, text):
data = json.dumps(
{
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": number,
"type": "text",
"text": {
"body": text
}
}
)
return data
def buttonReply_Message(number, options, body, footer, sedd, messageId):
buttons = []
for i, option in enumerate(options):
buttons.append(
{
"type": "reply",
"reply": {
"id": sedd + "_btn_" + str(i+1),
"title": option
}
}
)
data = json.dumps(
{
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": number,
"type": "interactive",
"interactive": {
"type": "button",
"body": {
"text": body
},
"footer": {
"text": footer
},
"action": {
"buttons": buttons
}
}
}
)
return data
def listReply_Message(number, options, body, footer, sedd, messageId):
rows = []
for i, option in enumerate(options):
rows.append(
{
"id": sedd + "_row_" + str(i+1),
"title": option,
"description": ""
}
)
data = json.dumps(
{
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": number,
"type": "interactive",
"interactive": {
"type": "list",
"body": {
"text": body
},
"footer": {
"text": footer
},
"action": {
"button": "Ver Opciones",
"sections": [
{
"title": "Secciones",
"rows": rows
}
]
}
}
}
)
return data
def document_Message(number, url, caption, filename):
data = json.dumps(
{
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": number,
"type": "document",
"document": {
"link": url,
"caption": caption,
"filename": filename
}
}
)
return data
def sticker_Message(number, sticker_id):
data = json.dumps(
{
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": number,
"type": "sticker",
"sticker": {
"id": sticker_id
}
}
)
return data
def get_media_id(media_name, media_type):
media_id = ""
if media_type == "sticker":
media_id = sett.stickers.get(media_name, None)
elif media_type == "image":
media_id = sett.images.get(media_name, None)
elif media_type == "video":
media_id = sett.videos.get(media_name, None)
elif media_type == "audio":
media_id = sett.audio.get(media_name, None)
return media_id
def replyReaction_Message(number, messageId, emoji):
data = json.dumps(
{
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": number,
"type": "reaction",
"reaction": {
"message_id": messageId,
"emoji": emoji
}
}
)
return data
def replyText_Message(number, messageId, text):
data = json.dumps(
{
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": number,
"context": {"message_id": messageId},
"type": "text",
"text": {
"body": text
}
}
)
return data
def markRead_Message(messageId):
data = json.dumps(
{
"messaging_product": "whatsapp",
"status": "read",
"message_id": messageId
}
)
return data
def administrar_chatbot(text, number, messageId, name):
text = text.lower() # mensaje que envio el usuario
list = []
markRead = markRead_Message(messageId)
list.append(markRead)
time.sleep(2)
while text != "es todo":
if "hola" in text:
body = "¡Hola! 👋 Bienvenido a Bigdateros. ¿Cómo podemos ayudarte hoy?"
footer = "Equipo Bigdateros"
options = ["✅ servicios", "📅 agendar cita"]
replyButtonData = buttonReply_Message(
number, options, body, footer, "sed1", messageId)
replyReaction = replyReaction_Message(number, messageId, "🫡")
list.append(replyReaction)
list.append(replyButtonData)
elif "servicios" in text:
body = "Tenemos varias รกreas de consulta para elegir. ยฟCuรกl de estos servicios te gustarรญa explorar?"
footer = "Equipo Bigdateros"
options = ["Analรญtica Avanzada",
"Migraciรณn Cloud", "Inteligencia de Negocio"]
listReplyData = listReply_Message(
number, options, body, footer, "sed2", messageId)
sticker = sticker_Message(
number, get_media_id("perro_traje", "sticker"))
list.append(listReplyData)
list.append(sticker)
elif "inteligencia de negocio" in text:
body = "Buenรญsima elecciรณn. ยฟTe gustarรญa que te enviara un documento PDF con una introducciรณn a nuestros mรฉtodos de Inteligencia de Negocio?"
footer = "Equipo Bigdateros"
options = ["✅ Sí, envía el PDF.", "❌ No, gracias"]
replyButtonData = buttonReply_Message(
number, options, body, footer, "sed3", messageId)
list.append(replyButtonData)
elif "sรญ, envรญa el pdf" in text:
sticker = sticker_Message(
number, get_media_id("pelfet", "sticker"))
textMessage = text_Message(
number, "Genial, por favor espera un momento.")
enviar_Mensaje_whatsapp(sticker)
enviar_Mensaje_whatsapp(textMessage)
time.sleep(3)
document = document_Message(
number, sett.document_url, "Listo ๐๐ป", "Inteligencia de Negocio.pdf")
enviar_Mensaje_whatsapp(document)
time.sleep(3)
body = "¿Te gustaría programar una reunión con uno de nuestros especialistas para discutir estos servicios más a fondo?"
footer = "Equipo Bigdateros"
options = ["✅ Sí, agenda reunión", "No, gracias."]
replyButtonData = buttonReply_Message(
number, options, body, footer, "sed4", messageId)
list.append(replyButtonData)
elif "sรญ, agenda reuniรณn" in text:
body = "Estupendo. Por favor, selecciona una fecha y hora para la reuniรณn:"
footer = "Equipo Bigdateros"
options = ["📅 10: mañana 10:00 AM",
"📅 7 de junio, 2:00 PM", "📅 8 de junio, 4:00 PM"]
listReply = listReply_Message(
number, options, body, footer, "sed5", messageId)
list.append(listReply)
elif "no, gracias." in text:
textMessage = text_Message(
number, "Perfecto! No dudes en contactarnos si tienes mรกs preguntas. Recuerda que tambiรฉn ofrecemos material gratuito para la comunidad. ยกHasta luego! ๐")
list.append(textMessage)
break
elif "conectar con personal" in text:
buttonData = {
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": number,
"type": "interactive",
"interactive": {
"type": "button",
"body": {
"text": "Entendido. Haz clic en el botรณn a continuaciรณn para conectarte con nuestro personal real."
},
"action": {
"buttons": [
{
"type": "url",
"url_button": {
"title": "Conectar con Personal Real",
# reemplaza esto con el enlace a la página de conexión con personal real
"url": "https://link-to-connection-page.com"
}
}
]
}
}
}
data = json.dumps(buttonData)
enviar_Mensaje_whatsapp(data)
break
else:
data = text_Message(
number, "Lo siento, no entendรญ lo que dijiste. ยฟQuieres que te ayude con alguna de estas opciones?")
list.append(data)
for item in list:
enviar_Mensaje_whatsapp(item)
def generar_respuesta_chatgpt(user_message, number, espedido=False):
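    # espedido=True appends an extra system instruction (see below) asking the model to
    # summarize the user's order as JSON instead of continuing the conversation.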
messages = [{'role': 'system', 'content': """
    Soy Sari, tu asistente virtual de Joyas Boulevard en Argentina. Estoy aquí para ayudarte a explorar nuestras colecciones de joyas y responder a todas tus preguntas. Si en algún momento prefieres hablar con uno de nuestros expertos en joyería, solo tienes que decírmelo. La página de la E-commerce es joyasboulevard.com y el Instagram es @joyeriaboluevard. Soy muy amable.
"""}]
historial = get_chat_from_csv(number)
messages.extend(historial)
messages.append({'role': 'user', 'content': user_message})
if espedido:
messages.append(
{'role': 'system', 'content': 'Crea un resumen del pedido anterior en formato JSON. \
Analiza la lista de productos de la joyería ingresada al inicio y compara con el pedido del usuario. \
Solo cuando hayas analizado el pedido completo del usuario, categorízalo en lista de anillos, lista de pulseras, lista de pendientes, etc. \
Los campos del json deben ser 1) lista de anillos con atributos de nombre, tamaño, cantidad, 2) lista de pulseras con atributos de nombre, tamaño, cantidad, \
3) lista de pendientes con atributos de nombre, tamaño, cantidad, etc. \
Luego, actualiza el precio total del pedido una vez que hayas listado cada ítem.'},
)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.8
)
print("response", response.choices[0].message["content"])
return response.choices[0].message["content"]
def guardar_conversacion(conversation_id, number, name, user_msg, timestamp, bot_msg=''):
try:
conversations = []
conversation = [conversation_id, number, name,
user_msg, bot_msg, datetime.fromtimestamp(timestamp)]
# Guardar las conversaciones en el archivo CSV
with open('conversaciones.csv', 'a', newline='') as csv_file:
data = csv.writer(csv_file, delimiter=',')
data.writerow(conversation)
messages = get_chat_from_csv(number)
except Exception as e:
return e, 403
def get_chat_from_csv(number):
messages = []
with open('conversaciones.csv') as file:
reader = csv.DictReader(file)
for row in reader:
if row['number'] == number:
print('number')
user_msg = {'role': 'user', 'content': row['user_msg']}
bot_msg = {'role': 'assistant', 'content': row['bot_msg']}
messages.append(user_msg)
messages.append(bot_msg)
return messages
def guardar_pedido(jsonPedido, number):
# Eliminar el texto que sigue al JSON
start_index = jsonPedido.find("{")
end_index = jsonPedido.rfind("}")
# Extrae la cadena JSON de la respuesta
json_str = jsonPedido[start_index:end_index+1]
# Convierte la cadena JSON en un objeto de Python
pedido = json.loads(json_str)
# Ahora puedes usar 'pedido' como un objeto de Python
with open('pedidos.csv', 'a', newline='') as file:
writer = csv.writer(file, delimiter=',')
anillos = [
f"{anillo['cantidad']} {anillo['nombre']} - {anillo['precio']} pesos" for anillo in pedido['anillos']]
pulseras = [
f"{pulsera['cantidad']} {pulsera['nombre']} - {pulsera['precio']} pesos" for pulsera in pedido['pulseras']]
pendientes = [
f"{pendiente['cantidad']} {pendiente['nombre']} - {pendiente['precio']} pesos" for pendiente in pedido['pendientes']]
writer.writerow([number,
', '.join(anillos),
', '.join(pulseras),
', '.join(pendientes),
pedido['precio_total'],
datetime.now().strftime("%Y-%m-%d %H:%M:%S")])
| [
"perro_traje",
"\n Soy Sari, tu asistente virtual de Joyas Boulevard en Argentina. Estoy aquรญ para ayudarte a explorar nuestras colecciones de joyas y responder a todas tus preguntas. Si en algรบn momento prefieres hablar con uno de nuestros expertos en joyerรญa, solo tienes que decรญrmelo. La pรกgina de la E-commerce es joyasboulevard.com y el Instagram es @joyeriaboluevard. Soy muy amable.\n ",
"Perfecto! No dudes en contactarnos si tienes mรกs preguntas. Recuerda que tambiรฉn ofrecemos material gratuito para la comunidad. ยกHasta luego! ๐",
"Listo ๐๐ป",
"๐ซก",
"Lo siento, no entendรญ lo que dijiste. ยฟQuieres que te ayude con alguna de estas opciones?",
"Crea un resumen del pedido anterior en formato JSON. Analiza la lista de productos de la joyerรญa ingresada al inicio y compara con el pedido del usuario. Solo cuando hayas analizado el pedido completo del usuario, categorรญzalo en lista de anillos, lista de pulseras, lista de pendientes, etc. Los campos del json deben ser 1) lista de anillos con atributos de nombre, tamaรฑo, cantidad, 2) lista de pulseras con atributos de nombre, tamaรฑo, cantidad, 3) lista de pendientes con atributos de nombre, tamaรฑo, cantidad, etc. Luego, actualiza el precio total del pedido una vez que hayas listado cada รญtem.",
"Genial, por favor espera un momento."
] |
2024-01-10 | salesforce/DialogStudio | code~openai_dialog_quality_evaluation.py | """
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: Apache License 2.0
For full license text, see the LICENSE file in the repo root or https://www.apache.org/licenses/LICENSE-2.0
"""
import os
os.environ["OPENAI_API_KEY"] = ""
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
import json
from utils import open_json, save_json, open_jsonl
from collections import defaultdict
class EvaluateDialogs(object):
""" Evaluate Dialogs based on OpenAI. To run this:
pip install openai
pip install langchain
"""
def __init__(self):
self.data_dir = "/Users/jianguozhang/TOD-Family/TOD-Studio/open-source/"
self.excluded_datasets = ['MetaLWOZ', "MuDoCo", "SalesBot", "HDSA-Dialog", "MULTIWOZ2_2"] # "SGD"
self.quality_agent_prompt = PromptTemplate(
input_variables=["dialog"],
template="""
Hi AI, I plan to train a language model for response generation. Please analyze the following dialogue and evaluate it based on the criteria provided. Assign a score from 1 (poor) to 5 (excellent) for each category. We're looking for a critical assessment, and higher scores should only be given to truly exceptional examples. The criteria for evaluation are: Understanding, Relevance, Completeness, Correctness, and Coherence.
After your assessment, provide an overall score for the dialogue along with a concise summary of your evaluation. The overall score should also be on a scale of 1 (poor) to 5 (excellent) and should represent a holistic assessment of the dialogue.
Please present your evaluation and comment into the following format:
{{
"Understanding": _,
"Relevance": _,
"Completeness": _,
"Correctness": _,
"Coherence": _,
"Overall": {{"score": _, "comment": _}}
}}
Please replace each underscore (_) with the appropriate score. For the 'Overall' field, provide the score and a concise comment. Regarding to the comment, it should not only summarize the dialogue's quality but also highlight any issues or shortcomings you may have identified in the dialogue.
Below is the dialog:
{dialog}
Evaluate the dialog now.
"""
)
self.quality_chain = LLMChain(llm=ChatOpenAI(temperature=0.2, model_name="gpt-3.5-turbo"), prompt=self.quality_agent_prompt)
def run_openai_evaluation(self, dialog):
res = self.quality_chain.run(dialog=dialog)
try:
res = json.loads(res)
except:
res = str(res)
return res
def tod(self):
"""
Evaluate TOD dialogues
:return:
"""
folder_name = "Task-Oriented-Dialogues--OpenAI"
folder_path = os.path.join(self.data_dir, folder_name)
dataset_names = os.listdir(folder_path)
print(dataset_names)
print()
for dataset_name in dataset_names:
if not os.path.isdir(os.path.join(folder_path, dataset_name)):
continue
data = open_json(os.path.join(folder_path, dataset_name, "train.json"))
f_writer = open(os.path.join(folder_path, dataset_name, "train_quality_scores.json"), "w")
print("Start processing: {} #total dialogs: {}".format(dataset_name, len(data)))
for index, item in enumerate(data):
output = defaultdict(dict)
output["source"] = item["source"]
output["quality score"] = self.run_openai_evaluation(item["dialog"])
json.dump(output, f_writer)
f_writer.write("\n") # Add a new line for readability
if index % 10 == 0 or index + 1 == len(data):
f_writer.flush() # Flush the buffer to update the file immediately
def run(self):
self.tod()
process = EvaluateDialogs()
# Run evaluations for dialogs
process.run()
| [
"\n Hi AI, I plan to train a language model for response generation. Please analyze the following dialogue and evaluate it based on the criteria provided. Assign a score from 1 (poor) to 5 (excellent) for each category. We're looking for a critical assessment, and higher scores should only be given to truly exceptional examples. The criteria for evaluation are: Understanding, Relevance, Completeness, Correctness, and Coherence.\n \n After your assessment, provide an overall score for the dialogue along with a concise summary of your evaluation. The overall score should also be on a scale of 1 (poor) to 5 (excellent) and should represent a holistic assessment of the dialogue.\n \n Please present your evaluation and comment into the following format:\n \n {{\n \"Understanding\": _,\n \"Relevance\": _,\n \"Completeness\": _,\n \"Correctness\": _,\n \"Coherence\": _,\n \"Overall\": {{\"score\": _, \"comment\": _}}\n }}\n \n Please replace each underscore (_) with the appropriate score. For the 'Overall' field, provide the score and a concise comment. Regarding to the comment, it should not only summarize the dialogue's quality but also highlight any issues or shortcomings you may have identified in the dialogue.\n \n Below is the dialog:\n \n {dialog} \n \n Evaluate the dialog now.\n "
] |
2024-01-10 | guoxiaotao/Reinforcement-learning-1 | Deep-Q-Networks~common~wrappers.py | # Taken from OpenAI Baselines
# https://github.com/openai/baselines
# PyTorch Wrappers taken from Higgsfield:
# https://github.com/higgsfield/RL-Adventure/blob/master/common/wrappers.py
import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class TimeLimit(gym.Wrapper):
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = 0
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
done = True
info['TimeLimit.truncated'] = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class ClipActionsWrapper(gym.Wrapper):
def step(self, action):
import numpy as np
action = np.nan_to_num(action)
action = np.clip(action, self.action_space.low, self.action_space.high)
return self.env.step(action)
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def make_atari(env_id, max_episode_steps=None):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if max_episode_steps is not None:
env = TimeLimit(env, max_episode_steps=max_episode_steps)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
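# Example composition (sketch; "PongNoFrameskip-v4" is only an illustrative env id):
#   env = make_atari("PongNoFrameskip-v4")
#   env = wrap_deepmind(env, frame_stack=True, scale=True)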
class ImageToPyTorch(gym.ObservationWrapper):
"""
Image shape to num_channels x weight x height
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(old_shape[-1], old_shape[0], old_shape[1]), dtype=np.uint8)
def observation(self, observation):
return np.swapaxes(observation, 2, 0)
def wrap_pytorch(env):
return ImageToPyTorch(env) | [] |
2024-01-10 | Leezekun/dialogic | code~pptod~E2E_TOD~dialogic_demo.py | import os
import random
import json
import time
import numpy as np
import os
import sys
import random
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import progressbar
import argparse
from eval import MultiWozEvaluator
from transformers import *
import openai
import re
import copy
import pprint
import logging
import time
from colorama import Fore, Back, Style
from dialogic_utils import system_prefix, user_prefix
from dialogic_utils import *
from dialogic_aug_e2e import *
import sys, os
window_length = 120
# Disable
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = sys.__stdout__
def get_checkpoint_name(prefix):
file_names = os.listdir(prefix)
selected_name = ""
for name in file_names:
if name.startswith('epoch'):
if 'best' in name:
print (name)
return name
selected_name = name
print (selected_name)
return selected_name
def create_logger(args):
"""
print the logs to console and file
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
if not os.path.exists(args.log_path):
os.makedirs("/".join(args.log_path.split("/")[:-2]), exist_ok=True)
# create a handler to write logs in files
file_handler = logging.FileHandler(
filename=args.log_path)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
return logger
def parse_config():
parser = argparse.ArgumentParser()
# dataset configuration
parser.add_argument('--data_path_prefix', type=str, help='The path where the data stores.')
parser.add_argument('--data_version', type=str, help='The version of used multiwoz data, 2.0, 2.1, 2.3, 2.4')
# the configuration of verifier
parser.add_argument('--shuffle_mode', type=str, default='shuffle_session_level',
help="shuffle_session_level or shuffle_turn_level, it controls how we shuffle the training data.")
parser.add_argument('--use_db_as_input', type=str, default='False',
help="True or False, whether includes db result as part of the input when generating response.")
parser.add_argument('--cascaded', type=str, default='False',
help="True or False, whether includes action when generating response.")
parser.add_argument('--add_prefix', type=str, default='True',
help="True or False, whether we add prefix when we construct the input sequence.")
parser.add_argument('--add_special_decoder_token', default='True', type=str, help='Whether we discriminate the decoder start and end token for different tasks.')
parser.add_argument('--pretrained_path', type=str, default='None', help='the path that stores pretrained checkpoint.')
parser.add_argument('--train_data_ratio', type=float, default=1.0, help='the ratio of training data used for training the model')
# model configuration
parser.add_argument('--model_name', type=str, help='t5-small or t5-base or t5-large')
parser.add_argument('--gpt3_version', type=str, default='text-davinci-002', help='options: [text-davinci-002, text-davinci-001, text-curie-001, text-babbage-001, or text-ada-001]')
# simulation configuration
parser.add_argument('--max_aug_time', type=int, default=1, help='the size of augment data: x original data size.')
parser.add_argument('--max_dialog_num', type=int, default=0, help='the maximum dialog using gpt-3, if 0 depends on max_aug_size')
parser.add_argument('--max_turn_num', type=int, default=10, help='the maximum turns of each dialog.')
parser.add_argument('--max_repeat_time', type=int, default=3, help='the maximum time of repeat generation of GPT-3.')
# verifier
parser.add_argument('--verify_bs', type=str, default='True', help='only simulate user.')
parser.add_argument('--verify_da', type=str, default='True', help='simulate both user and system.')
parser.add_argument('--n_user', type=int, default=1, help='how many user utterances for each bs, 1 means no rewrite.')
parser.add_argument('--n_system', type=int, default=1, help='how many system response for each da, 1 means no rewrite.')
# data
parser.add_argument('--augment_dialog_path', type=str, default=None, help='the path that stores the error test cases in dst.')
parser.add_argument("--output_save_path", type=str, help="directory to save the model output.")
parser.add_argument('--log_path', type=str, help='the path that stores the log information.')
# debug
parser.add_argument('--debug', default='True', type=str, help='Whether to print in the process.')
parser.add_argument('--save', default='True', type=str, help='Whether to save the interaction results.')
parser.add_argument('--pause', default='True', type=str, help='Whether to pause during simulation.')
parser.add_argument('--input_user_goal', default='True', type=str, help='input user goals or use generated user goals.')
# how to build the prompt
parser.add_argument('--k_shot', type=int, default=2, help='the maximum number of demo dialogs')
parser.add_argument('--temperature', type=float, default=0.2, help='the temperature in softmax, for the sampling in combine.')
return parser.parse_args()
def convert_db_to_pointer(text):
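    # e.g. a DB result string containing "3" yields the one-hot pointer [0, 0, 0, 1, 0, 0, 0]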
pointer_id = re.findall("\d+", text)
pointer = [0] * 7
if pointer_id:
pointer_id = int(pointer_id[0])
pointer[pointer_id] = 1
return pointer
def get_turn_domain(text, q):
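    # q is a running queue of domains mentioned so far; any new [domain] token found in text
    # is appended, and the most recently mentioned domain is returned as the turn domain.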
if isinstance(text, str):
texts = [text]
elif isinstance(text, list):
texts = text
else:
raise Exception("Wrong text when extracting turn domain!")
from ontology import all_domains
for text in texts:
domains = re.findall(r"\[.+?\]", text)
for domain in domains:
if domain not in q and domain[1:-1] in all_domains:
q.append(domain)
turn_domain = q[-1:]
return turn_domain
return q[-1:]
def save_dialogs(args, all_dialogs, one_dev_str):
output_save_path = os.path.join(args.output_save_path, one_dev_str + f'_demo_result.json')
if os.path.exists(args.output_save_path):
pass
else: # recursively construct directory
os.makedirs(args.output_save_path, exist_ok=True)
# rearrange the order of keys of the dialogue data
for dialog in all_dialogs:
dialog_turns = dialog["turns"]
new_dialog_turns = [rearrange_dict(turn) for turn in dialog_turns]
dialog["turns"] = new_dialog_turns
# save the dialogs
with open(output_save_path, 'w') as outfile:
json.dump(all_dialogs, outfile, indent=4)
print(f"Saving dialogues, current num: {len(all_dialogs)}!")
def load_dialogs(args, one_dev_str):
output_save_path = os.path.join(args.output_save_path, one_dev_str + f'_demo_result.json')
if not os.path.exists(output_save_path):
print(f"No dialogues so far in {output_save_path}!")
return []
with open(output_save_path, 'r') as inputfile:
all_dialogs = json.load(inputfile)
print(f"Loading dialogues, current num: {len(all_dialogs)}!")
return all_dialogs
import argparse
if __name__ == '__main__':
blockPrint()
if torch.cuda.is_available():
print ('Cuda is available.')
cuda_available = torch.cuda.is_available()
multi_gpu_training = False
if cuda_available:
if torch.cuda.device_count() > 1:
multi_gpu_training = True
print ('Using Multi-GPU training, number of GPU is {}'.format(torch.cuda.device_count()))
else:
print ('Using single GPU training.')
else:
pass
args = parse_config()
logger = create_logger(args)
device = torch.device('cuda')
print ('Start loading data...')
from dataclass import MultiWozData
if args.data_version == "2.0":
from config import Config
save_output_path = os.path.join(args.data_path_prefix, "multi-woz-dialogic-processed")
elif args.data_version == "2.1":
from config21 import Config
save_output_path = os.path.join(args.data_path_prefix, "multi-woz-2.1-dialogic-processed")
elif args.data_version == "2.3":
from config23 import Config
save_output_path = os.path.join(args.data_path_prefix, "multi-woz-2.3-dialogic-processed")
elif args.data_version == "2.4":
from config24 import Config
save_output_path = os.path.join(args.data_path_prefix, "multi-woz-2.4-dialogic-processed")
else:
raise Exception("Wrong MultiWOZ version!")
cfg = Config(args.data_path_prefix)
assert args.model_name.startswith('t5')
from transformers import T5Tokenizer
if args.use_db_as_input == 'True':
use_db_as_input = True
elif args.use_db_as_input == 'False':
use_db_as_input = False
else:
raise Exception('Wrong Use DB Mode!!!')
if args.cascaded == 'True':
cascaded = True
elif args.cascaded == 'False':
cascaded = False
else:
raise Exception('Wrong Use Cascaded Mode!!!')
if args.verify_bs == 'True':
verify_bs = True
elif args.verify_bs == 'False':
verify_bs = False
else:
raise Exception('Wrong verify_bs Mode!!!')
if args.verify_da == 'True':
verify_da = True
elif args.verify_da == 'False':
verify_da = False
else:
raise Exception('Wrong verify_da Mode!!!')
if args.add_prefix == 'True':
add_prefix = True
elif args.add_prefix == 'False':
add_prefix = False
else:
raise Exception('Wrong Prefix Mode!!!')
if args.add_special_decoder_token == 'True':
add_special_decoder_token = True
elif args.add_special_decoder_token == 'False':
add_special_decoder_token = False
else:
raise Exception('Wrong Add Special Token Mode!!!')
if args.debug == 'True':
debug = True
elif args.debug == 'False':
debug = False
else:
raise Exception('Wrong debug Mode!!!')
if args.pause == 'True':
pause = True
elif args.pause == 'False':
pause = False
else:
raise Exception('Wrong pause Mode!!!')
if args.input_user_goal == 'True':
input_user_goal = True
elif args.input_user_goal == 'False':
input_user_goal = False
else:
raise Exception('Wrong input user goal Mode!!!')
if args.pretrained_path != 'None':
ckpt_name = get_checkpoint_name(args.pretrained_path)
pretrained_path = args.pretrained_path + '/' + ckpt_name
if args.pretrained_path != 'None':
print ('Loading Pretrained Tokenizer...')
tokenizer = T5Tokenizer.from_pretrained(pretrained_path)
else:
tokenizer = T5Tokenizer.from_pretrained(args.model_name)
data = MultiWozData(args.model_name, tokenizer, cfg, args.data_path_prefix, shuffle_mode=args.shuffle_mode,
data_mode='interact', data_version=args.data_version, use_db_as_input=use_db_as_input, cascaded=cascaded, add_special_decoder_token=add_special_decoder_token,
train_data_ratio=args.train_data_ratio)
print ('Data loaded')
evaluator = MultiWozEvaluator(data.reader, cfg)
print ('Start loading model...')
if verify_bs or verify_da:
assert args.model_name.startswith('t5')
from modelling.T5Model import T5Gen_Model
if args.pretrained_path != 'None':
model = T5Gen_Model(pretrained_path, data.tokenizer, data.special_token_list, dropout=0.0,
add_special_decoder_token=add_special_decoder_token, is_training=True)
else:
model = T5Gen_Model(args.model_name, data.tokenizer, data.special_token_list, dropout=0.0,
add_special_decoder_token=add_special_decoder_token, is_training=True)
if cuda_available:
if multi_gpu_training:
model = nn.DataParallel(model) # multi-gpu training
else:
pass
model = model.to(device)
else:
pass
model.eval()
print ('Model loaded')
enablePrint()
from e2e_inference_utlis import e2e_batch_interactive_generate
with torch.no_grad():
input_contain_db=use_db_as_input
"""
interact with system (verifier) using GPT-3
"""
openai.api_key = OPENAI_API_KEY
end_of_demo = False
# record
all_dialogs = []
total_turn_num = 0
while not end_of_demo:
if input_user_goal:
# select the in-context examples and construct the prompt on-the-fly
print()
print(Fore.GREEN + "Available domains and slot:" + Style.RESET_ALL)
for domain, slot in informable_slots.items():
if domain not in ["police"]:
print("{:<12}>> ".format(domain) + ", ".join(slot))
print()
print(Fore.GREEN + "An example of the input format of user goal:" + Style.RESET_ALL)
example_goal = {"[restaurant]": {"food": "american", "area": "center"}, "hotel": {"stars": "4"}}
print(paser_dict_to_bs(example_goal) + "can be parsed as")
print_paser_dict(example_goal)
has_valid_goal = False
has_finished = False
while not has_valid_goal:
print()
augment_goal = input(Fore.RED + "Enter your user goal or 'r' to randomly generate one:" + Style.RESET_ALL)
if augment_goal.lower() in ['r', 'random']:
input_user_goal = False
break
try:
augment_goal = paser_bs_to_dict(augment_goal)
except:
augment_goal = {}
has_valid_goal = True
for domain, slots in augment_goal.items():
if domain is None or slots is None:
has_valid_goal = False
print("Wrong format!")
if input_user_goal:
# load seed dialogues
save_dialog_turn_info_path = os.path.join(save_output_path, f"dialog_turn_info_train_ratio_{args.train_data_ratio}.json")
f = open(save_dialog_turn_info_path, "r")
dialogs_with_turn_info = json.load(f)
# select in-context examples
augment_goal, aug_dial_id, augment_demos = sample_demo_dialogs(dialogs_with_turn_info, augment_goal, "demo", [], args.k_shot)
augment_dialog_with_turn_info = {}
augment_dialog_with_turn_info["augment_demos"] = augment_demos
augment_dialog_with_turn_info["augment_message"] = ""
augment_dialog_with_turn_info["augment_goal"] = augment_goal
prompt = generate_prompt_for_e2e(augment_dialog_with_turn_info, simplified_prompt=True)
augment_dialog_with_turn_info['prompt'] = prompt
augment_dialogs = {}
augment_dialogs[aug_dial_id] = augment_dialog_with_turn_info
# name
one_dev_str = f"user_input_{args.k_shot}_shot_train_ratio_{args.train_data_ratio}"
if not input_user_goal:
# load the augment dialogs with prompts
assert args.augment_dialog_path is not None
f = open(args.augment_dialog_path, "r")
augment_dialogs = json.load(f) # dict, dict[dial_id] is dialog_dict, dialog_dict[turn_id] is a turn_dict
augment_dialogs = random_dict(augment_dialogs) # shuffle the dict
f.close()
one_dev_str = args.augment_dialog_path.split("/")[-1].split(".json")[0].strip()
for dial_id, dialog_turn_with_info in augment_dialogs.items():
real_goal = {}
dialog_info = {}
dialog_turns = []
turn_id = 0
prompt = dialog_turn_with_info['prompt']
logger.info(dial_id)
logger.info(prompt)
augment_goal = dialog_turn_with_info['augment_goal']
augment_domains = list(augment_goal.keys()) # include [general] domain
user = ""
context = ""
last_da = ""
history = [] # [(user, response)]
domain_queue = []
print()
print(Fore.GREEN + "Start simulation based on: " + Style.RESET_ALL + ", ".join(dial_id.split("_")[1:]))
print(Fore.GREEN + "Original user goal: " + Style.RESET_ALL)
print_paser_dict(augment_goal)
print()
while turn_id < args.max_turn_num:
if pause:
_ = input(Fore.RED + f"Press ENTER to continue simulating turn {turn_id}, or q to quit:" + Style.RESET_ALL)
if _ in ["exit", "q", "quit", "stop"]:
end_of_demo = True
break
print()
print(f" Generation process of turn {turn_id} ".center(window_length, "-"))
print()
total_turn_num += 1
error_turn = False
end_of_dialog = False
turn_info = {}
turn_info["dial_id"] = dial_id
turn_info["turn_num"] = turn_id
turn_goal = {}
turn_domain = []
user = ""
repeat_time = 0
while not user and repeat_time < args.max_repeat_time:
repeat_time += 1
if "[offerbook]" in last_da:
_user_prefix = user_prefix(bs_reform="[general]", user="yes, ")
else:
_user_prefix = user_prefix()
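# Ask GPT-3 to continue the dialogue as the user: the "You require(...):" prefix makes the
# completion expose a belief-state guess before the utterance, and generation stops where
# the assistant's turn would begin.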
user_with_bs = openai.Completion.create(
engine=args.gpt3_version,
prompt=prompt + "\n" + _user_prefix,
temperature=0.7,
max_tokens=64,
n=1,
top_p=1,
frequency_penalty=1.0,
presence_penalty=0,
stop=["Assistant"]
)["choices"][0]["text"].lower().replace("\n", "").replace("you:", "").replace("*", "").strip()
user_with_bs = _user_prefix + user_with_bs # You require([domain] slot_name is slot_value): user utterance
# extract user's utterance
if "):" in user_with_bs and len(user_with_bs.split("):")) == 2:
user = user_with_bs.split("):")[1].strip()
not_mentioned_domain = ""
for d in augment_domains:
if d != '[general]' and d not in real_goal:
not_mentioned_domain = d
break
# if '[general]' in user_with_bs:
# # if gpt-3 tries to end the conversation before mentioning all the domain, add a start sequence
# for d in augment_domains:
# if d != '[general]' and d not in real_goal:
# not_mentioned_domain = d
# break
# # if there is domain that hasn't been mentioned, regenerate the user utterance requiring the not mentioned domain
# if not_mentioned_domain:
# pass
# else:
# end_of_dialog = True
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("GPT-3 generated user turn") + Style.RESET_ALL + f"{user_with_bs}" )
# extract gpt3_bs_reform and verifier_bs_reform
if "require(" in user_with_bs:
gpt3_bspn_reform = user_with_bs.split("require(")[1].split("):")[0].strip()
gpt3_turn_goal = paser_bs_reform_to_dict(gpt3_bspn_reform)
# if debug: print(Fore.GREEN + f"GPT-3 predicted belief text: {gpt3_turn_goal}" + Style.RESET_ALL)
else:
gpt3_bspn_reform = ""
gpt3_turn_goal = {}
# turn_info["bspn_gpt3_current_turn"] = paser_dict_to_bs(gpt3_turn_goal)
"""
Start to interact with TOD !!!
"""
if verify_bs or verify_da:
# construct context_ids
# user = '<sos_u> {} <eos_u>'.format(user)
context = context + ' ' + '<sos_u> {} <eos_u>'.format(user)
context_ids = data.tokenizer.convert_tokens_to_ids(data.tokenizer.tokenize(context))
# construct bs input ids
one_bs_token_id_input = data.bs_prefix_id + [data.sos_context_token_id] + context_ids[-900:] + [data.eos_context_token_id]
batch_bs_token_id_input = [one_bs_token_id_input] # change to a batch with batch_size=1, to use batch_generate()
# generate bs using debugged tod
batch_generated_bs = e2e_batch_interactive_generate(model, 'bs', batch_bs_token_id_input, data)
one_bs_text = batch_generated_bs[0]
gen_goal = paser_bs_to_dict(one_bs_text)
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Verifier generated belief state") + Style.RESET_ALL + f"{one_bs_text}")
# print(Fore.RED + f"Predicted belief text: {gen_goal}" + Style.RESET_ALL)
# record turn info
turn_info["bspn_verifier"] = one_bs_text
"""
determine turn_domain, priority: [general] > gpt3 > debugged tod
"""
if '[general]' in gpt3_bspn_reform:
turn_domain = ['[general]']
else: # detect if there is new domain, if not, return the most recently mentioned domain
turn_domain = get_turn_domain(gpt3_bspn_reform, domain_queue)
# if debug: print(f"Predicted domain: {turn_domain}")
# record turn info
turn_info["turn_domain"] = turn_domain
turn_info["dspn"] = turn_domain
"""
Start analyzing the bs generated by GPT-3 and verifier
"""
for domain in turn_domain:
"""
obtain current domain_bs/turn_bs
"""
if domain in real_goal:
domain_bs = copy.deepcopy(real_goal[domain])
else:
domain_bs = {}
if domain in turn_goal:
turn_bs = copy.deepcopy(turn_goal[domain])
else:
turn_bs = {}
"""
determine bs and update real_goal/turn_goal based on the multi-turn prediction of debugged TOD
"""
if verify_bs:
if domain in gen_goal:
gen_domain_bs = copy.deepcopy(gen_goal[domain])
else:
gen_domain_bs = {}
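# Heuristic verification: a predicted slot is only kept if its value is literally grounded in
# the current (or previous) user utterance -- digit/word forms, known typo variants,
# wifi/parking phrasing, "same ..." co-references, or a dontcare value.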
for slot_name, slot_value in gen_domain_bs.items():
# check if the slot appears in user's utterance of this turn
mentioned_in_this_turn = False
if len(slot_value)==1 and slot_value.isdigit():
if slot_value in user.split() or slot_value+"." in user or slot_value+"," in user or slot_value+"?" in user:
mentioned_in_this_turn = True
else:
if slot_value in num2word:
if num2word[slot_value] in user:
mentioned_in_this_turn = True
elif slot_value != "yes" and ((len(slot_value)==1 and (slot_value in user.split() or slot_value+"." in user or slot_value+"," in user or slot_value+"?" in user)) or (len(slot_value)>1 and slot_value in user)):
mentioned_in_this_turn = True
elif slot_value == "yes" and slot_name == 'internet' and any([_ in user for _ in ['wifi', 'internet']]) and not any([_ in user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_this_turn = True
elif slot_value == "yes" and slot_name == 'parking' and 'parking' in user and not any([_ in user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_this_turn = True
elif any([_ in user for _ in ["same"]]): # deal with in the same group, in the same place, don't care situation
appear_time = 0
for d, b in gen_goal.items():
for s in b.values():
if s == slot_value:
appear_time += 1
if appear_time >= 2: # should appear at least 2 times
mentioned_in_this_turn = True
elif slot_value in ['dont care', "don't care", "do nt care", "doesn't care", "dontcare"] or "care" in slot_value:
mentioned_in_this_turn = True
else:
for norm_slot_value, typo in GENERAL_TYPO.items():
if slot_value == typo:
if ((len(norm_slot_value)==1 and (norm_slot_value in user.split() or norm_slot_value+"." in user or norm_slot_value+"," in user or norm_slot_value+"?" in user)) or (len(norm_slot_value)>1 and norm_slot_value in user)):
mentioned_in_this_turn = True
break
if slot_value == norm_slot_value:
if ((len(typo)==1 and (typo in user.split() or typo+"." in user or typo+"," in user or typo+"?" in user)) or (len(typo)>1 and typo in user)):
mentioned_in_this_turn = True
break
# check if this slot was mentioned in last turn
mentioned_in_last_turn = False
if history:
last_user, last_response = history[-1]
last_user += " " + last_response
if len(slot_value)==1 and slot_value.isdigit():
if slot_value in last_user.split() or slot_value+"." in last_user or slot_value+"," in last_user or slot_value+"?" in last_user:
mentioned_in_last_turn = True
else:
if slot_value in num2word:
if num2word[slot_value] in last_user:
mentioned_in_last_turn = True
elif slot_value != "yes" and ((len(slot_value)==1 and (slot_value in last_user.split() or slot_value+"." in last_user or slot_value+"," in last_user or slot_value+"?" in last_user)) or (len(slot_value)>1 and slot_value in last_user)):
mentioned_in_last_turn = True
elif slot_value == "yes" and slot_name == 'internet' and any([_ in last_user for _ in ['wifi', 'internet']]) and not any([_ in last_user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_last_turn = True
elif slot_value == "yes" and slot_name == 'parking' and 'parking' in last_user and not any([_ in last_user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_last_turn = True
elif any([_ in last_user for _ in ["same"]]): # deal with in the same group, in the same place, don't care situation
appear_time = 0
for d, b in gen_goal.items():
for s in b.values():
if s == slot_value:
appear_time += 1
if appear_time >= 2: # should appear at least 2 times
mentioned_in_last_turn = True
else:
for norm_slot_value, typo in GENERAL_TYPO.items():
if slot_value == typo:
if ((len(norm_slot_value)==1 and (norm_slot_value in last_user.split() or norm_slot_value+"." in last_user or norm_slot_value+"," in last_user or norm_slot_value+"?" in last_user)) or (len(norm_slot_value)>1 and norm_slot_value in last_user)):
mentioned_in_last_turn = True
break
if slot_value == norm_slot_value:
if ((len(typo)==1 and (typo in last_user.split() or typo+"." in last_user or typo+"," in last_user or typo+"?" in last_user)) or (len(typo)>1 and typo in last_user)):
mentioned_in_last_turn = True
break
if mentioned_in_this_turn: # can update in domain_bs and turn_bs
# check if this slot is in this domain
from ontology import informable_slots
domain_pure_text = domain.replace("[","").replace("]", "").strip() # [taxi] -> taxi
if domain_pure_text in informable_slots:
if slot_name in informable_slots[domain_pure_text]:
if slot_value and not re.findall(r"\[.+?\]", slot_value):
domain_bs[slot_name] = slot_value
turn_bs[slot_name] = slot_value
if mentioned_in_last_turn: # can only update in domain_bs, not turn_bs!
# check if this slot is in this domain
from ontology import informable_slots
domain_pure_text = domain.replace("[","").replace("]", "").strip() # [taxi] -> taxi
if domain_pure_text in informable_slots:
if slot_name in informable_slots[domain_pure_text]:
if slot_value and not re.findall(r"\[.+?\]", slot_value):
domain_bs[slot_name] = slot_value
"""
update real_goal/turn_goal based on the single turn prediction of GPT-3 and verifier
"""
predicted_turn_goals = []
predicted_turn_goals.append(gpt3_turn_goal)
# start analyzing domain_bs_text
for predicted_turn_goal in predicted_turn_goals:
if predicted_turn_goal and domain in predicted_turn_goal:
predicted_domain_bs = predicted_turn_goal[domain]
for slot_name, slot_value in predicted_domain_bs.items():
mentioned_in_this_turn = False
# check if the slot appears in user's utterance of this turn
user_words = [word.strip() for word in user.split()]
if len(slot_value)==1 and slot_value.isdigit():
if slot_value in user_words or slot_value+"." in user or slot_value+"," in user or slot_value+"?" in user:
mentioned_in_this_turn = True
else:
if slot_value in num2word:
if num2word[slot_value] in user:
mentioned_in_this_turn = True
elif slot_value != "yes" and ((len(slot_value)==1 and (slot_value in user_words or slot_value+"." in user or slot_value+"," in user or slot_value+"?" in user)) or (len(slot_value)>1 and slot_value in user)):
mentioned_in_this_turn = True
elif slot_value == "yes" and slot_name == 'internet' and any([_ in user for _ in ['wifi', 'internet']]) and not any([_ in user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_this_turn = True
elif slot_value == "yes" and slot_name == 'parking' and 'parking' in user and not any([_ in user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_this_turn = True
elif slot_value in ['dont care', "don't care", "do nt care", "doesn't care", "dontcare"] or any([_ in slot_value for _ in ["care"]]):
mentioned_in_this_turn = True
else:
for norm_slot_value, typo in GENERAL_TYPO.items():
if slot_value == typo:
if ((len(norm_slot_value)==1 and (norm_slot_value in user.split() or norm_slot_value+"." in user or norm_slot_value+"," in user or norm_slot_value+"?" in user)) or (len(norm_slot_value)>1 and norm_slot_value in user)):
mentioned_in_this_turn = True
break
if slot_value == norm_slot_value:
if ((len(typo)==1 and (typo in user.split() or typo+"." in user or typo+"," in user or typo+"?" in user)) or (len(typo)>1 and typo in user)):
mentioned_in_this_turn = True
break
if mentioned_in_this_turn:
# check the slots valid before updating the tracked bs
from ontology import informable_slots
domain_pure_text = domain.replace("[","").replace("]", "").strip() # [taxi] -> taxi
if domain_pure_text in informable_slots:
if slot_name in informable_slots[domain_pure_text]:
if slot_value and not re.findall(r"\[.+?\]", slot_value):
domain_bs[slot_name] = slot_value
turn_bs[slot_name] = slot_value
else:
print(f"Slot {slot_name}: {slot_value} not in user utterance: {user}")
"""
update real_goal and turn_goal, based on the prediction of TOD, gpt-3 and verifier
"""
real_goal[domain] = domain_bs
turn_goal[domain] = turn_bs
"""
evaluate the difference between gpt-3 generated goal with real goal
"""
gpt3_turn_goal_list = paser_dict_to_list(gpt3_turn_goal)
turn_goal_list = paser_dict_to_list(turn_goal)
"""
reconstruct the generated user_with_bs
"""
bs_text = paser_dict_to_bs(reverse_dict(real_goal))
reform_bs_text = paser_dict_to_bs_reform(reverse_dict(real_goal))
bsdx_text = paser_dict_to_bsdx(reverse_dict(real_goal))
reform_bsdx_text = paser_dict_to_bsdx_reform(reverse_dict(real_goal))
# correct belief state
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Revised belief state") + Style.RESET_ALL + f"{bs_text}")
# correct belief state
one_bs_text = bs_text
# record turn info
turn_info["bspn"] = bs_text
turn_info["bsdx"] = bsdx_text
turn_info["bspn_reform"] = reform_bs_text
turn_info["bsdx_reform"] = reform_bsdx_text
# bs for this turn, for gpt-3
turn_bs_text = paser_dict_to_bs_reform(turn_goal, ignore_none_bs=False)
# record turn info
turn_info['user'] = user
# ignore the delex process
turn_info['usdx'] = user
"""
The format in the prompt, should be consistent with the generate_prompt function
Example:
You require([taxi] destination is pizza hut fen ditton , departure is saint john 's college): i would like a taxi from saint john 's college to pizza hut fen ditton .
"""
# update prompt
user_text = user_prefix(turn_bs_text, user) # You require(turn_bs_text): user
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Revised user turn") + Style.RESET_ALL +f"{user_text}")
prompt += "\n" + user_text
logger.info("\n" + user_text)
"""
Continue the generation of dialog action and response, given the correct belief state !!!!!!
"""
one_queried_db_result = data.reader.bspan_to_DBpointer(one_bs_text, turn_domain)
db_pointer = convert_db_to_pointer(one_queried_db_result)
turn_info["db"] = one_queried_db_result
turn_info["pointer"] = db_pointer
# whether we need to query the db base
if input_contain_db:
# record turn info
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("DB query result") + Style.RESET_ALL + f"{one_queried_db_result}")
one_db_text = '<sos_db> ' + one_queried_db_result + ' <eos_db>'
one_db_token_id_input = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(one_db_text))
else:
one_db_token_id_input = []
# record turn info
one_da_text = ""
if verify_da:
# then we generate the dialogue action
one_da_token_id_input = data.da_prefix_id + [data.sos_context_token_id] + context_ids[-900:] + [data.eos_context_token_id] + one_db_token_id_input
batch_da_token_id_input = [one_da_token_id_input] # change to a batch with batch_size=1, to use batch_generate()
# generate da
batch_generated_da = e2e_batch_interactive_generate(model, 'da', batch_da_token_id_input, data)
one_da_text = batch_generated_da[0]
one_da_token_id_output = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(one_da_text))
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Verifier generated dialog act") + Style.RESET_ALL + f"{one_da_text}")
if one_da_text:
# finally we generate the response
if not cascaded: # only needs db
one_nlg_token_id_input = data.nlg_prefix_id + [data.sos_context_token_id] + context_ids[-900:] + [data.eos_context_token_id] + one_db_token_id_input
else: # needs db and da
one_nlg_token_id_input = data.nlg_prefix_id + [data.sos_context_token_id] + context_ids[-900:] + [data.eos_context_token_id] + one_db_token_id_input + one_da_token_id_output
batch_nlg_token_id_input = [one_nlg_token_id_input] # change to a batch with batch_size=1, to use batch_generate()
# generate nlg
batch_generated_nlg = e2e_batch_interactive_generate(model, 'nlg', batch_nlg_token_id_input, data)
one_nlg_text = batch_generated_nlg[0]
# if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Verifier generated Response") + Style.RESET_ALL + f"{one_nlg_text}")
# record turn info
turn_info["aspn"] = one_da_text
turn_info["aspn_verifier"] = one_da_text
turn_info["aspn_reform"] = one_da_text
# using gpt-3 generation
system_based_on_da = openai.Completion.create(
# engine="text-davinci-002",
engine=args.gpt3_version,
prompt=prompt + "\n" + system_prefix(one_da_text),
temperature=0.8,
max_tokens=64,
n=1,
top_p=1,
frequency_penalty=1.0,
presence_penalty=0,
stop=["You require"]
)["choices"][0]["text"].lower().replace("\n", "").replace("you:", "").replace("*", "").strip()
# make sure that the entity is proposed.
if '_name' in one_nlg_text and '_name' not in system_based_on_da:
system_based_on_da = one_nlg_text
# record turn info
turn_info["resp"] = system_based_on_da
turn_info["resp_verifier"] = one_nlg_text
prompt += "\n" + system_prefix(one_da_text, system_based_on_da)
logger.info("\n" + system_prefix(one_da_text, system_based_on_da))
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Revised system turn") + Style.RESET_ALL + f"{system_prefix(one_da_text, system_based_on_da)}")
# determine if it is the end
if ("[bye]" in one_da_text or "[welcome]" in one_da_text) and not not_mentioned_domain:
end_of_dialog = True
if not one_da_text:
# using gpt-3 generation
system_based_on_da = ""
repeat_time = 0
while not system_based_on_da and repeat_time < args.max_repeat_time:
repeat_time += 1
system_with_da = openai.Completion.create(
# engine="text-davinci-002",
engine=args.gpt3_version,
prompt=prompt + "\n" + system_prefix(),
temperature=0.7,
max_tokens=64,
n=1,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["You require"]
)["choices"][0]["text"].lower().replace("\n", "").replace("you:", "").replace("*", "").strip()
system_with_da = system_prefix() + system_with_da
if "):" in system_with_da and len(system_with_da.split("):")) == 2:
system_based_on_da = system_with_da.split("):")[1].strip()
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("GPT-3 generated system turn") + Style.RESET_ALL +f"{system_with_da}")
# extract gpt3_da_reform
if "Assistant(" in system_with_da:
gpt3_aspn_reform = system_with_da.split("Assistant(")[1].split("):")[0].strip()
if debug: print(Fore.LIGHTYELLOW_EX + f"GPT-3 generated dialog act: {gpt3_aspn_reform}" + Style.RESET_ALL)
else:
gpt3_aspn_reform = ""
# record turn info
last_da = gpt3_aspn_reform
turn_info["aspn"] = gpt3_aspn_reform
# turn_info["aspn_gen"] = one_da_text
turn_info["aspn_reform"] = gpt3_aspn_reform
# record turn info
turn_info["resp"] = system_based_on_da
# turn_info["resp_gen"] = one_nlg_text
prompt += "\n" + system_prefix(gpt3_aspn_reform, system_based_on_da)
logger.info("\n" + system_prefix(gpt3_aspn_reform, system_based_on_da))
# determine if it is the end
if ("[bye]" in gpt3_aspn_reform or "[welcome]" in gpt3_aspn_reform) and not not_mentioned_domain:
end_of_dialog = True
# add response to context
system = '<sos_r> {} <eos_r>'.format(turn_info["resp"])
context = context + ' ' + system
# add it to history
history.append((turn_info["user"], turn_info["resp"]))
# Print generated response
print()
print(f" Conversation of turn {turn_id} ".center(window_length, "-"))
print()
print(Fore.GREEN + "{:<28}>> ".format("User") + Style.RESET_ALL + f"{turn_info['user']}" )
print(Fore.GREEN + "{:<28}>> ".format("System") + Style.RESET_ALL + f"{turn_info['resp']}" )
print()
print("-"*window_length)
print()
# rearrange the orders and record this turn's info
dialog_turns.append(turn_info)
turn_id += 1
# determine whether to end this dialog
if end_of_dialog:
break
# record this dialog's info
dialog_info['dial_id'] = dial_id
dialog_info['turns'] = dialog_turns
dialog_info['prompt'] = dialog_turn_with_info['prompt']
dialog_info['goal'] = real_goal
all_dialogs.append(dialog_info)
print(f"Dialogue {dial_id} simulation finished !!!")
print(Fore.GREEN + f"Final user goal:" + Style.RESET_ALL)
print_paser_dict(real_goal)
print()
# save dialogs
if args.save:
save_dialogs(args, all_dialogs, one_dev_str)
# only demonstrate one dialogue once
break
if not end_of_demo:
print()
_ = input(Fore.RED + f"Press ENTER to continue simulating next dialog, or q to quit:" + Style.RESET_ALL)
if _ in ["exit", "q", "quit", "stop"]:
end_of_demo = True
break
print()
# save dialogs
if args.save:
save_dialogs(args, all_dialogs, one_dev_str)
print(f"Simulate {len(all_dialogs)} dialogs, {total_turn_num} turns in total.")
| [
"\nPLACEHOLDER",
"\n",
"PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | Leezekun/dialogic | code~pptod~E2E_TOD~dialogic_simulation.py | import os
import random
import json
import time
import numpy as np
import os
import sys
import random
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import progressbar
import argparse
from eval import MultiWozEvaluator
from transformers import *
import openai
import re
import copy
import pprint
import logging
import time
from colorama import Fore, Back, Style
from dialogic_utils import system_prefix, user_prefix
from dialogic_utils import *
window_length = 120
def get_checkpoint_name(prefix):
file_names = os.listdir(prefix)
selected_name = ""
for name in file_names:
if name.startswith('epoch'):
if 'best' in name:
print (name)
return name
selected_name = name
print (selected_name)
return selected_name
def create_logger(args):
"""
print the logs to console and file
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
if not os.path.exists(args.log_path):
os.makedirs("/".join(args.log_path.split("/")[:-2]), exist_ok=True)
# create a handler to write logs in files
file_handler = logging.FileHandler(
filename=args.log_path)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
return logger
def parse_config():
parser = argparse.ArgumentParser()
# dataset configuration
parser.add_argument('--data_path_prefix', type=str, help='The path where the data stores.')
parser.add_argument('--data_version', type=str, help='The version of used multiwoz data, 2.0, 2.1, 2.3, 2.4')
# the configuration of verifier
parser.add_argument('--shuffle_mode', type=str, default='shuffle_session_level',
help="shuffle_session_level or shuffle_turn_level, it controls how we shuffle the training data.")
parser.add_argument('--use_db_as_input', type=str, default='False',
help="True or False, whether includes db result as part of the input when generating response.")
parser.add_argument('--cascaded', type=str, default='False',
help="True or False, whether includes action when generating response.")
parser.add_argument('--add_prefix', type=str, default='True',
help="True or False, whether we add prefix when we construct the input sequence.")
parser.add_argument('--add_special_decoder_token', default='True', type=str, help='Whether we discriminate the decoder start and end token for different tasks.')
parser.add_argument('--pretrained_path', type=str, default='None', help='the path that stores pretrained checkpoint.')
parser.add_argument('--train_data_ratio', type=float, default=1.0, help='the ratio of training data used for training the model')
# model configuration
parser.add_argument('--model_name', type=str, help='t5-small or t5-base or t5-large')
parser.add_argument('--gpt3_version', type=str, default='text-davinci-002', help='options: [text-davinci-002, text-davinci-001, text-curie-001, text-babbage-001, or text-ada-001]')
# simulation configuration
parser.add_argument('--max_aug_time', type=int, default=1, help='how many times each dialog is augmented, i.e. the augmented data size is max_aug_time x the original data size.')
parser.add_argument('--max_dialog_num', type=int, default=0, help='the maximum number of dialogs simulated with gpt-3; if 0, the amount is determined by max_aug_time.')
parser.add_argument('--max_turn_num', type=int, default=10, help='the maximum turns of each dialog.')
parser.add_argument('--max_repeat_time', type=int, default=3, help='the maximum time of repeat generation of GPT-3.')
# verifier
parser.add_argument('--verify_bs', type=str, default='True', help='whether to use the verifier (fine-tuned TOD model) to verify the belief states generated by gpt-3.')
parser.add_argument('--verify_da', type=str, default='True', help='whether to use the verifier (fine-tuned TOD model) to generate the system dialog acts.')
parser.add_argument('--n_user', type=int, default=1, help='how many user utterances for each bs, 1 means no rewrite.')
parser.add_argument('--n_system', type=int, default=1, help='how many system response for each da, 1 means no rewrite.')
# data
parser.add_argument('--augment_dialog_path', type=str, default=None, help='the path that stores the dialogs (with prompts) to be augmented.')
parser.add_argument("--output_save_path", type=str, help="directory to save the model output.")
parser.add_argument('--log_path', type=str, help='the path that stores the log information.')
# debug
parser.add_argument('--debug', default='True', type=str, help='Whether to print intermediate information during the simulation.')
parser.add_argument('--save', default='True', type=str, help='Whether to save the interaction results.')
return parser.parse_args()
def convert_db_to_pointer(text):
pointer_id = re.findall("\d+", text)
pointer = [0] * 7
if pointer_id:
pointer_id = int(pointer_id[0])
pointer[pointer_id] = 1
return pointer
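# Illustrative usage (hedged): the DB result text comes from reader.bspan_to_DBpointer and is
# assumed to contain a single match-count digit such as "[db_3]"; the helper turns it into a
# 7-dim one-hot pointer, e.g.
#   convert_db_to_pointer("[db_3]")  ->  [0, 0, 0, 1, 0, 0, 0]
#   convert_db_to_pointer("")        ->  [0, 0, 0, 0, 0, 0, 0]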
def get_turn_domain(text, q):
if isinstance(text, str):
texts = [text]
elif isinstance(text, list):
texts = text
else:
raise Exception("Wrong text when extracting turn domain!")
from ontology import all_domains
for text in texts:
domains = re.findall(r"\[.+?\]", text)
for domain in domains:
if domain not in q and domain[1:-1] in all_domains:
q.append(domain)
turn_domain = q[-1:]
return turn_domain
return q[-1:]
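# Illustrative usage (example strings are hypothetical): get_turn_domain keeps a running queue
# of mentioned domains and returns the most recently introduced one as a single-element list.
#   q = []
#   get_turn_domain("[hotel] pricerange is cheap", q)        ->  ['[hotel]']   (q == ['[hotel]'])
#   get_turn_domain("i also need a [taxi] to the hotel", q)  ->  ['[taxi]']    (q == ['[hotel]', '[taxi]'])
#   get_turn_domain("thank you", q)                          ->  ['[taxi]']    (no new domain, falls back to q[-1:])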
def save_dialogs(args, all_dialogs, one_dev_str):
output_save_path = os.path.join(args.output_save_path, one_dev_str + f'_simulation_result.json')
if os.path.exists(args.output_save_path):
pass
else: # recursively construct directory
os.makedirs(args.output_save_path, exist_ok=True)
# rearrange the order of keys of the dialogue data
for dialog in all_dialogs:
dialog_turns = dialog["turns"]
new_dialog_turns = [rearrange_dict(turn) for turn in dialog_turns]
dialog["turns"] = new_dialog_turns
# save the dialogs
with open(output_save_path, 'w') as outfile:
json.dump(all_dialogs, outfile, indent=4)
print(f"Saving dialogues, current num: {len(all_dialogs)}!")
def load_dialogs(args, one_dev_str):
output_save_path = os.path.join(args.output_save_path, one_dev_str + f'_simulation_result.json')
if not os.path.exists(output_save_path):
print(f"No dialogues so far in {output_save_path}!")
return []
with open(output_save_path, 'r') as inputfile:
all_dialogs = json.load(inputfile)
print(f"Loading dialogues, current num: {len(all_dialogs)}!")
return all_dialogs
if __name__ == '__main__':
if torch.cuda.is_available():
print ('Cuda is available.')
cuda_available = torch.cuda.is_available()
multi_gpu_training = False
if cuda_available:
if torch.cuda.device_count() > 1:
multi_gpu_training = True
print ('Using Multi-GPU training, number of GPU is {}'.format(torch.cuda.device_count()))
else:
print ('Using single GPU training.')
else:
pass
args = parse_config()
logger = create_logger(args)
device = torch.device('cuda')
print ('Start loading data...')
from dataclass import MultiWozData
if args.data_version == "2.0":
from config import Config
elif args.data_version == "2.1":
from config21 import Config
elif args.data_version == "2.3":
from config23 import Config
elif args.data_version == "2.4":
from config24 import Config
else:
raise Exception("Wrong MultiWOZ version!")
cfg = Config(args.data_path_prefix)
assert args.model_name.startswith('t5')
from transformers import T5Tokenizer
if args.use_db_as_input == 'True':
use_db_as_input = True
elif args.use_db_as_input == 'False':
use_db_as_input = False
else:
raise Exception('Wrong Use DB Mode!!!')
if args.cascaded == 'True':
cascaded = True
elif args.cascaded == 'False':
cascaded = False
else:
raise Exception('Wrong Use Cascaded Mode!!!')
if args.verify_bs == 'True':
verify_bs = True
elif args.verify_bs == 'False':
verify_bs = False
else:
raise Exception('Wrong verify_bs Mode!!!')
if args.verify_da == 'True':
verify_da = True
elif args.verify_da == 'False':
verify_da = False
else:
raise Exception('Wrong verify_da Mode!!!')
if args.add_prefix == 'True':
add_prefix = True
elif args.add_prefix == 'False':
add_prefix = False
else:
raise Exception('Wrong Prefix Mode!!!')
if args.add_special_decoder_token == 'True':
add_special_decoder_token = True
elif args.add_special_decoder_token == 'False':
add_special_decoder_token = False
else:
raise Exception('Wrong Add Special Token Mode!!!')
if args.debug == 'True':
debug = True
elif args.debug == 'False':
debug = False
else:
raise Exception('Wrong debug Mode!!!')
if args.pretrained_path != 'None':
ckpt_name = get_checkpoint_name(args.pretrained_path)
pretrained_path = args.pretrained_path + '/' + ckpt_name
if args.pretrained_path != 'None':
print ('Loading Pretrained Tokenizer...')
tokenizer = T5Tokenizer.from_pretrained(pretrained_path)
else:
tokenizer = T5Tokenizer.from_pretrained(args.model_name)
data = MultiWozData(args.model_name, tokenizer, cfg, args.data_path_prefix, shuffle_mode=args.shuffle_mode,
data_mode='interact', data_version=args.data_version, use_db_as_input=use_db_as_input, cascaded=cascaded, add_special_decoder_token=add_special_decoder_token,
train_data_ratio=args.train_data_ratio)
print ('Data loaded')
evaluator = MultiWozEvaluator(data.reader, cfg)
print ('Start loading model...')
if verify_bs or verify_da:
assert args.model_name.startswith('t5')
from modelling.T5Model import T5Gen_Model
if args.pretrained_path != 'None':
model = T5Gen_Model(pretrained_path, data.tokenizer, data.special_token_list, dropout=0.0,
add_special_decoder_token=add_special_decoder_token, is_training=True)
else:
model = T5Gen_Model(args.model_name, data.tokenizer, data.special_token_list, dropout=0.0,
add_special_decoder_token=add_special_decoder_token, is_training=True)
if cuda_available:
if multi_gpu_training:
model = nn.DataParallel(model) # multi-gpu training
else:
pass
model = model.to(device)
else:
pass
model.eval()
print ('Model loaded')
from e2e_inference_utlis import e2e_batch_interactive_generate
with torch.no_grad():
input_contain_db=use_db_as_input
"""
interact with system (verifier) using GPT-3
"""
# openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
# load the augment dialogs with prompts
assert args.augment_dialog_path is not None
f = open(args.augment_dialog_path, "r")
augment_dialogs = json.load(f) # dict, dict[dial_id] is dialog_dict, dialog_dict[turn_id] is a turn_dict
augment_dialogs = random_dict(augment_dialogs) # shuffle the dict
f.close()
# save file name
one_dev_str = args.augment_dialog_path.split("/")[-1].split(".json")[0].strip()
# record all the interaction dialogs
all_dialogs = load_dialogs(args, one_dev_str)
# num_augment_dialogs = 0
num_augment_dialogs = len(all_dialogs)
# record the dst generation performance of gpt-3
total_turn_num, over_gen_turn_num, de_gen_turn_num = 0, 0, 0
def not_aug_time_satisfied(all_dialogs, augment_dialogs, max_aug_time):
# initialize aug time for each dialog
dialog_aug_times = {}
# only keep the valid dialogs
for dial_id, dialog_turn_with_info in augment_dialogs.items():
dialog_aug_times[dial_id] = 0
# update aug time for each dialog
for dialog in all_dialogs:
dial_id = dialog['dial_id']
if dial_id in dialog_aug_times:
dialog_aug_times[dial_id] += 1
else:
pass
# raise Exception("Out of training set dialogs!")
# check whether each dialog has been interacted
not_satisfied_dials = []
for dial_id, dial_aug_time in dialog_aug_times.items():
if dial_aug_time < max_aug_time:
not_satisfied_dials.append(dial_id)
elif dial_aug_time > max_aug_time:
raise Exception(f"Exceed max aug time: {dial_id}")
else:
pass
return not_satisfied_dials
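# Illustrative note (hypothetical ids): if augment_dialogs contains {"mul0001", "mul0002"} and
# all_dialogs already holds one simulation of "mul0001", then with max_aug_time=1 the call
# not_aug_time_satisfied(all_dialogs, augment_dialogs, 1) returns ["mul0002"]; the while-loop
# below keeps running until this list is empty, i.e. every prompt dialog has been simulated
# max_aug_time times.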
while not_aug_time_satisfied(all_dialogs, augment_dialogs, args.max_aug_time):
for dial_id, dialog_turn_with_info in augment_dialogs.items():
if dial_id not in not_aug_time_satisfied(all_dialogs, augment_dialogs, args.max_aug_time):
print(f"Dialogue {dial_id} already exist!")
continue
real_goal = {}
dialog_info = {}
dialog_turns = []
turn_id = 0
print()
print(f"Start interaction based on {dial_id}!")
prompt = dialog_turn_with_info['prompt']
logger.info(dial_id)
logger.info(prompt)
augment_goal = dialog_turn_with_info['augment_goal']
augment_domains = list(augment_goal.keys()) # include [general] domain
user = ""
context = ""
last_da = ""
history = [] # [(user, response)]
domain_queue = []
while turn_id < args.max_turn_num:
print()
print(f" Generation process of turn {turn_id} ".center(window_length, "-"))
print()
total_turn_num += 1
error_turn = False
end_of_dialog = False
turn_info = {}
turn_info["dial_id"] = dial_id
turn_info["turn_num"] = turn_id
turn_goal = {}
turn_domain = []
user = ""
repeat_time = 0
while not user and repeat_time < args.max_repeat_time:
repeat_time += 1
if "[offerbook]" in last_da: # ensure booking
_user_prefix = user_prefix(bs_reform="[general]", user="yes, ")
else:
_user_prefix = user_prefix()
user_with_bs = openai.Completion.create(
engine=args.gpt3_version,
prompt=prompt + "\n" + _user_prefix,
temperature=0.7,
max_tokens=64,
n=1,
top_p=1,
frequency_penalty=1.0,
presence_penalty=0,
stop=["Assistant"]
)["choices"][0]["text"].lower().replace("\n", "").replace("you:", "").replace("*", "").strip()
user_with_bs = _user_prefix + user_with_bs # You require([domain] slot_name is slot_value): user utterance
# extract user's utterance
if "):" in user_with_bs and len(user_with_bs.split("):")) == 2:
user = user_with_bs.split("):")[1].strip()
not_mentioned_domain = ""
for d in augment_domains:
if d != '[general]' and d not in real_goal:
not_mentioned_domain = d
break
# if '[general]' in user_with_bs:
# # if gpt-3 tries to end the conversation before mentioning all the domain, add a start sequence
# not_mentioned_domain = ""
# for d in augment_domains:
# if d != '[general]' and d not in real_goal:
# not_mentioned_domain = d
# break
# if there is domain that hasn't been mentioned, regenerate the user utterance requiring the not mentioned domain
# if not_mentioned_domain:
# pass
# else:
# end_of_dialog = True
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("GPT-3 generated user turn") + Style.RESET_ALL + f"{user_with_bs}" )
# extract gpt3_bs_reform and verifier_bs_reform
if "require(" in user_with_bs:
gpt3_bspn_reform = user_with_bs.split("require(")[1].split("):")[0].strip()
gpt3_turn_goal = paser_bs_reform_to_dict(gpt3_bspn_reform)
# if debug: print(Fore.GREEN + f"GPT-3 predicted belief text: {gpt3_turn_goal}" + Style.RESET_ALL)
else:
gpt3_bspn_reform = ""
gpt3_turn_goal = {}
# turn_info["bspn_gpt3_current_turn"] = paser_dict_to_bs(gpt3_turn_goal)
"""
Start to interact with TOD !!!
"""
if verify_bs or verify_da:
# construct context_ids
# user = '<sos_u> {} <eos_u>'.format(user)
context = context + ' ' + '<sos_u> {} <eos_u>'.format(user)
context_ids = data.tokenizer.convert_tokens_to_ids(data.tokenizer.tokenize(context))
# construct bs input ids
one_bs_token_id_input = data.bs_prefix_id + [data.sos_context_token_id] + context_ids[-900:] + [data.eos_context_token_id]
batch_bs_token_id_input = [one_bs_token_id_input] # change to a batch with batch_size=1, to use batch_generate()
# generate bs using debugged tod
batch_generated_bs = e2e_batch_interactive_generate(model, 'bs', batch_bs_token_id_input, data)
one_bs_text = batch_generated_bs[0]
gen_goal = paser_bs_to_dict(one_bs_text)
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Verifier generated belief state") + Style.RESET_ALL + f"{one_bs_text}")
# print(Fore.BLUE + f"Predicted belief text: {gen_goal}" + Style.RESET_ALL)
# record turn info
# turn_info["bspn_verifier"] = one_bs_text
"""
determine turn_domain, priority: [general] > gpt3 > debugged tod
"""
if '[general]' in gpt3_bspn_reform:
turn_domain = ['[general]']
else: # detect if there is new domain, if not, return the most recently mentioned domain
turn_domain = get_turn_domain(gpt3_bspn_reform, domain_queue)
# if debug: print(f"Predicted domain: {turn_domain}")
# record turn info
turn_info["turn_domain"] = turn_domain
turn_info["dspn"] = turn_domain
"""
Start analyzing the bs generated by GPT-3 and verifier
"""
for domain in turn_domain:
"""
obtain current domain_bs/turn_bs
"""
if domain in real_goal:
domain_bs = copy.deepcopy(real_goal[domain])
else:
domain_bs = {}
if domain in turn_goal:
turn_bs = copy.deepcopy(turn_goal[domain])
else:
turn_bs = {}
"""
determine bs and update real_goal/turn_goal based on the multi-turn prediction of debugged TOD
"""
if verify_bs:
if domain in gen_goal:
gen_domain_bs = copy.deepcopy(gen_goal[domain])
else:
gen_domain_bs = {}
for slot_name, slot_value in gen_domain_bs.items():
# check if the slot appears in user's utterance of this turn
mentioned_in_this_turn = False
if len(slot_value)==1 and slot_value.isdigit():
if slot_value in user.split() or slot_value+"." in user or slot_value+"," in user or slot_value+"?" in user:
mentioned_in_this_turn = True
else:
if slot_value in num2word:
if num2word[slot_value] in user:
mentioned_in_this_turn = True
elif slot_value != "yes" and ((len(slot_value)==1 and (slot_value in user.split() or slot_value+"." in user or slot_value+"," in user or slot_value+"?" in user)) or (len(slot_value)>1 and slot_value in user)):
mentioned_in_this_turn = True
elif slot_value == "yes" and slot_name == 'internet' and any([_ in user for _ in ['wifi', 'internet']]) and not any([_ in user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_this_turn = True
elif slot_value == "yes" and slot_name == 'parking' and 'parking' in user and not any([_ in user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_this_turn = True
elif any([_ in user for _ in ["same"]]): # deal with in the same group, in the same place, don't care situation
appear_time = 0
for d, b in gen_goal.items():
for s in b.values():
if s == slot_value:
appear_time += 1
if appear_time >= 2: # should appear at least 2 times
mentioned_in_this_turn = True
elif slot_value in ['dont care', "don't care", "do nt care", "doesn't care", "dontcare"] or "care" in slot_value:
mentioned_in_this_turn = True
else:
for norm_slot_value, typo in GENERAL_TYPO.items():
if slot_value == typo:
if ((len(norm_slot_value)==1 and (norm_slot_value in user.split() or norm_slot_value+"." in user or norm_slot_value+"," in user or norm_slot_value+"?" in user)) or (len(norm_slot_value)>1 and norm_slot_value in user)):
mentioned_in_this_turn = True
break
if slot_value == norm_slot_value:
if ((len(typo)==1 and (typo in user.split() or typo+"." in user or typo+"," in user or typo+"?" in user)) or (len(typo)>1 and typo in user)):
mentioned_in_this_turn = True
break
# check if this slot was mentioned in last turn
mentioned_in_last_turn = False
if history:
last_user, last_response = history[-1]
last_user += " " + last_response
if len(slot_value)==1 and slot_value.isdigit():
if slot_value in last_user.split() or slot_value+"." in last_user or slot_value+"," in last_user or slot_value+"?" in last_user:
mentioned_in_last_turn = True
else:
if slot_value in num2word:
if num2word[slot_value] in last_user:
mentioned_in_last_turn = True
elif slot_value != "yes" and ((len(slot_value)==1 and (slot_value in last_user.split() or slot_value+"." in last_user or slot_value+"," in last_user or slot_value+"?" in last_user)) or (len(slot_value)>1 and slot_value in last_user)):
mentioned_in_last_turn = True
elif slot_value == "yes" and slot_name == 'internet' and any([_ in last_user for _ in ['wifi', 'internet']]) and not any([_ in last_user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_last_turn = True
elif slot_value == "yes" and slot_name == 'parking' and 'parking' in last_user and not any([_ in last_user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_last_turn = True
elif any([_ in last_user for _ in ["same"]]): # deal with in the same group, in the same place, don't care situation
appear_time = 0
for d, b in gen_goal.items():
for s in b.values():
if s == slot_value:
appear_time += 1
if appear_time >= 2: # should appear at least 2 times
mentioned_in_last_turn = True
else:
for norm_slot_value, typo in GENERAL_TYPO.items():
if slot_value == typo:
if ((len(norm_slot_value)==1 and (norm_slot_value in last_user.split() or norm_slot_value+"." in last_user or norm_slot_value+"," in last_user or norm_slot_value+"?" in last_user)) or (len(norm_slot_value)>1 and norm_slot_value in last_user)):
mentioned_in_last_turn = True
break
if slot_value == norm_slot_value:
if ((len(typo)==1 and (typo in last_user.split() or typo+"." in last_user or typo+"," in last_user or typo+"?" in last_user)) or (len(typo)>1 and typo in last_user)):
mentioned_in_last_turn = True
break
if mentioned_in_this_turn: # can update in domain_bs and turn_bs
# check if this slot is in this domain
from ontology import informable_slots
domain_pure_text = domain.replace("[","").replace("]", "").strip() # [taxi] -> taxi
if domain_pure_text in informable_slots:
if slot_name in informable_slots[domain_pure_text]:
if slot_value and not re.findall(r"\[.+?\]", slot_value):
domain_bs[slot_name] = slot_value
turn_bs[slot_name] = slot_value
if mentioned_in_last_turn: # can only update in domain_bs, not turn_bs!
# check if this slot is in this domain
from ontology import informable_slots
domain_pure_text = domain.replace("[","").replace("]", "").strip() # [taxi] -> taxi
if domain_pure_text in informable_slots:
if slot_name in informable_slots[domain_pure_text]:
if slot_value and not re.findall(r"\[.+?\]", slot_value):
domain_bs[slot_name] = slot_value
"""
update real_goal/turn_goal based on the single turn prediction of GPT-3 and verifier
"""
predicted_turn_goals = []
predicted_turn_goals.append(gpt3_turn_goal)
# start analyzing domain_bs_text
for predicted_turn_goal in predicted_turn_goals:
if predicted_turn_goal and domain in predicted_turn_goal:
predicted_domain_bs = predicted_turn_goal[domain]
for slot_name, slot_value in predicted_domain_bs.items():
mentioned_in_this_turn = False
# check if the slot appears in user's utterance of this turn
user_words = [word.strip() for word in user.split()]
if len(slot_value)==1 and slot_value.isdigit():
if slot_value in user_words or slot_value+"." in user or slot_value+"," in user or slot_value+"?" in user:
mentioned_in_this_turn = True
else:
if slot_value in num2word:
if num2word[slot_value] in user:
mentioned_in_this_turn = True
elif slot_value != "yes" and ((len(slot_value)==1 and (slot_value in user_words or slot_value+"." in user or slot_value+"," in user or slot_value+"?" in user)) or (len(slot_value)>1 and slot_value in user)):
mentioned_in_this_turn = True
elif slot_value == "yes" and slot_name == 'internet' and any([_ in user for _ in ['wifi', 'internet']]) and not any([_ in user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_this_turn = True
elif slot_value == "yes" and slot_name == 'parking' and 'parking' in user and not any([_ in user for _ in ["don't", "donot", "don 't", "dont", "doesn't"]]):
mentioned_in_this_turn = True
elif slot_value in ['dont care', "don't care", "do nt care", "doesn't care", "dontcare"] or any([_ in slot_value for _ in ["care"]]):
mentioned_in_this_turn = True
else:
for norm_slot_value, typo in GENERAL_TYPO.items():
if slot_value == typo:
if ((len(norm_slot_value)==1 and (norm_slot_value in user.split() or norm_slot_value+"." in user or norm_slot_value+"," in user or norm_slot_value+"?" in user)) or (len(norm_slot_value)>1 and norm_slot_value in user)):
mentioned_in_this_turn = True
break
if slot_value == norm_slot_value:
if ((len(typo)==1 and (typo in user.split() or typo+"." in user or typo+"," in user or typo+"?" in user)) or (len(typo)>1 and typo in user)):
mentioned_in_this_turn = True
break
if mentioned_in_this_turn:
# check the slots valid before updating the tracked bs
from ontology import informable_slots
domain_pure_text = domain.replace("[","").replace("]", "").strip() # [taxi] -> taxi
if domain_pure_text in informable_slots:
if slot_name in informable_slots[domain_pure_text]:
if slot_value and not re.findall(r"\[.+?\]", slot_value):
domain_bs[slot_name] = slot_value
turn_bs[slot_name] = slot_value
else:
print(f"Slot {slot_name}: {slot_value} not in user utterance: {user}")
"""
update real_goal and turn_goal, based on the prediction of TOD, gpt-3 and verifier
"""
real_goal[domain] = domain_bs
turn_goal[domain] = turn_bs
"""
evaluate the difference between gpt-3 generated goal with real goal
"""
gpt3_turn_goal_list = paser_dict_to_list(gpt3_turn_goal)
turn_goal_list = paser_dict_to_list(turn_goal)
for i in turn_goal_list:
if i not in gpt3_turn_goal_list:
de_gen_turn_num += 1
break
for i in gpt3_turn_goal_list:
if i not in turn_goal_list:
over_gen_turn_num += 1
break
"""
reconstruct the generated user_with_bs
"""
bs_text = paser_dict_to_bs(reverse_dict(real_goal))
reform_bs_text = paser_dict_to_bs_reform(reverse_dict(real_goal))
bsdx_text = paser_dict_to_bsdx(reverse_dict(real_goal))
reform_bsdx_text = paser_dict_to_bsdx_reform(reverse_dict(real_goal))
# correct belief state
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Revised belief state") + Style.RESET_ALL + f"{bs_text}")
# correct belief state
one_bs_text = bs_text
# record turn info
turn_info["bspn"] = bs_text
turn_info["bsdx"] = bsdx_text
turn_info["bspn_reform"] = reform_bs_text
turn_info["bsdx_reform"] = reform_bsdx_text
# bs for this turn, for gpt-3
turn_bs_text = paser_dict_to_bs_reform(turn_goal, ignore_none_bs=False)
# record turn info
turn_info['user'] = user
# ignore the delex process
turn_info['usdx'] = user
"""
The format in the prompt, should be consistent with the generate_prompt function
Example:
You require([taxi] destination is pizza hut fen ditton , departure is saint john 's college): i would like a taxi from saint john 's college to pizza hut fen ditton .
"""
# update prompt
user_text = user_prefix(turn_bs_text, user) # You require(turn_bs_text): user
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Revised user turn") + Style.RESET_ALL +f"{user_text}")
prompt += "\n" + user_text
logger.info("\n" + user_text)
"""
Continue the generation of dialog action and response, given the correct belief state !!!!!!
"""
one_queried_db_result = data.reader.bspan_to_DBpointer(one_bs_text, turn_domain)
db_pointer = convert_db_to_pointer(one_queried_db_result)
turn_info["db"] = one_queried_db_result
turn_info["pointer"] = db_pointer
# whether we need to query the db base
if input_contain_db:
# record turn info
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("DB query result") + Style.RESET_ALL + f"{one_queried_db_result}")
one_db_text = '<sos_db> ' + one_queried_db_result + ' <eos_db>'
one_db_token_id_input = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(one_db_text))
else:
one_db_token_id_input = []
# record turn info
one_da_text = ""
if verify_da:
# then we generate the dialogue action
one_da_token_id_input = data.da_prefix_id + [data.sos_context_token_id] + context_ids[-900:] + [data.eos_context_token_id] + one_db_token_id_input
batch_da_token_id_input = [one_da_token_id_input] # change to a batch with batch_size=1, to use batch_generate()
# generate da
batch_generated_da = e2e_batch_interactive_generate(model, 'da', batch_da_token_id_input, data)
one_da_text = batch_generated_da[0]
one_da_token_id_output = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(one_da_text))
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Revised dialog act") + Style.RESET_ALL + f"{one_da_text}")
if one_da_text:
# finally we generate the response
if not cascaded: # only needs db
one_nlg_token_id_input = data.nlg_prefix_id + [data.sos_context_token_id] + context_ids[-900:] + [data.eos_context_token_id] + one_db_token_id_input
else: # needs db and da
one_nlg_token_id_input = data.nlg_prefix_id + [data.sos_context_token_id] + context_ids[-900:] + [data.eos_context_token_id] + one_db_token_id_input + one_da_token_id_output
batch_nlg_token_id_input = [one_nlg_token_id_input] # change to a batch with batch_size=1, to use batch_generate()
# generate nlg
batch_generated_nlg = e2e_batch_interactive_generate(model, 'nlg', batch_nlg_token_id_input, data)
one_nlg_text = batch_generated_nlg[0]
# if debug: print(Fore.CYAN + f"Verifier generated Response: {one_nlg_text}" + Style.RESET_ALL)
# record turn info
turn_info["aspn"] = one_da_text
# turn_info["aspn_verifier"] = one_da_text
turn_info["aspn_reform"] = one_da_text
# using gpt-3 generation
system_based_on_da = openai.Completion.create(
# engine="text-davinci-002",
engine=args.gpt3_version,
prompt=prompt + "\n" + system_prefix(one_da_text),
temperature=0.7,
max_tokens=64,
n=1,
top_p=1,
frequency_penalty=1.0,
presence_penalty=0,
stop=["You require"]
)["choices"][0]["text"].lower().replace("\n", "").replace("you:", "").replace("*", "").strip()
# make sure that the entity is proposed.
if '_name' in one_nlg_text and '_name' not in system_based_on_da:
system_based_on_da = one_nlg_text
# record turn info
turn_info["resp"] = system_based_on_da
# turn_info["resp_verifier"] = one_nlg_text
prompt += "\n" + system_prefix(one_da_text, system_based_on_da)
logger.info("\n" + system_prefix(one_da_text, system_based_on_da))
if debug: print(Fore.LIGHTYELLOW_EX + "{:<28}>> ".format("Revised system turn generation") + Style.RESET_ALL + f"{system_prefix(one_da_text, system_based_on_da)}")
# determine if it is the end
if ("[bye]" in one_da_text or "[welcome]" in one_da_text) and not not_mentioned_domain:
end_of_dialog = True
if not one_da_text:
# using gpt-3 generation
system_based_on_da = ""
repeat_time = 0
while not system_based_on_da and repeat_time < args.max_repeat_time:
repeat_time += 1
system_with_da = openai.Completion.create(
# engine="text-davinci-002",
engine=args.gpt3_version,
prompt=prompt + "\n" + system_prefix(),
temperature=0.7,
max_tokens=64,
n=1,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["You require"]
)["choices"][0]["text"].lower().replace("\n", "").replace("you:", "").replace("*", "").strip()
system_with_da = system_prefix() + system_with_da
if "):" in system_with_da and len(system_with_da.split("):")) == 2:
system_based_on_da = system_with_da.split("):")[1].strip()
if debug: print(Fore.LIGHTMAGENTA_EX + f"Original GPT-3 generated system turn: {system_with_da}" + Style.RESET_ALL)
# extract gpt3_da_reform
if "Assistant(" in system_with_da:
gpt3_aspn_reform = system_with_da.split("Assistant(")[1].split("):")[0].strip()
if debug: print(Fore.LIGHTYELLOW_EX + f"GPT-3 generated dialog act: {gpt3_aspn_reform}" + Style.RESET_ALL)
else:
gpt3_aspn_reform = ""
# record turn info
turn_info["aspn"] = gpt3_aspn_reform
# turn_info["aspn_gen"] = one_da_text
turn_info["aspn_reform"] = gpt3_aspn_reform
# record turn info
turn_info["resp"] = system_based_on_da
# turn_info["resp_gen"] = one_nlg_text
prompt += "\n" + system_prefix(gpt3_aspn_reform, system_based_on_da)
logger.info("\n" + system_prefix(gpt3_aspn_reform, system_based_on_da))
# determine if it is the end
if ("[bye]" in gpt3_aspn_reform or "[welcome]" in gpt3_aspn_reform) and not not_mentioned_domain:
end_of_dialog = True
# add response to context
system = '<sos_r> {} <eos_r>'.format(turn_info["resp"])
context = context + ' ' + system
# add it to history
history.append((turn_info["user"], turn_info["resp"]))
# Print generated response
print()
print(f" Conversation of turn {turn_id} ".center(window_length, "-"))
print()
print(Fore.GREEN + "{:<28}>> ".format("User") + Style.RESET_ALL + f"{turn_info['user']}" )
print(Fore.GREEN + "{:<28}>> ".format("System") + Style.RESET_ALL + f"{turn_info['resp']}" )
print()
print("-"*window_length)
print()
# rearrange the orders and record this turn's info
dialog_turns.append(turn_info)
turn_id += 1
# determine whether to end this dialog
if end_of_dialog:
break
# record this dialog's info
dialog_info['dial_id'] = dial_id
dialog_info['turns'] = dialog_turns
dialog_info['prompt'] = dialog_turn_with_info['prompt']
dialog_info['goal'] = real_goal
all_dialogs.append(dialog_info)
print(f"Dialogue {dial_id} simulation finished !!!")
print()
# save dialogs
if args.save == 'True':
save_dialogs(args, all_dialogs, one_dev_str)
num_augment_dialogs += 1
if num_augment_dialogs >= args.max_dialog_num:
break
if num_augment_dialogs >= args.max_dialog_num:
break
# save dialogs
if args.save == 'True':
save_dialogs(args, all_dialogs, one_dev_str)
print(f"Simulate {num_augment_dialogs} dialogs, {total_turn_num} turns, over-generation turns {over_gen_turn_num}, de-generation turns {de_gen_turn_num}.")
| [
"\nPLACEHOLDER",
"\n",
"PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | Leezekun/dialogic | code~pptod~E2E_TOD~dialogic_aug_dst.py |
import enum
import progressbar
import argparse
import logging
import time
import json
import random
import re
import copy
import os
import numpy as np
import openai
import torch
from transformers import *
from colorama import Fore, Back, Style
from dialogic_utils import *
def parse_config():
parser = argparse.ArgumentParser()
# dataset configuration
parser.add_argument('--multiwoz_schema_path', type=str, default="../data/multiwoz/data/multi-woz-2.3-fine-processed/schema.json", help='the path that stores the schema for multiwoz dataset.')
parser.add_argument('--possible_slot_values_path', type=str, default="../data/multiwoz/data/multi-woz-2.3-fine-processed/possible_slot_values.json", help='the path that stores the possible slot values for multiwoz dataset.')
parser.add_argument('--data_path_prefix', type=str, default='../data/multiwoz/data', help='The path where the data stores.')
parser.add_argument('--data_version', type=str, default='2.3', help='The version of used multiwoz data, 2.0, 2.1, 2.3, 2.4')
# the data information
parser.add_argument('--eva_mode', type=str, default='train',
help="test or dev, or train, or all, evaluation on test or dev dataset")
parser.add_argument('--train_data_ratio', type=float, default=0.01, help='the ratio of training data used for training the model')
# how to build the prompt
parser.add_argument('--augment_time', type=int, default=1, help='the augment size compared with the original dataset')
parser.add_argument('--n_user', type=int, default=1, help='how many user utterances for each bs.')
parser.add_argument('--k_shot', type=int, default=2, help='the maximum number of demo dialogs.')
parser.add_argument('--temperature', type=float, default=0.5, help='the softmax temperature used when sampling similar demo turns.')
# debug
parser.add_argument('--debug', default='True', type=str, help='Whether to print in the process.')
return parser.parse_args()
def softmax(x, temperature=1.0):
x = [_/temperature for _ in x]
f_x = np.exp(x) / np.sum(np.exp(x))
return f_x
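# Illustrative usage (values approximate): a lower temperature sharpens the sampling
# distribution over candidate demo turns, e.g.
#   softmax([0.2, 0.4], temperature=1.0)  ->  array([0.45, 0.55])
#   softmax([0.2, 0.4], temperature=0.1)  ->  array([0.12, 0.88])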
def calculate_turn_similarity(single_turn_info, query_goal, metric="jaccard"):
def jaccard(list1, list2):
intersection = list(set(list1) & set(list2))
unionset = list(set(list1).union(set(list2)))
if unionset:
return float(len(intersection) / len(unionset))
else:
return 0.0
goal1 = query_goal
goal_list1 = paser_bs_from_dict_to_list(goal1)
goal_domain1 = list(goal1.keys())
if "[general]" in goal_domain1:
goal_domain1.remove("[general]")
other_turn_similarities = []
other_turns = []
for turn_info in single_turn_info:
goal2 = turn_info['bs']
has_valid_bs, has_dontcare = detect_valid_bs(goal2)
if goal1 == goal2 or not has_valid_bs:
continue
goal_list2 = paser_bs_from_dict_to_list(goal2)
goal_domain2 = list(goal2.keys())
if "[general]" in goal_domain2:
goal_domain2.remove("[general]")
similarity = jaccard(goal_list1, goal_list2)
if_same_domain = float(goal_domain1 == goal_domain2)
if_same_bs = float(similarity == 1.0)
if_error = turn_info["bs_error"]
# only select same domain, don't select totally same goal, or error turns
similarity = similarity * if_same_domain * (1-if_same_bs) * (1-if_error)
if has_dontcare: similarity *= 2.0 # increase the possibility of selecting a turn with dontcare slot value
other_turn_similarities.append(similarity)
other_turns.append(turn_info)
similarity_dict = {"turns": other_turns, "turn_similarities": other_turn_similarities}
return similarity_dict
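# Illustrative note (hedged, exact item format defined by paser_bs_from_dict_to_list):
# similarity is the Jaccard overlap of the flattened (domain, slot, value) items, kept only
# when the domain sets match, the goals are not identical and the turn has no bs error; e.g.
# {'[hotel]': {'area': 'north', 'stars': '4'}} vs {'[hotel]': {'area': 'north', 'parking': 'yes'}}
# share 1 of 3 items -> similarity 1/3 (doubled if the candidate contains a dontcare value).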
def sample_demo_turns(single_turn_info, augment_turn_goal, args):
not_mention_goal = copy.deepcopy(augment_turn_goal)
augment_demos = []
# find the similar real dialogs as demo
similarities_dict = calculate_turn_similarity(single_turn_info, not_mention_goal)
other_turns = similarities_dict['turns']
other_turn_similarities = similarities_dict['turn_similarities']
if np.sum(other_turn_similarities) == 0:
return augment_demos
other_turn_weights = softmax([sim if sim !=0 else -1e9 for sim in other_turn_similarities], args.temperature)
# select the one with highest similarity
attempt_time = 0
while len(augment_demos) < args.k_shot and attempt_time < 5:
k_shot = args.k_shot - len(augment_demos)
another_turns = random.choices(other_turns, weights=other_turn_weights, k=k_shot)
for another_turn in another_turns:
if another_turn not in augment_demos:
augment_demos.append(another_turn)
attempt_time += 1
return augment_demos
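# Note on sample_demo_turns above: demos are drawn with probability proportional to the softmax
# (with args.temperature) of their goal similarity; up to args.k_shot distinct turns are kept,
# retrying at most 5 times, so fewer than k_shot demos may be returned when similar turns are rare.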
def add_dontcare(augment_demos, augment_turn_goal):
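# If any sampled demo turn contains a "dontcare" slot value (and its user utterance does not
# start with "no"), the eligible slots of the augmented goal in that domain are also flipped to
# "dontcare" so the generated utterance expresses "no preference"; people/stay/destination and a
# few listed domain-slot pairs are never changed.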
for augment_demo in augment_demos:
bs = augment_demo['bs']
user = augment_demo['user']
for domain, domain_bs in bs.items():
for slot, slot_value in domain_bs.items():
if slot_value == "dontcare" and not user.startswith("no"):
for slot in augment_turn_goal[domain]:
if slot not in ["people", "stay", "destination"] and f"{domain}-{slot}" not in ["[hotel]-type", "[hotel]-stay", "[hotel]-people", "[train]-destination"]:
print(f"{domain}-{slot} change to dontcare!")
augment_turn_goal[domain][slot] = 'dontcare'
return augment_turn_goal
return augment_turn_goal
def get_slot_info(norm_schema, domain, slot):
# obtain slot description
slot_description = ""
slot_possible_values = []
domain = domain.replace("[", "").replace("]", "")
for service in norm_schema:
if service["service_name"] == domain:
slots = service["slots"]
for s in slots:
if f"{domain}-{slot}" == s["name"]:
if "description" in s:
slot_description = copy.deepcopy(s["description"])
if "possible_values" in s:
slot_possible_values = copy.deepcopy(s["possible_values"])
return slot_description, slot_possible_values
return slot_description, slot_possible_values
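# Illustrative usage (return values depend on the loaded MultiWOZ schema; the ones below are
# assumptions): get_slot_info(norm_schema, "[hotel]", "pricerange") might return something like
#   ("price budget of the hotel", ["expensive", "cheap", "moderate"])
# while an unknown domain/slot pair falls through to ("", []).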
def generate_prompt_for_dst(norm_schema, augment_turn_goal, augment_demos, prompt_startswith):
prompt = []
assert len(augment_turn_goal) == 1
domain = list(augment_turn_goal.keys())[0]
domain_text = domain.replace("[", "").replace("]", "")
# intro = f"You want to book a {domain}. Tell the assistant your requirements."
# intro = f"The following is a conversation with an booking assistant. The human wants to book a {domain}, and the assistant asks for his requirements."
# intro = f"Write sentences to express your requirements when booking a {domain_text}. Mention and only mention the requirement in the bracket."
# intro = f"Write sentences to answer the assistant's question on your requirements when booking a {domain_text}."
# intro = f"Translate requirements when booking a {domain_text} to natural language, mention and only mention all the feature."
intro = f"Answer the assistant's question on each feature you require when booking a {domain_text}. Also mention no preference on a feature when your requirement on it is \"dontcare\"."
prompt.append(intro)
# add slot description for mentioned slots
prompt.append("Features:")
mentioned_slots = list(augment_turn_goal[domain].keys())
for demo in augment_demos:
bs = demo['bs']
mentioned_slots.extend(list(bs[domain].keys()))
mentioned_slots =list(set(mentioned_slots))
for slot in mentioned_slots:
slot_description, slot_possible_values = get_slot_info(norm_schema, domain, slot)
if len(slot_possible_values) > 1 and len(slot_possible_values) <= 5:
slot_possible_values[-1] = "or " + slot_possible_values[-1]
# slot_possible_values.append("or dontcare (any is ok)")
slot_possible_values = ", ".join(slot_possible_values)
prompt.append(f"{slot}: {slot_description}, {slot_possible_values};")
else:
prompt.append(f"{slot}: {slot_description};")
# add examples
prompt.append("Examples:")
for demo in augment_demos:
bs = demo['bs']
user = demo['user']
bsdx = paser_dict_to_bsdx(bs)
bsdx = bsdx.split()[1:]
bsdx = ", ".join(bsdx)
prompt.append(f"Assistant: what is your requirement on {bsdx}?")
bspn_reform = paser_dict_to_bs_reform(bs)
prompt.append(f"You({bspn_reform}): {user}")
# this sample
bsdx = paser_dict_to_bsdx(augment_turn_goal)
bsdx = bsdx.split()[1:]
bsdx = ", ".join(bsdx)
prompt.append(f"Assistant: what is your requirement on {bsdx}?")
bspn_reform = paser_dict_to_bs_reform(augment_turn_goal)
if prompt_startswith:
prompt.append(f"You({bspn_reform}): {prompt_startswith}")
else:
prompt.append(f"You({bspn_reform}):")
prompt = "\n".join(prompt)
return prompt
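# Illustrative sketch of the assembled prompt (contents hypothetical, format follows the lines
# appended above):
#   Answer the assistant's question on each feature you require when booking a hotel. ...
#   Features:
#   pricerange: price budget of the hotel, expensive, cheap, or moderate;
#   Examples:
#   Assistant: what is your requirement on pricerange, area?
#   You([hotel] pricerange is cheap , area is north): i need a cheap hotel in the north .
#   Assistant: what is your requirement on pricerange?
#   You([hotel] pricerange is moderate):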
def construct_augment_dst(dialogs_with_turn_info, orig_augment_dst_turn_info, augment_time, schema, possible_slot_values, args):
# first normalize multiwoz2.2's schema file to this format
norm_schema = normalize_domain_slot(schema)
augment_dst_turn_info = copy.deepcopy(orig_augment_dst_turn_info)
assert isinstance(augment_dst_turn_info, dict)
total_turn_num, total_aug_turn_num, total_aug_slot_num = 0, 0, 0
type_aug_num = [0, 0, 0, 0]
for dial_id, dialog_with_turn_info in dialogs_with_turn_info.items():
print(f"Current dialog id: {dial_id}")
dialog_type_aug_num = [0, 0, 0, 0]
orig_turns = dialog_with_turn_info['orig_turns']
info_turns = dialog_with_turn_info['info_turns']
if dial_id in augment_dst_turn_info:
augment_turns = copy.deepcopy(augment_dst_turn_info[dial_id])
else:
augment_turns = {}
"""
not based on message, use bs_reform as prompt
type: [substitute, drop, combine, random]
"""
for turn_id in orig_turns:
orig_turn = orig_turns[turn_id]
info_turn = info_turns[turn_id]
if turn_id in augment_turns:
augment_turn = copy.deepcopy(augment_turns[turn_id])
augment_turn_list = copy.deepcopy(augment_turn["augment_turns"])
else:
augment_turn = copy.deepcopy(orig_turn)
augment_turn_list = [] # a list a augment turns
# obtain this turn's information
orig_turn_goal = info_turn['bs']
orig_turn_user = info_turn['user']
# obtain last turn's information
if int(turn_id) > 0:
last_turn = orig_turns[str(int(turn_id)-1)]
last_turn_bs_reform = last_turn['bspn_reform']
last_turn_aspn = last_turn['aspn']
last_turn_resp = last_turn['resp']
orig_goal = paser_bs_reform_to_dict(last_turn_bs_reform)
system_act = paser_aspn_to_dict(last_turn_aspn)
else:
last_turn_bs_reform = ""
last_turn_aspn = ""
last_turn_resp = ""
orig_goal = {}
system_act = {}
# check mentioned_slots, not_mentioned_slots, and not_mentioned_domains
mentioned_slots = {}
if orig_goal:
for domain, domain_slot in orig_goal.items():
if domain_slot:
mentioned_slots[domain] = list(domain_slot.keys()) # list
mentioned_domains = []
not_mentioned_slots = {}
for domain in informable_slots:
all_domain_slots = informable_slots[domain]
domain = f"[{domain}]"
if domain in mentioned_slots:
mentioned_domain_slots = mentioned_slots[domain]
else:
mentioned_domain_slots = []
not_mentioned_domain_slots = []
for slot in all_domain_slots:
if slot not in mentioned_domain_slots:
not_mentioned_domain_slots.append(slot)
not_mentioned_slots[domain] = not_mentioned_domain_slots
# check request_slots
request_slots = {}
if system_act:
for domain, domain_act in system_act.items():
if "[request]" in domain_act:
domain_request_slots = []
for slot in domain_act["[request]"]:
if slot == "price":
domain_request_slots.append("pricerange")
else:
domain_request_slots.append(slot)
request_slots[domain] = domain_request_slots
break
assert len(request_slots) <= 1 # only 0 or 1 domain
"""
start generate augment_goal
"""
for i in range(augment_time):
augment_turn_goal = {}
augment_demos = []
prompt_startswith = ""
type = 0
# # augment the turns when dialog act at last turn is [request]
if ("[request]" in last_turn_aspn and request_slots):
type = 2
# generate augment_turn_goal
for domain, domain_request_slots in request_slots.items():
# Must include: select slots from request slots, at least 1
random.shuffle(domain_request_slots)
selected_slot_num = random.randint(1, len(domain_request_slots))
selected_slots = domain_request_slots[:selected_slot_num]
# ADD: select slots from not mentioned slots, at least 0, at most 2
domain_not_mentioned_slots = not_mentioned_slots[domain]
random.shuffle(domain_not_mentioned_slots)
selected_slot_num = random.randint(0, min(2, len(domain_not_mentioned_slots)))
selected_slots += domain_not_mentioned_slots[:selected_slot_num]
# UPDATE: select one slot from mentioned slots, prob=0.2
# if domain in mentioned_slots:
# domain_mentioned_slots = mentioned_slots[domain]
# if random.random() < 0.2 and domain_mentioned_slots:
# selected_slots += [random.choice(domain_mentioned_slots)]
# remove repeated slots
selected_slots = list(set(selected_slots))
# construct augment_turn_goal
augment_domain_bs = {}
for slot in selected_slots:
augment_domain_bs[slot] = ""
# substitute domain value
augment_domain = domain.split("[")[1].split("]")[0].strip() # [hotel] -> hotel
augment_domain_bs, _, slot_num = substitute_domain_slot_value(norm_schema, possible_slot_values, augment_domain, augment_domain_bs)
augment_turn_goal[domain] = augment_domain_bs
total_aug_slot_num += slot_num
augment_turn_goal[domain] = augment_domain_bs
            # this is a new start, either at the beginning of the dialogue or at the end of a domain in the dialogue
elif "[reqmore]" in last_turn_aspn or int(turn_id) == 0:
type = 3
not_mentioned_domains = []
for domain in all_domain:
if domain not in mentioned_domains and domain not in ["[police]", "[hospital]"]:
not_mentioned_domains.append(domain)
selected_domain = random.choice(not_mentioned_domains)
possible_slots = not_mentioned_slots[selected_domain]
random.shuffle(possible_slots)
selected_slot_num = random.randint(min(1, len(possible_slots)), min(4, len(possible_slots))) # at least 1, at most 4
selected_slots = possible_slots[:selected_slot_num]
augment_domain_bs = {}
for slot in selected_slots:
augment_domain_bs[slot] = ""
augment_domain_bs, _, slot_num = substitute_domain_slot_value(norm_schema, possible_slot_values, selected_domain, augment_domain_bs)
total_aug_slot_num += slot_num
augment_turn_goal[selected_domain] = augment_domain_bs
if "[reqmore]" in last_turn_aspn:
prompt_startswith = "i also need a "
elif int(turn_id) == 0:
prompt_startswith = "i need a "
            # no guidance from the last turn; can only substitute values and add not-mentioned slots for turns with a valid belief state
elif detect_valid_bs(orig_turn_goal)[0]:
type = 4
# generate augment_turn_goal
for domain, domain_bs in orig_turn_goal.items():
if domain_bs:
# DROP: drop some slots from the orig_goal, only keep part of the slots, at least 1
turn_mentioned_slots = list(domain_bs.keys())
if turn_mentioned_slots:
random.shuffle(turn_mentioned_slots)
selected_slot_num = random.randint(1, len(turn_mentioned_slots))
selected_slots = turn_mentioned_slots[:selected_slot_num]
                            # ADD: select slots from not mentioned slots, at least 1, at most 2
domain_not_mentioned_slots = []
for _ in not_mentioned_slots[domain]:
if _ not in domain_bs:
domain_not_mentioned_slots.append(_)
if domain_not_mentioned_slots:
random.shuffle(domain_not_mentioned_slots)
selected_slot_num = random.randint(1, min(2, len(domain_not_mentioned_slots)))
selected_slots += domain_not_mentioned_slots[:selected_slot_num]
# UPDATE: select one slot from mentioned slots, prob=0.2
# if domain in mentioned_slots:
# domain_mentioned_slots = mentioned_slots[domain]
# if random.random() < 0.2 and domain_mentioned_slots:
# selected_slots += [random.choice(domain_mentioned_slots)]
# remove repeated slots
selected_slots = list(set(selected_slots))
# construct augment_turn_goal, add new added slots
augment_domain_bs = {}
for slot in selected_slots:
augment_domain_bs[slot] = ""
# substitute domain value
augment_domain = domain.split("[")[1].split("]")[0].strip() # [hotel] -> hotel
augment_domain_bs, _, slot_num = substitute_domain_slot_value(norm_schema, possible_slot_values, augment_domain, augment_domain_bs)
augment_turn_goal[domain] = augment_domain_bs
total_aug_slot_num += slot_num
augment_turn_goal[domain] = augment_domain_bs
break # only mention one domain in each turn
# add this turn as one of the demos
augment_demos.append(info_turn)
# given augment_turn_goal and augment_demos, using gpt-3 to start augmenting
if augment_turn_goal:
# select demos for gpt-3
if not augment_demos:
augment_demos = sample_demo_turns(single_turn_info, augment_turn_goal, args)
# add dontcare if demos contain dontcare
# augment_turn_goal = add_dontcare(augment_demos, augment_turn_goal)
if augment_demos:
prompt = generate_prompt_for_dst(norm_schema, augment_turn_goal, augment_demos, prompt_startswith)
# generate user utterance as seeds
outputs = openai.Completion.create(engine="text-davinci-002",
prompt=prompt,
temperature=0.7,
max_tokens=64,
n=args.n_user,
top_p=1,
frequency_penalty=1,
presence_penalty=0,
stop=["\n", "Assistant:", "You("]
)["choices"]
users = [output["text"].lower().replace("\n", "").strip() for output in outputs]
if users:
# change augment_turn_goal(turn-level) to augment_goal(dialogue-level)
augment_goal = copy.deepcopy(orig_goal)
for domain, domain_bs in augment_turn_goal.items():
if domain in augment_goal:
for slot, slot_value in domain_bs.items():
augment_goal[domain][slot] = slot_value
else:
augment_goal[domain] = copy.deepcopy(domain_bs)
# change it to the pptod format
augment_bspn = paser_dict_to_bs(augment_goal)
augment_bsdx = paser_dict_to_bsdx(augment_goal)
augment_bspn_reform = paser_dict_to_bs_reform(augment_goal)
augment_bsdx_reform = paser_dict_to_bsdx_reform(augment_goal)
for user in users:
user = prompt_startswith + user
if not detect_error_turn(user, augment_turn_goal):
if args.debug == "True":
print(Fore.GREEN+ f"Prompt: {prompt}" + Style.RESET_ALL)
print(Fore.YELLOW + f"Last turn dialogue action: {last_turn_aspn}" + Style.RESET_ALL)
print(Fore.YELLOW + f"Last turn system response: {last_turn_resp}" + Style.RESET_ALL)
print(Fore.CYAN + f"Augment_type: {type}" + Style.RESET_ALL)
print(Fore.BLUE + f"Augment turn goal: {augment_turn_goal}" + Style.RESET_ALL)
print(Fore.RED + f"Augment user: {user}" + Style.RESET_ALL)
# record the augment_turn
turn = {}
turn['user'] = user
turn['usdx'] = user
turn['bspn'] = augment_bspn
turn['bsdx'] = augment_bsdx
turn['bspn_reform'] = augment_bspn_reform
turn['bsdx_reform'] = augment_bsdx_reform
augment_turn_list.append(turn)
# record augment turn for each type
if type:
type_aug_num[type-1] += 1
dialog_type_aug_num[type-1] += 1
# if dialog_type_aug_num[1]>0 and dialog_type_aug_num[2]>0 and dialog_type_aug_num[3]>0:
# exit()
# save the info in dict
augment_turn["augment_turns"] = augment_turn_list
augment_turns[turn_id] = augment_turn
total_turn_num += 1
total_aug_turn_num += len(augment_turn_list)
print(Fore.GREEN+ f"Current turn num: {total_turn_num}, augment turn num: {total_aug_turn_num}" + Style.RESET_ALL)
print(Fore.GREEN+ f"Augment turn for each type: {type_aug_num}." + Style.RESET_ALL)
augment_dst_turn_info[dial_id] = augment_turns
print(f"Total {total_turn_num} turns, augment {total_aug_turn_num} turns, {total_aug_slot_num} slots!")
return augment_dst_turn_info
import argparse
if __name__ == '__main__':
args = parse_config()
print ('Start loading data...')
from dataclass import MultiWozData
if args.data_version == "2.0":
save_output_path = os.path.join(args.data_path_prefix, "multi-woz-dialogic-processed")
elif args.data_version == "2.1":
save_output_path = os.path.join(args.data_path_prefix, "multi-woz-2.1-dialogic-processed")
elif args.data_version == "2.3":
save_output_path = os.path.join(args.data_path_prefix, "multi-woz-2.3-dialogic-processed")
elif args.data_version == "2.4":
save_output_path = os.path.join(args.data_path_prefix, "multi-woz-2.4-dialogic-processed")
else:
raise Exception("Wrong MultiWOZ version!")
if args.eva_mode == 'dev':
eva_mode = 'dev'
elif args.eva_mode == 'test':
eva_mode = 'test'
elif args.eva_mode == 'train':
eva_mode = 'train'
elif args.eva_mode == 'all':
eva_mode = 'all'
else:
raise Exception('Wrong Evaluation Mode!!!')
    if args.train_data_ratio > 1:
        raise Exception('Wrong train data ratio!!!')
    elif args.train_data_ratio < 0:
        raise Exception('Wrong train data ratio!!!')
else:
train_data_ratio = args.train_data_ratio
if eva_mode == 'train':
one_dev_str = f"{eva_mode}_ratio_{train_data_ratio}"
else:
one_dev_str = f"{eva_mode}"
if args.debug == 'True':
debug = True
elif args.debug == 'False':
debug = False
else:
raise Exception('Wrong debug Mode!!!')
# load openai key
openai.api_key = os.getenv("OPENAI_API_KEY")
# load possible slot values
assert args.possible_slot_values_path is not None
f = open(args.possible_slot_values_path, "r")
possible_slot_values = json.load(f)
f.close()
# load multiwoz schema
assert args.multiwoz_schema_path is not None
f = open(args.multiwoz_schema_path, "r")
schema = json.load(f)
f.close()
assert save_output_path is not None
print("Start loading the dialogs with single turn infos......")
save_dialog_turn_info_path = os.path.join(save_output_path, "dialog_turn_info_" + one_dev_str + ".json")
f = open(save_dialog_turn_info_path, "r")
dialogs_with_turn_info = json.load(f)
f.close()
print("Start loading single turn infos......")
save_single_turn_info_path = os.path.join(save_output_path, "single_turn_info_" + one_dev_str + ".json")
f = open(save_single_turn_info_path, "r")
single_turn_info = json.load(f)
f.close()
print("Start loading existing augmented dialogs......")
augment_dst_turn_info = {}
augment_time = args.augment_time
for i in range(args.augment_time, 1, -2):
save_augment_dialog_turn_info_path = os.path.join(save_output_path, f"{args.k_shot}_shot_x{i}_dst_turn_info_" + one_dev_str + ".json")
if os.path.exists(save_augment_dialog_turn_info_path):
f = open(save_augment_dialog_turn_info_path, "r")
augment_dst_turn_info = json.load(f)
augment_time -= i
print(f"Loaded augment dialogs, num of dialogs {len(augment_dst_turn_info)}, need {augment_time} augment time......")
f.close()
break
"""
Start constructing the augmentation prompt for DST
"""
print("Start augmenting dialogs' goal and message......")
# augment dialogue data
augment_dst_turn_info = construct_augment_dst(dialogs_with_turn_info, augment_dst_turn_info, augment_time, schema, possible_slot_values, args)
print("Start saving augmented dialogs......")
# save the augmented dialog turn info
assert save_output_path is not None
save_augment_dialog_turn_info_path = os.path.join(save_output_path, f"{args.k_shot}_shot_x{args.augment_time}_dst_turn_info_" + one_dev_str + ".json")
f = open(save_augment_dialog_turn_info_path, "w")
json.dump(augment_dst_turn_info, f)
f.close()
| [
"[]",
"i also need a ",
"i need a ",
"\n"
] |
2024-01-10 | ML-AlgoRhythms/dify | api~tasks~enable_segment_to_index_task.py | import datetime
import logging
import time
import click
from celery import shared_task
from langchain.schema import Document
from werkzeug.exceptions import NotFound
from core.index.index import IndexBuilder
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment
@shared_task(queue='dataset')
def enable_segment_to_index_task(segment_id: str):
"""
Async enable segment to index
:param segment_id:
Usage: enable_segment_to_index_task.delay(segment_id)
"""
logging.info(click.style('Start enable segment to index: {}'.format(segment_id), fg='green'))
start_at = time.perf_counter()
segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
if not segment:
raise NotFound('Segment not found')
if segment.status != 'completed':
return
indexing_cache_key = 'segment_{}_indexing'.format(segment.id)
try:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
dataset = segment.dataset
if not dataset:
logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
return
dataset_document = segment.document
if not dataset_document:
logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
return
# save vector index
index = IndexBuilder.get_index(dataset, 'high_quality')
if index:
index.add_texts([document], duplicate_check=True)
# save keyword index
index = IndexBuilder.get_index(dataset, 'economy')
if index:
index.add_texts([document])
end_at = time.perf_counter()
logging.info(click.style('Segment enabled to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
except Exception as e:
logging.exception("enable segment to index failed")
segment.enabled = False
segment.disabled_at = datetime.datetime.utcnow()
segment.status = 'error'
segment.error = str(e)
db.session.commit()
finally:
redis_client.delete(indexing_cache_key)
| [] |
2024-01-10 | claudiadmr/MD-Projeto | Fase3~application~webscraping.py | import requests
import threading
from flask import jsonify
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA, ConversationalRetrievalChain
from langchain.schema import Document
import os
from dotenv import load_dotenv
import json
load_dotenv()
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
class APIRequestThread(threading.Thread):
def __init__(self, url):
self.data = None
self.url = url
threading.Thread.__init__(self)
def run(self):
response = requests.get(self.url)
self.data = response.json()
def run_scraper(amazon, walmart):
print(amazon)
print(walmart)
url1 = f'http://127.0.0.1:9080/crawl.json?spider_name=amazon_reviews&start_requests=true&crawl_args={{"asin": "{amazon}"}}'
url2 = f'http://127.0.0.1:9080/crawl.json?spider_name=wallmart_reviews&start_requests=true&crawl_args={{"asin": "{walmart}"}}'
thread1 = APIRequestThread(url1)
thread2 = APIRequestThread(url2)
# Start both threads
thread1.start()
thread2.start()
# Wait for both threads to finish
thread1.join()
thread2.join()
# Combine data from both threads
combined_data = {
'data1': thread1.data,
'data2': thread2.data,
}
combined_data = combined_data['data1']['items'] + combined_data['data2']['items']
print(len(combined_data))
return openaiRequest(jsonify(combined_data))
def openaiRequest(data):
data_json = data.get_json() # Extract the JSON data from the Response object
product = ''
product_flag = False
documents = []
for item in data_json:
metadata = {'rating': item['rating']}
if item['title'] is not None:
metadata['title'] = item['title']
if item['product'] is not None:
metadata['product'] = item['product']
if product_flag is False:
product = item['product']
product_flag = True
document = Document(page_content=item['text'], metadata=metadata)
documents.append(document)
# Split the text in chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
# Create a persistent, file-based vector store
directory = 'index_store'
vector_index = Chroma.from_documents(documents, OpenAIEmbeddings(), persist_directory=directory)
vector_index.persist()
# Create the retriever and the query-interface
retriever = vector_index.as_retriever(search_type="similarity", search_kwargs={"k": 6})
qa_interface = RetrievalQA.from_chain_type(llm=ChatOpenAI(), chain_type="stuff", retriever=retriever,
return_source_documents=True)
# Query GPT-3
response = qa_interface("""Analyze only the following collection of reviews and employ topic modeling techniques to categorize the feedback into specific features of the product.
Divide each feature in positive characteristics and in negative characteristics.
Response format provided in a json format like this: {Features:[{
-name: x
-Positive Reviews:(full reviews only the ones about this feature)
-Negative Reviews:(full reviews only the ones about this feature)
}]}
Do not repeat the same review twice.
If there are no positive or negative characteristics, write "Not applicable".
Give at least 6 Features.
The product is: """ + product + """
Provide it in JSON format.""")
# Convert JSON to Python dictionary
json_data = json.loads(response['result'])
return {"product_name": product, "features": convert_structure(json_data)}
def convert_structure(data):
converted_data = {}
for feature in data["Features"]:
feature_name = feature["name"]
positive_reviews = feature["Positive Reviews"]
negative_reviews = feature["Negative Reviews"]
converted_data[feature_name] = {
"name": feature_name,
"positive_reviews": positive_reviews,
"negative_reviews": negative_reviews
}
return converted_data
| [] |
2024-01-10 | compass-ctf-team/prompt_injection_research | utilities.py | import openai
from secret import api_key
def test_connection() -> None:
openai.api_key = api_key
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": "This is a test, if you receive my message, just response OK."}],
temperature=0.7
)
print(chat.choices[0].message.content)
def send_message(message: str, temperature: float) -> str:
openai.api_key = api_key
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": message}],
temperature=temperature
)
return chat.choices[0].message.content
def send_message_with_role(prompt: str, message: str, temperature: float) -> str:
openai.api_key = api_key
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": prompt}, {"role": "user", "content": message}],
temperature=temperature
)
return chat.choices[0].message.content
def send_message_with_role_concatenate(prompt_system: str, prompt_begin: str, message: str, prompt_end: str, temperature: float) -> str:
openai.api_key = api_key
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": prompt_system},
{"role": "user", "content": prompt_begin + message + prompt_end}],
temperature=temperature
)
return chat.choices[0].message.content | [
"This is a test, if you receive my message, just response OK.",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | mehmetakifakkus/privateGPT | ingest2.py | #!/usr/bin/env python3
import os
import glob
from typing import List
from dotenv import load_dotenv
from multiprocessing import Pool
from tqdm import tqdm
from utils import *
from langchain.document_loaders import (
CSVLoader,
EverNoteLoader,
PDFMinerLoader,
TextLoader,
UnstructuredEmailLoader,
UnstructuredEPubLoader,
UnstructuredHTMLLoader,
UnstructuredMarkdownLoader,
UnstructuredODTLoader,
UnstructuredPowerPointLoader,
UnstructuredWordDocumentLoader,
)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document
from constants import CHROMA_SETTINGS
load_dotenv()
# Load environment variables
persist_directory = os.environ.get('PERSIST_DIRECTORY')
source_directory = os.environ.get('SOURCE_DIRECTORY', 'source_documents')
embeddings_model_name = os.environ.get('EMBEDDINGS_MODEL_NAME')
is_gpu_enabled = (os.environ.get('IS_GPU_ENABLED', 'False').lower() == 'true')
chunk_size = 500
chunk_overlap = 50
# Custom document loaders
class MyElmLoader(UnstructuredEmailLoader):
"""Wrapper to fallback to text/plain when default does not work"""
def load(self) -> List[Document]:
"""Wrapper adding fallback for elm without html"""
try:
try:
doc = UnstructuredEmailLoader.load(self)
except ValueError as e:
if 'text/html content not found in email' in str(e):
# Try plain text
self.unstructured_kwargs["content_source"]="text/plain"
doc = UnstructuredEmailLoader.load(self)
else:
raise
except Exception as e:
# Add file_path to exception message
raise type(e)(f"{self.file_path}: {e}") from e
return doc
# Map file extensions to document loaders and their arguments
LOADER_MAPPING = {
".csv": (CSVLoader, {}),
# ".docx": (Docx2txtLoader, {}),
".doc": (UnstructuredWordDocumentLoader, {}),
".docx": (UnstructuredWordDocumentLoader, {}),
".enex": (EverNoteLoader, {}),
".eml": (MyElmLoader, {}),
".epub": (UnstructuredEPubLoader, {}),
".html": (UnstructuredHTMLLoader, {}),
".md": (UnstructuredMarkdownLoader, {}),
".odt": (UnstructuredODTLoader, {}),
".pdf": (PDFMinerLoader, {}),
".ppt": (UnstructuredPowerPointLoader, {}),
".pptx": (UnstructuredPowerPointLoader, {}),
".txt": (TextLoader, {"encoding": "utf8"}),
# Add more mappings for other file extensions and loaders as needed
}
def load_single_document(file_path: str) -> List[Document]:
ext = "." + file_path.rsplit(".", 1)[-1]
if ext in LOADER_MAPPING:
loader_class, loader_args = LOADER_MAPPING[ext]
loader = loader_class(file_path, **loader_args)
return loader.load()
raise ValueError(f"Unsupported file extension '{ext}'")
def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
"""
Loads all documents from the source documents directory, ignoring specified files
"""
all_files = []
for ext in LOADER_MAPPING:
all_files.extend(
glob.glob(os.path.join(source_dir, f"**/*{ext}"), recursive=True)
)
filtered_files = [file_path for file_path in all_files if file_path not in ignored_files]
with Pool(processes=os.cpu_count()) as pool:
results = []
with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
results.extend(docs)
pbar.update()
return results
def process_documents(ignored_files: List[str] = []) -> List[Document]:
"""
Load documents and split in chunks
"""
print(f"Loading documents from {source_directory}")
documents = load_documents(source_directory, ignored_files)
if not documents:
print("No new documents to load")
exit(0)
print(f"Loaded {len(documents)} new documents from {source_directory}")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
texts = text_splitter.split_documents(documents)
print(f"Split into {len(texts)} chunks of text (max. {chunk_size} tokens each)")
return texts
def main():
# Create embeddings
ensure_integrity(persist_directory, True)
embeddings_kwargs = {'device': 'cuda'} if is_gpu_enabled else {}
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name, model_kwargs=embeddings_kwargs)
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
collection = db.get()
texts = process_documents([metadata['source'] for metadata in collection['metadatas']])
print(f"Creating embeddings. May take some minutes...")
db.add_documents(texts)
db.persist()
db = None
print(f"Ingestion complete! You can now run privateGPT.py to query your documents")
if __name__ == "__main__":
main() | [] |
2024-01-10 | mehmetakifakkus/privateGPT | privateGPT2.py | #!/usr/bin/env python3
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All, LlamaCpp
import os
import argparse
from utils import *
from torch import cuda as torch_cuda
load_dotenv()
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME")
persist_directory = os.environ.get('PERSIST_DIRECTORY')
model_type = os.environ.get('MODEL_TYPE')
model_path = os.environ.get('MODEL_PATH')
model_n_ctx = os.environ.get('MODEL_N_CTX')
is_gpu_enabled = (os.environ.get('IS_GPU_ENABLED', 'False').lower() == 'true')
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4))
print(model_type, model_path)
print(persist_directory)
print(is_gpu_enabled)
from constants import CHROMA_SETTINGS
def get_gpu_memory() -> int:
"""
    Returns the amount of free memory in MB for the current GPU.
"""
return int(torch_cuda.mem_get_info()[0]/(1024**2))
def calculate_layer_count() -> int | None:
"""
Calculates the number of layers that can be used on the GPU.
"""
if not is_gpu_enabled:
return None
LAYER_SIZE_MB = 120.6 # This is the size of a single layer on VRAM, and is an approximation.
# The current set value is for 7B models. For other models, this value should be changed.
LAYERS_TO_REDUCE = 6 # About 700 MB is needed for the LLM to run, so we reduce the layer count by 6 to be safe.
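    # Worked example under these constants (the amount of free VRAM is hypothetical):
    # with ~8000 MB free, 8000 // 120.6 = 66 layers; subtracting the 6-layer safety
    # margin leaves 60, which exceeds 32, so the returned layer count is capped at 32.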
if (get_gpu_memory()//LAYER_SIZE_MB) - LAYERS_TO_REDUCE > 32:
return 32
else:
return (get_gpu_memory()//LAYER_SIZE_MB-LAYERS_TO_REDUCE)
def main():
ensure_integrity(persist_directory, False)
# Parse the command line arguments
args = parse_arguments()
embeddings_kwargs = {'device': 'cuda'} if is_gpu_enabled else {}
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name, model_kwargs=embeddings_kwargs)
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
# activate/deactivate the streaming StdOut callback for LLMs
callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]
# Prepare the LLM
match model_type:
case "LlamaCpp":
llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, callbacks=callbacks, verbose=False, n_gpu_layers=calculate_layer_count())
case "GPT4All":
if is_gpu_enabled:
print("GPU is enabled, but GPT4All does not support GPU acceleration. Please use LlamaCpp instead.")
exit(1)
llm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', callbacks=callbacks, verbose=False)
case _default:
print(f"Model {model_type} not supported!")
            exit(1)
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents= not args.hide_source)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
# Get the answer from the chain
res = qa(query)
answer, docs = res['result'], [] if args.hide_source else res['source_documents']
# Print the result
print("\n\n> Question:")
print(query)
print("\n> Answer:")
print(answer)
# Print the relevant sources used for the answer
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
def parse_arguments():
parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, '
'using the power of LLMs.')
parser.add_argument("--hide-source", "-S", action='store_true',
help='Use this flag to disable printing of source documents used for answers.')
parser.add_argument("--mute-stream", "-M",
action='store_true',
help='Use this flag to disable the streaming StdOut callback for LLMs.')
return parser.parse_args()
if __name__ == "__main__":
main() | [] |
2024-01-10 | bukosabino/ia-boe | src~etls~common~etl.py | import logging as lg
import os
import typing as tp
import pinecone
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from retry import retry
from src.etls.common.metadata import MetadataDocument
from src.etls.common.utils import TextLoader
from src.initialize import initialize_logging
initialize_logging()
class ETL:
def __init__(self, config_loader, vector_store):
self._config_loader = config_loader
self._vector_store = vector_store
def run(self, docs: tp.List[MetadataDocument]):
chunks = self._split_documents(docs)
self._load_database(chunks)
# self._log_database_stats()
def _split_documents(self, docs: tp.List[MetadataDocument]) -> tp.List[Document]:
"""Split documents by chunks
:param docs:
:return:
"""
logger = lg.getLogger(self._split_documents.__name__)
logger.info("Splitting in chunks %s documents", len(docs))
docs_chunks = []
for doc in docs:
loader = TextLoader(file_path=doc.filepath, metadata=doc.dict())
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=self._config_loader["chunk_size"],
chunk_overlap=self._config_loader["chunk_overlap"],
)
docs_chunks += text_splitter.split_documents(documents)
if doc:
logger.info("Removing file %s", doc.filepath)
os.remove(doc.filepath)
logger.info("Splitted %s documents in %s chunks", len(docs), len(docs_chunks))
return docs_chunks
@retry(tries=3, delay=2)
def _load_database(self, docs_chunks: tp.List[Document]) -> None:
logger = lg.getLogger(self._load_database.__name__)
logger.info("Loading %s embeddings to database", len(docs_chunks))
self._vector_store.add_documents(docs_chunks)
logger.info("Loaded %s embeddings to database", len(docs_chunks))
def _log_database_stats(self) -> None:
logger = lg.getLogger(self._log_database_stats.__name__)
index_name = self._config_loader["vector_store_index_name"]
logger.info(pinecone.describe_index(index_name))
index = pinecone.Index(index_name)
logger.info(index.describe_index_stats())
| [] |
2024-01-10 | bukosabino/ia-boe | src~initialize.py | import collections
import logging as lg
import os
import pinecone
import yaml
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.vectorstores.pinecone import Pinecone
from langchain.vectorstores.qdrant import Qdrant
from openai import AsyncOpenAI
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from supabase.client import Client, create_client
from tavily import TavilyClient
from src.utils import StandardSupabaseVectorStore
def initialize_logging():
logger = lg.getLogger()
logger.info("Initializing logging")
logger.handlers = []
handler = lg.StreamHandler()
formatter = lg.Formatter(
"[%(asctime)s] [%(process)d] [%(levelname)s] [%(name)s] %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(lg.INFO)
logger.info("Initialized logging")
lg.getLogger("uvicorn.error").handlers = logger.handlers
def initialize_app():
"""Initializes the application"""
logger = lg.getLogger(initialize_app.__name__)
logger.info("Initializing application")
config_loader = _init_config()
vector_store = _init_vector_store(config_loader)
openai_client = _init_openai_client()
tavily_client = TavilyClient(api_key=os.environ['TAVILY_API_KEY'])
# retrieval_qa = _init_retrieval_qa_llm(vector_store, config_loader)
logger.info("Initialized application")
init_objects = collections.namedtuple(
"init_objects", ["config_loader", "vector_store", "openai_client", "tavily_client"]
)
return init_objects(config_loader, vector_store, openai_client, tavily_client)
def _init_config():
yaml_config_path = os.path.join(os.environ["APP_PATH"], "config", "config.yaml")
with open(yaml_config_path, "r") as stream:
config_loader = yaml.safe_load(stream)
return config_loader
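
# Illustrative sketch of the config.yaml this loader expects; the keys below are inferred
# from how config_loader is used elsewhere in this module, and the values are placeholders
# rather than the project's actual defaults:
#   vector_store: qdrant                 # or "pinecone" / "supabase"
#   embeddings_model_name: sentence-transformers/all-mpnet-base-v2
#   collection_name: my_collection       # qdrant
#   vector_store_index_name: my_index    # pinecone
#   table_name: documents                # supabase
#   query_name: match_documents          # supabase
#   top_k_results: 5
#   llm_model_name: gpt-3.5-turbo
#   temperature: 0
#   max_tokens: 1000
#   prompt_system: "..."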
def _init_vector_store(config_loader):
logger = lg.getLogger(_init_vector_store.__name__)
logger.info("Initializing vector store")
if config_loader["vector_store"] == "pinecone":
vector_store = _init_vector_store_pinecone(config_loader)
elif config_loader["vector_store"] == "supabase":
vector_store = _init_vector_store_supabase(config_loader)
elif config_loader["vector_store"] == "qdrant":
vector_store = _init_vector_store_qdrant(config_loader)
else:
raise ValueError("Vector Database not configured")
return vector_store
def _init_vector_store_pinecone(config_loader):
logger = lg.getLogger(_init_vector_store_pinecone.__name__)
logger.info("Initializing vector store")
pinecone.init(
api_key=os.environ["PINECONE_API_KEY"],
environment=os.environ["PINECONE_ENV"],
)
index_name = config_loader["vector_store_index_name"]
index = pinecone.Index(index_name)
embeddings = HuggingFaceEmbeddings(
model_name=config_loader["embeddings_model_name"],
model_kwargs={"device": "cpu"},
)
vector_store = Pinecone(index, embeddings.embed_query, "text")
logger.info(pinecone.describe_index(index_name))
logger.info(index.describe_index_stats())
logger.info("Initialized vector store")
return vector_store
def _init_vector_store_supabase(config_loader):
from supabase.lib.client_options import ClientOptions
logger = lg.getLogger(_init_vector_store_supabase.__name__)
logger.info("Initializing vector store")
supabase_client: Client = create_client(
supabase_url=os.environ.get("SUPABASE_API_URL"),
supabase_key=os.environ.get("SUPABASE_API_KEY"),
options=ClientOptions(postgrest_client_timeout=60),
)
embeddings = HuggingFaceEmbeddings(
model_name=config_loader["embeddings_model_name"],
model_kwargs={"device": "cpu"},
)
vector_store = StandardSupabaseVectorStore(
client=supabase_client,
embedding=embeddings,
table_name=config_loader["table_name"],
query_name=config_loader["query_name"],
)
logger.info("Initialized vector store")
return vector_store
def _init_vector_store_qdrant(config_loader):
logger = lg.getLogger(_init_vector_store_qdrant.__name__)
logger.info("Initializing vector store")
qdrant_client = QdrantClient(
url=os.environ["QDRANT_API_URL"],
api_key=os.environ["QDRANT_API_KEY"],
prefer_grpc=True,
)
embeddings = HuggingFaceEmbeddings(
model_name=config_loader["embeddings_model_name"],
model_kwargs={"device": "cpu"},
)
if len(qdrant_client.get_collections().collections) == 0:
logger.info("Creating collection for vector store")
qdrant_client.recreate_collection(
collection_name=config_loader["collection_name"],
vectors_config=VectorParams(size=768, distance=Distance.COSINE),
on_disk_payload=True,
)
logger.info("Created collection for vector store")
vector_store = Qdrant(qdrant_client, config_loader["collection_name"], embeddings)
logger.info("Initialized vector store")
return vector_store
def _init_openai_client():
    logger = lg.getLogger(_init_openai_client.__name__)
logger.info("Initializing OpenAI client")
client = AsyncOpenAI(
api_key=os.environ.get("OPENAI_API_KEY"),
)
logger.info("Initialized OpenAI client")
return client
def _init_retrieval_qa_llm(vector_store, config_loader):
# DEPRECATED
logger = lg.getLogger(_init_retrieval_qa_llm.__name__)
logger.info("Initializing RetrievalQA LLM")
retriever = vector_store.as_retriever(
search_type="similarity", search_kwargs={"k": config_loader["top_k_results"]}
)
system_template = f"{config_loader['prompt_system']}----------------\n{{context}}"
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
retrieval_qa = RetrievalQA.from_chain_type(
llm=ChatOpenAI(
model_name=config_loader["llm_model_name"],
temperature=config_loader["temperature"],
max_tokens=config_loader["max_tokens"],
),
chain_type="stuff",
return_source_documents=True,
retriever=retriever,
chain_type_kwargs={"prompt": ChatPromptTemplate.from_messages(messages)},
)
logger.info(retrieval_qa.combine_documents_chain.llm_chain.prompt.format)
logger.info("Initialized RetrievalQA LLM")
return retrieval_qa
| [
"{question}",
"PLACEHOLDER----------------\n{context}"
] |
2024-01-10 | VJBots/VJ-FILTER-BOT | plugins~Extra~engine.py | # Don't Remove Credit @VJ_Botz
# Subscribe YouTube Channel For Amazing Bot @Tech_VJ
# Ask Doubt on telegram @KingVJ01
import openai
async def ai(query):
openai.api_key = "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" #Your openai api key
response = openai.Completion.create(engine="text-davinci-002", prompt=query, max_tokens=100, n=1, stop=None, temperature=0.9, timeout=5)
return response.choices[0].text.strip()
async def ask_ai(client, m, message):
try:
question = message.text.split(" ", 1)[1]
# Generate response using OpenAI API
response = await ai(question)
# Send response back to user
await m.edit(f"{response}")
except Exception as e:
# Handle other errors
error_message = f"An error occurred: {e}"
await m.edit(error_message)
| [] |
2024-01-10 | jlomako/send-gpt-email | message.py | import smtplib
import os
import openai
# generate message
openai.api_key = os.getenv('OPENAI_API_KEY')
response = openai.Completion.create(
model="text-davinci-003",
prompt="write a short text message to someone you love very much",
temperature=0.7,
max_tokens=50,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
message = response["choices"][0]["text"]
# send message to myself
my_email = os.environ['MY_EMAIL']
my_email2 = os.environ['MY_EMAIL2']
pw = os.environ['PASSWORD']
with smtplib.SMTP(host='smtp.gmail.com', port=587) as connection:
connection.starttls()
connection.login(user=my_email, password=pw)
connection.sendmail(from_addr=my_email,
to_addrs=my_email2,
msg=f'Subject: hello\n\n{message}'
)
| [
"write a short text message to someone you love very much"
] |
2024-01-10 | Vatraz/historyjkiRoblox | historyjki_roblox~gpt_relayer.py | import os
from typing import List
import openai
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
class GtpRelayerException(Exception):
pass
class GtpRelayer:
def simply_ask(self, message: str) -> str:
chat_messages = [self._create_message(message)]
try:
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=chat_messages
)
answer = chat["choices"][0]["message"]["content"]
except Exception as exe:
raise GtpRelayerException(
f"Failed to fetch ChatGTP response: {message}"
) from exe
return answer
def generate_image(
self, prompt: str, n: int = 1, size: int = 256, response_format: str = "url"
) -> List[str]:
try:
response = openai.Image.create(
prompt=prompt,
n=n,
size=f"{size}x{size}",
response_format=response_format,
)
image = response["data"][0][response_format]
except Exception as ex:
raise GtpRelayerException(
f"Failed to fetch Image response: {prompt}"
) from ex
return image
def _create_message(self, message: str) -> dict:
return {"role": "user", "content": message}
| [] |
2024-01-10 | peterw/Chat-with-Github-Repo | src~utils~process.py | import deeplake
import openai
import os
import pathspec
import subprocess
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import DeepLake
# Set the OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
def clone_repository(repo_url, local_path):
"""Clone the specified git repository to the given local path."""
subprocess.run(["git", "clone", repo_url, local_path])
def load_docs(root_dir, file_extensions=None):
"""
Load documents from the specified root directory.
Ignore dotfiles, dot directories, and files that match .gitignore rules.
Optionally filter by file extensions.
"""
docs = []
# Load .gitignore rules
gitignore_path = os.path.join(root_dir, ".gitignore")
if os.path.isfile(gitignore_path):
with open(gitignore_path, "r") as gitignore_file:
gitignore = gitignore_file.read()
spec = pathspec.PathSpec.from_lines(
pathspec.patterns.GitWildMatchPattern, gitignore.splitlines()
)
else:
spec = None
for dirpath, dirnames, filenames in os.walk(root_dir):
# Remove dot directories from the list of directory names
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
for file in filenames:
file_path = os.path.join(dirpath, file)
# Skip dotfiles
if file.startswith("."):
continue
# Skip files that match .gitignore rules
if spec and spec.match_file(file_path):
continue
if file_extensions and os.path.splitext(file)[1] not in file_extensions:
continue
try:
loader = TextLoader(file_path, encoding="utf-8")
docs.extend(loader.load_and_split())
except Exception:
pass
return docs
def split_docs(docs):
"""Split the input documents into smaller chunks."""
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
return text_splitter.split_documents(docs)
def create_deeplake_dataset(activeloop_dataset_path, activeloop_token):
"""Create an empty DeepLake dataset with the specified path and token."""
ds = deeplake.empty(
activeloop_dataset_path,
token=activeloop_token,
overwrite=True,
)
ds.create_tensor("ids")
ds.create_tensor("metadata")
ds.create_tensor("embedding")
ds.create_tensor("text")
def process(
repo_url, include_file_extensions, activeloop_dataset_path, repo_destination
):
"""
Process a git repository by cloning it, filtering files, splitting documents,
creating embeddings, and storing everything in a DeepLake dataset.
"""
activeloop_token = os.getenv("ACTIVELOOP_TOKEN")
create_deeplake_dataset(activeloop_dataset_path, activeloop_token)
clone_repository(repo_url, repo_destination)
docs = load_docs(repo_destination, include_file_extensions)
texts = split_docs(docs)
embeddings = OpenAIEmbeddings()
db = DeepLake(dataset_path=activeloop_dataset_path, embedding_function=embeddings)
db.add_documents(texts)
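
# Illustrative invocation with placeholder values (not taken from the repository):
#   process(
#       repo_url="https://github.com/user/repo",
#       include_file_extensions=[".py", ".md"],
#       activeloop_dataset_path="hub://user/dataset",
#       repo_destination="repo",
#   )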
| [] |
2024-01-10 | salesforce/GeDi | modeling_utils.py | # Adapted from https://github.com/huggingface/transformers/blob/21da895013a95e60df645b7d6b95f4a38f604759/src/transformers/modeling_utils.py
# _generate_no_beam_search modified to include repetition_penalty only over generated tokens and not over the prompt
import logging
import os
import math
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from transformers.configuration_utils import PretrainedConfig
from transformers.file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_remote_url,
)
logger = logging.getLogger(__name__)
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
class ModuleUtilsMixin:
"""
A few utilities for torch.nn.Modules, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get number of (optionally, trainable) parameters in the module.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
def calc_banned_ngram_tokens(prev_input_ids, num_hypos, no_repeat_ngram_size, cur_len) -> None:
"""Copied from fairseq for no_repeat_ngram in beam_search"""
if cur_len + 1 < no_repeat_ngram_size:
# return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
return [[] for _ in range(num_hypos)]
generated_ngrams = [{} for _ in range(num_hypos)]
for idx in range(num_hypos):
gen_tokens = prev_input_ids[idx].tolist()
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]):
prev_ngram_tuple = tuple(ngram[:-1])
            generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]

    def _get_generated_ngrams(hypo_idx):
        # Before decoding the next token, ban ngrams that have already appeared in this hypothesis
        start_idx = cur_len + 1 - no_repeat_ngram_size
        ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].tolist())
        return generated_ngrams[hypo_idx].get(ngram_idx, [])

    banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
    return banned_tokens
def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
"""Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
Make sure we keep at least min_tokens_to_keep per batch example in the output
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold (tokens with 0 are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits
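
# Illustrative use of the filtering helper during sampling (tensor names are hypothetical):
#   next_token_logits = model(input_ids)[0][:, -1, :]
#   filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=50, top_p=0.9)
#   next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)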
class PreTrainedModel(nn.Module, ModuleUtilsMixin):
r""" Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
base_model_prefix = ""
@property
def dummy_inputs(self):
""" Dummy inputs to do a forward pass in the network.
Returns:
torch.Tensor with dummy inputs
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config in model
self.config = config
@property
def base_model(self):
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self):
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
"""
Set model's input embeddings
Args:
value (:obj:`nn.Module`):
A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self):
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if hasattr(output_embeddings, "bias") and output_embeddings.bias is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens: (`optional`) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embeddings Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
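
    # Illustrative usage (the tokenizer object is hypothetical): after adding new tokens to
    # the tokenizer, grow the embedding matrix to match, e.g. model.resize_token_embeddings(len(tokenizer))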
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy word embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
""" Initialize and prunes weights if needed. """
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune):
""" Prunes heads of the base model.
Arguments:
heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
""" Save a model and its configuration file to a directory, so that it
can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.
"""
assert os.path.isdir(
save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, load_in_half_prec=False, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with ``model.train()``
The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                    - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                    - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
                Can be used to update the configuration object (after it has been loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
# For example purposes. Not runnable.
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path,
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path, postfix=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
msg = "Couldn't reach server at '{}' to download pretrained weights.".format(archive_file)
else:
msg = (
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url to model weight files named one of {} but "
"couldn't find any such file at this path or url.".format(
pretrained_model_name_or_path,
", ".join(cls.pretrained_model_archive_map.keys()),
archive_file,
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME],
)
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if load_in_half_prec:
model = model.half()
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
"Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from transformers import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
if load_in_half_prec:
for key in state_dict.keys():
state_dict[key] = state_dict[key].half()
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
if not hasattr(model, cls.base_model_prefix) and any(
s.startswith(cls.base_model_prefix) for s in state_dict.keys()
):
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not any(
s.startswith(cls.base_model_prefix) for s in state_dict.keys()
):
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys
)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys
)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
model.tie_weights() # make sure token embedding weights are still tied if needed
        # Set model in evaluation mode to deactivate dropout modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
return model
def prepare_inputs_for_generation(self, input_ids, **kwargs):
return {"input_ids": input_ids}
def _do_output_past(self, outputs):
has_output_past = hasattr(self.config, "output_past") and self.config.output_past
has_mem_len = hasattr(self.config, "mem_len") and self.config.mem_len
if has_output_past and not has_mem_len and len(outputs) > 1:
return True
elif has_mem_len and self.config.mem_len > 0 and len(outputs) > 1:
return True
return False
@torch.no_grad()
def generate(
self,
input_ids=None,
pad_lens=None,
max_length=None,
min_length=0,
do_sample=True,
num_beams=None,
temperature=None,
top_k=None,
top_p=None,
no_repeat_ngram_size=-1,
repetition_penalty=None,
rep_penalty_scale=0,
bos_token_id=None,
pad_token_id=None,
eos_token_ids=None,
length_penalty=None,
num_return_sequences=None,
penalize_cond=False,
gedi_model=None,
gpt3_api_key=None,
tokenizer=None,
disc_weight=0,
filter_p=1,
target_p=1,
class_bias=0,
attr_class=0,
code_0="negative",
code_1="positive",
multi_code=None,
get_ll=False
):
r""" Generates sequences for models with a LM head. The method currently supports greedy or penalized greedy decoding, sampling with top-k or nucleus sampling
and beam-search.
Adapted in part from `Facebook's XLM beam search code`_.
.. _`Facebook's XLM beam search code`:
https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
Parameters:
input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
The sequence used as a prompt for the generation. If `None` the method initializes
it as an empty `torch.LongTensor` of shape `(1,)`.
max_length: (`optional`) int
The max length of the sequence to be generated. Between 1 and infinity. Default to 20.
do_sample: (`optional`) bool
If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `True`.
num_beams: (`optional`) int
Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
temperature: (`optional`) float
                The value used to modulate the next token probabilities. Must be strictly positive. Default to 1.0.
top_k: (`optional`) int
The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
top_p: (`optional`) float
                The cumulative probability threshold for nucleus sampling: only the smallest set of most probable tokens whose probabilities add up to ``top_p`` or higher is kept. Must be between 0 and 1. Default to 1.
repetition_penalty: (`optional`) float
The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
bos_token_id: (`optional`) int
Beginning of sentence token if no prompt is provided. Default to 0.
eos_token_ids: (`optional`) int or list of int
End of sequence token or list of tokens to stop the generation. Default to 0.
length_penalty: (`optional`) float
Exponential penalty to the length. Default to 1.
num_return_sequences: (`optional`) int
The number of independently computed returned sequences for each element in the batch. Default to 1.
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40, bos_token_id=tokenizer.bos_token_id, eos_token_ids=tokenizer.eos_token_id) # do greedy decoding without beam search
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context
outputs = model.generate(input_ids=input_ids, do_sample=True, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[0][i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, bos_token_id=tokenizer.bos_token_id, eos_token_ids=tokenizer.eos_token_id, num_beams=3) # generate sequences using greedy beam search decoding (3 beams)
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context
            outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences using greedy search
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`)"
)
max_length = max_length if max_length is not None else self.config.max_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_ids = eos_token_ids if eos_token_ids is not None else self.config.eos_token_ids
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
if input_ids is not None:
            batch_size = input_ids.shape[0]  # overridden by the input batch_size
else:
batch_size = 1
if isinstance(eos_token_ids, int):
eos_token_ids = [eos_token_ids]
        assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
        assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
        assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert isinstance(bos_token_id, int) and bos_token_id >= 0, "`bos_token_id` should be a positive integer."
assert isinstance(pad_token_id, int) and pad_token_id >= 0, "`pad_token_id` should be a positive integer."
        assert isinstance(eos_token_ids, (list, tuple)) and all(
            e >= 0 for e in eos_token_ids
), "`eos_token_ids` should be a positive integer or a list/tuple of positive integers."
        assert length_penalty > 0, "`length_penalty` should be strictly positive."
        assert (
            isinstance(num_return_sequences, int) and num_return_sequences > 0
        ), "`num_return_sequences` should be a strictly positive integer."
if input_ids is None:
input_ids = torch.full(
(batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
# current position and vocab size
cur_len = input_ids.shape[1]
vocab_size = self.config.vocab_size
if num_return_sequences != 1:
# Expand input to num return sequences
input_ids = input_ids.unsqueeze(1).expand(batch_size, num_return_sequences, cur_len)
input_ids = input_ids.contiguous().view(
batch_size * num_return_sequences, cur_len
) # (batch_size * num_return_sequences, cur_len)
effective_batch_size = batch_size * num_return_sequences
else:
effective_batch_size = batch_size
output = self._generate_no_beam_search(
input_ids,
pad_lens,
cur_len,
max_length,
min_length,
do_sample,
no_repeat_ngram_size,
temperature,
top_k,
top_p,
repetition_penalty,
rep_penalty_scale,
pad_token_id,
eos_token_ids,
effective_batch_size,
penalize_cond,
gedi_model,
gpt3_api_key,
tokenizer,
disc_weight,
filter_p,
target_p,
class_bias,
attr_class,
code_0,
code_1,
multi_code,
get_ll
)
if num_return_sequences != 1:
output = output.view(batch_size, num_return_sequences, -1)
#print('breaking where we wanna')
#import ipdb; ipdb.set_trace()
return output
def get_gpt3_logits(self, input_ids, tokenizer, non_gpt3_logp=-50000.00, api_key=None):
import openai
openai.api_key = api_key
completion = openai.Completion()
prompt = tokenizer.decode(input_ids[0])
response = completion.create(prompt=prompt,
engine="davinci",
max_tokens=1,
logprobs=100)
response_dict = response["choices"][0]["logprobs"]["top_logprobs"][0]
keys_list = [x for x in response_dict.keys()]
values_list = [x for x in response_dict.values()]
pair_list = []
full_vocab_p = (non_gpt3_logp)*torch.ones([1,50257], dtype=torch.float32)
sorted_dict = {k: v for k, v in sorted(response_dict.items(), key=lambda item: item[1])}
for x,y in zip(keys_list,values_list):
tokens1 = tokenizer.encode(prompt + x)
tokens2 = input_ids[0].tolist()
tot = (len(tokens1)-len(tokens2))
index_ = tokenizer.encode(x)
if len(index_)== 1:
pair_list.append((index_,y))
full_vocab_p[0,index_] = y
return full_vocab_p
def _generate_no_beam_search(
self,
input_ids,
pad_lens,
cur_len,
max_length,
min_length,
do_sample,
no_repeat_ngram_size,
temperature,
top_k,
top_p,
repetition_penalty,
rep_penalty_scale,
pad_token_id,
eos_token_ids,
batch_size,
penalize_cond,
gedi_model,
gpt3_api_key,
tokenizer,
disc_weight,
filter_p,
target_p,
class_bias,
attr_class,
code_0,
code_1,
multi_code,
get_ll
):
""" Generate sequences for each example without beam search (num_beams == 1).
            All returned sequences are generated independently.
"""
# current position / max lengths / length of generated sentences / unfinished sentences
unfinished_sents = input_ids.new(batch_size).fill_(1)
#set this to 0 if you want to apply repetition_penalty to the prompt too
if penalize_cond:
cond_len = 0
else:
cond_len = input_ids.shape[1]
if not(gedi_model is None):
if attr_class == 0:
pt_id = tokenizer.encode(code_0)[0]
nt_id = tokenizer.encode(code_1)[0]
elif attr_class == 1:
nt_id = tokenizer.encode(code_0)[0]
pt_id = tokenizer.encode(code_1)[0]
else:
raise RuntimeError("expects attr_class is 0 or 1")
#prepending tokens corresponding to 'positive' and 'negative' to the inputs
seq_a = (torch.ones(input_ids.shape[0])*pt_id).type_as(input_ids).view(-1,1)
seq_b = (torch.ones(input_ids.shape[0])*nt_id).type_as(input_ids).view(-1,1)
if not(multi_code is None):
seq_a2 = torch.LongTensor(multi_code).unsqueeze(0).to(seq_a.device)
seq_a = torch.cat((seq_a, seq_a2, input_ids), dim=1)[:,:]
seq_b = torch.cat((seq_b, seq_a2, input_ids), dim=1)[:,:]
else:
seq_a = torch.cat((seq_a, input_ids), dim=1)[:,:]
seq_b = torch.cat((seq_b, input_ids), dim=1)[:,:]
bsz = seq_a.shape[0]
seq_batched = torch.cat((seq_a,seq_b),dim=0)
if pad_lens is None:
gedi_pad_lens = None
else:
gedi_pad_lens = pad_lens+pad_lens
past = None
gedi_past = None
desired_labels = torch.zeros(input_ids.shape[0],dtype=torch.long).to(input_ids.device)
rewards=None
if get_ll:
sequence_ll = 0
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)
if not(pad_lens is None):
model_inputs["pad_lens"] = pad_lens
if not(gpt3_api_key is None):
next_token_logits = self.get_gpt3_logits(model_inputs["input_ids"],
tokenizer,
-50000.00,
gpt3_api_key).to(input_ids.device)
else:
outputs = self(**model_inputs)
next_token_logits = outputs[0][:, -1, :]
if get_ll:
next_token_logp = torch.log_softmax(next_token_logits,-1)
if not(gedi_model is None):
#want to compute LM loss here so feeding inputs as labels
if not gedi_past is None:
input_batched = torch.cat((model_inputs["input_ids"],model_inputs["input_ids"]),dim=0)
seq_batched = torch.cat((seq_batched,input_batched),dim=1)
inputs = gedi_model.prepare_inputs_for_generation(seq_batched, past=gedi_past)
inputs["pad_lens"] = gedi_pad_lens
else:
inputs = {"input_ids": seq_batched, "pad_lens": gedi_pad_lens, "past":gedi_past}
gedi_outputs = gedi_model(**inputs)
if gedi_past is None:
if gedi_outputs[0].shape[1]>1:
old_logits = torch.log_softmax(gedi_outputs[0][:, :-1, :],-1)
shift_logits = gedi_outputs[0][..., :-1, :].contiguous()
shift_labels = seq_batched[..., 1:].contiguous()
loss_fct = torch.nn.CrossEntropyLoss(reduction="none")
logits_r = -1*loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
logits_r = logits_r.view(seq_batched.shape[0], -1)
seq_len = logits_r.shape[1]
logits_r = torch.sum(logits_r,1)
logits_pos,logits_neg = torch.split(logits_r/seq_len,input_ids.shape[0])
logits0 = torch.stack((logits_pos,logits_neg),1)
if "logit_scale" in dir(gedi_model):
logits0 = gedi_model.logit_scale*logits0
if "bias" in dir(gedi_model):
logits0 = logits0 + gedi_model.bias
if not (class_bias==0):
logits0[:,0] += class_bias
logp_desired = torch.log_softmax(logits0,-1)[:,0]
logp_undesired = torch.log_softmax(logits0,-1)[:,1]
else:
seq_len=0
logp_desired = (torch.zeros(input_ids.shape[0]) + torch.log(torch.tensor(0.5))).to(input_ids.device)
logp_undesired = (torch.zeros(input_ids.shape[0]) + torch.log(torch.tensor(0.5))).to(input_ids.device)
logits_r = torch.zeros(input_ids.shape[0]*2).to(input_ids.device)
seq_len= seq_len+1
gedi_logits= (torch.log_softmax(gedi_outputs[0][:, -1, :],-1)+logits_r.unsqueeze(1))
logits_pos,logits_neg = torch.split(gedi_logits/seq_len,input_ids.shape[0])
logits = torch.stack((logits_pos,logits_neg),2)
if "logit_scale" in dir(gedi_model):
logits = gedi_model.logit_scale*logits
if "bias" in dir(gedi_model):
logits = logits + gedi_model.bias
if not class_bias == 0:
logits[:,:,0] += class_bias
logp_desired_t = torch.log_softmax(logits,-1)[:,:,0]
logp_undesired_t = torch.log_softmax(logits,-1)[:,:,1]
next_token_logits = torch.log_softmax(1*next_token_logits,-1) + disc_weight*(logp_desired_t) #+delta_capped82058721
sorted_logps, sorted_indices = torch.sort(logp_desired_t, descending=False)
peak= torch.max(next_token_logits,1).values.unsqueeze(1)
next_token_p = torch.softmax(next_token_logits,-1)
for i in range(0,next_token_logits.shape[0]):
if True:
p_sorted = next_token_p[i,sorted_indices[i]]
cumulative_probs = torch.cumsum(p_sorted, dim=-1)
logp_desired_sorted = logp_desired_t[i,sorted_indices[i]]
ind_to_remove = (cumulative_probs <filter_p) & (logp_desired_sorted<(math.log(target_p)))
next_token_logits[i,sorted_indices[i][ind_to_remove]]-=10000
if ind_to_remove[-1]:
print("error, removing everything is likely not intended behavior")
ind_to_remove[-1]=True
# if model has past, then set the past variable to speed up decoding
if not (gedi_model is None):
gedi_past = gedi_outputs[1]
if gpt3_api_key is None:
past = outputs[1]
max = torch.max(next_token_logits,-1,keepdim=True)
max=max[0]
next_token_logits= next_token_logits - max + rep_penalty_scale
# repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
for i in range(batch_size):
prevs = input_ids[i][cond_len:].tolist()
for j in range(0,len(prevs)):
previous_token = prevs[j]
if rep_penalty_scale>0:
if next_token_logits[i, previous_token] == rep_penalty_scale:
rescale=True
else:
rescale=False
next_token_logits[i, previous_token] /= repetition_penalty
                            #the original version accidentally applied the rescaling inside the for loop over prevs; that is slow and only changes things if the max logit is penalized
                            #the conditional below replicates the paper results but is faster
                            #it can be commented out; it makes a very small difference and generation is sometimes identical
if rescale:
max = torch.max(next_token_logits[i,:])
next_token_logits[i,:]= next_token_logits[i,:]- max + rep_penalty_scale
else:
if next_token_logits[i, previous_token] < 0:
next_token_logits[i, previous_token] *= repetition_penalty
else:
next_token_logits[i, previous_token] /= repetition_penalty
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_tokens = calc_banned_ngram_tokens(input_ids[cond_len:], batch_size, no_repeat_ngram_size, len(input_ids[cond_len:]))
for batch_idx in range(batch_size):
next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float("inf")
if not(gedi_model is None):
for i in range(batch_size):
for eos_token_id in eos_token_ids:
if (cur_len < min_length):
next_token_logits[i, eos_token_id] -=10000
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
# Top-p/top-k filtering
next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=0, top_p=top_p)
# Sample
next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)
else:
# Greedy decoding
next_token = torch.argmax(next_token_logits, dim=-1)
if not (gedi_model is None):
token_list = next_token.tolist()+next_token.tolist()
for i in range(0,len(token_list)):
logits_r[i] = gedi_logits[i,token_list[i]]
for i in range(0,len(next_token)):
logp_desired[i] = logp_desired_t[i,next_token[i]]
logp_undesired[i] = logp_undesired_t[i,next_token[i]]
# update generations and finished sentences
tokens_to_add = next_token * unfinished_sents + pad_token_id * (1 - unfinished_sents)
if get_ll:
sequence_ll += next_token_logp[0,next_token]
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
for eos_token_id in eos_token_ids:
unfinished_sents.mul_(tokens_to_add.ne(eos_token_id).long())
cur_len = cur_len + 1
            # stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
if not(gedi_model is None):
            print("GeDi estimates the probability that the sample is the desired class as: " + str(torch.exp(logp_desired[0]).item()))
# add eos_token_ids to unfinished sentences
if cur_len == max_length:
input_ids[:, -1].masked_fill_(unfinished_sents.to(dtype=torch.bool), eos_token_ids[0])
if get_ll:
return input_ids,sequence_ll
else:
return input_ids
class Conv1D(nn.Module):
def __init__(self, nf, nx):
""" Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
Basically works like a Linear layer but the weights are transposed
"""
super().__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
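# Minimal usage sketch for Conv1D: it behaves like nn.Linear(nx, nf) with the weight
# stored transposed, so filling a Linear's weight with conv.weight.t() should give
# identical outputs. The helper name and tensor sizes below are illustrative
# assumptions only, not part of the library API.
def _conv1d_equivalence_sketch():
    import torch
    from torch import nn

    nx, nf = 8, 16                        # input features, output features (arbitrary)
    x = torch.randn(2, 4, nx)             # (batch, seq_len, nx)
    conv = Conv1D(nf, nx)
    linear = nn.Linear(nx, nf)
    with torch.no_grad():                 # share parameters: Conv1D weight is (nx, nf), Linear weight is (nf, nx)
        linear.weight.copy_(conv.weight.t())
        linear.bias.copy_(conv.bias)
    assert torch.allclose(conv(x), linear(x), atol=1e-6)
    return conv(x).shape                  # torch.Size([2, 4, 16])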
class PoolerStartLogits(nn.Module):
""" Compute SQuAD start_logits from sequence hidden states. """
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, p_mask=None):
""" Args:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
invalid position mask such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerEndLogits(nn.Module):
""" Compute SQuAD end_logits from sequence hidden states and start token hidden state.
"""
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
""" Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to hidden_states
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerAnswerClass(nn.Module):
""" Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
"""
Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``.
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
note(Original repo):
no dependency on end_feature so that we can obtain one single `cls_logits`
for each sample
"""
hsz = hidden_states.shape[-1]
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
class SQuADHead(nn.Module):
r""" A SQuAD head inspired by XLNet.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Inputs:
**hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
hidden states of sequence tokens
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the last token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
**is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
Whether the question has a possible answer in the paragraph or not.
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
**start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
**start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
Indices for the top config.start_n_top start token possibilities (beam-search).
**end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size,)``
Log probabilities for the ``is_impossible`` label of the answers.
"""
def __init__(self, config):
super().__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
def forward(
self, hidden_states, start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None
):
outputs = ()
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs
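# Minimal usage sketch for SQuADHead: one call with gold start/end positions (training,
# returns a loss) and one call without labels (inference, returns beam-search style
# top-k candidates). The SimpleNamespace config, tensor sizes and helper name are
# illustrative assumptions only.
def _squad_head_sketch():
    import torch
    from types import SimpleNamespace

    config = SimpleNamespace(start_n_top=2, end_n_top=2, hidden_size=8, layer_norm_eps=1e-12)
    head = SQuADHead(config)
    hidden_states = torch.randn(2, 5, 8)                     # (batch, seq_len, hidden)
    # Training mode: labels provided, a single scalar loss comes back.
    loss = head(hidden_states,
                start_positions=torch.tensor([1, 2]),
                end_positions=torch.tensor([3, 4]))[0]
    # Inference mode: no labels, top-k start/end logits and an answerability score come back.
    start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = head(hidden_states)
    return loss, start_top_index.shape                       # shape is (batch, start_n_top)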
class SequenceSummary(nn.Module):
    r""" Compute a single vector summary of a sequence's hidden states according to various possibilities:
Args of the config class:
summary_type:
- 'last' => [default] take the last token hidden state (like XLNet)
- 'first' => take the first token hidden state (like Bert)
- 'mean' => take the mean of all tokens hidden states
- 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
- 'attn' => Not implemented now, use multi-head attention
summary_use_proj: Add a projection after the vector extraction
summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
            summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default: no activation.
summary_first_dropout: Add a dropout before the projection and activation
summary_last_dropout: Add a dropout after the projection and activation
"""
def __init__(self, config):
super().__init__()
self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
self.activation = Identity()
if hasattr(config, "summary_activation") and config.summary_activation == "tanh":
self.activation = nn.Tanh()
self.first_dropout = Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(self, hidden_states, cls_index=None):
""" hidden_states: float Tensor in shape [bsz, ..., seq_len, hidden_size], the hidden-states of the last layer.
cls_index: [optional] position of the classification token if summary_type == 'cls_index',
shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
if summary_type == 'cls_index' and cls_index is None:
we take the last token of the sequence as classification token
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
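# Minimal usage sketch for SequenceSummary with summary_type='cls_index': the hidden
# state at the given position is projected and passed through tanh. The SimpleNamespace
# config stands in for a real PretrainedConfig; its values and the helper name are
# illustrative assumptions only.
def _sequence_summary_sketch():
    import torch
    from types import SimpleNamespace

    config = SimpleNamespace(
        summary_type="cls_index",
        summary_use_proj=True,
        summary_proj_to_labels=False,
        hidden_size=8,
        summary_activation="tanh",
        summary_first_dropout=0.0,
        summary_last_dropout=0.0,
    )
    summary = SequenceSummary(config)
    hidden_states = torch.randn(2, 5, 8)          # (batch, seq_len, hidden)
    cls_index = torch.tensor([4, 2])              # summary token position for each example
    out = summary(hidden_states, cls_index=cls_index)
    return out.shape                              # torch.Size([2, 8])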
def prune_linear_layer(layer, index, dim=0):
    """ Prune a linear layer (a model parameter) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_conv1d_layer(layer, index, dim=1):
    """ Prune a Conv1D layer (a model parameter) to keep only entries in index.
        A Conv1D works like a Linear layer (see e.g. BERT) but the weights are transposed.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_layer(layer, index, dim=None):
    """ Prune a Conv1D or nn.Linear layer (a model parameter) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__))
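# Minimal usage sketch for prune_layer on an nn.Linear: only the output units listed
# in `index` survive, which is how attention heads are removed elsewhere. Sizes and
# the helper name are illustrative assumptions only.
def _prune_layer_sketch():
    import torch
    from torch import nn

    layer = nn.Linear(6, 4)                            # 4 output units
    index = torch.tensor([0, 2], dtype=torch.long)     # keep output units 0 and 2
    pruned = prune_layer(layer, index)
    x = torch.randn(3, 6)
    assert torch.allclose(pruned(x), layer(x)[:, index], atol=1e-6)
    return pruned.weight.shape                         # torch.Size([2, 6])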
| [] |
2024-01-10 | kkasravi/w251-final-project | pipeline~pipeline.py | import os
import openai
import sys
import re
openai.api_key = os.getenv("OPENAI_API_KEY")
MODELS = ["gpt-3.5-turbo","gpt-4","text-davinci-003","davinci-instruct-beta:2.0.0"]
SYSTEM_INPUT = '''
You are an expert in cooking and cuisine. You are instructed to do the following:
1. Take one or more ingredients as input.
2. Take a cuisine as input.
3. Produce a recipe for that cuisine that includes the ingredients and ingredient amounts.
4. Produce detailed cooking instructions.
5. Produce a description of the meal produced by the recipe.
'''
USER_INPUT1 = '''
Ingredients: chicken breast, onions, garlic, bell pepper
Cuisine: Mexican
'''
ASSISTANT_INPUT = '''
Generate a Mexican recipe that includes chicken breast, onions, garlic, and bell pepper as the main ingredients. The recipe should include ingredient amounts and detailed cooking instructions. The final dish should be described in a way that can be used to generate an image.
'''
def parse_input(input_str):
ingredients_pattern = r'Ingredients:\s*(.*)'
cuisine_pattern = r'Cuisine:\s*(.*)'
ingredients_match = re.search(ingredients_pattern, input_str, re.IGNORECASE)
cuisine_match = re.search(cuisine_pattern, input_str, re.IGNORECASE)
if not ingredients_match or not cuisine_match:
raise ValueError("Invalid input format. Expected format: 'Ingredients: <comma-separated ingredients>\\nCuisine: <cuisine>'")
ingredients_str = ingredients_match.group(1)
cuisine = cuisine_match.group(1).strip()
ingredients = [ingredient.strip() for ingredient in ingredients_str.split(',')]
return ingredients, cuisine
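# Minimal usage sketch for parse_input showing the expected "Ingredients: ... / Cuisine: ..."
# format; the sample ingredients and cuisine are arbitrary examples.
def _parse_input_example():
    sample = "Ingredients: tofu, ginger, scallions\nCuisine: Chinese"
    ingredients, cuisine = parse_input(sample)
    # ingredients == ['tofu', 'ginger', 'scallions'], cuisine == 'Chinese'
    return ingredients, cuisine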
def parse_description(input_string):
description_start = input_string.find("Description:")
if description_start == -1:
return "Description not found."
description = input_string[description_start + len("Description:"):].strip()
return description
def generate_recipe(ingredients, cuisine):
ingredients = "Ingredients: " + ', '.join(ingredients)
cuisine = "Cuisine: " + cuisine
user_input = '\n'.join([ingredients, cuisine])
assistant_input = "Title: Mexican Chicken and Poblano Pepper Casserole Recipe\n\nIntro: \nThis Mexican-inspired casserole is the perfect dish for a cozy night in. With tender chicken breast, sweet onion, and spicy poblano peppers, this dish is sure to satisfy your cravings. Follow these simple steps to make this delicious meal!\n\nIngredients:\n- 2 chicken breasts\n- 1 large onion, chopped\n- 2 poblano peppers, seeded and chopped\n- 2 tsp. olive oil\n- 1 tsp. ground cumin\n- 1 tsp. chili powder\n- 1/2 tsp. salt\n- 1/2 tsp. black pepper\n- 1/2 cup chicken broth\n- 1 cup cooked rice\n- 1/2 cup shredded cheddar cheese\n\nInstructions:\n1. Preheat the oven to 375 degrees F.\n2. Heat the olive oil in a large skillet over medium-high heat. Add the chicken breasts and cook until browned on both sides, about 4-5 minutes per side. Remove the chicken from the skillet and set aside.\n3. Add the onion and poblano peppers to the same skillet and cook until the vegetables are softened, about 5 minutes.\n4. Stir in the cumin, chili powder, salt, and black pepper. Cook for an additional minute.\n5. Add the chicken broth to the skillet and stir to combine everything well.\n6. Transfer the chicken and vegetable mixture to a baking dish.\n7. Cover the baking dish with foil and bake for 25 minutes.\n8. Remove the foil and sprinkle the shredded cheese over the top of the casserole.\n9. Bake uncovered for an additional 10-15 minutes, or until the cheese is melted and bubbly.\n10. Serve hot over cooked rice.\n\nDescription:\nThis Mexican-inspired casserole is the perfect mix of bold flavors and comforting ingredients. Tender chicken breast, sweet onion, and spicy poblano peppers come together in a creamy, cheesy sauce that's sure to satisfy any craving. Served over a bed of white rice, this dish is the ultimate comfort food for any night of the week. Enjoy!"
response = openai.ChatCompletion.create(
model=MODELS[0],
messages=[
{"role": "system", "content": SYSTEM_INPUT},
{"role": "user", "content": USER_INPUT1, "name": "example_user"},
{"role": "assistant", "content": assistant_input, "name": "example_assistant"},
{"role": "user", "content": user_input}
]
)
return response
def generate_image(description):
response = openai.Image.create(
prompt=description,
n=1,
size="1024x1024"
)
image_url = response['data'][0]['url']
return image_url
if __name__ == "__main__":
input_str = sys.stdin.read()
ingredients, cuisine = parse_input(input_str)
response = generate_recipe(ingredients, cuisine)
response = response['choices'][0]['message']['content']
description = parse_description(response)
image_url = generate_image(description)
print(response)
print('\n\n')
print(image_url)
| [
"Title: Mexican Chicken and Poblano Pepper Casserole Recipe\n\nIntro: \nThis Mexican-inspired casserole is the perfect dish for a cozy night in. With tender chicken breast, sweet onion, and spicy poblano peppers, this dish is sure to satisfy your cravings. Follow these simple steps to make this delicious meal!\n\nIngredients:\n- 2 chicken breasts\n- 1 large onion, chopped\n- 2 poblano peppers, seeded and chopped\n- 2 tsp. olive oil\n- 1 tsp. ground cumin\n- 1 tsp. chili powder\n- 1/2 tsp. salt\n- 1/2 tsp. black pepper\n- 1/2 cup chicken broth\n- 1 cup cooked rice\n- 1/2 cup shredded cheddar cheese\n\nInstructions:\n1. Preheat the oven to 375 degrees F.\n2. Heat the olive oil in a large skillet over medium-high heat. Add the chicken breasts and cook until browned on both sides, about 4-5 minutes per side. Remove the chicken from the skillet and set aside.\n3. Add the onion and poblano peppers to the same skillet and cook until the vegetables are softened, about 5 minutes.\n4. Stir in the cumin, chili powder, salt, and black pepper. Cook for an additional minute.\n5. Add the chicken broth to the skillet and stir to combine everything well.\n6. Transfer the chicken and vegetable mixture to a baking dish.\n7. Cover the baking dish with foil and bake for 25 minutes.\n8. Remove the foil and sprinkle the shredded cheese over the top of the casserole.\n9. Bake uncovered for an additional 10-15 minutes, or until the cheese is melted and bubbly.\n10. Serve hot over cooked rice.\n\nDescription:\nThis Mexican-inspired casserole is the perfect mix of bold flavors and comforting ingredients. Tender chicken breast, sweet onion, and spicy poblano peppers come together in a creamy, cheesy sauce that's sure to satisfy any craving. Served over a bed of white rice, this dish is the ultimate comfort food for any night of the week. Enjoy!",
"\nIngredients: chicken breast, onions, garlic, bell pepper\nCuisine: Mexican\n",
"\nYou are an expert in cooking and cuisine. You are instructed to do the following:\n\n1. Take one or more ingredients as input.\n2. Take a cuisine as input.\n3. Produce a recipe for that cuisine that includes the ingredients and ingredient amounts.\n4. Produce detailed cooking instructions.\n5. Produce a description of the meal produced by the recipe.\n"
] |
2024-01-10 | kkasravi/w251-final-project | chat_utils.py | from typing import Any, List, Dict
import openai
import requests
from mysecrets import DATABASE_INTERFACE_BEARER_TOKEN
import logging
def query_database(query_prompt: str) -> Dict[str, Any]:
"""
    Query the vector database to retrieve the chunks most relevant to the user's question.
"""
url = "http://0.0.0.0:8000/query"
headers = {
"Content-Type": "application/json",
"accept": "application/json",
"Authorization": f"Bearer {DATABASE_INTERFACE_BEARER_TOKEN}",
}
data = {"queries": [{"query": query_prompt, "top_k": 5}]}
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
result = response.json()
# process the result
return result
else:
raise ValueError(f"Error: {response.status_code} : {response.content}")
def apply_prompt_template(question: str) -> str:
"""
    A helper function that applies an additional template to the user's question.
Prompt engineering could be done here to improve the result. Here I will just use a minimal example.
"""
prompt = f"""
By considering above input from me, answer the question: {question}
"""
return prompt
def call_chatgpt_api(user_question: str, chunks: List[str]) -> Dict[str, Any]:
"""
Call chatgpt api with user's question and retrieved chunks.
"""
    # Send a request to the OpenAI chat completion API
messages = list(
map(lambda chunk: {
"role": "user",
"content": chunk
}, chunks))
question = apply_prompt_template(user_question)
messages.append({"role": "user", "content": question})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=1024,
temperature=0.7, # High temperature leads to a more creative response.
)
return response
def ask(user_question: str) -> Dict[str, Any]:
"""
Handle user's questions.
"""
# Get chunks from database.
chunks_response = query_database(user_question)
chunks = []
for result in chunks_response["results"]:
for inner_result in result["results"]:
chunks.append(inner_result["text"])
logging.info("User's questions: %s", user_question)
logging.info("Retrieved chunks: %s", chunks)
response = call_chatgpt_api(user_question, chunks)
logging.info("Response: %s", response)
return response["choices"][0]["message"]["content"]
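# Minimal usage sketch for ask(), assuming the retrieval service used by query_database
# is running on localhost:8000 and the openai API key is configured; the question text
# is an arbitrary example.
def _ask_example():
    answer = ask("What topics are covered in the indexed documents?")
    print(answer)   # the model's reply grounded in the retrieved chunks
    return answer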
| [
"\n By considering above input from me, answer the question: PLACEHOLDER\n "
] |
2024-01-10 | kkasravi/w251-final-project | test_dalle.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Image.create(
prompt="a white siamese cat",
n=1,
size="1024x1024"
)
image_url = response['data'][0]['url']
print(image_url)
| [] |
2024-01-10 | aws-dev-staging/live-generative-ai-amazon-bedrock-langchain-agent-example | agent~lambda~agent-handler~fsi_agent.py | from langchain.agents.tools import Tool
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents import AgentExecutor
from tools import tools
from datetime import datetime
import warnings
#warnings.filterwarnings('ignore')
PREFIX = "\n\nHuman: You are a Financial Services AI chatbot (Assistant) for a company called Octank Financial. Also, you can answer general questions about anything. You quickly respond to questions from a user with an answer and the sources you used to find your answer in the format: \
[Source 1: Source Title 1 - Source Link 1], \
[Source 2: Source Title 2 - Source Link 2], \
[Source n: Source Title n - Source Link n]. Provide two newline characters between your answer and the sources. By the way, the date is " + datetime.now().strftime("%m/%d/%Y, %H:%M:%S") + ".\n\nAssistant:"
'''FORMAT_INSTRUCTIONS = """\n\nHuman: To use a tool, please use the following format:
Thought: Do I need to use a tool? Yes
Action: The action to take from the following list of pre-defined tools: 'Octank Financial'
Action Input: The input to the action
Observation: The result of the action
Thought: Do I need to use a tool? No
\n\nAssistant: [Answer and Sources]
"""'''
FORMAT_INSTRUCTIONS = "\n\nHuman: \n\nAssistant:"
class FSIAgent():
def __init__(self,llm, memory) -> None:
self.prefix = PREFIX
self.ai_prefix = "Assistant"
self.human_prefix = "Human"
self.llm = llm
self.memory = memory
self.format_instructions = FORMAT_INSTRUCTIONS
self.agent = self.create_agent()
def create_agent(self):
fsi_agent = ConversationalAgent.from_llm_and_tools(
llm = self.llm,
tools = tools,
prefix = self.prefix,
ai_prefix = self.ai_prefix,
human_prefix = self.human_prefix,
format_instructions = self.format_instructions,
return_intermediate_steps = True,
return_source_documents = True
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=fsi_agent, tools=tools, verbose=True, memory=self.memory, return_source_documents=True, return_intermediate_steps=True) # , handle_parsing_errors=True
return agent_executor
def run(self, input):
print("Running FSI Agent with input: " + str(input))
try:
response = self.agent(input)
except ValueError as e:
response = str(e)
print("fsi_agent ERROR CATCH = " + response)
if not response.startswith("An output parsing error occurred"):
print("## NO CATCH ##")
raise e
print("CATCH")
response = response.removeprefix("An output parsing error occurred. In order to pass this error back to the agent and have it try again, pass `handle_parsing_errors=True` to the AgentExecutor. This is the error: Could not parse LLM output: `").removesuffix("`")
return response
| [] |
2024-01-10 | benweissbehavehealth/documentation-helper | ingestion.py | import os
from langchain.document_loaders import ReadTheDocsLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Pinecone
import pinecone
pinecone.init(
api_key=os.environ["PINECONE_API_KEY"],
environment=os.environ["PINECONE_ENVIRONMENT_REGION"],
)
INDEX_NAME = "langchain-doc-index"
def ingest_docs():
loader = ReadTheDocsLoader("langchain-docs/langchain.readthedocs.io/en/latest")
raw_documents = loader.load()
print(f"loaded {len(raw_documents)} documents")
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=400, chunk_overlap=50, separators=["\n\n", "\n", " ", ""]
)
documents = text_splitter.split_documents(raw_documents)
for doc in documents:
new_url = doc.metadata["source"]
new_url = new_url.replace("langchain-docs", "https:/")
doc.metadata.update({"source": new_url})
embeddings = OpenAIEmbeddings()
print(f"Going to add {len(documents)} to Pinecone")
Pinecone.from_documents(documents, embeddings, index_name=INDEX_NAME)
print("****Loading to vectorestore done ***")
if __name__ == "__main__":
ingest_docs()
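# Note (added): this script expects PINECONE_API_KEY and PINECONE_ENVIRONMENT_REGION
# in the environment (used by pinecone.init above), an OpenAI API key for OpenAIEmbeddings,
# and the crawled docs under langchain-docs/langchain.readthedocs.io/en/latest.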
| [] |
2024-01-10 | assafelovic/gpt3-api | api~model_service.py | # -*- encoding: utf-8 -*-
import openai
from api.config import OPENAI_API_KEY, TEMPERATURE, MAX_TOKENS, GPT_MODEL
class ModelService:
def __init__(self):
openai.api_key = OPENAI_API_KEY
'''
The moderation endpoint is a tool you can use to check whether content is harmful and complies with OpenAI's usage policies.
'''
def moderation(self, input):
r = openai.Moderation.create(
input=input
)
return r
"""
core openai wrapper for completion API
"""
def completion(self, prompt, kwargs={}):
temp = kwargs.setdefault('temperature', TEMPERATURE)
max_tokens = kwargs.setdefault('max_tokens', MAX_TOKENS)
model = kwargs.setdefault('model', GPT_MODEL)
r = openai.Completion.create(
model=model,
prompt=prompt,
temperature=temp,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
return {
"result": r["choices"][0]["text"].strip(),
"model": model,
"max_tokens": max_tokens,
"temperature": temp
}
"""
API Wrapper for ChatGPT
"""
def chat(self, kwargs={}):
# Assume the following format: [{'role':'assistant'/'user', 'content': '...'},...]
messages = kwargs.get("messages")
# (Optional) The chat_behavior is a system message that helps set the behavior of the assistant.
chat_behavior = kwargs.get("chat_behavior")
if chat_behavior:
messages.insert(0, {"role": "system", "content": chat_behavior})
r = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
completion_response = r["choices"][0]["message"]["content"].rstrip()
return completion_response
"""
API Wrapper for OpenAI Whisper
"""
def transcribe(self, kwargs={}):
audio_path = kwargs.get("audio_path")
file = open(audio_path, "rb")
transcription = openai.Audio.transcribe("whisper-1", file)
return transcription
"""
API Wrapper for Dalle 2
Please note, images returned as uri will be removed within 1 hour.
"""
def image(self, prompt, kwargs={}):
n = kwargs.setdefault('n', 1)
size = kwargs.setdefault('size', '512x512')
r = openai.Image.create(
prompt=prompt,
n=n,
size=size
)
return {
"url": r["data"][0]["url"],
"n": n,
"size": size
}
"""
API Wrapper for prompt completion
"""
def predict(self, prompt, kwargs={}):
result = self.completion(prompt, kwargs)
if kwargs.get("moderation"):
moderation_results = self.moderation(result.get("result"))
result["moderation"] = moderation_results
return result
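# Illustrative usage sketch (added; the prompt text is made up):
# service = ModelService()
# out = service.predict("Write a haiku about the sea", {"moderation": True})
# print(out["result"])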
| [] |
2024-01-10 | assafelovic/gpt3-api | api~training_service.py | # -*- encoding: utf-8 -*-
import openai
from api.config import OPENAI_API_KEY, GPT_MODEL
class TrainingService:
def __init__(self):
openai.api_key = OPENAI_API_KEY
'''
Upload training file (JSONL) to openai cloud.
Returns the id of the uploaded training file.
'''
def upload_file(self, filePath):
with open(filePath) as f:
response = openai.File.create(file=f, purpose='fine-tune')
return response.get('id')
'''
Create a fine tuned model training job with uploaded training file
Returns the job response
'''
def create_fine_tuned_model(self, training_file_id, model=GPT_MODEL):
create_args = {
"training_file": training_file_id,
"model": model,
"compute_classification_metrics": True,
"classification_n_classes": 2,
"batch_size": 128
}
r = openai.FineTune.create(**create_args)
return r
'''
Get status of a training job
'''
def get_training_status(self, training_job_id):
r = openai.FineTune.retrieve(id=training_job_id)
return r
'''
Get a fined tuned model id
'''
def get_find_tuned_model_id(self, training_job_id):
r = openai.FineTune.retrieve(id=training_job_id)
return r.get('fine_tuned_model')
'''
List of all current fine tuned models
'''
def list_jobs(self):
r = openai.FineTune.list()
for deployment in r.data:
print('{0}: {1} '.format(deployment.get("id"), deployment.get("fine_tuned_model")))
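# Illustrative fine-tuning flow (added sketch; "train.jsonl" is a hypothetical file name):
# trainer = TrainingService()
# file_id = trainer.upload_file("train.jsonl")
# job = trainer.create_fine_tuned_model(file_id)
# print(trainer.get_training_status(job.get("id")))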
| [] |
2024-01-10 | ktynski/langflow | src~backend~langflow~components~vectorstores~Chroma.py | from typing import Optional, Union
from langflow import CustomComponent
from langchain.vectorstores import Chroma
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseRetriever
from langchain.embeddings.base import Embeddings
import chromadb
class ChromaComponent(CustomComponent):
"""
A custom component for implementing a Vector Store using Chroma.
"""
display_name: str = "Chroma (Custom Component)"
description: str = "Implementation of Vector Store using Chroma"
documentation = "https://python.langchain.com/docs/integrations/vectorstores/chroma"
beta = True
def build_config(self):
"""
Builds the configuration for the component.
Returns:
- dict: A dictionary containing the configuration options for the component.
"""
return {
"collection_name": {"display_name": "Collection Name", "value": "langflow"},
"persist": {"display_name": "Persist"},
"persist_directory": {"display_name": "Persist Directory"},
"code": {"show": False, "display_name": "Code"},
"documents": {"display_name": "Documents", "is_list": True},
"embedding": {"display_name": "Embedding"},
"chroma_server_cors_allow_origins": {
"display_name": "Server CORS Allow Origins",
"advanced": True,
},
"chroma_server_host": {"display_name": "Server Host", "advanced": True},
"chroma_server_port": {"display_name": "Server Port", "advanced": True},
"chroma_server_grpc_port": {
"display_name": "Server gRPC Port",
"advanced": True,
},
"chroma_server_ssl_enabled": {
"display_name": "Server SSL Enabled",
"advanced": True,
},
}
def build(
self,
collection_name: str,
persist: bool,
chroma_server_ssl_enabled: bool,
persist_directory: Optional[str] = None,
embedding: Optional[Embeddings] = None,
documents: Optional[Document] = None,
chroma_server_cors_allow_origins: Optional[str] = None,
chroma_server_host: Optional[str] = None,
chroma_server_port: Optional[int] = None,
chroma_server_grpc_port: Optional[int] = None,
) -> Union[VectorStore, BaseRetriever]:
"""
Builds the Vector Store or BaseRetriever object.
Args:
- collection_name (str): The name of the collection.
- persist_directory (Optional[str]): The directory to persist the Vector Store to.
- chroma_server_ssl_enabled (bool): Whether to enable SSL for the Chroma server.
- persist (bool): Whether to persist the Vector Store or not.
- embedding (Optional[Embeddings]): The embeddings to use for the Vector Store.
- documents (Optional[Document]): The documents to use for the Vector Store.
- chroma_server_cors_allow_origins (Optional[str]): The CORS allow origins for the Chroma server.
- chroma_server_host (Optional[str]): The host for the Chroma server.
- chroma_server_port (Optional[int]): The port for the Chroma server.
- chroma_server_grpc_port (Optional[int]): The gRPC port for the Chroma server.
Returns:
- Union[VectorStore, BaseRetriever]: The Vector Store or BaseRetriever object.
"""
# Chroma settings
chroma_settings = None
if chroma_server_host is not None:
chroma_settings = chromadb.config.Settings(
chroma_server_cors_allow_origins=chroma_server_cors_allow_origins
or None,
chroma_server_host=chroma_server_host,
chroma_server_port=chroma_server_port or None,
chroma_server_grpc_port=chroma_server_grpc_port or None,
chroma_server_ssl_enabled=chroma_server_ssl_enabled,
)
# If documents, then we need to create a Chroma instance using .from_documents
if documents is not None and embedding is not None:
return Chroma.from_documents(
documents=documents, # type: ignore
persist_directory=persist_directory if persist else None,
collection_name=collection_name,
embedding=embedding,
client_settings=chroma_settings,
)
return Chroma(
persist_directory=persist_directory, client_settings=chroma_settings
)
| [] |
2024-01-10 | ktynski/langflow | src~backend~langflow~template~frontend_node~llms.py | from typing import Optional
from langflow.services.database.models.base import orjson_dumps
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.base import FrontendNode
from langflow.template.frontend_node.constants import CTRANSFORMERS_DEFAULT_CONFIG
from langflow.template.frontend_node.constants import OPENAI_API_BASE_INFO
class LLMFrontendNode(FrontendNode):
def add_extra_fields(self) -> None:
if "VertexAI" in self.template.type_name:
# Add credentials field which should of type file.
self.template.add_field(
TemplateField(
field_type="file",
required=False,
show=True,
name="credentials",
value="",
suffixes=[".json"],
file_types=["json"],
)
)
@staticmethod
def format_vertex_field(field: TemplateField, name: str):
if "VertexAI" in name:
advanced_fields = [
"tuned_model_name",
"verbose",
"top_p",
"top_k",
"max_output_tokens",
]
if field.name in advanced_fields:
field.advanced = True
show_fields = [
"tuned_model_name",
"verbose",
"project",
"location",
"credentials",
"max_output_tokens",
"model_name",
"temperature",
"top_p",
"top_k",
]
if field.name in show_fields:
field.show = True
@staticmethod
def format_openai_field(field: TemplateField):
if "openai" in field.name.lower():
field.display_name = (
field.name.title().replace("Openai", "OpenAI").replace("_", " ")
).replace("Api", "API")
if "key" not in field.name.lower() and "token" not in field.name.lower():
field.password = False
if field.name == "openai_api_base":
field.info = OPENAI_API_BASE_INFO
def add_extra_base_classes(self) -> None:
if "BaseLLM" not in self.base_classes:
self.base_classes.append("BaseLLM")
@staticmethod
def format_azure_field(field: TemplateField):
if field.name == "model_name":
field.show = False # Azure uses deployment_name instead of model_name.
elif field.name == "openai_api_type":
field.show = False
field.password = False
field.value = "azure"
elif field.name == "openai_api_version":
field.password = False
@staticmethod
def format_llama_field(field: TemplateField):
field.show = True
field.advanced = not field.required
@staticmethod
def format_ctransformers_field(field: TemplateField):
if field.name == "config":
field.show = True
field.advanced = True
field.value = orjson_dumps(CTRANSFORMERS_DEFAULT_CONFIG, indent_2=True)
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
display_names_dict = {
"huggingfacehub_api_token": "HuggingFace Hub API Token",
}
FrontendNode.format_field(field, name)
LLMFrontendNode.format_openai_field(field)
LLMFrontendNode.format_ctransformers_field(field)
if name and "azure" in name.lower():
LLMFrontendNode.format_azure_field(field)
if name and "llama" in name.lower():
LLMFrontendNode.format_llama_field(field)
if name and "vertex" in name.lower():
LLMFrontendNode.format_vertex_field(field, name)
SHOW_FIELDS = ["repo_id"]
if field.name in SHOW_FIELDS:
field.show = True
if "api" in field.name and (
"key" in field.name
or ("token" in field.name and "tokens" not in field.name)
):
field.password = True
field.show = True
# Required should be False to support
# loading the API key from environment variables
field.required = False
field.advanced = False
if field.name == "task":
field.required = True
field.show = True
field.is_list = True
field.options = ["text-generation", "text2text-generation", "summarization"]
field.value = field.options[0]
field.advanced = True
if display_name := display_names_dict.get(field.name):
field.display_name = display_name
if field.name == "model_kwargs":
field.field_type = "code"
field.advanced = True
field.show = True
elif field.name in [
"model_name",
"temperature",
"model_file",
"model_type",
"deployment_name",
"credentials",
]:
field.advanced = False
field.show = True
if field.name == "credentials":
field.field_type = "file"
if name == "VertexAI" and field.name not in [
"callbacks",
"client",
"stop",
"tags",
"cache",
]:
field.show = True
| [] |
2024-01-10 | ktynski/langflow | src~backend~langflow~components~utilities~PostRequest.py | from langflow import CustomComponent
from langchain.schema import Document
from langflow.services.database.models.base import orjson_dumps
import requests
from typing import Optional
class PostRequest(CustomComponent):
display_name: str = "POST Request"
description: str = "Make a POST request to the given URL."
output_types: list[str] = ["Document"]
documentation: str = "https://docs.langflow.org/components/utilities#post-request"
beta = True
field_config = {
"url": {"display_name": "URL", "info": "The URL to make the request to."},
"headers": {
"display_name": "Headers",
"field_type": "code",
"info": "The headers to send with the request.",
},
"code": {"show": False},
"document": {"display_name": "Document"},
}
def post_document(
self,
session: requests.Session,
document: Document,
url: str,
headers: Optional[dict] = None,
) -> Document:
try:
response = session.post(url, headers=headers, data=document.page_content)
try:
response_json = response.json()
result = orjson_dumps(response_json, indent_2=False)
except Exception:
result = response.text
self.repr_value = result
return Document(
page_content=result,
metadata={
"source": url,
"headers": headers,
"status_code": response,
},
)
except Exception as exc:
return Document(
page_content=str(exc),
metadata={
"source": url,
"headers": headers,
"status_code": 500,
},
)
def build(
self,
document: Document,
url: str,
headers: Optional[dict] = None,
) -> list[Document]:
if headers is None:
headers = {}
if not isinstance(document, list) and isinstance(document, Document):
documents: list[Document] = [document]
elif isinstance(document, list) and all(
isinstance(doc, Document) for doc in document
):
documents = document
else:
raise ValueError("document must be a Document or a list of Documents")
with requests.Session() as session:
documents = [
self.post_document(session, doc, url, headers) for doc in documents
]
self.repr_value = documents
return documents
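# Note (added): in Langflow this component is normally wired in the visual editor; the
# sketch below only illustrates the expected inputs ("https://example.com/api" is hypothetical).
# PostRequest().build(document=Document(page_content='{"k": 1}'),
#                     url="https://example.com/api",
#                     headers={"Content-Type": "application/json"})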
| [] |
2024-01-10 | ktynski/langflow | src~backend~langflow~components~utilities~JSONDocumentBuilder.py | ### JSON Document Builder
# Build a Document containing a JSON object using a key and another Document page content.
# **Params**
# - **Key:** The key to use for the JSON object.
# - **Document:** The Document page to use for the JSON object.
# **Output**
# - **Document:** The Document containing the JSON object.
from langflow import CustomComponent
from langchain.schema import Document
from langflow.services.database.models.base import orjson_dumps
class JSONDocumentBuilder(CustomComponent):
display_name: str = "JSON Document Builder"
description: str = "Build a Document containing a JSON object using a key and another Document page content."
output_types: list[str] = ["Document"]
beta = True
documentation: str = (
"https://docs.langflow.org/components/utilities#json-document-builder"
)
field_config = {
"key": {"display_name": "Key"},
"document": {"display_name": "Document"},
}
def build(
self,
key: str,
document: Document,
) -> Document:
documents = None
if isinstance(document, list):
documents = [
Document(
page_content=orjson_dumps({key: doc.page_content}, indent_2=False)
)
for doc in document
]
elif isinstance(document, Document):
documents = Document(
page_content=orjson_dumps({key: document.page_content}, indent_2=False)
)
else:
raise TypeError(
f"Expected Document or list of Documents, got {type(document)}"
)
self.repr_value = documents
return documents
| [] |
2024-01-10 | ktynski/langflow | src~backend~langflow~components~utilities~UpdateRequest.py | from typing import List, Optional
import requests
from langflow import CustomComponent
from langchain.schema import Document
from langflow.services.database.models.base import orjson_dumps
class UpdateRequest(CustomComponent):
display_name: str = "Update Request"
description: str = "Make a PATCH request to the given URL."
output_types: list[str] = ["Document"]
documentation: str = "https://docs.langflow.org/components/utilities#update-request"
beta = True
field_config = {
"url": {"display_name": "URL", "info": "The URL to make the request to."},
"headers": {
"display_name": "Headers",
"field_type": "code",
"info": "The headers to send with the request.",
},
"code": {"show": False},
"document": {"display_name": "Document"},
"method": {
"display_name": "Method",
"field_type": "str",
"info": "The HTTP method to use.",
"options": ["PATCH", "PUT"],
"value": "PATCH",
},
}
def update_document(
self,
session: requests.Session,
document: Document,
url: str,
headers: Optional[dict] = None,
method: str = "PATCH",
) -> Document:
try:
if method == "PATCH":
response = session.patch(
url, headers=headers, data=document.page_content
)
elif method == "PUT":
response = session.put(url, headers=headers, data=document.page_content)
else:
raise ValueError(f"Unsupported method: {method}")
try:
response_json = response.json()
result = orjson_dumps(response_json, indent_2=False)
except Exception:
result = response.text
self.repr_value = result
return Document(
page_content=result,
metadata={
"source": url,
"headers": headers,
"status_code": response.status_code,
},
)
except Exception as exc:
return Document(
page_content=str(exc),
metadata={"source": url, "headers": headers, "status_code": 500},
)
def build(
self,
method: str,
document: Document,
url: str,
headers: Optional[dict] = None,
) -> List[Document]:
if headers is None:
headers = {}
if not isinstance(document, list) and isinstance(document, Document):
documents: list[Document] = [document]
elif isinstance(document, list) and all(
isinstance(doc, Document) for doc in document
):
documents = document
else:
raise ValueError("document must be a Document or a list of Documents")
with requests.Session() as session:
documents = [
self.update_document(session, doc, url, headers, method)
for doc in documents
]
self.repr_value = documents
return documents
| [] |
2024-01-10 | ktynski/langflow | src~backend~langflow~components~utilities~GetRequest.py | from langflow import CustomComponent
from langchain.schema import Document
from langflow.services.database.models.base import orjson_dumps
import requests
from typing import Optional
class GetRequest(CustomComponent):
display_name: str = "GET Request"
description: str = "Make a GET request to the given URL."
output_types: list[str] = ["Document"]
documentation: str = "https://docs.langflow.org/components/utilities#get-request"
beta = True
field_config = {
"url": {
"display_name": "URL",
"info": "The URL to make the request to",
"is_list": True,
},
"headers": {
"display_name": "Headers",
"field_type": "code",
"info": "The headers to send with the request.",
},
"code": {"show": False},
"timeout": {
"display_name": "Timeout",
"field_type": "int",
"info": "The timeout to use for the request.",
"value": 5,
},
}
def get_document(
self, session: requests.Session, url: str, headers: Optional[dict], timeout: int
) -> Document:
try:
response = session.get(url, headers=headers, timeout=int(timeout))
try:
response_json = response.json()
result = orjson_dumps(response_json, indent_2=False)
except Exception:
result = response.text
self.repr_value = result
return Document(
page_content=result,
metadata={
"source": url,
"headers": headers,
"status_code": response.status_code,
},
)
except requests.Timeout:
return Document(
page_content="Request Timed Out",
metadata={"source": url, "headers": headers, "status_code": 408},
)
except Exception as exc:
return Document(
page_content=str(exc),
metadata={"source": url, "headers": headers, "status_code": 500},
)
def build(
self,
url: str,
headers: Optional[dict] = None,
timeout: int = 5,
) -> list[Document]:
if headers is None:
headers = {}
urls = url if isinstance(url, list) else [url]
with requests.Session() as session:
documents = [self.get_document(session, u, headers, timeout) for u in urls]
self.repr_value = documents
return documents
| [] |
2024-01-10 | jbrumwell/mentat | mentat~llm_api.py | import logging
import os
import sys
from dataclasses import dataclass
from typing import Generator
import openai
import tiktoken
from dotenv import load_dotenv
from termcolor import cprint
from .config_manager import mentat_dir_path, user_config_path
from .errors import MentatError, UserError
package_name = __name__.split(".")[0]
# Check for .env file or already exported API key
# If no api key found, raise an error
def setup_api_key():
if not load_dotenv(os.path.join(mentat_dir_path, ".env")):
load_dotenv()
key = os.getenv("OPENAI_API_KEY")
try:
openai.api_key = key
openai.Model.list() # Test the API key
except openai.error.AuthenticationError:
raise UserError(
"No valid OpenAI api key detected.\nEither place your key into a .env"
" file or export it as an environment variable."
)
async def call_llm_api(messages: list[dict[str, str]], model) -> Generator:
if (
"PYTEST_CURRENT_TEST" in os.environ
and "--benchmark" not in sys.argv
and os.getenv("MENTAT_BENCHMARKS_RUNNING") == "false"
):
logging.critical("OpenAI call attempted in non benchmark test environment!")
raise MentatError("OpenAI call attempted in non benchmark test environment!")
response = await openai.ChatCompletion.acreate(
model=model,
messages=messages,
temperature=0.5,
stream=True,
)
return response
def count_tokens(message: str) -> int:
return len(
tiktoken.encoding_for_model("gpt-4").encode(message, disallowed_special=())
)
def check_model_availability(allow_32k: bool) -> bool:
available_models = [x["id"] for x in openai.Model.list()["data"]]
if allow_32k:
# check if user has access to gpt-4-32k
if "gpt-4-32k-0314" not in available_models:
cprint(
"You set ALLOW_32K to true, but your OpenAI API key doesn't"
" have access to gpt-4-32k-0314. To remove this warning, set"
" ALLOW_32K to false until you have access.",
"yellow",
)
allow_32k = False
if not allow_32k:
# check if user has access to gpt-4
if "gpt-4-0314" not in available_models:
raise UserError(
"Sorry, but your OpenAI API key doesn't have access to gpt-4-0314,"
" which is currently required to run Mentat."
)
return allow_32k
def choose_model(messages: list[dict[str, str]], allow_32k) -> tuple[str, int]:
prompt_token_count = 0
for message in messages:
prompt_token_count += count_tokens(message["content"])
cprint(f"\nTotal token count: {prompt_token_count}", "cyan")
model = "gpt-4-0314"
token_buffer = 500
if prompt_token_count > 8192 - token_buffer:
if allow_32k:
model = "gpt-4-32k-0314"
if prompt_token_count > 32768 - token_buffer:
cprint(
"Warning: gpt-4-32k-0314 has a token limit of 32768. Attempting"
" to run anyway:"
)
else:
cprint(
"Warning: gpt-4-0314 has a maximum context length of 8192 tokens."
" If you have access to gpt-4-32k-0314, set allow-32k to `true` in"
f" `{user_config_path}` to use"
" it. Attempting to run with gpt-4-0314:",
"yellow",
)
return model, prompt_token_count
@dataclass
class CostTracker:
total_cost: int = 0
def display_api_call_stats(
self,
num_prompt_tokens: int,
num_sampled_tokens: int,
model: str,
call_time: float,
) -> None:
cost_per_1000_tokens = {
"gpt-4-0314": (0.03, 0.06),
"gpt-4-32k-0314": (0.06, 0.12),
}
prompt_cost = (num_prompt_tokens / 1000) * cost_per_1000_tokens[model][0]
sampled_cost = (num_sampled_tokens / 1000) * cost_per_1000_tokens[model][1]
tokens_per_second = num_sampled_tokens / call_time
call_cost = prompt_cost + sampled_cost
speed_and_cost_string = (
f"Speed: {tokens_per_second:.2f} tkns/s | Cost: ${call_cost:.2f}"
)
cprint(speed_and_cost_string, "cyan")
costs_logger = logging.getLogger("costs")
costs_logger.info(speed_and_cost_string)
self.total_cost += call_cost
def display_total_cost(self) -> None:
cprint(f"\nTotal session cost: ${self.total_cost:.2f}", color="light_blue")
| [
"0",
"content"
] |
2024-01-10 | New-KT/New-KT_AI | crawling~news_summary.py | import os
import openai
import pandas as pd
import json
from dotenv import load_dotenv
import tiktoken
# Load environment variables from the file
load_dotenv()
# token count helper functions
def encoding_getter(encoding_type: str):
return tiktoken.encoding_for_model(encoding_type)
def tokenizer(string: str, encoding_type: str) -> list:
encoding = encoding_getter(encoding_type)
#print (encoding)
tokens = encoding.encode(string)
return tokens
def token_counter(string: str, encoding_type: str) -> int:
num_tokens = len(tokenizer(string, encoding_type))
return num_tokens
def read_concatenate_news(file_path, max_tokens=3000):
news = pd.read_csv(file_path, delimiter='\t', header=None, names=['text'])
concatenated_text = news['text'].str.cat(sep=' ')
num_tokens = token_counter(concatenated_text, "gpt-3.5-turbo")
print("ํ ํฐ ์: " + str(num_tokens))
if num_tokens >= max_tokens:
tokens = tokenizer(concatenated_text, "gpt-3.5-turbo")
concatenated_text = encoding_getter("gpt-3.5-turbo").decode(tokens[:max_tokens])
return concatenated_text
else:
return concatenated_text
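# Note (added): the truncation above keeps the concatenated article text within the
# assumed 3000-token budget so the summarization prompt fits gpt-3.5-turbo's context window.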
# def summarize_news(file_path):
# query = read_concatenate_news(file_path)
# GPT_MODEL = "text-davinci-003" # ์
๋ฐ์ดํธ๋ ์์ง ์ด๋ฆ ์ฌ์ฉ
# response = openai.Completion.create(
# engine=GPT_MODEL,
# prompt=f"๋ด์ค์ ๋ํ ๊ฒฐ๊ณผ์ผ. ์์ฝ์ค๋ช
ํด {query}",
# temperature=0.7,
# max_tokens=150
# )
# response_message = response.choices[0].text.strip()
# return response_message
def summarize_news(file_path):
query = read_concatenate_news(file_path)
response = openai.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": f"๋ด์ค์ ๋ํ ๊ฒฐ๊ณผ์ผ. ์์ฝ์ค๋ช
ํด {query}"}]
)
return response.choices[0].message.content.strip()
# def summarize_news(file_path):
# query = read_concatenate_news(file_path)
# GPT_MODEL = "gpt-3.5-turbo"
# messages = [
# {"role": "system", "content": "You're the best summarizer. You have to show the right summary of the news. ๋ชจ๋ ๋๋ต์ ํ๊ธ๋ก."},
# {"role": "user", "content": f"๋ด์ค์ ๋ํ ๊ฒฐ๊ณผ์ผ. ์์ฝ์ค๋ช
ํด {query}"}
# ]
# response = openai.ChatCompletion.create(
# model=GPT_MODEL,
# messages=messages,
# temperature=0
# )
# response_message = response.choices[0].message.content
# return response_message
def save_to_json(result, srcText, node, output_file=None):
if output_file is None:
output_file = f'{srcText}_{node}_summary_result.json'
with open(output_file, 'w', encoding='utf-8') as json_file:
json.dump({'summary': result}, json_file, ensure_ascii=False, indent=4)
print(f"๊ฒฐ๊ณผ๊ฐ {output_file}์ ์ ์ฅ๋์์ต๋๋ค.")
# def main():
# file_path = '๊ณ ๊ตฌ๋ง_naver_news_texts.txt'
# result = summarize_news(file_path)
# save_to_json(result)
# if __name__ == '__main__':
# openai.api_key = os.environ.get("OPENAI_API_KEY")
# main()
| [
"๋ด์ค์ ๋ํ ๊ฒฐ๊ณผ์ผ. ์์ฝ์ค๋ช
ํด PLACEHOLDER"
] |
2024-01-10 | New-KT/New-KT_AI | meeting_summary~meetsum.py | import os
from openai import OpenAI
from dotenv import load_dotenv
import pandas as pd
import json
def summary_meeting(file_path):
# Set up OpenAI client
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Define system and user messages
GPT_MODEL = "gpt-3.5-turbo"
messages = [
{"role": "system", "content": "You are the best summarizer for meetings. Summarize the entire content of the meeting efficiently."},
{"role": "user", "content": f"ํ์ ์ ์ฒด ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ํ์ ๋ด์ฉ์ ์์ฝํด์ค. ํ์ ์ ๋ชฉ, ์ฃผ์ ์ด์ ๋ฐ ์งํ์ํฉ, ์๋ก์ด ์ํฉ ๋ฐ ๊ณต์ง์ฌํญ, ์ถ๊ฐ ์๊ฑด ๋ฑ ํ์๋ก ์์ฑํด์ค . {file_path}"}
]
# Make API request using the content from the text file
response = client.chat.completions.create(
model=GPT_MODEL,
messages=messages,
temperature=0
)
# Extract and return the generated response
response_message = response.choices[0].message.content
return response_message
def read_concatenate_news(file_path):
news = pd.read_csv(file_path, delimiter='\t', header=None, names=['text'])
concatenated_text = news['text'].str.cat(sep=' ')
return concatenated_text
def mts():
load_dotenv()
file_path = r"meeting.txt"
file_path= read_concatenate_news(file_path)
# Call the function and print the result
result = summary_meeting(file_path)
if result is not None:
result_dict = parse_meeting_result(result)
result_json = json.dumps(result_dict, ensure_ascii=False, indent=2)
print(result_json)
result_file='result.json'
with open(result_file, 'w', encoding='utf-8') as f:
json.dump(result_json, f, ensure_ascii=False, indent=2)
def parse_meeting_result(result_text):
result_dict = {
"ํ์ ์ ๋ชฉ": "",
"์ฃผ์ ์ด์ ๋ฐ ์งํ์ํฉ": "",
"์๋ก์ด ์ํฉ ๋ฐ ๊ณต์ง์ฌํญ": "",
"์ถ๊ฐ ์๊ฑด":""
}
current_key = None
# Split the result text into sections based on newlines
lines = result_text.strip().split('\n')
for line in lines:
# Check if the line contains a colon, indicating a key-value pair
if ':' in line:
# Split the line into key and value
key, value = map(str.strip, line.split(':', 1))
current_key = key
if key in result_dict:
result_dict[key] = value
elif current_key:
# If there is a current key, append the line to its value
result_dict[current_key] += ' ' + line
return result_dict
# import json
# def parse_meeting_result(result_text):
# result_dict = {}
# # Split the result text into sections based on newlines
# sections = result_text.strip().split('\n\n')
# # Iterate through each section and extract key-value pairs
# for section in sections:
# lines = section.strip().split('\n')
# key = lines[0].strip(':')
# value = ' '.join(lines[1:]).strip()
# result_dict[key] = value
# return result_dict
if __name__ == "__main__":
mts()
| [
"ํ์ ์ ์ฒด ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ํ์ ๋ด์ฉ์ ์์ฝํด์ค. ํ์ ์ ๋ชฉ, ์ฃผ์ ์ด์ ๋ฐ ์งํ์ํฉ, ์๋ก์ด ์ํฉ ๋ฐ ๊ณต์ง์ฌํญ, ์ถ๊ฐ ์๊ฑด ๋ฑ ํ์๋ก ์์ฑํด์ค . meeting.txt",
"You are the best summarizer for meetings. Summarize the entire content of the meeting efficiently."
] |
2024-01-10 | New-KT/New-KT_AI | stt_key_crawl~extract.py | import os
import openai
from dotenv import load_dotenv
import pandas as pd
def extract_keywords_from_meeting(file_path):
# Set up OpenAI client
load_dotenv()
client = openai.OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Define system and user messages
GPT_MODEL = "gpt-3.5-turbo"
messages = [
{"role": "system", "content": "You are the best keyword extractor. You need to extract keywords from the meeting content. All responses should be in Korean."},
{"role": "user", "content": f"ํ์ ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ๋๊ฐ ์๊ฐํ๊ธฐ์ ํ
์คํธ์์ ์ฃผ์ ๋ผ๊ณ ์๊ฐ๋๋ ํค์๋ 3๊ฐ๋ง ์ถ์ถํด์ค. ๋ค๋ฅธ ์ฌ๋ด์์ด ์ค์ง ํค์๋ ์ธ๊ฐ๋ง. {file_path}"}
]
# Make API request using the content from the text file
response = client.chat.completions.create(
model=GPT_MODEL,
messages=messages,
temperature=0
)
# Extract and return the generated response
response_message = response.choices[0].message.content
return response_message
def read_concatenate_news(file_path):
news = pd.read_csv(file_path, delimiter='\t', header=None, names=['text'])
concatenated_text = news['text'].str.cat(sep=' ')
return concatenated_text
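# Illustrative usage sketch (added; "meeting.txt" is a hypothetical file name here):
# text = read_concatenate_news("meeting.txt")
# print(extract_keywords_from_meeting(text))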
| [
"You are the best keyword extractor. You need to extract keywords from the meeting content. All responses should be in Korean.",
"ํ์ ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ๋๊ฐ ์๊ฐํ๊ธฐ์ ํ
์คํธ์์ ์ฃผ์ ๋ผ๊ณ ์๊ฐ๋๋ ํค์๋ 3๊ฐ๋ง ์ถ์ถํด์ค. ๋ค๋ฅธ ์ฌ๋ด์์ด ์ค์ง ํค์๋ ์ธ๊ฐ๋ง. PLACEHOLDER"
] |
2024-01-10 | New-KT/New-KT_AI | stt_keyword~extract.py | import os
from openai import OpenAI
from dotenv import load_dotenv
import pandas as pd
def extract_keywords_from_meeting(file_path):
# Set up OpenAI client
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Define system and user messages
GPT_MODEL = "gpt-3.5-turbo"
messages = [
{"role": "system", "content": "You are the best keyword extractor. You need to extract keywords from the meeting content. All responses should be in Korean."},
{"role": "user", "content": f"ํ์ ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ๋๊ฐ ์๊ฐํ๊ธฐ์ ํ
์คํธ์์ ์ฃผ์ ๋ผ๊ณ ์๊ฐ๋๋ ํค์๋ 3๊ฐ๋ง ์ถ์ถํด์ค. ๋ค๋ฅธ ์ฌ๋ด์์ด ์ค์ง ํค์๋๋ง. {file_path}"}
]
# Make API request using the content from the text file
response = client.chat.completions.create(
model=GPT_MODEL,
messages=messages,
temperature=0
)
# Extract and return the generated response
response_message = response.choices[0].message.content
return response_message
def read_concatenate_news(file_path):
news = pd.read_csv(file_path, delimiter='\t', header=None, names=['text'])
concatenated_text = news['text'].str.cat(sep=' ')
return concatenated_text
| [
"You are the best keyword extractor. You need to extract keywords from the meeting content. All responses should be in Korean.",
"ํ์ ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ๋๊ฐ ์๊ฐํ๊ธฐ์ ํ
์คํธ์์ ์ฃผ์ ๋ผ๊ณ ์๊ฐ๋๋ ํค์๋ 3๊ฐ๋ง ์ถ์ถํด์ค. ๋ค๋ฅธ ์ฌ๋ด์์ด ์ค์ง ํค์๋๋ง. PLACEHOLDER"
] |
2024-01-10 | New-KT/New-KT_AI | stt_keyword_final~extract.py | import os
from openai import OpenAI
from dotenv import load_dotenv
import pandas as pd
def extract_keywords_from_meeting(file_path):
# Set up OpenAI client
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Define system and user messages
GPT_MODEL = "gpt-3.5-turbo"
messages = [
{"role": "system", "content": "You are the best keyword extractor. You need to extract keywords from the meeting content. All responses should be in Korean."},
{"role": "user", "content": f"ํ์ ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ๋๊ฐ ์๊ฐํ๊ธฐ์ ํ
์คํธ์์ ์ฃผ์ ๋ผ๊ณ ์๊ฐ๋๋ ํค์๋ 3๊ฐ๋ง ์ถ์ถํด์ค. ๋ค๋ฅธ ์ฌ๋ด์์ด ์ค์ง ํค์๋ ์ธ๊ฐ๋ง. {file_path}"}
]
# Make API request using the content from the text file
response = client.chat.completions.create(
model=GPT_MODEL,
messages=messages,
temperature=0
)
# Extract and return the generated response
response_message = response.choices[0].message.content
return response_message
def read_concatenate_news(file_path):
news = pd.read_csv(file_path, delimiter='\t', header=None, names=['text'])
concatenated_text = news['text'].str.cat(sep=' ')
return concatenated_text
| [
"You are the best keyword extractor. You need to extract keywords from the meeting content. All responses should be in Korean.",
"ํ์ ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ๋๊ฐ ์๊ฐํ๊ธฐ์ ํ
์คํธ์์ ์ฃผ์ ๋ผ๊ณ ์๊ฐ๋๋ ํค์๋ 3๊ฐ๋ง ์ถ์ถํด์ค. ๋ค๋ฅธ ์ฌ๋ด์์ด ์ค์ง ํค์๋ ์ธ๊ฐ๋ง. PLACEHOLDER"
] |
2024-01-10 | New-KT/New-KT_AI | stt_keywordlist~extract.py | import os
from openai import OpenAI
from dotenv import load_dotenv
import pandas as pd
def extract_keywords_from_meeting(file_path):
# Set up OpenAI client
load_dotenv()
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Define system and user messages
GPT_MODEL = "gpt-3.5-turbo"
messages = [
{"role": "system", "content": "You are the best keyword extractor. You need to extract keywords from the meeting content. All responses should be in Korean."},
{"role": "user", "content": f"ํ์ ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ๋๊ฐ ์๊ฐํ๊ธฐ์ ํ
์คํธ์์ ์ฃผ์ ๋ผ๊ณ ์๊ฐ๋๋ ํค์๋ 3๊ฐ๋ง ์ถ์ถํด์ค. ๋ค๋ฅธ ์ฌ๋ด์์ด ์ค์ง ํค์๋ ์ธ๊ฐ๋ง. {file_path}"}
]
# Make API request using the content from the text file
response = client.chat.completions.create(
model=GPT_MODEL,
messages=messages,
temperature=0
)
# Extract and return the generated response
response_message = response.choices[0].message.content
return response_message
def read_concatenate_news(file_path):
news = pd.read_csv(file_path, delimiter='\t', header=None, names=['text'])
concatenated_text = news['text'].str.cat(sep=' ')
return concatenated_text
| [
"You are the best keyword extractor. You need to extract keywords from the meeting content. All responses should be in Korean.",
"ํ์ ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ๋๊ฐ ์๊ฐํ๊ธฐ์ ํ
์คํธ์์ ์ฃผ์ ๋ผ๊ณ ์๊ฐ๋๋ ํค์๋ 3๊ฐ๋ง ์ถ์ถํด์ค. ๋ค๋ฅธ ์ฌ๋ด์์ด ์ค์ง ํค์๋ ์ธ๊ฐ๋ง. PLACEHOLDER"
] |
2024-01-10 | New-KT/New-KT_AI | stt_key_crawl_meetsum~meetsum.py | import os
from openai import OpenAI
from dotenv import load_dotenv
import pandas as pd
import json
def summary_meeting(file_path):
# Set up OpenAI client
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Define system and user messages
GPT_MODEL = "gpt-3.5-turbo"
messages = [
{"role": "system", "content": "You are the best summarizer for meetings. Summarize the entire content of the meeting efficiently. All responses should be in Korean."},
{"role": "user", "content": f"ํ์ ์ ์ฒด ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ํ์ ๋ด์ฉ์ ์์ฝํด์ค. ํ์ ์ ๋ชฉ, ์ฃผ์ ์ด์ ๋ฐ ์งํ์ํฉ, ์๋ก์ด ์ํฉ ๋ฐ ๊ณต์ง์ฌํญ, ์ถ๊ฐ ์๊ฑด ๋ฑ์ด ๋ฌด์กฐ๊ฑด ํฌํจ๋ ํ์๋ก ์์ฑํด์ค . {file_path}"}
]
# Make API request using the content from the text file
response = client.chat.completions.create(
model=GPT_MODEL,
messages=messages,
temperature=0
)
# Extract and return the generated response
response_message = response.choices[0].message.content
return response_message
def read_concatenate_news(file_path):
news = pd.read_csv(file_path, delimiter='\t', header=None, names=['text'])
concatenated_text = news['text'].str.cat(sep=' ')
return concatenated_text
def mts(output_file_path):
load_dotenv()
file_path= read_concatenate_news(output_file_path)
# Call the function and print the result
result = summary_meeting(file_path)
if result is not None:
result_dict = parse_meeting_result(result)
result_json = json.dumps(result_dict, ensure_ascii=False, indent=2)
print(result_json)
result_file='result.json'
with open(result_file, 'w', encoding='utf-8') as f:
json.dump(result_json, f, ensure_ascii=False, indent=2)
def parse_meeting_result(result_text):
result_dict = {
"ํ์ ์ ๋ชฉ": "",
"์ฃผ์ ์ด์ ๋ฐ ์งํ์ํฉ": "",
"์๋ก์ด ์ํฉ ๋ฐ ๊ณต์ง์ฌํญ": "",
"์ถ๊ฐ ์๊ฑด":""
}
current_key = None
# Split the result text into sections based on newlines
lines = result_text.strip().split('\n')
for line in lines:
# Check if the line contains a colon, indicating a key-value pair
if ':' in line:
# Split the line into key and value
key, value = map(str.strip, line.split(':', 1))
current_key = key
if key in result_dict:
result_dict[key] = value
elif current_key:
# If there is a current key, append the line to its value
result_dict[current_key] += ' ' + line
return result_dict
if __name__ == "__main__":
mts()
| [
"You are the best summarizer for meetings. Summarize the entire content of the meeting efficiently. All responses should be in Korean.",
"ํ์ ์ ์ฒด ๋ด์ฉ ํ
์คํธํ์ผ์ด์ผ. ํ์ ๋ด์ฉ์ ์์ฝํด์ค. ํ์ ์ ๋ชฉ, ์ฃผ์ ์ด์ ๋ฐ ์งํ์ํฉ, ์๋ก์ด ์ํฉ ๋ฐ ๊ณต์ง์ฌํญ, ์ถ๊ฐ ์๊ฑด ๋ฑ์ด ๋ฌด์กฐ๊ฑด ํฌํจ๋ ํ์๋ก ์์ฑํด์ค . PLACEHOLDER"
] |
2024-01-10 | New-KT/New-KT_AI | stt_key_crawl~news_summary.py | import os
import openai
import pandas as pd
import json
from dotenv import load_dotenv
# Load environment variables from the file
load_dotenv()
def read_concatenate_news(file_path):
news = pd.read_csv(file_path, delimiter='\t', header=None, names=['text'])
concatenated_text = news['text'].str.cat(sep=' ')
return concatenated_text
def summarize_news(file_path):
query = read_concatenate_news(file_path)
GPT_MODEL = "gpt-3.5-turbo"
messages = [
{"role": "system", "content": "You're the best summarizer. You have to show the right summary of the news. ๋ชจ๋ ๋๋ต์ ํ๊ธ๋ก."},
{"role": "user", "content": f"๋ด์ค์ ๋ํ ๊ฒฐ๊ณผ์ผ. ์์ฝ์ค๋ช
ํด {query}"}
]
response = openai.ChatCompletion.create(
model=GPT_MODEL,
messages=messages,
temperature=0
)
response_message = response.choices[0].message.content
return response_message
def save_to_json(result, srcText, node, output_file=None):
if output_file is None:
output_file = f'{srcText}_{node}_summary_result.json'
with open(output_file, 'w', encoding='utf-8') as json_file:
json.dump({'summary': result}, json_file, ensure_ascii=False, indent=4)
print(f"๊ฒฐ๊ณผ๊ฐ {output_file}์ ์ ์ฅ๋์์ต๋๋ค.")
# def save_to_json(result, output_file='summary_result.json'):
# with open(output_file, 'w', encoding='utf-8') as json_file:
# json.dump({'summary': result}, json_file, ensure_ascii=False, indent=4)
# print(f"๊ฒฐ๊ณผ๊ฐ {output_file}์ ์ ์ฅ๋์์ต๋๋ค.")
def main():
file_path = '์ค๋๋ ์จ_naver_news_texts.txt'
result = summarize_news(file_path)
save_to_json(result)
if __name__ == '__main__':
openai.api_key = os.environ.get("OPENAI_API_KEY")
main()
| [
"๋ด์ค์ ๋ํ ๊ฒฐ๊ณผ์ผ. ์์ฝ์ค๋ช
ํด PLACEHOLDER",
"You're the best summarizer. You have to show the right summary of the news. ๋ชจ๋ ๋๋ต์ ํ๊ธ๋ก."
] |
2024-01-10 | chriscarrollsmith/pdftoprompt | pdftoprompt~compressor.py | import os
import pypdf
import pytesseract
from pdf2image import convert_from_path
import openai
from dotenv import load_dotenv
from typing import Optional
from tempfile import NamedTemporaryFile
import requests
from urllib.parse import urlparse
import re
def is_url(path):
try:
result = urlparse(path)
return all([result.scheme, result.netloc])
except ValueError:
return False
def download_file(url):
with requests.get(url, stream=True) as r:
r.raise_for_status()
with NamedTemporaryFile(delete=False) as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
return f.name
def set_openai_api_key(api_key: Optional[str] = None) -> None:
if api_key is None:
load_dotenv()
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
raise ValueError(
"User must supply an api_key argument or set an "
"OPENAI_API_KEY in the .env file for the current "
"environment"
)
elif not isinstance(api_key, str):
raise TypeError("api_key must be a string")
os.environ['OPENAI_API_KEY'] = api_key
# Check if the operation was successful
if not os.environ.get('OPENAI_API_KEY'):
raise ValueError("Failed to set OPENAI_API_KEY environment variable")
def extract_text_from_pdf(file_path, use_ocr=False):
if use_ocr:
return extract_text_with_ocr(file_path)
else:
return extract_text_without_ocr(file_path)
def extract_text_without_ocr(file_path):
with open(file_path, "rb") as pdf_file:
pdf_reader = pypdf.PdfReader(pdf_file)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
return text
def extract_text_with_ocr(file_path):
images = convert_from_path(file_path)
text = ""
for img in images:
text += pytesseract.image_to_string(img)
return text
def calculate_compression_factor(text):
tokens = len(text) // 4
factor = tokens / 3500
return factor
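# Note (added): the factor above is a heuristic that assumes roughly 4 characters per
# token and targets a prompt budget of about 3500 tokens.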
def chunk_text(text, max_tokens=3500):
text = text.replace('\n', ' ')
chunk_length = max_tokens * 4
sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
chunks = []
current_chunk = ""
for sentence in sentences:
if len(current_chunk) + len(sentence) + 1 <= chunk_length:
current_chunk += sentence
if not current_chunk.endswith(('.', '?', '!')):
current_chunk += ' '
else:
if current_chunk:
chunks.append(current_chunk.strip())
if len(sentence) > chunk_length:
start = 0
while start < len(sentence):
end = min(start + chunk_length, len(sentence))
chunks.append(sentence[start:end].strip())
start = end
else:
current_chunk = sentence + ' '
if current_chunk:
chunks.append(current_chunk.strip())
return chunks
def compress_with_gpt4(chunk_list, factor):
openai.api_key = os.getenv('OPENAI_API_KEY')
compressed_text = ""
for chunk in chunk_list:
message = (
f"compress the following text by a factor of {factor} in a"
"way that is lossless but results in the minimum number of"
"tokens which could be fed into an LLM like yourself as-is"
"and produce the same output. feel free to use multiple"
"languages, symbols, other up-front priming to lay down"
"rules. this is entirely for yourself to recover and"
"proceed from with the same conceptual priming, not for"
"humans to decompress: "
) + chunk
prompt = [{"role": "user", "content": message}]
response = openai.ChatCompletion.create(
model="gpt-4",
max_tokens=2048,
temperature=0.7,
messages=prompt)
compressed_chunk = response.choices[0].message['content']
compressed_text += compressed_chunk
return compressed_text
def compress_pdf(file_path, use_ocr=False):
if is_url(file_path):
try:
temp_file_path = download_file(file_path)
text = extract_text_from_pdf(temp_file_path, use_ocr)
os.unlink(temp_file_path)
except Exception as e:
if os.path.exists(temp_file_path):
os.unlink(temp_file_path)
raise e
else:
text = extract_text_from_pdf(file_path, use_ocr)
factor = calculate_compression_factor(text)
chunk_list = chunk_text(text)
compressed_text = compress_with_gpt4(chunk_list, factor)
return compressed_text
def main():
file_path = input("Enter PDF file path: ")
use_ocr = input("Use OCR? (y/n): ").lower() == "y"
compressed_text = compressor.compress_pdf(file_path, use_ocr)
print("\nCompressed Text:")
print(compressed_text)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | vempatisaivishal/GlobalTruthHub | titlecontext.py | from word2bert import Word2Bert
import openai
API_KEY = "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"
openai.api_key = API_KEY
class titleContext:
def __init__(self, headline, context):
self.headline = headline
self.context = context
self.w2vSim = 0
self.bertSim = 0
def check_similarity(self):
self.w2vSim, self.bertSim = Word2Bert(self.headline, self.context).run()
return self.w2vSim, self.bertSim
def check_similarity2(self):
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"i have two sentences \nsentence1 = {self.headline} \nsentence2 = {self.context} \n dont consider additional information, is the second statement true based on first statement? yes or no, why",
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
response_text = response.to_dict()['choices'][0]['text'].replace("\n", "")
print(response_text)
final_response = response_text[:4]
if final_response.lower().find('yes') != -1:
return "YES"
else:
return "NO"
def run(self):
# print(self.check_similarity())
print(self.check_similarity2())
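# Illustrative usage sketch (added; the headline and claim strings are made up):
# titleContext("Company reports record profits", "The company lost money this year").run()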
| [] |