date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | Matthewsecond/WebScraping | Controller~Process_Import_Data~OtherFunctions~Used~Geolocation~GeoProcessing.py | import os
from geopy.geocoders import Nominatim
import pandas as pd
import re
import time
from unidecode import unidecode
import openai
import sqlalchemy
openai.api_key = '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'
app = Nominatim(user_agent="tutorial")
engine = sqlalchemy.create_engine(
'mysql+pymysql://admin:N6zmVKVW@jobs-intelligence-slovakia.'
'cluster-c0rbbiliflyo.eu-central-1.rds.amazonaws.com:9906/General_Intelligence')
conn = engine.connect()
def remove_numbers(string):
if type(string) == str:
return re.sub(r'\d+', '', string)
def get_kraj(address):
if address is not None:
try:
location = get_location_by_address(fr'Slovensko, {address}')['display_name']
return extract_kraj(location)
except Exception as e:
return None
else:
return None
def delete_space(string):
# remove ", " separators from a string
if type(string) == str:
return string.replace(", ", "")
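# "kraj" is the Slovak word for a region; the helper below pulls e.g. "Bratislavsky kraj" out of a geocoded display name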
def extract_kraj(s):
match = re.search(r'(\w+\skraj)', s)
if match:
return match.group(1)
else:
return None
def get_location_by_address(address):
"""This function returns a location as raw from an address
will repeat until success"""
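# Nominatim's public usage policy allows roughly one request per second, hence the pause before each lookup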
time.sleep(1)
try:
return app.geocode(address).raw
except:
return {'lat': None, 'lon': None}
#get longitude
def get_longitude(address):
"""This function returns the longitude from an address"""
location = get_location_by_address(address)
if location['lon'] is None:
return None
return location['lon']
#get latitude
def get_latitude(address):
"""This function returns the latitude from an address"""
location = get_location_by_address(address)
if location['lat'] is None:
return None
return location['lat']
#function that returns the first listed substring found inside the given string, otherwise the original string
def find_and_replace(string, substrings):
if type(string) == str:
try:
string = unidecode(string)
except Exception as e:
pass
for substring in substrings:
if string.find(substring) != -1:
return substring
elif string == 'NaN':
return 'NaN'
return string
def remove_string_from_list(string, string_list):
# return None if any item from string_list occurs in the string, otherwise return the string unchanged
string = unidecode(string)
for item in string_list:
if unidecode(item) in string:
return None
return string
def get_companies_slovakia():
try:
query = sqlalchemy.text('SELECT * FROM `Companies_Slovakia_Processed`')
# Read data from the query
dataframe_companies = pd.read_sql_query(query, conn)
return dataframe_companies
except Exception as e:
print(e)
def get_sk_WebCrawlResults():
try:
query = sqlalchemy.text('SELECT * FROM `SK_WebCrawlResults`')
# Read data from the query
dataframe = pd.read_sql_query(query, conn)
return dataframe
except Exception as e:
print(e)
def get_Cities_Processed():
try:
query = sqlalchemy.text('SELECT * FROM `Cities_Processed`')
# Read data from the query
dataframe = pd.read_sql_query(query, conn)
return dataframe
except Exception as e:
print(e)
#function to drop table
def drop_table(table_name):
try:
query = sqlalchemy.text(f'DROP TABLE {table_name}')
conn.execute(query)
except Exception as e:
print(e)
def split_word_by_comma(word):
if word is not None and 'Praca vyzaduje cestovanie' in word:
return 'Traveling job'
elif word is not None:
# split on commas and hyphens and strip surrounding whitespace
return [x.strip() for x in re.split(r'[,-]', word)]
def get_zipcode(df, geolocator, lat_field, lon_field):
#df.apply(get_zipcode, axis=1, geolocator=geolocator, lat_field='Lat', lon_field='Lon')
location = geolocator.reverse((df[lat_field], df[lon_field]))
return location.raw['address']['postcode']
def get_pkl_with_latest_date():
#read all pickle files in directory
import glob
files = glob.glob(r'C:\Users\labus.INTERCONNECTION\Desktop\WebScrapingProject\Data\JobsIntelligence\Slovak\Unprocessed\*.pkl')
#get the most recently created file
latest_date = max(files, key=os.path.getctime)
#return its file path
return latest_date
def clean_locations(database_webcrawl_results, specific_location):
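# normalize the free-text location column and map it onto a known city/municipality/region from Cities_Processed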
# read database
database_cities = get_Cities_Processed()
# place = 'Bratislava II, Bratislava, Slovakia (Job with occasional home office)'
# words = split_word_by_comma(place)
cities_list = database_cities[specific_location].to_list()
# cities_list = cities_list[:10]
# convert all characters in cities_list to ascii
cities_list = [unidecode(x) for x in cities_list]
# remove '-' from database_webcrawl_results['location'] only if it is not None
database_webcrawl_results['location'] = database_webcrawl_results['location'].apply(
lambda x: x.replace('-', '') if x is not None else x)
database_webcrawl_results['location'] = database_webcrawl_results['location'].apply(
lambda x: x.replace(' ,', ',') if x is not None else x)
# loop through webcrawl results
for index, row, in database_webcrawl_results.iterrows():
# delete numbers from row['location']
database_webcrawl_results.loc[index, 'location'] = remove_numbers(row['location'])
# remove space from row['location']
# database_webcrawl_results.loc[index, 'location'] = delete_space(row['location'])
words = split_word_by_comma(row['location'])
# loop through words and if one of the words is in database_cities['City'] then set the location to that city
if words is not None:
for word in words:
if word in cities_list:
# database_webcrawl_results.loc[index, 'location'] = word
database_webcrawl_results.loc[index, specific_location] = word
break
return database_webcrawl_results
#fill missing locations
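# fill City/Municipality/Region by joining against the processed cities table, then mark whatever is still missing as "unspecified"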
def fill_locations(database_webcrawl_results):
# read database
database_cities = get_Cities_Processed()
# if column Region is not in database_webcrawl_results then create it and fill it with None
if 'Region' not in database_webcrawl_results.columns:
database_webcrawl_results['Region'] = None
# merge on Municipality only if City is None
mask1 = database_webcrawl_results['City'].isnull()
merged_df1 = pd.merge(database_webcrawl_results[mask1], database_cities, on='Municipality', how='left')
# delete columns City_x and Region_x
merged_df1.drop(['City_x', 'Region_x'], axis=1, inplace=True)
# rename columns City_y and Region_y to City and Region
merged_df1.rename(columns={'City_y': 'City', 'Region_y': 'Region'}, inplace=True)
#drop rows where Municipality is None
merged_df1 = merged_df1[merged_df1['Municipality'].notna()]
mask = database_webcrawl_results['Municipality'].isnull()
merged_df2 = pd.merge(database_webcrawl_results[mask], database_cities, on='City', how='left')
#delete columns Municipality_x and Region_x
merged_df2.drop(['Municipality_x', 'Region_x'], axis=1, inplace=True)
#delete rows where City is None
#merged_df2 = merged_df2[merged_df2['City'].notna()]
#rename columns Municipality_y and Region_y to Municipality and Region
merged_df2.rename(columns={'Municipality_y': 'Municipality', 'Region_y': 'Region'}, inplace=True)
#if region in database_webcrawl_results is filled but Municipality is None and City is None then fill Municipality and City with string unspecified
database_webcrawl_results.loc[(database_webcrawl_results['Region'].notna()) & (database_webcrawl_results['Municipality'].isnull()) & (database_webcrawl_results['City'].isnull()), ['Municipality', 'City']] = 'unspecified'
#keep only rows where Municipality and City are unspecified
database_webcrawl_results = database_webcrawl_results[(database_webcrawl_results['Municipality'] == 'unspecified') & (database_webcrawl_results['City'] == 'unspecified')]
#concatenate merged_df1 and merged_df2 with database_webcrawl_results
database_webcrawl_results_merged = pd.concat([merged_df1, merged_df2, database_webcrawl_results], ignore_index=True)
# fill missing values in latitude, longitude and Zipcode with unspecified
database_webcrawl_results_merged['latitude'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['longitude'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['ZIP_CODE'].fillna('unspecified', inplace=True)
#database_webcrawl_results_merged['work_type'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['Municipality'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['City'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['Region'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['salary'].fillna('unspecified', inplace=True)
return database_webcrawl_results_merged
def fill_locations_companies():
# read database
database_companies = get_companies_slovakia()
database_cities = get_Cities_Processed()
# merge on City
merged_df = pd.merge(database_companies, database_cities, on='City', how='left')
# drop table
drop_table('Companies_Slovakia_Processed')
# import to mysql Companies_Slovakia_Processed table
merged_df.to_sql(name='Companies_Slovakia_Processed', con=engine, if_exists='append', index=False)
def location_cleaning(dataframe_location):
for location in ['City', 'Municipality', 'Region']:
dataframe_location = clean_locations(dataframe_location,location)
return dataframe_location
def process_locations(dataframe):
dataframe = location_cleaning(dataframe)
return fill_locations(dataframe)
#main
if __name__ == '__main__':
# process_locations expects a dataframe; the scraped SK_WebCrawlResults table appears to be the intended input
process_locations(get_sk_WebCrawlResults()) | [] |
2024-01-10 | JerryWesler/xiaowenz-daily | daily.py | import argparse
import os
import random
from openai import OpenAI
import pendulum
import requests
from dotenv import load_dotenv
from BingImageCreator import ImageGen
from quota import make_quota
from todoist import make_todoist
load_dotenv()
# required settings. config in github secrets
# -------------
# OpenAI: https://platform.openai.com/account/usage
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
# Telegram Bot Token
TG_BOT_TOKEN = os.environ['TG_BOT_TOKEN']
# Telegram Chat ID to want to send the message to
TG_CHAT_ID = os.environ['TG_CHAT_ID']
# Get Weather Information: https://github.com/baichengzhou/weather.api/blob/master/src/main/resources/citycode-2019-08-23.json to find the city code
# Shanghai 101020100
# Hangzhou 101210101 by default
WEATHER_CITY_CODE = os.environ.get('WEATHER_CITY_CODE', '101210101')
# -------------
# Optional Settings. config in github secrets.
# -------------
# Daily famous quote - TIAN_API_KEY: https://www.tianapi.com/console/
TIAN_API_KEY = os.environ.get('TIAN_API_KEY', '')
# Bing Cookie if image to be generated from Dalle3. Leave empty to use OpenAI by default
BING_COOKIE = os.environ.get('BING_COOKIE', '')
# Daily to-do items from todoist
TODOIST_API = os.environ.get('TODOIST_API', '')
# -------------
# Message list
MESSAGES = ['又到了新的一天了!']
# get today's weather
# the city code is passed in as a parameter; see the city-code list linked below to change it
def make_weather(city_code):
print(f'Start making weather...')
WEATHER_API = f'http://t.weather.sojson.com/api/weather/city/{city_code}'
# https://github.com/baichengzhou/weather.api/blob/master/src/main/resources/citycode-2019-08-23.json to find the city code
DEFAULT_WEATHER = "未查询到天气,好可惜啊"
WEATHER_TEMPLATE = "今天是{date} {week},{city}的天气是{type},{high},{low},空气质量指数{aqi}"
try:
r = requests.get(WEATHER_API)
if r.ok:
weather = WEATHER_TEMPLATE.format(
date=r.json().get("data").get("forecast")[0].get("ymd"), week=r.json().get("data").get("forecast")[0].get("week"),
city=r.json().get("cityInfo").get("city"),
type=r.json().get("data").get("forecast")[0].get("type"), high=r.json().get("data").get("forecast")[0].get("high"),
low=r.json().get("data").get("forecast")[0].get("low"), aqi=r.json().get("data").get("forecast")[0].get("aqi")
)
return weather
return DEFAULT_WEATHER
except Exception as e:
print(type(e), e)
return DEFAULT_WEATHER
# get random poem
# return sentence(used for make pic) and poem(sentence with author and origin)
def get_poem():
SENTENCE_API = "https://v1.jinrishici.com/all"
DEFAULT_SENTENCE = "落日净残阳 雾水拈薄浪 "
DEFAULT_POEM = "落日净残阳,雾水拈薄浪。 —— Xiaowen.Z / 卜算子"
POEM_TEMPLATE = "{sentence} —— {author} / {origin}"
try:
r = requests.get(SENTENCE_API)
if r.ok:
sentence = r.json().get("content")
poem = POEM_TEMPLATE.format(
sentence=sentence, author=r.json().get("author"), origin=r.json().get("origin")
)
return sentence, poem
return DEFAULT_SENTENCE, DEFAULT_POEM
except Exception as e:
print(type(e), e)
return DEFAULT_SENTENCE, DEFAULT_POEM
# create pic
# return url; the image is not saved to the local environment
def make_pic_from_openai(sentence):
"""
return the link of the generated image
"""
# openai.api_key = OPENAI_API_KEY
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
api_key=OPENAI_API_KEY,
)
print(f'calling open ai for image creation...')
response = client.images.generate(
prompt=sentence, n=1, size="1024x1024", model="dall-e-3", style="vivid")
image_url = response.data[0].url
print(f'image_url:{image_url}')
print(f'image_revised_prompt: {response.data[0].revised_prompt}')
print(f'full response: {response}')
# s = requests.session()
# index = 0
# while os.path.exists(os.path.join(new_path, f"{index}.jpeg")):
# index += 1
# with s.get(image_url, stream=True) as response:
# # save response to file
# response.raise_for_status()
# with open(os.path.join(new_path, f"{index}.jpeg"), "wb") as output_file:
# for chunk in response.iter_content(chunk_size=8192):
# output_file.write(chunk)
return image_url, "Image Powered by OpenAI DALL.E-3"
# create pic from bing image generator
# once Dalle3 api is available, this might be retired.
def make_pic_from_bing(sentence, bing_cookie):
# for bing image when dall-e3 open drop this function
i = ImageGen(bing_cookie)
images = i.get_images(sentence)
return images, "Image Powered by Bing DALL.E-3"
# try Dalle-3 from Bing first, then OpenAI Image API
def make_pic(sentence):
if BING_COOKIE is not None and BING_COOKIE != '':
try:
images, image_comment = make_pic_from_bing(sentence, BING_COOKIE)
return images[0], image_comment
except Exception as e:
print(f'Image generated from Bing failed: {type(e)}')
print(type(e), e)
else:
print('Bing Cookie is not set. Use OpenAI to generate Image')
image_url, image_comment = make_pic_from_openai(sentence)
return image_url, image_comment
def make_poem():
print(f'Start making poem...')
sentence, poem = get_poem()
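# strip Chinese and Western punctuation so the verse can be used as a plain-text image prompt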
sentence_processed = sentence.replace(
",", " ").replace("。", " ").replace(".", " ")
print(f'Processed Sentence: {sentence_processed}')
image_url, image_comment = make_pic(sentence_processed)
poem_message = f'今日诗词和配图:{poem}\r\n\r\n{image_comment}'
return image_url, poem_message
# send message to telegram
# send image with caption if the image arg is not None
def send_tg_message(tg_bot_token, tg_chat_id, message, image=None):
print(f'Sending to Chat {tg_chat_id}')
if image is None:
try:
request_url = "https://api.telegram.org/bot{tg_bot_token}/sendMessage".format(
tg_bot_token=tg_bot_token)
request_data = {'chat_id': tg_chat_id, 'text': message}
response = requests.post(request_url, data=request_data)
return response.json()
except Exception as e:
print("Failed sending message to Telegram Bot.")
print(type(e), e)
return ""
else:
try:
photo_url = image
request_url = "https://api.telegram.org/bot{tg_bot_token}/sendPhoto".format(
tg_bot_token=tg_bot_token)
request_data = {'chat_id': tg_chat_id,
'photo': photo_url, 'caption': message}
response = requests.post(request_url, data=request_data)
return response.json()
except Exception as e:
print("Failed sending message to Telegram Bot with image.")
print(type(e), e)
return ""
# generate content from list of messages
def make_message(messages):
message = "\r\n---\r\n".join(messages)
return message
# generate content
# send to tg
def main():
print("Main started...")
# default process the poem, image and weather.
MESSAGES.append(make_weather(WEATHER_CITY_CODE))
image_url, poem_message = make_poem()
MESSAGES.append(poem_message)
# --------
# Optional process - Daily Quota
if TIAN_API_KEY is not None and TIAN_API_KEY != '':
MESSAGES.append(make_quota(TIAN_API_KEY))
# --------
# --------
# Optional process - daily to-do items from todoist
if TODOIST_API is not None and TODOIST_API != '':
MESSAGES.append(make_todoist(TODOIST_API))
# --------
# Build full content and send to TG
full_message = make_message(MESSAGES)
print("Message constructed...")
print()
print("Sending to Telegram...")
r_json = send_tg_message(tg_bot_token=TG_BOT_TOKEN,
tg_chat_id=TG_CHAT_ID, message=full_message, image=image_url)
print(r_json)
if __name__ == "__main__":
main()
| [
"{sentence} —— {author} / {origin}",
"今天是{date} {week},{city}的天气是{type},{high},{low},空气质量指数{aqi}"
] |
2024-01-10 | robocup-eic/smach | smach_task_2023~task_gpsr_manual.py | # Flow of the GPSR task (General Purpose Service Robot)
"""
Main Goal: 3 commands requested by the operator + 1 by a non-expert
Focus:
- Task Planning
- Object/People Detection
- Object Feature Recognition
- Object Manipulation
- 5 mins
0. Feature used
- ChatGPT
1. Robot moves from outside of Arena to the center of the arena(Instruction Point)
2. User gives the command (Wake Word) i.e.("Bring me a coke from the kitchen", "I want a coke, go to the kitchen and get me one")
3. Robot says "I am going to bring you a coke from the kitchen" (+100)
4. Robot goes to the kitchen and finds a coke (+400)
- If it fails, a custom message can be said (-50)
Examples commands from Rulebook:
Bring me a coke from the kitchen
I want a coke, go to the kitchen and get me one
"""
# run with conda env: nlp
import roslib
import rospy
import smach
import sys
import smach_ros
import nlp_client
from ratfin import *
import os
import openai
from person import Person
from utils import (WakeWord, Speak, GetIntent, GetName, GetObject, GetLocation,)
# Add the path to main repo folder to the environment
sys.path.append('/home/walkie/Robocup-2023-NLP')
from config import gpt3_5_turbo_key
# Task specific state
class ChatGPTQuery(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['out1', 'out0'],
input_keys=['prompt'],
output_keys=['chatgpt_response'])
self.messages = [{
"role": "system",
"content" : """You’re a kind helpful assistant robot,
respond back to me what my commands were but rephrase it like a assistant would
by accepting my request. don't ask a question back just do as I says. For example,
if I ask you to retrieve a coke. You should respond with something like "Certainly, grabing you a coke now" but make the sentence dynamic dont actually use the word certainly its too formal.
This is a role-play"""}]
openai.api_key = gpt3_5_turbo_key
def execute(self, userdata):
# Log the execution stage
rospy.loginfo(f'(ChatGPTQuery): Executing..')
# Copy the assistant mode prompt
messages = self.messages.copy()
prompt = userdata.prompt
# Log the execution stage
rospy.loginfo(f'(ChatGPTQuery): Prompt: {prompt}')
# Append to messages
messages.append({"role": "user", "content": prompt})
# packaged the messages and post request
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# extract the content of the post request
chat_response = completion.choices[0].message.content
# Log the execution stage
rospy.loginfo(f'(ChatGPTQuery): ChatGPT response: {chat_response}')
# # Speak the response from ChatGPT
nlp_client.speak(chat_response)
# Save the response to userdata
userdata.chatgpt_response = chat_response
return 'out1'
def main():
speak_debug = False
response_debug = False
rospy.init_node('smach_task_gpsr')
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=['out0'])
# Declare variables for the top-level state
sm.userdata.intent = ""
sm.userdata.stt_text = ""
sm.userdata.chatgpt_response = ""
with sm:
# Navigation from the outside of the arena to the center of the arena
# Engage Idle state with wake word
smach.StateMachine.add('IDLE_TASK_TRIGGER',
WakeWord(),
transitions={'out1': 'GET_INTENT',}
)
# Get the intent/task of the user
smach.StateMachine.add('GET_INTENT',
GetIntent(speak_debug=speak_debug,
response_debug=response_debug,
timeout=2),
transitions={'out1': 'GET_CHATGPT_QUERY',
'out0': 'out0'},
remapping={'listen_intent': 'intent',
'listen_text': 'stt_text'})
# use ChatGPT
smach.StateMachine.add('GET_CHATGPT_QUERY',
ChatGPTQuery(),
transitions={'out1': 'out0',
'out0': 'out0'},
remapping={'prompt': 'stt_text',
'chatgpt_response': 'chatgpt_response'})
# Execute SMACH plan
outcome = sm.execute()
if __name__ == '__main__':
main()
| [
"You’re a kind helpful assistant robot, \n respond back to me what my commands were but rephrase it like a assistant would \n by accepting my request. don't ask a question back just do as I says. For example, \n if I ask you to retrieve a coke. You should respond with something like \"Certainly, grabing you a coke now\" but make the sentence dynamic dont actually use the word certainly its too formal. \n This is a role-play"
] |
2024-01-10 | bxxd/langwave | workshop~history.py | import asyncio
import logging
import langwave
from langwave.memory import VolatileChatMemory, MixedChatMemory
from langwave.chains.wave import ChatWave
from langchain.memory import ConversationBufferMemory
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
MessagesPlaceholder,
ChatMessagePromptTemplate,
)
from langchain import OpenAI, LLMChain
log = logging.getLogger(__name__)
async def test_history():
log.info("test_history")
# memory = VolatileChatMemory()
llm = OpenAI(verbose=True, temperature=0.2)
wave = ChatWave.from_llm(llm)
log.info(f"wave: {wave} memory: {wave.memory}")
resp = await wave.acall("hi there!")
log.info(f"resp: {resp}")
async def test_history2():
log.info("test_history2")
memory = MixedChatMemory(memory_key="chat_history", return_messages=True)
chat_history = MessagesPlaceholder(variable_name="chat_history")
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(
"The following is a friendly conversation between a human and an AI. The AI is talkative and "
"provides lots of specific details from its context. If the AI does not know the answer to a "
"question, it truthfully says it does not know."
),
chat_history,
HumanMessagePromptTemplate.from_template("{input}"),
]
)
llm = OpenAI(verbose=True, temperature=0.2)
llm_chain = LLMChain(llm=llm, prompt=prompt, memory=memory, verbose=True)
memory.chat_memory.add_user_message("Hi there!")
memory.chat_memory.add_ai_message("How are you?")
resp = await llm_chain.arun(input="who am i talking to?")
log.info(f"resp: {resp}")
log.info(f"memory: {llm_chain.memory}")
async def main():
log.info("hi there!")
await test_history()
if __name__ == "__main__":
asyncio.run(main())
| [
"The following is a friendly conversation between a human and an AI. The AI is talkative and ",
"{input}",
"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"provides lots of specific details from its context. If the AI does not know the answer to a ",
"question, it truthfully says it does not know."
] |
2024-01-10 | bxxd/langwave | workshop~first.py | import logging
from langwave.memory.volatile import VolatileChatMemory
import asyncio
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.prompts.chat import ChatPromptTemplate
from langwave.chains.wave import ChatWave
from langchain.chains import LLMChain
log = logging.getLogger(__name__)
def test_agent():
chat = ChatOpenAI()
resp = chat.predict("hi there")
log.info(f"chat({chat.model_name}): {resp}")
async def test_wave(args):
log.info(f"test_wave")
chat = ChatOpenAI(
streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0
)
memory = VolatileChatMemory()
prompt = ChatPromptTemplate()
llm = ChatOpenAI(temperature=0.2, verbose=False)
wave = ChatWave.from_llm(llm)
# llm = OpenAI(temperature=0)
# conversation = ConversationChain(
# llm=llm,
# verbose=True,
# memory=ChatMessageHistory()
# )
# resp = conversation.predict(input="Hi there!")
# log.info(f'conversation: {resp}')
# log.info(f'memory: {conversation.memory}')
async def test_conversation():
chat = ChatOpenAI(
streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0
)
history = ChatMessageHistory()
history.add_user_message("hi")
history.add_user_message("i am brian")
history.add_user_message("i am a human")
history.add_user_message("i love you")
history.add_user_message("what is my name?")
prompt = ChatPromptTemplate.from_template("{input}")
chain = LLMChain(llm=chat, prompt=prompt)
resp = await chat.apredict_messages(history.messages)
print("\n")
history.add_message(resp)
history.add_user_message("i just told you my name?")
resp = await chat.apredict_messages(history.messages)
log.info(f"resp: {resp}")
log.info(f"history messages: {history.messages}")
def test_memory():
memory = VolatileChatMemory()
log.info(f"memory: {memory}")
async def main(args):
log.info("Hello there!")
# test_memory()
await test_conversation()
# await test_wave(args)
import argparse
def parse_args():
parser = argparse.ArgumentParser()
return parser.parse_args()
if __name__ == "__main__":
asyncio.run(main(parse_args()))
| [
"{input}"
] |
2024-01-10 | bxxd/langwave | langwave~memory~mixed.py | from typing import Any, Dict, List
from types import SimpleNamespace
from langchain.schema import BaseChatMessageHistory, BaseMemory
from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage
from pydantic import BaseModel
from langchain.schema import (
BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage
from langwave.memory import VolatileChatMemory, FewshotChatMemory
class MixedChatMemory(BaseChatMessageHistory, BaseModel):
"""Holder for multiple types of memory with a volatile memory for things that are dynamically added.
Fewshot does not change, but volatile does."""
""" use memories to hold other memory types, assuming they are all chat history"""
fewshot_memory: FewshotChatMemory = FewshotChatMemory()
_volatile_memory: VolatileChatMemory = VolatileChatMemory()
@property
def messages(self) -> List[BaseMessage]:
# assumes FewshotChatMemory exposes .messages like any BaseChatMessageHistory
return self.fewshot_memory.messages + self._volatile_memory.messages
@messages.setter
def messages(self, value: List[BaseMessage]):
if not all(isinstance(m, BaseMessage) for m in value):
raise ValueError("All elements must be instances of BaseMessage")
self._volatile_memory.messages = value
def add_user_message(self, message: str) -> None:
self._volatile_memory.add_user_message(message)
def add_ai_message(self, message: str) -> None:
self._volatile_memory.add_ai_message(message)
def add_message(self, message: BaseMessage) -> None:
return self._volatile_memory.add_message(message)
def clear(self) -> None:
self._volatile_memory.clear()
| [] |
2024-01-10 | bxxd/langwave | workshop~console.py | import logging
import asyncio
from termcolor import colored
from langchain.chat_models import ChatOpenAI
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langwave.memory import VolatileChatMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.chains import LLMChain
log = logging.getLogger(__name__)
async def streaming_console(args):
chat = ChatOpenAI(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
temperature=0,
verbose=args.debug,
)
history = VolatileChatMemory()
user_input = args.initial
while True:
if user_input:
history.add_user_message(user_input)
resp = await chat.apredict_messages(history.messages)
print("\n")
# log.info(f"AI: {resp} and type is {type(resp)}")
history.add_message(resp)
user_input = input(colored(">>>: ", "green"))
if user_input == "exit":
break
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
PromptTemplate,
)
from langchain.chains.conversational_retrieval.prompts import (
CONDENSE_QUESTION_PROMPT,
QA_PROMPT,
)
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import ConversationChain
async def streaming_chain_console(args):
chat = ChatOpenAI(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
temperature=0,
verbose=args.debug,
)
history = VolatileChatMemory()
user_input = args.initial
# human_message_prompt = HumanMessagePromptTemplate(
# prompt=PromptTemplate(
# template="{input}",
# input_variables=["input"],
# )
# )
# chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
# chain = LLMChain(llm=chat, prompt=chat_prompt_template, verbose=True)
chain = ConversationChain(llm=chat, verbose=True)
# chain = ConversationalRetrievalChain.from_llm(llm=chat, verbose=True)
while True:
if user_input:
history.add_user_message(user_input)
# resp = await chat.apredict_messages(history.messages)
resp = await chain.arun(user_input)
print("\n")
log.info(f"AI: {resp} and type is {type(resp)}")
history.add_message(resp)
user_input = input(colored(">>>: ", "green"))
if user_input == "exit":
break
async def main(args):
log.info("Hello there!")
# test_memory()
await streaming_chain_console(args)
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug", "-d", action="store_true", help="Enable debug logging"
)
parser.add_argument(
"--initial",
"-i",
type=str,
default="",
help="Initial message to send to the chatbot",
)
return parser.parse_args()
if __name__ == "__main__":
asyncio.run(main(parse_args()))
| [] |
2024-01-10 | SSAFY507/Worldy | BE~crawling~news_quiz.py | import openai
import re
import config
# set the issued OpenAI API key
OPENAI_API_KEY = config.OPEN_AI_API_KEY
# authenticate the OpenAI client with the API key
openai.api_key = OPENAI_API_KEY
# model - GPT 3.5 Turbo / fixed summarization question
model = "gpt-3.5-turbo"
query = "아래 내용을 한글로 요약해줘 \n"
# quiz question prompt
STATIC_QUESTION = "아래는 Chat GPT를 통해 뉴스 기사를 요약한 내용입니다. 해당 __에 들어갈 말은 무엇일까요? \n\n"
# chosung (initial consonant) list used for quiz hints
CHOSUNG_LIST = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
# jungsung (vowel) list, indices 00-20
JUNGSUNG_LIST = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']
# jongsung (final consonant) list, indices 00-27 plus one empty slot
JONGSUNG_LIST = [' ', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
BASE_CODE, CHOSUNG, JUNGSUNG = 44032, 588, 28
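# A Hangul syllable is encoded as BASE_CODE + chosung*588 + jungsung*28 + jongsung (588 = 21 vowels x 28 finals),
# so the leading consonant index is (ord(ch) - BASE_CODE) // 588.
# Example: ord('한') = 54620, (54620 - 44032) // 588 = 18, and CHOSUNG_LIST[18] == 'ㅎ'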
# build the quiz
def make_quiz_and_answer(origin):
answer = origin[1]
temp = "_" * len(answer)
quiz = origin[0].replace(answer, temp)
hint_list = list()
for a in answer:
if re.match('.*[ㄱ-ㅎㅏ-ㅣ가-힣]+.*', a) is not None:
char_code = ord(a) - BASE_CODE
char1 = int(char_code / CHOSUNG)
hint_list.append(CHOSUNG_LIST[char1])
hint = ""
for h in hint_list:
hint = hint+str(h)
print(hint)
return {"quiz" : STATIC_QUESTION + quiz, "answer" : answer, "hint" : hint}
# ChatGPT API
def chatgpt_quiz(text):
# 1. ask ChatGPT to summarize the article
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": query+text}
]
response = openai.ChatCompletion.create(
model=model,
messages=messages
)
summary = response['choices'][0]['message']['content']
# 2. ask ChatGPT for the most important word in the summary
messages.append(
{"role": "assistant", "content": summary}
)
messages.append(
{"role": "user", "content": "위 내용에서 가장 중요한 단어를 1개 찾아줘"}
)
response = openai.ChatCompletion.create(
model=model,
messages=messages
)
word = response['choices'][0]['message']['content'].replace(" ", "")
origin = [summary, word]
return make_quiz_and_answer(origin)
| [
"PLACEHOLDERPLACEHOLDER",
"You are a helpful assistant.",
"위 내용에서 가장 중요한 단어를 1개 찾아줘"
] |
2024-01-10 | yuh-zha/Align | unanswerable_qa~benchmark.py | from utils import load_json_dataset
from squad_eval import evaluate_predictions
from datasets import load_dataset
import argparse
from electra_for_qa import ElectraForQA, SQuAD2PredictionDataset
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer
import scipy
import openai
import time
from timeit import default_timer
import os
import jsonlines
from tqdm import tqdm
from transformers import T5Tokenizer, T5ForConditionalGeneration
import json
from align.inference import Inferencer
import tiktoken
def get_electra_model(device, ckpt, result_path, batch_size, **kwargs):
model = ElectraForQA.load_from_checkpoint(ckpt)
trainer = Trainer(
accelerator='gpu',
devices=[device],
default_root_dir=result_path,
)
def make_predictions(dataset, name):
print(f'Electra {name}')
eval_dataset = SQuAD2PredictionDataset(dataset)
loader = DataLoader(eval_dataset, batch_size=batch_size)
predictions = trainer.predict(model, loader)
answers = {}
na_prob = {}
for batch in predictions:
for sample_id, prediction, no_ans_logit in zip(
batch['sample_ids'],
batch['predictions'],
batch['answerable_logits']
):
answers[sample_id] = prediction[0][0]
na_prob[sample_id] = scipy.special.expit(-no_ans_logit)
return answers, na_prob
return make_predictions
def get_gpt_model(wait_time, result_path, max_length, **kwargs):
prompt = 'Find the answer to the question from the given context. '\
+ 'When the question cannot be answered with the given context, say "unanswerable". '\
+ 'Just say the answer without repeating the question.\n'\
+ 'Context: {context}\nQuestion: {question}\nAnswer:'
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
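# build the QA prompt, trimming only the context (never the instruction or question) when the token count exceeds max_length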
def generate_prompt(context, question):
input_text = prompt.format(context=context, question=question)
input_tokens = encoding.encode(input_text)
if len(input_tokens) <= max_length:
return input_text
context_tokens = encoding.encode(context)
context_len = len(context_tokens) - (len(input_tokens) - max_length)
short_context = encoding.decode(
context_tokens[:context_len], errors='ignore')
return prompt.format(context=short_context, question=question)
def parse_answer(answer):
no_ans_keywords = [
'unanswerable',
'no answer',
'context does not provide an answer'
]
if any(k in answer for k in no_ans_keywords):
return ''
return answer
raw_results = os.path.join(result_path, 'raw_results')
os.makedirs(raw_results, exist_ok=True)
def make_predictions(dataset, name):
cache_file = os.path.join(raw_results, f'{name}.jsonl')
completed = set()
try:
with jsonlines.open(cache_file, 'r') as results:
for result in results:
completed.add(result['id'])
except FileNotFoundError:
pass
with jsonlines.open(cache_file, 'a') as results:
for sample in tqdm(dataset, desc=f'GPT {name}'):
if sample['id'] in completed:
continue
start = default_timer()
prompt = generate_prompt(sample['context'], sample['question'])
result = None
exception_time = wait_time
exception_count = 0
while result is None:
try:
result = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt},
]
)
except (openai.error.RateLimitError, openai.error.APIError):
exception_count += 1
if exception_count <= 10:
time.sleep(exception_time)
exception_time *= 2
else:
raise
results.write({'id': sample['id'], 'result': result})
end = default_timer()
time.sleep(max(0, wait_time - (end - start)))
with jsonlines.open(cache_file, 'r') as results:
predictions = {
result['id']: parse_answer(
result['result']['choices'][0]['message']['content'])
for result in results
}
return predictions, None
return make_predictions
def get_flan_t5_model(model, device, max_length, **kwargs):
tokenizer = T5Tokenizer.from_pretrained(model)
model = T5ForConditionalGeneration.from_pretrained(model)
model.to(f'cuda:{device}')
def make_predictions(dataset, name):
predictions = {}
for sample in tqdm(dataset, desc=f'FLAN T5 {name}'):
context = sample['context']
question = sample['question']
input_seq = f"Context: {context}\n\nQuestion: {question}\n\nAnswer:"
inputs = tokenizer(
input_seq,
return_tensors='pt',
truncation=True,
max_length=max_length
)
outputs = model.generate(
inputs.input_ids.to(model.device),
max_new_tokens=40,
)
candidate = tokenizer.batch_decode(
outputs,
skip_special_tokens=True,
clean_up_tokenization_spaces=True
)[0]
if not candidate or candidate == 'unanswerable':
predictions[sample['id']] = ''
else:
predictions[sample['id']] = candidate
return predictions, None
return make_predictions
class AlignVerifier:
def __init__(self, align_ckpt, align_model, device) -> None:
self.align = Inferencer(
ckpt_path=align_ckpt, model=align_model, device=device, verbose=False)
self.align.nlg_eval_mode = 'nli_sp'
self.qa2d = None
self.device = device
def get_no_prob(self, context, question, candidate):
if not candidate:
return 1.
if self.qa2d is None:
from data_utils.generate_training_data import QA2D
self.qa2d = QA2D(device=self.device, verbose=False)
hypo = self.qa2d.generate([question], [candidate])
return 1. - self.align.nlg_eval([context], hypo)[1][0].item()
def get_no_prob_concat(self, context, question, candidate):
if not candidate:
return 1.
hypo = question + ' ' + candidate
return 1. - self.align.inference([context], [hypo])[1][0].item()
def combine_ace_whqa_subsets(subsets):
data = []
for name, dataset in subsets:
for sample in dataset:
sample = {**sample}
sample['id'] = f'{name}/{sample["id"]}'
data.append(sample)
return data
def combine_ace_whqa_predictions(subsets):
all_predictions = {}
for name, predictions in subsets.items():
all_predictions.update({
f'{name}/{id}': val for id, val in predictions.items()
})
return all_predictions
ZEROSHOT_DATASETS = [
{
'name': 'ace_whqa_has_answer',
'path': 'ace_whqa/ACE-whQA-has-answer.json'
},
{
'name': 'ace_whqa_IDK_competitive',
'path': 'ace_whqa/ACE-whQA-IDK-competitive.json'
},
{
'name': 'ace_whqa_IDK_non_competitive',
'path': 'ace_whqa/ACE-whQA-IDK-non-competitive.json'
},
{
'name': 'simplified_nq',
'path': 'simplified_nq.json'
},
]
MODELS = [
{'name': 'electra', 'load_model': get_electra_model},
{'name': 'gpt', 'load_model': get_gpt_model},
{'name': 'flan_t5', 'load_model': get_flan_t5_model},
]
def run_benchmark(args):
datasets = [
('squad_v2', load_dataset('squad_v2')['validation']),
]
for dataset_dict in ZEROSHOT_DATASETS:
path = os.path.join(args.data_path, dataset_dict['path'])
datasets.append((dataset_dict['name'], load_json_dataset(path)))
# all answers without applying "not answerable" threshold
all_predictions = {}
all_na_prob = {}
na_threshold = {}
for model_dict in MODELS:
model_name = model_dict['name']
result_path = os.path.join(args.result_path, model_name)
evaluation_path = os.path.join(result_path, 'evaluation')
prediction_path = os.path.join(result_path, 'predictions')
os.makedirs(evaluation_path, exist_ok=True)
os.makedirs(prediction_path, exist_ok=True)
model_args = {
key[len(model_name)+1:]: val
for key, val in vars(args).items()
if key.startswith(model_name + '_')
}
model = model_dict['load_model'](
device=args.device,
result_path=result_path,
**model_args
)
for dataset_name, dataset in datasets:
predictions_file = os.path.join(
prediction_path,
f'{dataset_name}_predictions.json'
)
na_prob_file = os.path.join(
prediction_path,
f'{dataset_name}_na_prob.json'
)
evaluation_file = os.path.join(
evaluation_path, f'{dataset_name}.json')
if os.path.exists(predictions_file):
print(f'Reading cached results {predictions_file}')
with open(predictions_file, 'r', encoding='utf8') as file:
predictions = json.load(file)
na_prob = None
try:
with open(na_prob_file, 'r', encoding='utf8') as file:
na_prob = json.load(file)
except FileNotFoundError:
pass
else:
predictions, na_prob = model(dataset, dataset_name)
with open(predictions_file, 'w', encoding='utf8') as file:
json.dump(predictions, file)
if na_prob is not None:
all_na_prob[(model_name, dataset_name)] = na_prob
with open(na_prob_file, 'w', encoding='utf8') as file:
json.dump(na_prob, file)
all_predictions[(model_name, dataset_name)] = predictions
threshold = na_threshold.get(model_name)
evaluation_results = evaluate_predictions(
dataset,
predictions,
na_prob,
threshold,
name=f'{model_name} {dataset_name}'
)
print(model_name, dataset_name)
print(evaluation_results)
with open(evaluation_file, 'w', encoding='utf8') as file:
json.dump(evaluation_results, file)
if dataset_name == 'squad_v2' and 'best_f1_thresh' in evaluation_results:
na_threshold[model_name] = evaluation_results['best_f1_thresh']
del model
align_verifier = AlignVerifier(
args.align_ckpt,
args.align_model,
f'cuda:{args.device}'
)
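# re-score each base model's cached answers with ALIGN to obtain an alternative no-answer probability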
for model_dict in MODELS:
base_model_name = model_dict['name']
model_name = 'align_' + base_model_name
result_path = os.path.join(args.result_path, model_name)
evaluation_path = os.path.join(result_path, 'evaluation')
prediction_path = os.path.join(result_path, 'predictions')
os.makedirs(evaluation_path, exist_ok=True)
os.makedirs(prediction_path, exist_ok=True)
for dataset_name, dataset in datasets:
model_predictions = all_predictions[(
base_model_name, dataset_name)]
align_na = {}
for sample in tqdm(dataset, desc=f'align {model_name} {dataset_name}'):
align_na[sample['id']] = align_verifier.get_no_prob_concat(
sample['context'],
sample['question'],
model_predictions[sample['id']]
)
all_predictions[(model_name, dataset_name)] = model_predictions
all_na_prob[(model_name, dataset_name)] = align_na
na_prob_file = os.path.join(
prediction_path,
f'{dataset_name}_na_prob.json'
)
evaluation_file = os.path.join(
evaluation_path,
f'{dataset_name}.json'
)
with open(na_prob_file, 'w', encoding='utf8') as file:
json.dump(align_na, file)
threshold = na_threshold.get(model_name)
evaluation_results = evaluate_predictions(
dataset,
model_predictions,
align_na,
threshold,
name=f'{model_name} {dataset_name}'
)
print(model_name, dataset_name)
print(evaluation_results)
with open(evaluation_file, 'w', encoding='utf8') as file:
json.dump(evaluation_results, file)
if dataset_name == 'squad_v2':
na_threshold[model_name] = evaluation_results['best_f1_thresh']
ace_whqa_subsets = [
(name, dataset) for name, dataset in datasets
if name.startswith('ace_whqa_')
]
ace_whqa_all = combine_ace_whqa_subsets(ace_whqa_subsets)
for model_dict in MODELS:
models = [model_dict['name'], 'align_' + model_dict['name']]
for model_name in models:
result_path = os.path.join(args.result_path, model_name)
evaluation_path = os.path.join(result_path, 'evaluation')
os.makedirs(evaluation_path, exist_ok=True)
combined_predictions = combine_ace_whqa_predictions({
subset: all_predictions[(model_name, subset)]
for subset, _ in ace_whqa_subsets
})
if all(((model_name, subset) in all_na_prob for subset, _ in ace_whqa_subsets)):
combined_na_prob = combine_ace_whqa_predictions({
subset: all_na_prob[(model_name, subset)]
for subset, _ in ace_whqa_subsets
})
else:
combined_na_prob = None
evaluation_file = os.path.join(
evaluation_path,
f'ace_whqa_all.json'
)
threshold = na_threshold.get(model_name)
evaluation_results = evaluate_predictions(
ace_whqa_all,
combined_predictions,
combined_na_prob,
threshold,
name=f'{model_name} ace_whqa_all'
)
print(model_name, 'ace_whqa_all')
print(evaluation_results)
with open(evaluation_file, 'w', encoding='utf8') as file:
json.dump(evaluation_results, file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_path",
type=str,
default="data"
)
parser.add_argument(
"--result_path",
type=str,
default="qa_results"
)
parser.add_argument(
"--electra_ckpt",
type=str,
default='align_qa/electra_squad2/lightning_logs/version_0/checkpoints/epoch=1-val_loss_epoch=1.930589.ckpt'
)
parser.add_argument(
"--electra_batch_size",
type=int,
default=8
)
parser.add_argument(
"--gpt_wait_time",
type=float,
default=0.05
)
parser.add_argument(
"--gpt_max_length",
type=int,
default=2040
)
parser.add_argument(
"--flan_t5_model",
type=str,
default="google/flan-t5-xl"
)
parser.add_argument(
"--flan_t5_max_length",
type=int,
default=2000
)
parser.add_argument(
"--align_ckpt",
type=str,
default='checkpoints/more-qa-scale-loss/roberta-large/roberta-large_no_mlm_full-dataset_500000_32x4x1_final.ckpt'
)
parser.add_argument(
"--align_model",
type=str,
default='roberta-large'
)
parser.add_argument(
"--device",
type=int,
default=6
)
args = parser.parse_args()
run_benchmark(args)
| [
"question",
"context",
"Find the answer to the question from the given context. When the question cannot be answered with the given context, say \"unanswerable\". Just say the answer without repeating the question.\nContext: {context}\nQuestion: {question}\nAnswer:"
] |
2024-01-10 | softPrisoner/ChatGPT | src~revChatGPT~V2.py | """
Official API for ChatGPT
"""
import asyncio
import json
import os
import sys
import httpx
import requests
import tiktoken
from OpenAIAuth import Authenticator as OpenAIAuth
from .utils import create_session
from .utils import get_input
ENCODER = tiktoken.get_encoding("gpt2")
def get_max_tokens(prompt: str) -> int:
"""
Get the max tokens for a prompt
"""
return 4000 - len(ENCODER.encode(prompt))
class Message:
"""
A single exchange between the user and the bot
"""
def __init__(self, text: str, author: str) -> None:
self.text: str = text
self.author: str = author
class Conversation:
"""
A single conversation
"""
def __init__(self) -> None:
self.messages: list[Message] = []
CONVERSATION_BUFFER: int = int(os.environ.get("CONVERSATION_BUFFER") or 1500)
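# tokens kept free for the model's reply; history is purged once a conversation exceeds 4000 - CONVERSATION_BUFFER tokens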
class Conversations:
"""
Conversation handler
"""
def __init__(self) -> None:
self.conversations: dict[str, Conversation] = {}
def add_message(self, message: Message, conversation_id: str) -> None:
"""
Adds a message to a conversation
"""
if conversation_id not in self.conversations:
self.conversations[conversation_id] = Conversation()
self.conversations[conversation_id].messages.append(message)
def get(self, conversation_id: str) -> str:
"""
Builds a conversation string from a conversation id
"""
if conversation_id not in self.conversations:
return ""
# Build conversation string from messages and check if it's too long
conversation = ""
for message in self.conversations[conversation_id].messages:
conversation += f"{message.author}: {message.text}<|im_sep|>\n\n"
if len(ENCODER.encode(conversation)) > 4000 - CONVERSATION_BUFFER:
self.purge_history(conversation_id)
return self.get(conversation_id)
return conversation
def purge_history(self, conversation_id: str, num: int = 1):
"""
Remove oldest messages from a conversation
"""
if conversation_id not in self.conversations:
return
self.conversations[conversation_id].messages = self.conversations[
conversation_id
].messages[num:]
def rollback(self, conversation_id: str, num: int = 1):
"""
Remove latest messages from a conversation
"""
if conversation_id not in self.conversations:
return
self.conversations[conversation_id].messages = self.conversations[
conversation_id
].messages[:-num]
def remove(self, conversation_id: str) -> None:
"""
Removes a conversation
"""
if conversation_id in self.conversations:
del self.conversations[conversation_id]
BASE_PROMPT = (
os.environ.get("BASE_PROMPT")
or """You are ChatGPT, a large language model by OpenAI. Respond conversationally\n\n\n"""
)
PROXY_URL = os.environ.get("PROXY_URL") or "https://chat.duti.tech"
class Chatbot:
"""
Handles everything seamlessly
"""
def __init__(
self,
email: str,
password: str,
paid: bool = False,
proxy=None,
insecure: bool = False,
session_token: str = None,
) -> None:
self.proxy = proxy
self.email: str = email
self.password: str = password
self.session_token = session_token
self.insecure: bool = insecure
self.api_key: str
self.paid: bool = paid
self.conversations = Conversations()
self.login(email, password, proxy, insecure, session_token)
async def ask(self, prompt: str, conversation_id: str = None) -> dict:
"""
Gets a response from the API
"""
if conversation_id is None:
conversation_id = "default"
self.conversations.add_message(
Message(prompt, "User"),
conversation_id=conversation_id,
)
conversation: str = self.conversations.get(conversation_id)
# Build request body
body = self.__get_config()
body["prompt"] = BASE_PROMPT + conversation + "ChatGPT: "
body["max_tokens"] = get_max_tokens(conversation)
async with httpx.AsyncClient(proxies=self.proxy if self.proxy else None).stream(
method="POST",
url=PROXY_URL + "/completions",
data=json.dumps(body),
headers={"Authorization": f"Bearer {self.api_key}"},
timeout=1080,
) as response:
full_result = ""
async for line in response.aiter_lines():
if response.status_code == 429:
print("error: " + "Too many requests")
raise Exception("Too many requests")
elif response.status_code == 523:
print(
"error: "
+ "Origin is unreachable. Ensure that you are authenticated and are using the correct pricing model.",
)
raise Exception(
"Origin is unreachable. Ensure that you are authenticated and are using the correct pricing model.",
)
elif response.status_code == 503:
print("error: " + "OpenAI error!")
raise Exception("OpenAI error!")
elif response.status_code != 200:
print("error: " + "Unknown error")
raise Exception("Unknown error")
line = line.strip()
if line == "\n" or line == "":
continue
if line == "data: [DONE]":
break
try:
# Remove "data: " from the start of the line
data = json.loads(line[6:])
if data is None:
continue
if "choices" not in data:
continue
full_result += data["choices"][0]["text"].replace("<|im_end|>", "")
yield data
except json.JSONDecodeError:
continue
self.conversations.add_message(
Message(full_result, "ChatGPT"),
conversation_id=conversation_id,
)
def __get_config(self) -> dict:
return {
"temperature": float(os.environ.get("TEMPERATURE") or 0.5),
"top_p": float(os.environ.get("TOP_P") or 1),
"stop": ["<|im_end|>", "<|im_sep|>"],
"presence_penalty": float(os.environ.get("PRESENCE_PENALTY") or 1.0),
"paid": self.paid,
"stream": True,
}
def login(self, email, password, proxy, insecure, session_token) -> None:
"""
Login to the API
"""
if not insecure:
auth = OpenAIAuth(email_address=email, password=password, proxy=proxy)
if session_token:
auth.session_token = session_token
auth.get_access_token()
self.api_key = auth.access_token
if self.api_key is None:
self.session_token = None
self.login(email, password, proxy, insecure, None)
return
auth.begin()
self.session_token = auth.session_token
self.api_key = auth.access_token
else:
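# insecure mode: delegate authentication to the proxy's /auth endpoint, which returns an access token directly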
auth_request = requests.post(
PROXY_URL + "/auth",
json={"email": email, "password": password},
timeout=10,
)
self.api_key = auth_request.json()["accessToken"]
async def main():
"""
Testing main function
"""
import argparse
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
parser = argparse.ArgumentParser()
parser.add_argument(
"-e",
"--email",
help="Your OpenAI email address",
required=False,
)
parser.add_argument(
"-p",
"--password",
help="Your OpenAI password",
required=False,
)
parser.add_argument(
"--paid",
help="Use the paid API",
action="store_true",
)
parser.add_argument(
"--proxy",
help="Use a proxy",
required=False,
type=str,
default=None,
)
parser.add_argument(
"--insecure-auth",
help="Use an insecure authentication method to bypass OpenAI's geo-blocking",
action="store_true",
)
parser.add_argument(
"--session_token",
help="Alternative to email and password authentication. Use this if you have Google/Microsoft account.",
required=False,
)
args = parser.parse_args()
if (args.email is None or args.password is None) and args.session_token is None:
print("error: " + "Please provide your email and password")
return
print("Logging in...")
chatbot = Chatbot(
args.email,
args.password,
paid=args.paid,
proxy=args.proxy,
insecure=args.insecure_auth,
session_token=args.session_token,
)
print("Logged in\n")
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
def commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this help message
!reset - Clear the current conversation
!rollback <int> - Remove the latest <int> messages from the conversation
!exit - Exit the program
""",
)
elif command == "!reset":
chatbot.conversations.remove("default")
print("Conversation cleared")
elif command.startswith("!rollback"):
try:
num = int(command.split(" ")[1])
chatbot.conversations.rollback("default", num)
print(f"Removed {num} messages from the conversation")
except IndexError:
print("Please specify the number of messages to remove")
except ValueError:
print("Please specify a valid number of messages to remove")
elif command == "!exit":
print("Exiting...")
sys.exit(0)
else:
return False
return True
try:
session = create_session()
while True:
prompt = get_input("\nYou:\n", session=session)
if prompt.startswith("!"):
if commands(prompt):
continue
print("ChatGPT:")
async for line in chatbot.ask(prompt=prompt):
result = line["choices"][0]["text"].replace("<|im_end|>", "")
print(result, end="")
sys.stdout.flush()
print()
except KeyboardInterrupt:
print("Exiting...")
sys.exit(0)
if __name__ == "__main__":
asyncio.run(main())
| [
"You are ChatGPT, a large language model by OpenAI. Respond conversationally\n\n\n",
"ChatGPT",
"\nYou:\n",
"User",
"BASE_PROMPT"
] |
2024-01-10 | softPrisoner/ChatGPT | src~revChatGPT~V1.py | """
Standard ChatGPT
"""
from __future__ import annotations
import base64
import json
import logging
import os
import os.path as osp
import time
import uuid
from functools import wraps
from os import environ
from os import getenv
import requests
from httpx import AsyncClient
from OpenAIAuth import Authenticator
from OpenAIAuth import Error as AuthError
from .utils import create_session
from .utils import get_input
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s",
)
log = logging.getLogger(__name__)
def logger(is_timed: bool):
"""Logger decorator
Args:
is_timed (bool): Whether to include function running time in exit log
Returns:
_type_: decorated function
"""
def decorator(func):
wraps(func)
def wrapper(*args, **kwargs):
log.debug(
"Entering %s with args %s and kwargs %s",
func.__name__,
args,
kwargs,
)
start = time.time()
out = func(*args, **kwargs)
end = time.time()
if is_timed:
log.debug(
"Exiting %s with return value %s. Took %s seconds.",
func.__name__,
out,
end - start,
)
else:
log.debug("Exiting %s with return value %s", func.__name__, out)
return out
return wrapper
return decorator
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://chatgpt.duti.tech/"
class Error(Exception):
"""
Base class for exceptions in this module.
Error codes:
-1: User error
0: Unknown error
1: Server error
2: Rate limit error
3: Invalid request error
4: Expired access token error
5: Invalid access token error
6: Prohibited concurrent query error
"""
source: str
message: str
code: int
def __init__(self, source: str, message: str, code: int = 0):
self.source = source
self.message = message
self.code = code
class colors:
"""
Colors for printing
"""
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def __init__(self) -> None:
if getenv("NO_COLOR"):
self.HEADER = ""
self.OKBLUE = ""
self.OKCYAN = ""
self.OKGREEN = ""
self.WARNING = ""
self.FAIL = ""
self.ENDC = ""
self.BOLD = ""
self.UNDERLINE = ""
bcolors = colors()
class Chatbot:
"""
Chatbot class for ChatGPT
"""
@logger(is_timed=True)
def __init__(
self,
config: dict[str, str],
conversation_id: str | None = None,
parent_id: str | None = None,
session_client=None,
lazy_loading: bool = False,
) -> None:
"""Initialize a chatbot
Args:
config (dict[str, str]): Login and proxy info. Example:
{
"email": "OpenAI account email",
"password": "OpenAI account password",
"session_token": "<session_token>"
"access_token": "<access_token>"
"proxy": "<proxy_url_string>",
"paid": True/False, # whether this is a plus account
}
More details on these are available at https://github.com/acheong08/ChatGPT#configuration
conversation_id (str | None, optional): Id of the conversation to continue on. Defaults to None.
parent_id (str | None, optional): Id of the previous response message to continue on. Defaults to None.
session_client (_type_, optional): _description_. Defaults to None.
Raises:
Exception: _description_
"""
user_home = getenv("HOME")
if user_home is None:
self.cache_path = osp.join(os.getcwd(), ".chatgpt_cache.json")
else:
# mkdir ~/.config/revChatGPT
if not osp.exists(osp.join(user_home, ".config")):
os.mkdir(osp.join(user_home, ".config"))
if not osp.exists(osp.join(user_home, ".config", "revChatGPT")):
os.mkdir(osp.join(user_home, ".config", "revChatGPT"))
self.cache_path = osp.join(user_home, ".config", "revChatGPT", "cache.json")
self.config = config
self.session = session_client() if session_client else requests.Session()
try:
cached_access_token = self.__get_cached_access_token(
self.config.get("email", None),
)
except Error as error:
if error.code == 5:
raise error
cached_access_token = None
if cached_access_token is not None:
self.config["access_token"] = cached_access_token
if "proxy" in config:
if not isinstance(config["proxy"], str):
raise Exception("Proxy must be a string!")
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
if isinstance(self.session, AsyncClient):
self.session = AsyncClient(proxies=proxies)
else:
self.session.proxies.update(proxies)
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
self.lazy_loading = lazy_loading
self.__check_credentials()
@logger(is_timed=True)
def __check_credentials(self):
"""Check login info and perform login
Any one of the following is sufficient for login. Multiple login info can be provided at the same time and they will be used in the order listed below.
- access_token
- session_token
- email + password
Raises:
Exception: _description_
AuthError: _description_
"""
if "access_token" in self.config:
self.__set_access_token(self.config["access_token"])
elif "session_token" in self.config:
pass
elif "email" in self.config and "password" in self.config:
pass
else:
raise Exception("Insufficient login details provided!")
if "access_token" not in self.config:
try:
self.__login()
except AuthError as error:
raise error
@logger(is_timed=False)
def __set_access_token(self, access_token: str):
"""Set access token in request header and self.config, then cache it to file.
Args:
access_token (str): access_token
"""
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
self.config["access_token"] = access_token
email = self.config.get("email", None)
if email is not None:
self.__cache_access_token(email, access_token)
@logger(is_timed=False)
def __get_cached_access_token(self, email: str | None) -> str | None:
"""Read access token from cache
Args:
email (str | None): email of the account to get access token
Raises:
Error: _description_
Error: _description_
Error: _description_
Returns:
str | None: access token string or None if not found
"""
email = email or "default"
cache = self.__read_cache()
access_token = cache.get("access_tokens", {}).get(email, None)
# Parse access_token as JWT
if access_token is not None:
try:
# Split access_token into 3 parts
s_access_token = access_token.split(".")
# Add padding to the middle part
s_access_token[1] += "=" * ((4 - len(s_access_token[1]) % 4) % 4)
d_access_token = base64.b64decode(s_access_token[1])
d_access_token = json.loads(d_access_token)
except base64.binascii.Error:
raise Error(
source="__get_cached_access_token",
message="Invalid access token",
code=5,
) from None
except json.JSONDecodeError:
raise Error(
source="__get_cached_access_token",
message="Invalid access token",
code=5,
) from None
exp = d_access_token.get("exp", None)
if exp is not None and exp < time.time():
raise Error(
source="__get_cached_access_token",
message="Access token expired",
code=4,
)
return access_token
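        # Worked example of the padding arithmetic above (sketch): a payload
        # segment of length 10 needs (4 - 10 % 4) % 4 == 2 trailing "=" so that
        # base64.b64decode() sees a length divisible by 4, while a segment whose
        # length is already a multiple of 4 gets (4 - 0) % 4 == 0 extra characters.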
@logger(is_timed=False)
def __cache_access_token(self, email: str, access_token: str) -> None:
"""Write an access token to cache
Args:
email (str): account email
access_token (str): account access token
"""
email = email or "default"
cache = self.__read_cache()
if "access_tokens" not in cache:
cache["access_tokens"] = {}
cache["access_tokens"][email] = access_token
self.__write_cache(cache)
@logger(is_timed=False)
def __write_cache(self, info: dict):
"""Write cache info to file
Args:
info (dict): cache info, current format
{
"access_tokens":{"[email protected]": 'this account's access token', }
}
"""
dirname = osp.dirname(self.cache_path) or "."
os.makedirs(dirname, exist_ok=True)
json.dump(info, open(self.cache_path, "w", encoding="utf-8"), indent=4)
@logger(is_timed=False)
def __read_cache(self):
try:
cached = json.load(open(self.cache_path, encoding="utf-8"))
except (FileNotFoundError, json.decoder.JSONDecodeError):
cached = {}
return cached
@logger(is_timed=True)
def __login(self):
if (
"email" not in self.config or "password" not in self.config
) and "session_token" not in self.config:
log.error("Insufficient login details provided!")
raise Exception("Insufficient login details provided!")
auth = Authenticator(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
if self.config.get("session_token"):
log.debug("Using session token")
auth.session_token = self.config["session_token"]
auth.get_access_token()
if auth.access_token is None:
del self.config["session_token"]
self.__login()
return
else:
log.debug("Using authenticator to get access token")
auth.begin()
self.config["session_token"] = auth.session_token
auth.get_access_token()
self.__set_access_token(auth.access_token)
@logger(is_timed=True)
def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str | None = None,
timeout: float = 360,
):
"""Ask a question to the chatbot
Args:
prompt (str): The question
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str | None, optional): UUID for the message to continue on. Defaults to None.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Raises:
Error: _description_
Exception: _description_
Error: _description_
Error: _description_
Error: _description_
Yields:
_type_: _description_
"""
if parent_id is not None and conversation_id is None:
log.error("conversation_id must be set once parent_id is set")
raise Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=-1,
)
if conversation_id is not None and conversation_id != self.conversation_id:
log.debug("Updating to new conversation by setting parent_id to None")
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
log.debug("New conversation, setting parent_id to new UUID4: %s", parent_id)
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
if self.lazy_loading:
log.debug(
"Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID",
conversation_id,
)
try:
history = self.get_msg_history(conversation_id)
self.conversation_mapping[conversation_id] = history[
"current_node"
]
except Exception:
pass
else:
log.debug(
"Conversation ID %s not found in conversation mapping, mapping conversations",
conversation_id,
)
self.__map_conversations()
if conversation_id in self.conversation_mapping:
log.debug(
"Conversation ID %s found in conversation mapping, setting parent_id to %s",
conversation_id,
self.conversation_mapping[conversation_id],
)
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": "text-davinci-002-render-sha"
if not self.config.get("paid")
else "text-davinci-002-render-paid",
}
log.debug("Sending the payload")
log.debug(json.dumps(data, indent=2))
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=BASE_URL + "api/conversation",
data=json.dumps(data),
timeout=timeout,
stream=True,
)
self.__check_response(response)
for line in response.iter_lines():
line = str(line)[2:-1]
if line == "Internal Server Error":
log.error("Internal Server Error: %s", line)
raise Exception("Error: " + str(line))
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
break
line = line.replace('\\"', '"')
line = line.replace("\\'", "'")
line = line.replace("\\\\", "\\")
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
log.error("Field missing", exc_info=True)
if (
line.get("detail")
== "Too many requests in 1 hour. Try again later."
):
log.error("Rate limit exceeded")
raise Error(source="ask", message=line.get("detail"), code=2)
if line.get("detail").startswith(
"Only one message at a time.",
):
log.error("Prohibited concurrent query")
raise Error(source="ask", message=line.get("detail"), code=6)
if line.get("detail", "") == "invalid_api_key":
log.error("Invalid access token")
raise Error(
source="ask",
message=line.get("detail", ""),
code=3,
)
if line.get("detail", "") == "invalid_token":
log.error("Invalid access token")
raise Error(
source="ask",
message=line.get("detail", ""),
code=5,
)
raise Error(source="ask", message="Field missing", code=1)
message = line["message"]["content"]["parts"][0]
if message == prompt:
continue
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
try:
model = line["message"]["metadata"]["model_slug"]
except KeyError:
model = None
log.debug("Received message: %s", message)
log.debug("Received conversation_id: %s", conversation_id)
log.debug("Received parent_id: %s", parent_id)
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
@logger(is_timed=False)
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except TypeError:
return False
except KeyError:
return False
return True
@logger(is_timed=False)
def __check_response(self, response):
"""Make sure response is success
Args:
response (_type_): _description_
Raises:
Error: _description_
"""
if response.status_code != 200:
print(response.text)
raise Error(
source="OpenAI",
message=response.text,
code=response.status_code,
)
@logger(is_timed=True)
def get_conversations(
self,
offset: int = 0,
limit: int = 20,
encoding: str | None = None,
):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"api/conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
data = json.loads(response.text)
return data["items"]
@logger(is_timed=True)
def get_msg_history(self, convo_id: str, encoding: str | None = None):
"""
Get message history
:param id: UUID of conversation
:param encoding: String
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
data = json.loads(response.text)
return data
@logger(is_timed=True)
def gen_title(self, convo_id: str, message_id: str):
"""
Generate title for conversation
"""
response = self.session.post(
BASE_URL + f"api/conversation/gen_title/{convo_id}",
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
@logger(is_timed=True)
def change_title(self, convo_id: str, title: str):
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.patch(url, data=json.dumps({"title": title}))
self.__check_response(response)
@logger(is_timed=True)
def delete_conversation(self, convo_id: str):
"""
Delete conversation
:param id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=True)
def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "api/conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=False)
def __map_conversations(self):
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
@logger(is_timed=False)
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
@logger(is_timed=False)
def rollback_conversation(self, num: int = 1) -> None:
"""
Rollback the conversation.
:param num: Integer. The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
class AsyncChatbot(Chatbot):
"""
Async Chatbot class for ChatGPT
"""
def __init__(
self,
config,
conversation_id=None,
parent_id=None,
) -> None:
super().__init__(
config=config,
conversation_id=conversation_id,
parent_id=parent_id,
session_client=AsyncClient,
)
async def ask(
self,
prompt,
conversation_id=None,
parent_id=None,
timeout=360,
):
"""
Ask a question to the chatbot
"""
if parent_id is not None and conversation_id is None:
raise Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=1,
)
if conversation_id is not None and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
await self.__map_conversations()
parent_id = self.conversation_mapping[conversation_id]
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": "text-davinci-002-render-sha"
if not self.config.get("paid")
else "text-davinci-002-render-paid",
}
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
async with self.session.stream(
method="POST",
url=BASE_URL + "api/conversation",
data=json.dumps(data),
timeout=timeout,
) as response:
self.__check_response(response)
async for line in response.aiter_lines():
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if "[DONE]" in line:
break
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise Exception("Field missing. Details: " + str(line))
message = line["message"]["content"]["parts"][0]
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
model = (
line["message"]["metadata"]["model_slug"]
if "model_slug" in line["message"]["metadata"]
else None
)
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
async def get_conversations(self, offset=0, limit=20):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"api/conversations?offset={offset}&limit={limit}"
response = await self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
async def get_msg_history(self, convo_id, encoding="utf-8"):
"""
Get message history
:param id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = await self.session.get(url)
if encoding is not None:
response.encoding = encoding
self.__check_response(response)
data = json.loads(response.text)
return data
async def gen_title(self, convo_id, message_id):
"""
Generate title for conversation
"""
url = BASE_URL + f"api/conversation/gen_title/{convo_id}"
response = await self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
await self.__check_response(response)
async def change_title(self, convo_id, title):
"""
Change title of conversation
:param convo_id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = await self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
async def delete_conversation(self, convo_id):
"""
Delete conversation
:param convo_id: UUID of conversation
"""
url = BASE_URL + f"api/conversation/{convo_id}"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "api/conversations"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def __map_conversations(self):
conversations = await self.get_conversations()
histories = [await self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except TypeError:
return False
except KeyError:
return False
return True
def __check_response(self, response):
response.raise_for_status()
get_input = logger(is_timed=False)(get_input)
@logger(is_timed=False)
def configure():
"""
Looks for a config file in the following locations:
"""
config_files = ["config.json"]
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
user_home = getenv("HOME")
if user_home:
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
config_file = next((f for f in config_files if osp.exists(f)), None)
if config_file:
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
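# Example config.json layout (sketch): keys follow the Chatbot config documented
# above; all values below are placeholders, not real credentials.
# {
#     "email": "<OpenAI account email>",
#     "password": "<OpenAI account password>",
#     "proxy": "<optional proxy url>",
#     "paid": false
# }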
@logger(is_timed=False)
def main(config: dict):
"""
Main function for the chatGPT program.
"""
print("Logging in...")
chatbot = Chatbot(
config,
conversation_id=config.get("conversation_id"),
parent_id=config.get("parent_id"),
)
def handle_commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
!setconversation - Changes the conversation
""",
)
elif command == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
elif command == "!config":
print(json.dumps(chatbot.config, indent=4))
elif command.startswith("!rollback"):
try:
rollback = int(command.split(" ")[1])
except IndexError:
logging.exception(
"No number specified, rolling back 1 message",
stack_info=True,
)
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
elif command.startswith("!setconversation"):
try:
chatbot.conversation_id = chatbot.config[
"conversation_id"
] = command.split(" ")[1]
print("Conversation has been changed")
except IndexError:
log.exception(
"Please include conversation UUID in command",
stack_info=True,
)
print("Please include conversation UUID in command")
elif command == "!exit":
exit(0)
else:
return False
return True
session = create_session()
print()
try:
while True:
print(bcolors.OKBLUE + bcolors.BOLD + "You:" + bcolors.ENDC)
prompt = get_input(session=session)
if prompt.startswith("!"):
if handle_commands(prompt):
continue
print()
print(bcolors.OKGREEN + bcolors.BOLD + "Chatbot: ")
prev_text = ""
for data in chatbot.ask(prompt):
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print(bcolors.ENDC)
print()
except KeyboardInterrupt:
print("Exiting...")
exit(0)
except EOFError:
print("Exiting...")
exit(0)
if __name__ == "__main__":
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print(
bcolors.BOLD
+ bcolors.WARNING
+ "Press Esc followed by Enter or Alt+Enter to send a message."
+ bcolors.ENDC,
)
main(configure())
| [
"text",
"content_type"
] |
2024-01-10 | jackdreilly/reidle-fresh | wrapped.py | # %%
import os
import base64
import tqdm
import wordcloud
from collections import Counter
import datetime
from io import BytesIO
from pathlib import Path
import numpy as np
import pandas as pd
# %%
def playback_errors(playback):
w = ""
checked = ""
errors = []
for event in playback["events"]:
if event.get("clear"):
checked = w
w = ""
if error := event.get("error"):
if error.get("message") == "Not a word":
if checked:
errors.append(checked.title())
if event.get("score") is not None:
w = ""
if l := event.get("letter"):
w = w[:4]
w += l
if event.get("backspace"):
w = w[:-1]
return errors
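def _playback_errors_example():
    # Sketch of the event schema this parser assumes (synthetic data, not a real
    # playback record): five typed letters, a submit ("clear"), then a
    # "Not a word" error should surface the submitted word.
    playback = {
        "events": [
            {"letter": "x"},
            {"letter": "y"},
            {"letter": "l"},
            {"letter": "y"},
            {"letter": "l"},
            {"clear": True},
            {"error": {"message": "Not a word"}},
        ]
    }
    assert playback_errors(playback) == ["Xylyl"]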
# %%
cxn = (os.environ.get("POSTGRES_URL") or "").replace("postgres://", "postgresql://")
# %%
df = pd.read_sql(
"select * from submissions",
cxn,
)
df
# %%
df.to_parquet("~/submissions.parquet")
# %%
base = (
pd.read_parquet("~/submissions.parquet")
.assign(name=lambda x: x.name.where(x.name != "natalie", "natnat"))
.query("challenge_id.isna()")
.assign(
day=lambda x: pd.to_datetime(x.day),
year=lambda x: x.day.dt.year,
dow=lambda x: x.day.dt.dayofweek,
words=lambda x: x.playback.apply(
lambda x: [
x.title()
for x in (
"".join(
c["letter"]
for c in (
event.get("score", [])
if event.get("score", []) is not None
else []
)
)
for event in x.get("events", [])
)
if x and len(Counter(x)) > 1
]
),
)
.query("year == 2023")
.assign(
max_score=lambda x: x.groupby("day").score.transform("max"),
normalized_score=lambda x: 1000 * (1 - ((x.score - 1) / (x.max_score - 1))),
dow_score=lambda x: x.groupby(["name", "dow"])
.normalized_score.transform("mean")
.astype(np.int16),
overall_score=lambda x: x.groupby(["name"])
.normalized_score.transform("mean")
.astype(np.int16),
games_played=lambda x: x.groupby("name").name.transform("count"),
starting_day=lambda x: x.groupby("name").day.transform(lambda x: x.min()),
days_available=lambda x: (
pd.to_datetime(datetime.datetime.utcnow()) - x.starting_day
).dt.days,
games_missed=lambda x: (x.days_available - x.games_played).clip(0, 365),
tardy_rate=lambda x: (x.days_available / x.games_missed)
.clip(0, 365)
.astype(np.int16),
errors=lambda x: x.playback.apply(playback_errors),
)
.sort_values(["day", "name"], ascending=False)
)
# %%
base.query("rank == 1").name.value_counts().reset_index().rename(
columns=dict(count="num_wins")
).to_sql("wins", cxn, schema="wrapped", if_exists="replace")
base[["name", "dow", "dow_score"]].drop_duplicates(["name", "dow"]).to_sql(
"dow", cxn, schema="wrapped", if_exists="replace"
)
base.query("games_played > 10")[["name", "overall_score"]].drop_duplicates().to_sql(
"overall_score", cxn, schema="wrapped", if_exists="replace"
)
# %%
base[["name", "words"]].explode("words").value_counts().reset_index().rename(
columns=dict(count="word_count")
).assign(
count_rank=lambda x: x.groupby("name").word_count.transform(
"cumcount", ascending=True
)
).query(
"count_rank < 5"
).rename(
columns=dict(word_count="count")
).to_sql(
"top_words", cxn, schema="wrapped", if_exists="replace"
)
base["words"].explode().value_counts()[:40].reset_index().to_sql(
"top_words_overall", cxn, schema="wrapped", if_exists="replace"
)
# %%
base.query("games_played > 10").drop_duplicates("name")[
["name", "games_missed", "tardy_rate"]
].sort_values("tardy_rate").apply(
lambda col: col.astype(np.int16) if np.issubdtype(col.dtype, np.number) else col
).to_sql(
"tardy_rate", cxn, schema="wrapped", if_exists="replace"
)
# %%
base[["name", "errors"]].explode("errors").value_counts().reset_index().rename(
columns=dict(count="error_count")
).assign(
count_rank=lambda x: x.groupby("name").error_count.transform(
"cumcount", ascending=True
)
).query(
"count_rank < 20"
).rename(
columns=dict(error_count="count")
).to_sql(
"top_errors", cxn, schema="wrapped", if_exists="replace"
)
base["errors"].explode().value_counts()[:40].reset_index().to_sql(
"top_errors_overall", cxn, schema="wrapped", if_exists="replace"
)
# %%
def make_wordcloud(words, background_color="white"):
b = BytesIO()
wordcloud.WordCloud(
background_color=background_color, width=800, height=800, max_words=50
).generate_from_frequencies(words).to_image().save(b, "webp")
b.flush()
return b.getvalue()
# %%
pd.concat(
[
pd.DataFrame(
dict(
name="all",
image=make_wordcloud(
base["errors"]
.explode()
.value_counts()[:100]
.reset_index()
.rename(columns=dict(errors="name"))
.set_index("name", drop=True)["count"]
.to_dict()
),
)
),
pd.DataFrame(
base[["name", "errors"]]
.explode("errors")
.groupby("name")
.errors.count()
.reset_index()
.value_counts()
.reset_index()
.rename(columns=dict(count="error_count"))
.assign(
count_rank=lambda x: x.groupby("name").error_count.transform(
"cumcount", ascending=True
)
)
.query("count_rank < 20")
.rename(columns=dict(error_count="count"))
),
]
)
# %%
pd.concat(
[
pd.DataFrame(
base.groupby("name").apply(
lambda x: make_wordcloud(Counter(sum(x.errors, []) or ["Perfy"]))
)
)
.rename(columns={0: "image"})
.reset_index(),
pd.DataFrame(
dict(
name=["all"],
image=[make_wordcloud(Counter(sum(base.errors, [])), "white")],
)
),
]
).assign(
image=lambda x: x.image.apply(lambda x: base64.encodebytes(x).decode())
).to_sql(
"wordcloud_images", cxn, schema="wrapped", if_exists="replace", index=False
)
# %%
pd.concat(
[
pd.DataFrame(
base.groupby("name").apply(
lambda x: make_wordcloud(Counter(sum(x.words, []) or ["Perfy"]))
)
)
.rename(columns={0: "image"})
.reset_index(),
pd.DataFrame(
dict(
name=["all"],
image=[make_wordcloud(Counter(sum(base.words, [])), "white")],
)
),
]
).assign(
image=lambda x: x.image.apply(lambda x: base64.encodebytes(x).decode())
).to_sql(
"wordcloud_images_words", cxn, schema="wrapped", if_exists="replace", index=False
)
# %%
import os
from openai import OpenAI
client = OpenAI(
# This is the default and can be omitted
# os.environ.get("OPENAI_API_KEY"),
)
# %%
mistakes_df = (
pd.DataFrame(
base.groupby("name").apply(
lambda x: ", ".join(list(Counter(sum(x.errors, []) or ["Perfy"]))[:20])
)
)
.rename(columns={0: "words"})
.query("words.str.contains(',')")
)
name, mistakes = (mistakes_df.iloc[0].name, mistakes_df.iloc[0].words)
# %%
# %%
results = {}
# %%
for _, row in tqdm.tqdm(mistakes_df.iterrows()):
name = row.name
mistakes = row.words
print(name, mistakes)
results[row.name] = (
client.chat.completions.create(
messages=[
{
"role": "user",
"content": f"""
I want to create a funny story using the made-up words that the users have mispelled.
Please construct a funny story about a person named {name}, about 5 sentences long, using some of the following words: {mistakes}
Please use the person's name ({name}) a lot in the story, and make them the hero.
Try and not make the made-up words proper nouns very often, instead preferring to make them verbs, adjectives, and such in a very creative fashion
""",
}
],
model="gpt-4",
)
.choices[0]
.message.content
)
# %%
pd.DataFrame(list(results.items()), columns=["name", "story"]).to_sql(
"stories", cxn, schema="wrapped", if_exists="replace", index=False
)
| [
"\nI want to create a funny story using the made-up words that the users have mispelled.\nPlease construct a funny story about a person named PLACEHOLDER, about 5 sentences long, using some of the following words: PLACEHOLDER\nPlease use the person's name (PLACEHOLDER) a lot in the story, and make them the hero.\nTry and not make the made-up words proper nouns very often, instead preferring to make them verbs, adjectives, and such in a very creative fashion\n"
] |
2024-01-10 | TankNee/Tool-ChatGPT | src~tools~layout_analysis.py | from tools.base_tool import BaseTool
from utils import get_output_path, prompts, logger
from PIL import Image
import numpy as np
from langchain.tools import Tool
import sys
sys.path.append("unilm")
from unilm.dit.object_detection.ditod import add_vit_config
from detectron2.config import CfgNode as CN
from detectron2.config import get_cfg
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
import os
class LayoutAnalysis(BaseTool):
def __init__(self, path, cascade_dit_base_cfg_path, device, llm) -> None:
super().__init__(llm)
self.cfg = get_cfg()
add_vit_config(self.cfg)
self.cfg.merge_from_file(cascade_dit_base_cfg_path)
self.cfg.MODEL.WEIGHTS = path
self.cfg.MODEL.DEVICE = device
self.predictor = DefaultPredictor(self.cfg)
self.thing_classes = ["text", "title", "list", "table", "figure"]
self.md = MetadataCatalog.get(self.cfg.DATASETS.TEST[0])
self.md.set(thing_classes=self.thing_classes)
def analysis(self, img_path):
assert os.path.exists(img_path), f"Image path {img_path} not exists"
img = Image.open(img_path).convert("RGB")
img = np.array(img)
predict_result = self.predictor(img)
instance = predict_result["instances"]
v = Visualizer(img[:, :, ::-1],
self.md,
scale=1.0,
instance_mode=ColorMode.SEGMENTATION)
result = v.draw_instance_predictions(instance.to("cpu"))
# result_img = result.get_image()[:, :, ::-1]
# save result image
output_path = get_output_path(img_path)
result.save(output_path)
return instance, output_path
@prompts(name="Layout Segment Tool",
desc="useful when you want to recognize the layout of document."
"The input to this tool should be a path string,"
"representing the image_path of the document image."
"The output to this tool is processed image path.")
def segment_tool(self, img_path):
_, output_path = self.analysis(img_path)
return output_path
@prompts(
name="Layout Analysis Tool",
desc="useful when you want to get the layout text info of document."
"For example, you can see how many tables, text, headings, diagrams and the like are in the document"
"The input to this tool should be a path string,"
"representing the image_path of the document image."
"The output to this tool is a dict, "
"which contains the list of image file path of document component."
)
def meta_info_tool(self, img_path):
instance, _ = self.analysis(img_path)
fields = instance.get_fields()
output_dict = {}
for pred_class, pred_boxes in zip(fields['pred_classes'],
fields['pred_boxes']):
pred_class = self.thing_classes[pred_class]
cut_img = self.cut_img_tool(img_path, pred_boxes)
if pred_class not in output_dict:
output_dict[pred_class] = [cut_img]
else:
output_dict[pred_class].append(cut_img)
logger.debug(f"Layout info is {output_dict}")
return output_dict
@prompts(
name="Cut Image Tool",
desc=
"useful when you want to cut the image according to the bounding box."
"The input to this tool should be a path string to the img and bounding box coordinates,"
"you should get the bounding box coordinates from the output of the layout info tool."
"bounding box coordinates is a string, which is separated by commas."
"The order of the bounding box coordinates is [x0, y0, x1, y1],"
"where (x0, y0) is the upper left corner of the bounding box,"
"and (x1, y1) is the lower right corner of the bounding box."
"The path string and bounding box coordinates are separated by a space strictly."
"The output of this tool is a path to the cut image.")
def cut_img_tool(self, img_path, boxes):
# img_path, boxes_str = inputs.split(" ")
assert os.path.exists(img_path), f"Image path {img_path} not exists"
# boxes = boxes_str.split(",")
boxes = [int(box) for box in boxes]
img = Image.open(img_path).convert("RGB")
cut_img = img.crop(boxes)
output_path = get_output_path(img_path)
cut_img.save(output_path)
logger.debug(f"Cut image path is {output_path}")
return output_path
def get_tools(self):
return [
Tool(name=self.segment_tool.name,
description=self.segment_tool.desc,
func=self.segment_tool),
Tool(name=self.meta_info_tool.name,
description=self.meta_info_tool.desc,
func=self.meta_info_tool),
# Tool(name=self.cut_img_tool.name,
# description=self.cut_img_tool.desc,
# func=self.cut_img_tool)
] | [] |
2024-01-10 | TankNee/Tool-ChatGPT | src~tools~database_query.py | from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from tools.base_tool import BaseTool
from utils import logger, prompts
from langchain.tools.sql_database.tool import (
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QueryCheckerTool,
QuerySQLDataBaseTool,
)
from langchain.tools import Tool
import os
class DatabaseQuery(BaseTool):
def __init__(self, path, comments, llm) -> None:
super().__init__(llm)
self.db = SQLDatabase.from_uri(path)
self.comments = comments
logger.debug(f"DatabaseQuery load db from {self.db}")
def get_tools(self):
cd = CommentDatabaseTool(self.comments, self.llm)
return [
QuerySQLDataBaseTool(db=self.db),
InfoSQLDatabaseTool(db=self.db),
ListSQLDatabaseTool(db=self.db),
QueryCheckerTool(db=self.db, llm=self.llm),
Tool(name=cd.inference.name,
description=cd.inference.desc,
func=cd.inference)
]
class CommentDatabaseTool(BaseTool):
def __init__(self, comments, llm) -> None:
super().__init__(llm)
self.comments = {}
for comment in comments:
with open(comment, "r") as f:
file_name = ".".join(os.path.basename(comment).split(".")[:-1])
self.comments[file_name] = f.read()
logger.debug(f"CommentDatabaseTool load comments from {self.comments}")
@prompts(
name="Get comment about table",
desc="useful when you want to get the comment about a table."
"The input is a string, which is the table name. "
"You cannot create a table name. The input must be a valid table name in the database"
"The output is a string, which is the comment about the table, which will be used to help you understand the table."
)
def inference(self, table_name):
return self.comments.get(table_name, "No comments")
| [] |
2024-01-10 | TankNee/Tool-ChatGPT | src~core.py | import os, uuid, shutil, re
from PIL import Image
from loguru import logger
from models.chatglm import ChatGLM
from utils import AutoConfiguration
from langchain.tools import Tool
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.agents.initialize import initialize_agent
from langchain.agents import AgentType, load_tools
from prompts.en_prompt import PREFIX, DOC_GPT_FORMAT_INSTRUCTIONS, DOC_GPT_SUFFIX
@AutoConfiguration("configs/init_config.yaml")
class ToolMatrix():
def __init__(self) -> None:
self.config = None
self.tools = []
# self.llm = ChatOpenAI(temperature=0.1,
# model_name="gpt-3.5-turbo",
# max_tokens=1024)
self.llm = None
self.memory = ConversationBufferMemory(memory_key="chat_history",
output_key="output")
self.agent = None
def init_all(self):
self.llm = self.init_llm()
self.init_tools()
self.init_logger()
self.agent = self.init_agent()
def init_tools(self):
for tool_info in self.config.tools:
tool_module = tool_info["module"]
tool_class = tool_info["class"]
tool_cls = getattr(__import__("tools", fromlist=[tool_module]),
tool_class)
arguments = [
self.config.__dict__[tool_module][k]
for k in self.config.__dict__[tool_module].keys()
] if hasattr(self.config, tool_module) else []
tool = tool_cls(*arguments, llm=self.llm)
if hasattr(tool, "get_tools"):
self.tools.extend(tool.get_tools())
else:
self.tools.append(
Tool(name=tool.inference.name,
description=tool.inference.desc,
func=tool.inference))
logger.debug(f"Tool [{tool_module}] initialized.")
preset_tools = load_tools(self.config.preset_tools, llm=self.llm)
logger.debug(f"[{self.config.preset_tools}] preset tools loaded.")
self.tools.extend(preset_tools)
logger.info(f"{len(self.tools)} tools initialized.")
def init_llm(self):
model_type = self.config.model_type
if model_type == "chatglm":
# self.llm = ChatGLM(self.config.model_config[model_type]['base_url'],
# self.config.model_config[model_type]['api_key'])
llm = ChatGLM()
llm.load_config(self.config.model_config[model_type])
elif model_type == "openai":
            llm = ChatOpenAI(temperature=self.config.model_config[model_type]['temperature'],
                             model_name=self.config.model_config[model_type]['model_name'],
                             max_tokens=self.config.model_config[model_type]['max_tokens'])
else:
raise NotImplementedError(f"Model type [{model_type}] not supported.")
logger.debug(f"LLM [{model_type}] initialized.")
return llm
def init_agent(self):
self.memory.clear()
agent = initialize_agent(
tools=self.tools,
llm=self.llm,
memory=self.memory,
agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
return_intermediate_steps=True,
max_iterations=8,
            # For now, use the English prompts
agent_kwargs={
"prefix": PREFIX,
"suffix": DOC_GPT_SUFFIX,
"format_instructions": DOC_GPT_FORMAT_INSTRUCTIONS,
})
logger.debug("Agent initialized.")
return agent
def init_logger(self):
logger.level("INFO")
def run_text(self, text: str, state):
result = self.agent({"input": text.strip()})
result['output'] = result['output'].replace("\\", "/")
response = re.sub('(image/[-\w]*.png)',
lambda m: f'})*{m.group(0)}*',
result['output'])
state = state + [(text, response)]
logger.info(f"User input: {text}")
logger.info(f"AI output: {response}")
return state, state
def run_img(self, img_path: str, state: list):
img_path = img_path.name
# move img to specified path
logger.debug(f"User input a image which is saved at {img_path}.")
# move to self.configs.image_cache_dir
target_path = os.path.join(self.config.image_cache_dir,
f"{str(uuid.uuid4())[:8]}.png")
img = Image.open(img_path)
img.save(target_path, format="png")
img_name = f"image/{os.path.basename(target_path)}"
logger.debug(f"Image moved to {img_name}.")
# run
HUMAN_PROMPT = f"provide a figure named {img_name}. you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\"."
AI_PROMPT = "Received."
self.agent.memory.save_context({"input": HUMAN_PROMPT},
{"output": AI_PROMPT})
# self.agent.memory.buffer = self.agent.memory.buffer + HUMAN_PROMPT + "AI: " + AI_PROMPT
state += [(f"*{img_name}*", AI_PROMPT)]
return state, state
| [
"provide a figure named f\"image/{os.path.basename(target_path)}. you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\".",
"Received."
] |
2024-01-10 | TankNee/Tool-ChatGPT | src~models~chatglm.py | import requests
from typing import Optional, List, Dict, Mapping, Any
import langchain
from langchain.llms.base import LLM
from langchain.cache import InMemoryCache
# Enable in-memory caching for LLM responses
langchain.llm_cache = InMemoryCache()
class ChatGLM(LLM):
base_url: str = None
api_key: str = None
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "chatglm"
def _construct_query(self, prompt: str) -> Dict:
"""构造请求体
"""
query = {"human_input": prompt}
return query
def load_config(self, config: Mapping[str, Any]) -> None:
"""加载配置
"""
self.base_url = config["base_url"]
self.api_key = config["api_key"]
@classmethod
def _post(cls, url: str, key, query: Dict) -> Any:
"""POST请求
"""
_headers = {"Content_Type": "application/json",
"Authorization": f"Bearer {key}"}
with requests.session() as sess:
resp = sess.post(url, json=query, headers=_headers, timeout=60)
return resp
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""_call
"""
# construct query
query = self._construct_query(prompt=prompt)
# post
res = self._post(url=f"{self.base_url}/v1/chat/completions", key=self.api_key, query=query)
if res.status_code == 200:
res_json = res.json()
predictions = res_json["response"]
return predictions
else:
return "请求模型"
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters.
"""
_param_dict = {"base_url": self.base_url, "api_key": self.api_key}
return _param_dict
| [] |
2024-01-10 | TankNee/Tool-ChatGPT | src~tools~network_search.py | from tools.base_tool import BaseTool
from utils import prompts
from langchain.tools import DuckDuckGoSearchRun
class NetworkSearch(DuckDuckGoSearchRun):
def __init__(self, **kwargs) -> None:
super().__init__()
@prompts(name="Network Search",
desc="useful when you want to search for a network architecture."
"The input is a text, which is the query that user want to search.")
def inference(self, text):
return self._run(text) | [] |
2024-01-10 | cds-snc/url-shortener | api~utils~helpers.py | import base64
import datetime
import hashlib
import math
import os
import traceback
from urllib.parse import urlparse
import advocate
import jwt
import requests
import validators
from models import ShortUrls
from logger import log
from notifications_python_client.notifications import NotificationsAPIClient
MAX_URL_LENGTH = 2048
NOTIFY_API_KEY = os.environ.get("NOTIFY_API_KEY", None)
def calculate_hash_bytes(length: int):
"""
calculate_hash_bytes determines the number of bytes required for
shake256 hashing algo given a desired output length.
Base64 encodes three bytes to four characters. The calculation
does not consider trimmed padding, if any exists.
parameter length: desired output length
returns: bytes required for shake 256 hashing
"""
return math.ceil(3 * (length / 4))
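def _calculate_hash_bytes_example():
    # Worked example (sketch): 8 base64 characters require ceil(3 * 8 / 4) == 6
    # digest bytes, and 4 characters require exactly 3 bytes.
    assert calculate_hash_bytes(8) == 6
    assert calculate_hash_bytes(4) == 3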
def translate_safe_charset(b64string: str):
"""
translate standard base64 char set so that chars are in a-zA-Z0-9 excluding:
bB cC dD gG iI mM nN oO pP tT uU vV 0 1 3
this follows covid alert char set guidance: AEFHJKLQRSUWXYZ and 2456789
deviation from guidance:
uU is deliberately excluded
lower cases are included
translation table:
bB -> aA
cC -> eE
dD -> eE
gG -> hH
iI -> jJ
mM -> qQ
nN -> qQ
oO -> qQ
pP -> qQ
tT -> sS
uU -> wW
vV -> wW
0 -> 2
1 -> 2
3 -> 4
+ -> A
/ -> Z
"""
std_base64charset = (
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
)
safe_charset = "AAEEEFHHJJKLQQQQQRSSWWWXYZaaedefhhhjklqqqqqrsswwwxyz2224456789AZ"
return b64string.translate(b64string.maketrans(std_base64charset, safe_charset))
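def _translate_safe_charset_example():
    # Sketch restricted to the upper-case and digit rows of the table documented
    # above: B->A, 0->2, C->E, 1->2 and '+'->A.
    assert translate_safe_charset("B0C1+") == "A2E2A"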
def generate_short_url(
original_url: str, pepper: str, length: int = 8, hint=None, padding=False
):
"""
generate_short_url generates an length character hex digest used to
represent the original url.
parameter original_url: the url that the user passes to the api
parameter pepper: secret to add to hashing
parameter length: output length
parameter hint: overrides output with specified value
returns: base64 encoding, without padding, representing the shortened url
"""
if hint:
return hint
length = max(length, 4)
# note that the normal convention is to add pepper as a suffix
data = original_url + pepper
digest = hashlib.shake_256()
digest.update(data.encode())
return translate_safe_charset(
base64.b64encode(digest.digest(calculate_hash_bytes(length)))
.decode()
.rstrip("=" if not padding else "")
)
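def _generate_short_url_example():
    # Sketch only: "example-pepper" is an illustrative secret, not the service's
    # real configuration. The output is deterministic for a given URL and pepper
    # and, at length 8, contains no base64 padding, so it is exactly 8 characters.
    short = generate_short_url("https://digital.canada.ca/", "example-pepper", length=8)
    assert short == generate_short_url("https://digital.canada.ca/", "example-pepper", length=8)
    assert len(short) == 8
    return short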
def is_domain_allowed(original_url):
"""is_domain_allowed determines if the domain of the url passed in as a parameter is allowed in a list of allowed domains
parameter original_url: the url that the user passes to the api
returns: True if the domain is allowed and False if it is not."""
try:
# Obtain the domain from the url
domain = ".".join(urlparse(original_url).hostname.split(".")[-2:])
return domain in os.getenv("ALLOWED_SHORTENED_DOMAINS").split(",")
except Exception:
return {"error": "error retrieving domain"}
def is_valid_url(original_url):
"""is_valid_url determines if the url passed in as a parameter is a valid url
parameter original_url: the url that the user passes to the api
returns: True if the url is valid and False if it is not."""
try:
return validators.url(original_url)
except Exception as err:
log.warning(f"Could not validate url: {original_url}: {err}")
return False
def is_valid_scheme(original_url):
"""is_valid_scheme determines if scheme is https
parameter original_url: the url that the user passes to the api
returns: True if scheme is https, False otherwise"""
return urlparse(original_url).scheme.casefold() == "https".casefold()
def resolve_short_url(short_url):
"""resolve_short_url function resolves the short url to the original url
parameter short_url: the shortened url
returns: the original url or False if the short url cannot be resolved"""
if "CYPRESS_CI" in os.environ:
return {"original_url": {"S": "https://digital.canada.ca/"}}
result = ShortUrls.get_short_url(short_url)
if result is None:
log.info(f"UNRESOLVABLE: Could not resolve url: {short_url}")
return False
elif not is_domain_allowed(result["original_url"]["S"]):
log.warning(
f"SUSPICIOUS: found shortened URL '{short_url}' that is not allowed for '{result['original_url']['S']}'"
)
return False
return result
def return_short_url(original_url, peppers, created_by):
"""return_short_url function returns the shortened url
parameter original_url: the url that the user passes to the api
parameter peppers: peppers iterable used for hashing input
returns: the shortened url or an error message if the shortened url cannot be generated
"""
try:
advocate.get(original_url)
except advocate.UnacceptableAddressException:
log.warning(f"Unacceptable address: {original_url}")
return {"error": "error_forbidden_resource"}
except requests.RequestException:
log.warning(f"Failed to connect to {original_url}: {traceback.format_exc()}")
return {"error": "error_filed_to_connect_url"}
peppers_iter = iter(peppers)
short_url = None
while short_url is None:
try:
pepper = next(peppers_iter)
try:
candidate_url = generate_short_url(
original_url, pepper, int(os.getenv("SHORTENER_PATH_LENGTH"))
)
short_url = ShortUrls.create_short_url(
original_url, candidate_url, created_by
)
except ValueError as err:
# collision
log.info(
f"Retrying, collision detected for {candidate_url} "
f"generated for {original_url}: {err}"
)
except StopIteration:
log.error("Could not generate URL, pepper(s) exhausted")
return {"error": "error_url_shorten_failed"}
return short_url
def validate_and_shorten_url(original_url, created_by):
"""validate_and_shorten_url function validates the url passed in as a parameter and then shortens it
parameter original_url: the url that the user passes to the api
returns: a dictionary containing the shortened url and the original url"""
try:
# Check to see if the url confronts to a valid format. If not then display error.
if not is_valid_url(original_url):
data = {
"error": "error_url_shorten_url_not_valid",
"original_url": original_url,
"status": "ERROR",
}
# Else if scheme is invalid (i.e. not https), display error
elif not is_valid_scheme(original_url):
data = {
"error": "error_url_shorten_invalid_scheme",
"original_url": original_url,
"status": "ERROR",
}
# Else if URL is too long
elif len(original_url) >= MAX_URL_LENGTH:
data = {
"error": "error_url_shorten_url_too_long",
"original_url": original_url,
"status": "ERROR",
}
# Else if the domain is not allowed, display error
elif not is_domain_allowed(original_url):
data = {
"error": "error_url_shorten_invalid_host",
"original_url": original_url,
"status": "ERROR",
}
# Else, we are all good to shorten!
else:
short_url = return_short_url(
original_url, os.getenv("PEPPERS").split(","), created_by
)
if isinstance(short_url, dict):
return {
"error": short_url["error"],
"original_url": original_url,
"status": "ERROR",
}
shortener_domain = os.getenv("SHORTENER_DOMAIN") or ""
log.info(
f"Shortened URL: '{short_url}' from '{original_url}' created by '{created_by}'"
)
data = {
"short_url": f"{shortener_domain}{short_url}",
"original_url": original_url,
"status": "OK",
}
except Exception:
log.error(
"Could not shorten URL '%s': %s", original_url, traceback.format_exc()
)
data = {
"error": "error_url_shorten_failed",
"original_url": original_url,
"status": "ERROR",
}
# Log the result of the operation without the status.
# This is to avoid triggering ERROR CloudWatch alarms.
data_no_status = data.copy()
data_no_status.pop("status", None)
log.info("Shorten URL result: %s", data_no_status)
return data
def redact_value(value, min_length=8):
"""Given a value, redact it and display the last 4 characters of the value
provided it is longer the minimum lenght (default 8)."""
value_length = len(value)
return (
"*" * (value_length - 4) + value[-4:]
if value_length >= min_length
else "*" * value_length
)
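def _redact_value_example():
    # Worked example (sketch): values of at least 8 characters keep their last
    # 4 characters; shorter values are fully masked.
    assert redact_value("abcdefgh") == "****efgh"
    assert redact_value("abc") == "***"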
def generate_token(salt, valid_minutes=5):
"""Generate a JWT that is valid for a set period of time"""
return jwt.encode(
{"exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=valid_minutes)},
key=salt,
algorithm="HS256",
)
def validate_token(jwt_token, salt):
"""Check that a given JWT has not expired"""
try:
jwt.decode(jwt_token, key=salt, algorithms=["HS256"])
except Exception:
log.info(
"JWT token '%s' is invalid with salt '%s'", jwt_token, redact_value(salt)
)
return False
return True
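def _token_round_trip_example():
    # Sketch: "example-salt" is an illustrative value, not a real secret. A token
    # validates only against the salt that signed it.
    token = generate_token("example-salt", valid_minutes=1)
    assert validate_token(token, "example-salt") is True
    assert validate_token(token, "another-salt") is False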
def notification_client():
return NotificationsAPIClient(
NOTIFY_API_KEY, base_url="https://api.notification.canada.ca"
)
| [] |
2024-01-10 | jhoandvid/fruit-fungal-diseases | src~service~contents_service.py | import io
import os
import nlpcloud
import re
from fastapi import UploadFile, status, HTTPException
from PyPDF2 import PdfReader
from langchain.chat_models import ChatOpenAI
import json
import tiktoken
from langchain.text_splitter import CharacterTextSplitter
from langchain.llms import OpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains.question_answering import load_qa_chain
from langchain.vectorstores import FAISS
from langchain.callbacks import get_openai_callback
from src.database.repository.contents_repository import ContentsRepository
from src.service.response_question_service import ResponseQuestionService
from src.entity.contents import ContentsEntity, ConsultContentInformation, UpdateContents
from src.utils.environment.env import setting
from src.database.repository.fruit_fungal_diseases_repository import FruitFungalDiaseasesRepository
contents_repository = ContentsRepository()
response_question_service = ResponseQuestionService()
fruits_fungal_disease_repository = FruitFungalDiaseasesRepository()
class ContentsService:
def extract_array(self, text: str):
pattern = r'\[.*?\]'
matches = re.findall(pattern, text)
if len(matches) > 0:
first_match = matches[0]
try:
result = eval(first_match)
return result
except SyntaxError:
return []
else:
return []
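    def _extract_array_example(self):
        # Sketch only: the sample reply below is illustrative, not a real model
        # response; extract_array pulls out the first bracketed list it finds.
        reply = "Respuesta: ['Botrytis cinerea', 'Antracnosis'] segun el contexto."
        return self.extract_array(reply)  # -> ['Botrytis cinerea', 'Antracnosis']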
async def _extract_text_from_pdf(self, pdf_file: UploadFile):
pdf_content = await pdf_file.read()
pdf_reader = PdfReader(io.BytesIO(pdf_content))
pdf_text = ""
for page in pdf_reader.pages:
pdf_text += page.extract_text()
return pdf_text
async def _splitter_data(self, file):
text = await self._extract_text_from_pdf(file)
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
information = text_splitter.split_text(text)
return information
async def create_contents(self, user_id: str, data_contents: ContentsEntity, file: UploadFile):
information = await self._splitter_data(file)
document_contents = {
"information": information,
"user_id": user_id,
"title": file.filename.replace(".pdf", ""),
"category": data_contents.category,
"fruit": data_contents.fruit
}
content = contents_repository.create_contents(document_contents)
return content
def search_info_embedding_by_nlCloud(self, user_id, search: ConsultContentInformation):
contents_db = contents_repository.search_info_contents(user_id, search)
client = nlpcloud.Client("finetuned-llama-2-70b", "5bb1ec4f6a2e9d62027048f1c879eefdee2a5f51", gpu=True)
response = client.question(
question="""Como experto en biología y enfermedades de plantas, por favor, clasifica las dos enfermedades de plantas más comunes que afectan a las. Proporciona sus nombres exactos en un arreglo como este: ['Enfermedad 1', 'Enfermedad 2']. Limita tu respuesta solo a las enfermedades que te proporcione en el contexto.
¿{}?.""".format(search.question),
context=contents_db["information"]
)
print(response)
diseases = self.extract_array(response["answer"])
response_question = {
"user_id": user_id,
"response": response["answer"],
"fruit": search.fruit,
"prompt": search.question,
"answer_correct": True
}
response_question_service.create_response_question(response_question)
consult = {
'scientific_name': {
'$in': [re.compile(re.escape(disease), re.IGNORECASE) for disease in diseases]
}
}
result = fruits_fungal_disease_repository.find_fruit_disease_by_consult(consult)
if result is None:
return []
serialized_response_fruit = []
for result_fruit in result:
result_fruit['_id'] = str(result_fruit['_id'])
serialized_response_fruit.append(result_fruit)
return serialized_response_fruit
return {"fruit_disease": result}
def search_info_embedding_by_OpeIA(self, user_id, search: ConsultContentInformation):
os.environ['OPENAI_API_KEY'] = setting.OPEN_KEY
contents_db = contents_repository.search_info_contents(user_id, search)
if contents_db is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="The collection was not found"
)
        # Save the search query in a document
contents = OpenAIEmbeddings()
knowledge_base = FAISS.from_texts(contents_db["information"], contents)
docs = knowledge_base.similarity_search(search.question)
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs,
question=f"Como experto en biología y enfermedades de plantas, por favor, clasifica las dos enfermedades de plantas más comunes que afectan a las. Proporciona sus nombres exactos un array de esta manera: ['Enfermedad 1', 'Enfermedad 2'] nada mas!. Limita tu respuesta solo a las enfermedades que te proporcione en el contexto. ; ${search.question}, Damela en un array y solo los nombres nada mas, de esta manera: ['enfermedad1', 'enfermedad2' ]!! ")
response_question_gtp = {
"user_id": user_id,
"response": response,
"fruit": search.fruit,
"prompt": search.question,
"answer_correct": True
}
response_question_service.create_response_question(response_question_gtp)
diseases = eval(response)
print(diseases)
# regex_patterns = [ for disease in diseases]
consult = {
'scientific_name': {
'$in': [re.compile(re.escape(disease), re.IGNORECASE) for disease in diseases]
}
}
result = fruits_fungal_disease_repository.find_fruit_disease_by_consult(consult)
if result is None:
return []
serialized_response_fruit = []
for result_fruit in result:
result_fruit['_id'] = str(result_fruit['_id'])
serialized_response_fruit.append(result_fruit)
return serialized_response_fruit
return {"fruit_disease": result}
def find_one_content_by_content_id(self, content_id, user_id):
content_db = contents_repository.find_contents_by_Id(content_id, user_id)
if content_db is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="El contenido no existe"
)
return content_db
async def upload_content(self, content_id, user_id, content_data: UpdateContents, file):
self.find_one_content_by_content_id(content_id, user_id)
if file is not None:
information = await self._splitter_data(file)
content_data.information = information
new_content = contents_repository.update_contents(content_id, user_id, content_data)
return new_content
| [] |
2024-01-10 | zenetio/Horizon | ml~rl~test~workflow~eval_cartpole.py | #!/usr/bin/env python3
import argparse
import logging
import sys
from ml.rl.test.gym.open_ai_gym_environment import OpenAIGymEnvironment
from ml.rl.training.dqn_predictor import DQNPredictor
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ENV = "CartPole-v0"
AVG_OVER_NUM_EPS = 100
def main(model_path):
predictor = DQNPredictor.load(model_path, "minidb", int_features=False)
env = OpenAIGymEnvironment(gymenv=ENV)
avg_rewards, avg_discounted_rewards = env.run_ep_n_times(
AVG_OVER_NUM_EPS, predictor, test=True
)
logger.info(
"Achieved an average reward score of {} over {} evaluations.".format(
avg_rewards, AVG_OVER_NUM_EPS
)
)
def parse_args(args):
if len(args) != 3:
raise Exception("Usage: python <file.py> -m <parameters_file>")
parser = argparse.ArgumentParser(description="Read command line parameters.")
parser.add_argument("-m", "--model", help="Path to Caffe2 model.")
args = parser.parse_args(args[1:])
return args.model
if __name__ == "__main__":
model_path = parse_args(sys.argv)
main(model_path)
| [] |
2024-01-10 | zenetio/Horizon | ml~rl~test~gym~run_gym.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import argparse
import json
import logging
import sys
from copy import deepcopy
import numpy as np
import torch
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from ml.rl.models.actor import GaussianFullyConnectedActor
from ml.rl.models.fully_connected_network import FullyConnectedNetwork
from ml.rl.models.parametric_dqn import FullyConnectedParametricDQN
from ml.rl.preprocessing.normalization import get_num_output_features
from ml.rl.test.gym.gym_predictor import (
GymDDPGPredictor,
GymDQNPredictor,
GymSACPredictor,
)
from ml.rl.test.gym.open_ai_gym_environment import (
EnvType,
ModelType,
OpenAIGymEnvironment,
)
from ml.rl.test.gym.open_ai_gym_memory_pool import OpenAIGymMemoryPool
from ml.rl.test.utils import write_lists_to_csv
from ml.rl.thrift.core.ttypes import (
CNNParameters,
ContinuousActionModelParameters,
DDPGModelParameters,
DDPGNetworkParameters,
DDPGTrainingParameters,
DiscreteActionModelParameters,
FeedForwardParameters,
OptimizerParameters,
RainbowDQNParameters,
RLParameters,
SACModelParameters,
SACTrainingParameters,
TrainingParameters,
)
from ml.rl.training.ddpg_trainer import DDPGTrainer
from ml.rl.training.dqn_trainer import DQNTrainer
from ml.rl.training.parametric_dqn_trainer import ParametricDQNTrainer
from ml.rl.training.rl_dataset import RLDataset
from ml.rl.training.sac_trainer import SACTrainer
logger = logging.getLogger(__name__)
USE_CPU = -1
def get_possible_actions(gym_env, model_type, terminal):
if model_type == ModelType.PYTORCH_DISCRETE_DQN.value:
possible_next_actions = None
possible_next_actions_mask = [
0 if terminal else 1 for __ in range(gym_env.action_dim)
]
elif model_type == ModelType.PYTORCH_PARAMETRIC_DQN.value:
possible_next_actions = np.eye(gym_env.action_dim)
possible_next_actions_mask = [
0 if terminal else 1 for __ in range(gym_env.action_dim)
]
elif model_type == ModelType.CONTINUOUS_ACTION.value:
possible_next_actions = None
possible_next_actions_mask = None
elif model_type == ModelType.SOFT_ACTOR_CRITIC.value:
possible_next_actions = None
possible_next_actions_mask = None
else:
raise NotImplementedError()
return possible_next_actions, possible_next_actions_mask
def train_sgd(
c2_device,
gym_env,
replay_buffer,
model_type,
trainer,
predictor,
test_run_name,
score_bar,
num_episodes=301,
max_steps=None,
train_every_ts=100,
train_after_ts=10,
test_every_ts=100,
test_after_ts=10,
num_train_batches=1,
avg_over_num_episodes=100,
render=False,
save_timesteps_to_dataset=None,
start_saving_from_episode=0,
):
return train_gym_online_rl(
c2_device,
gym_env,
replay_buffer,
model_type,
trainer,
predictor,
test_run_name,
score_bar,
num_episodes,
max_steps,
train_every_ts,
train_after_ts,
test_every_ts,
test_after_ts,
num_train_batches,
avg_over_num_episodes,
render,
save_timesteps_to_dataset,
start_saving_from_episode,
)
def train_gym_online_rl(
c2_device,
gym_env,
replay_buffer,
model_type,
trainer,
predictor,
test_run_name,
score_bar,
num_episodes,
max_steps,
train_every_ts,
train_after_ts,
test_every_ts,
test_after_ts,
num_train_batches,
avg_over_num_episodes,
render,
save_timesteps_to_dataset,
start_saving_from_episode,
):
"""Train off of dynamic set of transitions generated on-policy."""
total_timesteps = 0
avg_reward_history, timestep_history = [], []
for i in range(num_episodes):
terminal = False
next_state = gym_env.transform_state(gym_env.env.reset())
next_action, next_action_probability = gym_env.policy(
predictor, next_state, False
)
reward_sum = 0
ep_timesteps = 0
if model_type == ModelType.CONTINUOUS_ACTION.value:
trainer.noise.clear()
while not terminal:
state = next_state
action = next_action
action_probability = next_action_probability
# Get possible actions
possible_actions, _ = get_possible_actions(gym_env, model_type, terminal)
if render:
gym_env.env.render()
action_to_log, gym_action = _format_action_for_log_and_gym(
action, gym_env.action_type, model_type
)
            next_state, reward, terminal, _ = gym_env.env.step(gym_action)
next_state = gym_env.transform_state(next_state)
ep_timesteps += 1
total_timesteps += 1
next_action, next_action_probability = gym_env.policy(
predictor, next_state, False
)
next_action_to_log, _ = _format_action_for_log_and_gym(
next_action, gym_env.action_type, model_type
)
reward_sum += reward
(possible_actions, possible_actions_mask) = get_possible_actions(
gym_env, model_type, False
)
# Get possible next actions
(possible_next_actions, possible_next_actions_mask) = get_possible_actions(
gym_env, model_type, terminal
)
replay_buffer.insert_into_memory(
np.float32(state),
action,
np.float32(reward),
np.float32(next_state),
next_action,
terminal,
possible_next_actions,
possible_next_actions_mask,
1,
possible_actions,
possible_actions_mask,
)
if save_timesteps_to_dataset and i >= start_saving_from_episode:
save_timesteps_to_dataset.insert(
i,
ep_timesteps - 1,
state.tolist(),
action_to_log,
reward,
terminal,
possible_actions.tolist()
if possible_actions is not None
else possible_actions_mask,
1,
action_probability,
)
# Training loop
if (
total_timesteps % train_every_ts == 0
and total_timesteps > train_after_ts
and len(replay_buffer.replay_memory) >= trainer.minibatch_size
):
for _ in range(num_train_batches):
samples = replay_buffer.sample_memories(
trainer.minibatch_size, model_type
)
samples.set_type(trainer.dtype)
trainer.train(samples)
# Evaluation loop
if total_timesteps % test_every_ts == 0 and total_timesteps > test_after_ts:
avg_rewards, avg_discounted_rewards = gym_env.run_ep_n_times(
avg_over_num_episodes, predictor, test=True
)
avg_reward_history.append(avg_rewards)
timestep_history.append(total_timesteps)
logger.info(
"Achieved an average reward score of {} over {} evaluations."
" Total episodes: {}, total timesteps: {}.".format(
avg_rewards, avg_over_num_episodes, i + 1, total_timesteps
)
)
if score_bar is not None and avg_rewards > score_bar:
logger.info(
"Avg. reward history for {}: {}".format(
test_run_name, avg_reward_history
)
)
return avg_reward_history, timestep_history, trainer, predictor
if max_steps and ep_timesteps >= max_steps:
break
# If the episode ended due to a terminal state being hit, log that
if terminal and save_timesteps_to_dataset:
save_timesteps_to_dataset.insert(
i,
ep_timesteps,
next_state.tolist(),
next_action_to_log,
0.0,
terminal,
possible_next_actions.tolist()
if possible_next_actions is not None
else possible_next_actions_mask,
1,
next_action_probability,
)
# Always eval on last episode if previous eval loop didn't return.
if i == num_episodes - 1:
avg_rewards, avg_discounted_rewards = gym_env.run_ep_n_times(
avg_over_num_episodes, predictor, test=True
)
avg_reward_history.append(avg_rewards)
timestep_history.append(total_timesteps)
logger.info(
"Achieved an average reward score of {} over {} evaluations."
" Total episodes: {}, total timesteps: {}.".format(
avg_rewards, avg_over_num_episodes, i + 1, total_timesteps
)
)
logger.info(
"Avg. reward history for {}: {}".format(test_run_name, avg_reward_history)
)
return avg_reward_history, timestep_history, trainer, predictor
def main(args):
parser = argparse.ArgumentParser(
description="Train a RL net to play in an OpenAI Gym environment."
)
parser.add_argument("-p", "--parameters", help="Path to JSON parameters file.")
parser.add_argument(
"-s",
"--score-bar",
help="Bar for averaged tests scores.",
type=float,
default=None,
)
parser.add_argument(
"-g",
"--gpu_id",
help="If set, will use GPU with specified ID. Otherwise will use CPU.",
default=USE_CPU,
)
parser.add_argument(
"-l",
"--log_level",
help="If set, use logging level specified (debug, info, warning, error, "
"critical). Else defaults to info.",
default="info",
)
parser.add_argument(
"-f",
"--file_path",
help="If set, save all collected samples as an RLDataset to this file.",
default=None,
)
parser.add_argument(
"-e",
"--start_saving_from_episode",
type=int,
help="If file_path is set, start saving episodes from this episode num.",
default=0,
)
parser.add_argument(
"-r",
"--results_file_path",
help="If set, save evaluation results to file.",
type=str,
default=None,
)
args = parser.parse_args(args)
if args.log_level not in ("debug", "info", "warning", "error", "critical"):
raise Exception("Logging level {} not valid level.".format(args.log_level))
else:
logger.setLevel(getattr(logging, args.log_level.upper()))
with open(args.parameters, "r") as f:
params = json.load(f)
dataset = RLDataset(args.file_path) if args.file_path else None
reward_history, timestep_history, trainer, predictor = run_gym(
params, args.score_bar, args.gpu_id, dataset, args.start_saving_from_episode
)
if dataset:
dataset.save()
if args.results_file_path:
write_lists_to_csv(args.results_file_path, reward_history, timestep_history)
return reward_history
def run_gym(
params,
score_bar,
gpu_id,
save_timesteps_to_dataset=None,
start_saving_from_episode=0,
):
logger.info("Running gym with params")
logger.info(params)
rl_parameters = RLParameters(**params["rl"])
env_type = params["env"]
env = OpenAIGymEnvironment(
env_type,
rl_parameters.epsilon,
rl_parameters.softmax_policy,
rl_parameters.gamma,
)
replay_buffer = OpenAIGymMemoryPool(params["max_replay_memory_size"])
model_type = params["model_type"]
use_gpu = gpu_id != USE_CPU
trainer = create_trainer(params["model_type"], params, rl_parameters, use_gpu, env)
predictor = create_predictor(trainer, model_type, use_gpu)
c2_device = core.DeviceOption(
caffe2_pb2.CUDA if use_gpu else caffe2_pb2.CPU, int(gpu_id)
)
return train_sgd(
c2_device,
env,
replay_buffer,
model_type,
trainer,
predictor,
"{} test run".format(env_type),
score_bar,
**params["run_details"],
save_timesteps_to_dataset=save_timesteps_to_dataset,
start_saving_from_episode=start_saving_from_episode,
)
def create_trainer(model_type, params, rl_parameters, use_gpu, env):
if model_type == ModelType.PYTORCH_DISCRETE_DQN.value:
training_parameters = params["training"]
if isinstance(training_parameters, dict):
training_parameters = TrainingParameters(**training_parameters)
rainbow_parameters = params["rainbow"]
if isinstance(rainbow_parameters, dict):
rainbow_parameters = RainbowDQNParameters(**rainbow_parameters)
if env.img:
assert (
training_parameters.cnn_parameters is not None
), "Missing CNN parameters for image input"
if isinstance(training_parameters.cnn_parameters, dict):
training_parameters.cnn_parameters = CNNParameters(
**training_parameters.cnn_parameters
)
training_parameters.cnn_parameters.conv_dims[0] = env.num_input_channels
training_parameters.cnn_parameters.input_height = env.height
training_parameters.cnn_parameters.input_width = env.width
training_parameters.cnn_parameters.num_input_channels = (
env.num_input_channels
)
else:
assert (
training_parameters.cnn_parameters is None
), "Extra CNN parameters for non-image input"
trainer_params = DiscreteActionModelParameters(
actions=env.actions,
rl=rl_parameters,
training=training_parameters,
rainbow=rainbow_parameters,
)
trainer = DQNTrainer(trainer_params, env.normalization, use_gpu)
elif model_type == ModelType.PYTORCH_PARAMETRIC_DQN.value:
training_parameters = params["training"]
if isinstance(training_parameters, dict):
training_parameters = TrainingParameters(**training_parameters)
rainbow_parameters = params["rainbow"]
if isinstance(rainbow_parameters, dict):
rainbow_parameters = RainbowDQNParameters(**rainbow_parameters)
if env.img:
assert (
training_parameters.cnn_parameters is not None
), "Missing CNN parameters for image input"
training_parameters.cnn_parameters.conv_dims[0] = env.num_input_channels
else:
assert (
training_parameters.cnn_parameters is None
), "Extra CNN parameters for non-image input"
trainer_params = ContinuousActionModelParameters(
rl=rl_parameters, training=training_parameters, rainbow=rainbow_parameters
)
trainer = ParametricDQNTrainer(
trainer_params, env.normalization, env.normalization_action, use_gpu
)
elif model_type == ModelType.CONTINUOUS_ACTION.value:
training_parameters = params["shared_training"]
if isinstance(training_parameters, dict):
training_parameters = DDPGTrainingParameters(**training_parameters)
actor_parameters = params["actor_training"]
if isinstance(actor_parameters, dict):
actor_parameters = DDPGNetworkParameters(**actor_parameters)
critic_parameters = params["critic_training"]
if isinstance(critic_parameters, dict):
critic_parameters = DDPGNetworkParameters(**critic_parameters)
trainer_params = DDPGModelParameters(
rl=rl_parameters,
shared_training=training_parameters,
actor_training=actor_parameters,
critic_training=critic_parameters,
)
action_range_low = env.action_space.low.astype(np.float32)
action_range_high = env.action_space.high.astype(np.float32)
trainer = DDPGTrainer(
trainer_params,
env.normalization,
env.normalization_action,
torch.from_numpy(action_range_low).unsqueeze(dim=0),
torch.from_numpy(action_range_high).unsqueeze(dim=0),
use_gpu,
)
elif model_type == ModelType.SOFT_ACTOR_CRITIC.value:
trainer_params = SACModelParameters(
rl=rl_parameters,
training=SACTrainingParameters(
minibatch_size=params["sac_training"]["minibatch_size"],
use_2_q_functions=params["sac_training"]["use_2_q_functions"],
q_network_optimizer=OptimizerParameters(
**params["sac_training"]["q_network_optimizer"]
),
value_network_optimizer=OptimizerParameters(
**params["sac_training"]["value_network_optimizer"]
),
actor_network_optimizer=OptimizerParameters(
**params["sac_training"]["actor_network_optimizer"]
),
entropy_temperature=params["sac_training"]["entropy_temperature"],
),
q_network=FeedForwardParameters(**params["sac_q_training"]),
value_network=FeedForwardParameters(**params["sac_value_training"]),
actor_network=FeedForwardParameters(**params["sac_actor_training"]),
)
trainer = get_sac_trainer(env, trainer_params, use_gpu)
else:
raise NotImplementedError("Model of type {} not supported".format(model_type))
return trainer
def get_sac_trainer(env, parameters, use_gpu):
trainer_args, trainer_kwargs = _get_sac_trainer_params(env, parameters, use_gpu)
return SACTrainer(*trainer_args, **trainer_kwargs)
def _get_sac_trainer_params(env, sac_model_params, use_gpu):
state_dim = get_num_output_features(env.normalization)
action_dim = get_num_output_features(env.normalization_action)
q1_network = FullyConnectedParametricDQN(
state_dim,
action_dim,
sac_model_params.q_network.layers,
sac_model_params.q_network.activations,
)
q2_network = None
if sac_model_params.training.use_2_q_functions:
q2_network = FullyConnectedParametricDQN(
state_dim,
action_dim,
sac_model_params.q_network.layers,
sac_model_params.q_network.activations,
)
value_network = FullyConnectedNetwork(
[state_dim] + sac_model_params.value_network.layers + [1],
sac_model_params.value_network.activations + ["linear"],
)
actor_network = GaussianFullyConnectedActor(
state_dim,
action_dim,
sac_model_params.actor_network.layers,
sac_model_params.actor_network.activations,
)
if use_gpu:
q1_network.cuda()
if q2_network:
q2_network.cuda()
value_network.cuda()
actor_network.cuda()
value_network_target = deepcopy(value_network)
min_action_range_tensor_training = torch.full((1, action_dim), -1 + 1e-6)
max_action_range_tensor_training = torch.full((1, action_dim), 1 - 1e-6)
action_range_low = env.action_space.low.astype(np.float32)
action_range_high = env.action_space.high.astype(np.float32)
min_action_range_tensor_serving = torch.from_numpy(action_range_low).unsqueeze(
dim=0
)
max_action_range_tensor_serving = torch.from_numpy(action_range_high).unsqueeze(
dim=0
)
trainer_args = [
q1_network,
value_network,
value_network_target,
actor_network,
sac_model_params,
]
trainer_kwargs = {
"q2_network": q2_network,
"min_action_range_tensor_training": min_action_range_tensor_training,
"max_action_range_tensor_training": max_action_range_tensor_training,
"min_action_range_tensor_serving": min_action_range_tensor_serving,
"max_action_range_tensor_serving": max_action_range_tensor_serving,
}
return trainer_args, trainer_kwargs
def _format_action_for_log_and_gym(action, env_type, model_type):
if env_type == EnvType.DISCRETE_ACTION:
action_index = np.argmax(action)
if model_type == ModelType.PYTORCH_DISCRETE_DQN.value:
return str(action_index), int(action_index)
else:
return action.tolist(), int(action_index)
return action.tolist(), action.tolist()
def create_predictor(trainer, model_type, use_gpu):
if model_type == ModelType.CONTINUOUS_ACTION.value:
predictor = GymDDPGPredictor(trainer)
elif model_type == ModelType.SOFT_ACTOR_CRITIC.value:
predictor = GymSACPredictor(trainer)
elif model_type in (
ModelType.PYTORCH_DISCRETE_DQN.value,
ModelType.PYTORCH_PARAMETRIC_DQN.value,
):
predictor = GymDQNPredictor(trainer)
else:
raise NotImplementedError()
return predictor
if __name__ == "__main__":
args = sys.argv
if len(args) not in [3, 5, 7, 9, 11]:
raise Exception(
"Usage: python run_gym.py -p <parameters_file>"
+ " [-s <score_bar>] [-g <gpu_id>] [-l <log_level>] [-f <filename>]"
)
main(args[1:])
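# Hedged usage sketch (editor's illustration, not part of the original script); the
# parameters file and output names below are placeholders:
#   python ml/rl/test/gym/run_gym.py -p discrete_dqn_cartpole.json -s 195.0 -f samples.json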
| [] |
2024-01-10 | aziz-serin/chatty | src~va~services~chat_service.py | from flask import json, Response
from src.va.openai_tools.ai_chat import OpenAIChat
from src.va.context.context import Context
from src.va.openai_tools.error import InvalidMessageError, TokenLimitError, \
NullResponseError, VAError, OpenAIAPIKeyError
from .service import Service
import logging
class ChatService(Service):
def __init__(self):
super().__init__()
self.logger = logging.getLogger("chatty")
def chat(self, content:dict) -> Response:
try:
prompt = content["prompt"]
model = content["model"]
token_limit = content["token_limit"]
except KeyError as err:
self.logger.debug(err)
return Response(
response=json.dumps({
"reason": "Invalid/Bad Request"
}),
status=400,
mimetype='application/json'
)
openai_chat = OpenAIChat(
model=model,
config=self.system_config,
token_limit=token_limit
)
try:
system = content["system_config"]
openai_chat.system_config = system
except KeyError:
pass
try:
response = openai_chat.send_message(prompt, False)
data = {
"response": response,
"token_count": openai_chat.get_current_token_count(reply=response)
}
return Response(
response=json.dumps(data),
status=200,
mimetype='application/json'
)
        except (InvalidMessageError, TokenLimitError) as err:
self.logger.debug(err)
return Response(
response=json.dumps({
"reason": "Invalid/Bad Request"
}),
status=400,
mimetype='application/json'
)
        except (OpenAIAPIKeyError, NullResponseError, VAError) as err:
self.logger.debug(err)
return Response(
response=json.dumps({
"reason": "Internal Server Error"
}),
status=500,
mimetype='application/json'
)
def conversation(self, content:dict) -> Response:
connection = self.factory.get_context_connection()
if connection is None:
self.logger.debug("Could not establish connection with database")
return Response(
response=json.dumps({
"reason": "Internal Server Error"
}),
status=500,
mimetype='application/json'
)
try:
prompt = content["prompt"]
except KeyError as err:
self.logger.debug(err)
return Response(
response=json.dumps({
"reason": "Invalid/Bad Request"
}),
status=400,
mimetype='application/json'
)
context_id = None
try:
context_id = content["context_id"]
except KeyError:
pass
if context_id is not None:
context_document = connection.get_document_by_id(context_id)
if context_document is not None:
context = Context()
context.load_from_json(context_document)
else:
context = Context(
config={},
chat_model="gpt-3.5-turbo",
token_limit= 4000,
default=True
)
else:
context = Context(
config={},
chat_model="gpt-3.5-turbo",
token_limit=4000,
default=True
)
openai_chat = OpenAIChat(
model=context.chat_model,
config=context.config,
token_limit=context.token_limit,
initial_messages=context.messages
)
try:
system = content["system_config"]
openai_chat.system_config = system
except KeyError:
pass
try:
response = openai_chat.send_message(prompt, True)
context.messages = openai_chat.messages
if context.default:
# We are writing it, not default anymore
context.default = False
context_id = connection.insert_document(context.jsonify())
else:
connection.update_document(context_id, context.jsonify())
data = {
"response": response,
"token_count": openai_chat.get_current_token_count(),
"context_id": str(context_id)
}
return Response(
response=json.dumps(data),
status=200,
mimetype='application/json'
)
        except (InvalidMessageError, TokenLimitError) as err:
self.logger.debug(err)
return Response(
response=json.dumps({
"reason": "Invalid/Bad Request"
}),
status=400,
mimetype='application/json'
)
        except (OpenAIAPIKeyError, NullResponseError, VAError) as err:
self.logger.debug(err)
return Response(
response=json.dumps({
"reason": "Internal Server Error"
}),
status=500,
mimetype='application/json'
)
def get_all_contexts(self) -> Response:
context_connection = self.factory.get_context_connection()
contexts = context_connection.get_all_documents()
return Response(
response=json.dumps(contexts),
status=200,
mimetype='application/json'
)
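# Hedged usage sketch (editor's illustration, not part of the original file). The payload
# mirrors the keys read by ChatService.chat() above; the concrete values are assumptions.
#
#   service = ChatService()
#   payload = {
#       "prompt": "Summarise today's meeting notes.",
#       "model": "gpt-3.5-turbo",
#       "token_limit": 4000,
#       "system_config": "You are a concise assistant.",   # optional
#   }
#   single_reply = service.chat(payload)                    # flask.Response with a JSON body
#   follow_up = service.conversation({"prompt": "And the action items?"})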
| [] |
2024-01-10 | aziz-serin/chatty | src~va~openai_tools~ai_audio.py | import openai
import logging
import os
from .ai import OpenAI
from .error import FileSizeError, VAError
logger = logging.getLogger("chatty")
class OpenAIAudio(OpenAI):
"""
Given byte limit for audio files for openai at the time of writing this code (25Mb)
Supported audio file formats at the time of writing this code: ['m4a', 'mp3', 'webm', 'mp4', 'mpga', 'wav', 'mpeg']
"""
BYTE_LIMIT:int = 26_214_400
TEXT:str = "text"
def __init__(self, model:str="whisper-1"):
super().__init__(model)
def transcribe(self, file:str) -> str:
file = self.__open_file(file)
response = self.__send_request(file, openai.Audio.transcribe)
return response[self.TEXT]
def translate(self, file:str) -> str:
file = self.__open_file(file)
response = self.__send_request(file, openai.Audio.translate)
return response[self.TEXT]
def __send_request(self, file, function):
try:
return function(self.model, file)
except openai.OpenAIError as err:
logger.error(err.json_body)
raise VAError(err.json_body)
def __open_file(self, file:str):
try:
self.__validate_size(file)
return open(file, "rb")
except (FileSizeError, FileNotFoundError) as err:
            logger.error(str(err))
            raise VAError(str(err))
def __validate_size(self, file:str):
try:
size = os.path.getsize(file)
if size >= self.BYTE_LIMIT:
raise FileSizeError(f"Given file size {size} is larger than the limit {self.BYTE_LIMIT}")
except OSError as err:
logger.error(err)
raise VAError(err)
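# Hedged usage sketch (editor's illustration, not part of the original file). The audio
# file name is a placeholder and a valid OpenAI API key is assumed to be configured.
#
#   audio = OpenAIAudio(model="whisper-1")
#   try:
#       transcript = audio.transcribe("meeting.m4a")   # text in the spoken language
#       english = audio.translate("meeting.m4a")       # text translated to English
#   except VAError as err:
#       logger.error(err)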
| [] |
2024-01-10 | aziz-serin/chatty | src~va~services~stt_service.py | from werkzeug.datastructures import FileStorage
from flask import Response, json
from .service import Service
from src.va.openai_tools.ai_audio import OpenAIAudio
from src.va.openai_tools.error import VAError
import logging
import os
class SttService(Service):
    ALLOWED_EXTENSIONS: set = {"mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"}
    TRANSCRIBE: str = "transcribe"
    TRANSLATE: str = "translate"
    ALLOWED_STT_MODELS: set = {"whisper-1"}
def __init__(self):
super().__init__()
self.logger = logging.getLogger("chatty")
def stt(self, filename:str, file:FileStorage, form:dict, method:str):
model = self.__handle_form__(form)
if model is None:
audio = OpenAIAudio()
else:
audio = OpenAIAudio(model=model)
if method == self.TRANSCRIBE:
return self.transcribe(filename, file, audio)
elif method == self.TRANSLATE:
return self.translate(filename, file, audio)
else:
self.logger.debug(f"Method '{method}' is unknown for stt")
return Response(
response=json.dumps({
"reason": "Internal Server Error"
}),
status=500,
mimetype='application/json'
)
def transcribe(self, filename:str, file:FileStorage, audio:OpenAIAudio) -> Response:
return self.__speech_to_text__(filename, file, audio.transcribe)
def translate(self, filename:str, file:FileStorage, audio:OpenAIAudio) -> Response:
return self.__speech_to_text__(filename, file, audio.translate)
def __speech_to_text__(self, filename:str, file:FileStorage, function) -> Response:
path = self.__save_file__(filename, file)
if path is None:
return Response(
response=json.dumps({
"reason": "Internal Server Error"
}),
status=500,
mimetype='application/json'
)
try:
text = function(path)
self.__handle_delete__(path)
return Response(
response=json.dumps({
"response":text
}),
status=200,
mimetype='application/json'
)
except VAError as err:
self.logger.debug(err)
self.__handle_delete__(path)
return Response(
response=json.dumps({
"reason": "Internal Server Error"
}),
status=500,
mimetype='application/json'
)
def allowed_file(self, filename:str) -> bool:
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in self.ALLOWED_EXTENSIONS
def __save_file__(self, filename:str, file:FileStorage) -> str | None:
try:
path = os.path.join(self.filepath, filename)
file.save(path)
return path
except IOError:
self.logger.error(f"Could not save the file {filename} to specified path {self.filepath}")
return None
finally:
file.close()
def __handle_delete__(self, filepath:str):
try:
os.remove(filepath)
except OSError as err:
self.logger.error(err)
raise err
def __handle_form__(self, form:dict) -> str | None:
if "stt_model" not in form:
return None
if form["stt_model"] not in self.ALLOWED_STT_MODELS:
return None
return form["stt_model"]
| [] |
2024-01-10 | aziz-serin/chatty | src~va~services~service.py | from src.va.mongo.connection_factory import ConnectionFactory
from src.va.flaskr import app
from flask import current_app
from src.va.openai_tools.ai_chat import OpenAIChat
from src.va.services.error import InvalidKeyError
class Service:
def __init__(self):
with app.app_context():
self.mongo_config = current_app.config["mongo"]
self.system_config = current_app.config["system"]
self.filepath = current_app.config["UPLOAD_FOLDER"]
self.factory = ConnectionFactory(self.mongo_config["host"],
int(self.mongo_config["port"]),
self.mongo_config["username"],
self.mongo_config["password"])
def validate_openai_message_keys(self, messages: list[dict]):
valid_keys = [OpenAIChat.ROLE, OpenAIChat.CONTENT, OpenAIChat.SYSTEM, OpenAIChat.USER, OpenAIChat.ASSISTANT]
for message in messages:
check = all(item in valid_keys for item in list(message.keys()))
if not check:
raise InvalidKeyError("Some keys are not supported by openai APIs")
def validate_context_fields(self, key_list: list):
context_fields = ["config", "chat_model", "stt_model", "token_limit", "messages", "default"]
check = all(item in context_fields for item in key_list)
if not check:
raise InvalidKeyError("Some keys are not supported by context object")
def str2bool(self, string: str) -> bool:
if string is None:
return False
return string.lower() in ("true", "1", "yes") | [] |
2024-01-10 | aziz-serin/chatty | src~va~openai_tools~ai_chat.py | import openai
import logging
from .moderation import isValidPrompt
from .error import InvalidMessageError, TokenLimitError, NullResponseError, VAError
from .ai import OpenAI
from .util import get_token_count
logger = logging.getLogger("chatty")
class OpenAIChat(OpenAI):
"""
Default dict keys for openai at the time of writing this code
"""
ROLE:str = "role"
CONTENT:str = "content"
SYSTEM:str = "system"
USER:str = "user"
ASSISTANT:str = "assistant"
def __init__(self, config: dict, system_config: str = "You are a virtual assistant.",
model: str = "gpt-3.5-turbo", token_limit:int = 4000, initial_messages:list[dict]=None):
super().__init__(model)
self.token_limit = token_limit
self.system_config = system_config
self.config = config
if initial_messages is None:
self.__init_messages_with_config()
else:
self.messages = initial_messages
        self.initial_messages = list(self.messages)  # copy, so non-conversation calls can roll back without mutating the snapshot
def __init_messages_with_config(self):
self.messages = []
system_message = self.system_config
for key, value in self.config.items():
message = f' {key}={value}'
system_message = system_message + message
self.messages.append(
{self.ROLE: self.SYSTEM, self.CONTENT: system_message}
)
def __validate_message(self, message:str):
response = isValidPrompt(message)
if response["flagged"]:
reasons = ', '.join(map(str, response["reasons"]))
raise InvalidMessageError(reasons)
def __validate_token_count(self):
if self.get_current_token_count() >= self.token_limit:
raise TokenLimitError(f"{self.get_current_token_count()} is above the token limit for given model {self.model}")
def __handle_reason(self, reason:str):
if reason == "stop":
return
elif reason == "length":
raise TokenLimitError(f"{self.get_current_token_count()} is above the token limit for given model {self.model}")
elif reason == "content_filter":
raise InvalidMessageError("Invalid message was detected by chatgpt")
elif reason == "null":
raise NullResponseError()
def send_message(self, message: str, conversation: bool) -> str:
self.__validate_message(message)
self.messages.append(
{self.ROLE: self.USER, self.CONTENT: message}
)
self.__validate_token_count()
response = self.__send_request()
finish_reason = response['choices'][0]['finish_reason']
self.__handle_reason(finish_reason)
reply = response['choices'][0]['message']['content']
if not conversation:
self.__log_transaction(finish_reason, reply=reply)
else:
self.__log_transaction(finish_reason)
self.__handle_reply(reply, conversation)
return reply
"""
If conversation, leave the reply as None since the messages will contain the reply, if not, include the reply
when asking for the token count
"""
def get_current_token_count(self, reply:str = None):
messages = self.messages.copy()
if reply is not None:
messages.append(
{self.ROLE: self.ASSISTANT, self.CONTENT: reply}
)
return get_token_count(messages, self.model)
def __send_request(self):
try:
return openai.ChatCompletion.create(model=self.model, messages=self.messages)
except openai.OpenAIError as err:
logging.error(err.json_body)
raise VAError(err.json_body)
"""
Cache the response and the sent prompt if the interaction is a conversation.
"""
def __handle_reply(self, reply: str, conversation:bool):
if conversation:
self.messages.append(
{self.ROLE: self.ASSISTANT, self.CONTENT: reply}
)
else:
# Roll back the messages into the initial stage with only the config message
self.messages = self.initial_messages
def __log_transaction(self, status: str, reply: str = None):
logger.info(f' COUNT: {self.get_current_token_count(reply=reply)},'
f' RESPONSE_STATUS: {status}')
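# Hedged usage sketch (editor's illustration, not part of the original file). The config
# values and prompt are assumptions; a valid OpenAI API key is expected to be set elsewhere.
#
#   chat = OpenAIChat(config={"user_name": "Aziz"}, token_limit=4000)
#   reply = chat.send_message("What can you help me with today?", conversation=True)
#   print(reply, chat.get_current_token_count())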
| [] |
2024-01-10 | amansingh-13/cs521-project | llm_assert~gpt_test.py | import argparse
import os
import re
from openai import OpenAI
import time
parser = argparse.ArgumentParser(description='Tests LLM\'s efficiency in generating asserts')
parser.add_argument('--abench', type=str, required=True, help='assert benchmark directory')
parser.add_argument('--temp', type=float, default=0.8, help='temperature for LLM codegen')
args = parser.parse_args()
secret_key = "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"
client = OpenAI(api_key = secret_key)
motivation = "You are a C programmer, skilled in writing useful and correct assertions"
def parse(lines):
source, target = [], []
asrt_lnos, cmnt_lnos = [], []
# improve this
for i,l in enumerate(lines):
if re.search("assert[(].+[)]", l):
asrt_lnos.append(i)
elif re.search("//", l):
cmnt_lnos.append(i)
return asrt_lnos, cmnt_lnos
for f in os.listdir(args.abench):
fd = open(os.path.join(args.abench, f), "r")
lines = fd.readlines()
asrt_lnos, cmnt_lnos = parse(lines)
assert len(asrt_lnos) == len(cmnt_lnos), "Dataset formatting error : {}".format(f)
for i, al in enumerate(asrt_lnos):
cl = cmnt_lnos[i]
assert al > cl
prompt = "".join(lines[:cl+1] + ["\n"])
truth = "".join(lines[cl+1:al+1])
completion = client.chat.completions.create(
model = "gpt-3.5-turbo",
messages = [
{"role": "system", "content": motivation},
{"role": "user", "content": prompt} ],
max_tokens = 1024,
temperature = args.temp
)
time.sleep(20)
out = prompt + completion.choices[0].message.content
olines = out.split('\n')
oasrt_lnos, ocmnt_lnos = parse(olines)
assert ocmnt_lnos[i] == cl
if(len(oasrt_lnos) > i):
output = "\n".join(olines[ocmnt_lnos[i]+1:oasrt_lnos[i]+1])
else:
output = "\n".join(olines[ocmnt_lnos[i]+1:])
print("--FILENAME--")
print(f)
print("--TRUTH--")
print(truth)
print("--OUTPUT--")
print(output)
fd.close()
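# Hedged usage sketch (editor's illustration, not part of the original script); the
# benchmark directory is a placeholder for a folder of annotated C files:
#   python gpt_test.py --abench ./assert_benchmark --temp 0.8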
| [
"You are a C programmer, skilled in writing useful and correct assertions",
"\n"
] |
2024-01-10 | amansingh-13/cs521-project | llm_post~gpt_code_test.py | import argparse
import os
import re
from openai import OpenAI
import time
import json
parser = argparse.ArgumentParser(description='Tests LLM\'s efficiency in generating code')
parser.add_argument('--data', type=str, required=True, help='human eval jsonl file path')
parser.add_argument('--output', type=str, required=True, help='output directory path')
parser.add_argument('--temp', type=float, default=0.8, help='temperature for LLM codegen')
args = parser.parse_args()
secret_key = "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"
client = OpenAI(api_key = secret_key)
motivation = "You are a python programmer, writing only the functions as described in the comments"
datafile = open(args.data)
data = [json.loads(l) for l in datafile.readlines()]
datafile.close()
for k, inst in enumerate(data):
os.makedirs(args.output + f"/{k}/", exist_ok=True)
for j in range(10):
completion = client.chat.completions.create(
model = "gpt-3.5-turbo",
messages = [
{"role": "system", "content": motivation},
{"role": "user", "content": inst['prompt']} ],
max_tokens = 1024,
temperature = args.temp
)
time.sleep(20)
out = completion.choices[0].message.content
fw = open(args.output + f"/{k}/{j}.py", "w")
fw.write(out)
fw.close()
print("Done {}, {}".format(k, j))
| [
"You are a python programmer, writing only the functions as described in the comments"
] |
2024-01-10 | amansingh-13/cs521-project | llm_post~llama_post_test.py | import argparse
import os
import re
from openai import OpenAI
import time
import json
import subprocess
parser = argparse.ArgumentParser(description='Tests LLM\'s efficiency in generating postconditions')
parser.add_argument('--data', type=str, required=True, help='human eval jsonl file path')
parser.add_argument('--output', type=str, required=True, help='output directory path')
parser.add_argument('--temp', type=float, default=0.8, help='temperature for LLM codegen')
parser.add_argument('--runner', type=str, required=True, help='path to model runner')
parser.add_argument('--model', type=str, required=True, help='path to model')
parser.add_argument('--timeout', type=int, required=True, help='upper bound LLM codegen time')
args = parser.parse_args()
secret_key = "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"
client = OpenAI(api_key = secret_key)
datafile = open(args.data)
data = [json.loads(l) for l in datafile.readlines()]
datafile.close()
tmpfile = "/tmp/__tmpfile.py"
for k, inst in enumerate(data):
os.makedirs(args.output + f"/{k}/", exist_ok=True)
prompt = """You have the following code context, function stub and natural language specification (in the form of a code comment) for {}. When implemented, the function should comply with this natural language specification:
{}
Write a symbolic postcondition for {} consisting of exactly one assert statement. For variables, use only the function input parameters and a hypothetical return value, which we'll assume is stored in a variable return_val. If the postcondition calls any functions external to the program context, they should only be those from the functional subset of python. Although the postcondition should be less complex than the function itself, it should not be trivial. It should encapsulate an aspect of the function without implementing the function. The format of your response should be:
```CODE FOR EXACTLY ONE POSTCONDITION WITH ASSERT HERE```""".format(inst['entry_point'], inst['prompt'], inst['entry_point'])
tmpfd = open(tmpfile, "w")
tmpfd.write(prompt)
tmpfd.close()
for j in range(5):
t = time.time()
try:
out = subprocess.run([args.runner, "-m", args.model, "-f", tmpfile,
"--temp", str(args.temp), "--prompt-cache", "/tmp/__cache.py", "--mlock"],
capture_output=True, timeout=args.timeout)
except subprocess.TimeoutExpired as e:
out = e
output = out.stdout.decode('utf-8')
fw = open(args.output + f"/{k}/{j}.py", "w")
fw.write(output)
fw.close()
print("Done {}, {} : {}".format(k, j, time.time()-t))
| [
"You have the following code context, function stub and natural language specification (in the form of a code comment) for PLACEHOLDER. When implemented, the function should comply with this natural language specification:\nPLACEHOLDER\nWrite a symbolic postcondition for PLACEHOLDER consisting of exactly one assert statement. For variables, use only the function input parameters and a hypothetical return value, which we'll assume is stored in a variable return_val. If the postcondition calls any functions external to the program context, they should only be those from the functional subset of python. Although the postcondition should be less complex than the function itself, it should not be trivial. It should encapsulate an aspect of the function without implementing the function. The format of your response should be:\n```CODE FOR EXACTLY ONE POSTCONDITION WITH ASSERT HERE```"
] |
2024-01-10 | amansingh-13/cs521-project | llm_post~gpt_post_test.py | import argparse
import os
import re
from openai import OpenAI
import time
import json
parser = argparse.ArgumentParser(description='Tests LLM\'s efficiency in generating postconditions')
parser.add_argument('--data', type=str, required=True, help='human eval jsonl file path')
parser.add_argument('--output', type=str, required=True, help='output directory path')
parser.add_argument('--temp', type=float, default=0.8, help='temperature for LLM codegen')
args = parser.parse_args()
secret_key = "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"
client = OpenAI(api_key = secret_key)
datafile = open(args.data)
data = [json.loads(l) for l in datafile.readlines()]
datafile.close()
for k, inst in enumerate(data):
if(k <= 29):
continue
os.makedirs(args.output + f"/{k}/", exist_ok=True)
prompt = """You have the following code context, function stub and natural language specification (in the form of a code comment) for {}. When implemented, the function should comply with this natural language specification:
{}
Write a symbolic postcondition for {} consisting of exactly one assert statement. For variables, use only the function input parameters and a hypothetical return value, which we'll assume is stored in a variable return_val. If the postcondition calls any functions external to the program context, they should only be those from the functional subset of python. Although the postcondition should be less complex than the function itself, it should not be trivial. It should encapsulate an aspect of the function without implementing the function. The format of your response should be:
```CODE FOR EXACTLY ONE POSTCONDITION WITH ASSERT HERE```""".format(inst['entry_point'], inst['prompt'], inst['entry_point'])
for j in range(5):
completion = client.chat.completions.create(
model = "gpt-3.5-turbo",
messages = [
{"role": "user", "content": prompt} ],
max_tokens = 1024,
temperature = args.temp
)
time.sleep(20)
out = completion.choices[0].message.content
fw = open(args.output + f"/{k}/{j}.py", "w")
fw.write(out)
fw.close()
print("Done {}, {}".format(k, j))
| [
"You have the following code context, function stub and natural language specification (in the form of a code comment) for PLACEHOLDER. When implemented, the function should comply with this natural language specification:\nPLACEHOLDER\nWrite a symbolic postcondition for PLACEHOLDER consisting of exactly one assert statement. For variables, use only the function input parameters and a hypothetical return value, which we'll assume is stored in a variable return_val. If the postcondition calls any functions external to the program context, they should only be those from the functional subset of python. Although the postcondition should be less complex than the function itself, it should not be trivial. It should encapsulate an aspect of the function without implementing the function. The format of your response should be:\n```CODE FOR EXACTLY ONE POSTCONDITION WITH ASSERT HERE```"
] |
2024-01-10 | amansingh-13/cs521-project | llm_post~llama_code_test.py | import argparse
import os
import re
from openai import OpenAI
import time
import json
import subprocess
parser = argparse.ArgumentParser(description='Tests LLM\'s efficiency in generating code')
parser.add_argument('--data', type=str, required=True, help='human eval jsonl file path')
parser.add_argument('--output', type=str, required=True, help='output directory path')
parser.add_argument('--temp', type=float, default=0.8, help='temperature for LLM codegen')
parser.add_argument('--runner', type=str, required=True, help='path to model runner')
parser.add_argument('--model', type=str, required=True, help='path to model')
parser.add_argument('--timeout', type=int, required=True, help='upper bound LLM codegen time')
args = parser.parse_args()
secret_key = "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"
client = OpenAI(api_key = secret_key)
datafile = open(args.data)
data = [json.loads(l) for l in datafile.readlines()]
datafile.close()
tmpfile = "/tmp/__tmpfile.py"
for k, inst in enumerate(data):
os.makedirs(args.output + f"/{k}/", exist_ok=True)
prompt = inst['prompt']
tmpfd = open(tmpfile, "w")
tmpfd.write(prompt)
tmpfd.close()
for j in range(10):
t = time.time()
try:
out = subprocess.run([args.runner, "-m", args.model, "-f", tmpfile,
"--temp", str(args.temp), "--prompt-cache", "/tmp/__cache.py", "--mlock"],
capture_output=True, timeout=args.timeout)
except subprocess.TimeoutExpired as e:
out = e
output = out.stdout.decode('utf-8')
fw = open(args.output + f"/{k}/{j}.py", "w")
fw.write(output)
fw.close()
print("Done {}, {} : {}".format(k, j, time.time()-t))
| [] |
2024-01-10 | csinva/tree-prompt-experiments | tprompt~compiler~compiling.py | # from evaluator import PromptHooker, modify_activations
import imodelsx.treeprompt.stump
from sklearn.preprocessing import OneHotEncoder
import sklearn.tree
import random
import joblib
from dict_hash import sha256
import os
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import imodelsx.process_results
from collections import defaultdict
import numpy as np
from copy import deepcopy
import transformers
import sys
import tprompt.utils
from os.path import join
import datasets
from typing import Dict, List
from sklearn.tree import plot_tree
import imodelsx.util
import imodelsx.metrics
import numpy as np
import tprompt.utils
from scipy.special import softmax
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
import torch.cuda
import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
from sklearn.base import BaseEstimator, ClassifierMixin
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel
import torch
import math
import vec2text
import openai
openai.api_key = open(os.path.expanduser('~/.openai_api_key')).read().strip()
OUTPUTS_ALL = {}
PROMPT_NUM_GLOBAL = 0
def get_avg_soft_prompt(checkpoint, prompts):
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).eval()
def store_activations(module, inputs, outputs):
global OUTPUTS_ALL
global PROMPT_NUM_GLOBAL
OUTPUTS_ALL[PROMPT_NUM_GLOBAL] = outputs.detach().cpu()
PROMPT_NUM_GLOBAL += 1
return outputs
hook = model.transformer.drop.register_forward_hook(store_activations)
for i, prompt in enumerate(prompts):
inputs = tokenizer(prompt, return_tensors="pt")
# hook = model.transformer.h[3].register_forward_hook(change_activations)
_ = model(**inputs)
hook.remove()
assert len(OUTPUTS_ALL) == len(prompts)
# most_probable_tokens = torch.topk(logits_modified, k=10, dim=-1)
# print('\n'.join([tokenizer.decode(x)
# for x in most_probable_tokens.indices[0, -1]]))
# logits_orig = model(**inputs).logits
vals = list(OUTPUTS_ALL.values())
emb_size = vals[0].shape[-1]
max_len = max([x.shape[1] for x in vals])
# add left padding
padded = [torch.cat([torch.zeros((1, max_len - x.shape[1], emb_size)), x], dim=1)
for x in vals]
# average
avg = torch.concat(tuple(padded)).mean(axis=0).unsqueeze(0)
return avg
def get_avg_inverted_text_prompt(prompts: List[str]) -> str:
def _get_embeddings_openai(text_list, model="text-embedding-ada-002", cache_dir=os.path.expanduser('~/.openai_emb_cache')) -> torch.Tensor:
batches = math.ceil(len(text_list) / 128)
outputs = []
for batch in range(batches):
text_list_batch = text_list[batch * 128: (batch + 1) * 128]
# check for cache
cache_path = join(cache_dir, sha256({'embs': text_list_batch}))
if os.path.exists(cache_path):
outputs.extend(joblib.load(cache_path))
else:
response = openai.Embedding.create(
input=text_list_batch,
model=model,
# override default base64 encoding...
encoding_format="float",
)
embs = [e["embedding"] for e in response["data"]]
outputs.extend(embs)
# save to cache
if cache_dir is not None:
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
joblib.dump(embs, cache_path)
return torch.tensor(outputs)
embeddings = _get_embeddings_openai(prompts)
avg_embedding = embeddings.mean(dim=0, keepdim=True).cuda()
corrector = vec2text.load_corrector("text-embedding-ada-002")
avg_text = vec2text.invert_embeddings(
embeddings=avg_embedding,
corrector=corrector
)
return avg_text
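# Hedged usage sketch (editor's illustration, not part of the original file). The prompts
# and the "gpt2" checkpoint are placeholders; get_avg_soft_prompt averages the post-embedding
# activations captured by the forward hook, while get_avg_inverted_text_prompt averages
# ada-002 embeddings and inverts the mean back into text with vec2text.
#
#   prompts = ["Classify the sentiment:", "Is this movie review positive or negative?"]
#   soft_prompt = get_avg_soft_prompt("gpt2", prompts)   # tensor of shape (1, seq_len, hidden)
#   avg_text = get_avg_inverted_text_prompt(prompts)     # text recovered from the mean embedding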
| [
"0",
"1"
] |
2024-01-10 | anihamde/langchain | langchain~document_loaders~docugami.py | """Loader that loads processed documents from Docugami."""
import io
import logging
import os
import re
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence
import requests
from pydantic import BaseModel, root_validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
TD_NAME = "{http://www.w3.org/1999/xhtml}td"
TABLE_NAME = "{http://www.w3.org/1999/xhtml}table"
XPATH_KEY = "xpath"
DOCUMENT_ID_KEY = "id"
DOCUMENT_NAME_KEY = "name"
STRUCTURE_KEY = "structure"
TAG_KEY = "tag"
PROJECTS_KEY = "projects"
DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1"
logger = logging.getLogger(__name__)
class DocugamiLoader(BaseLoader, BaseModel):
"""Loader that loads processed docs from Docugami.
To use, you should have the ``lxml`` python package installed.
"""
api: str = DEFAULT_API_ENDPOINT
access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY")
docset_id: Optional[str]
document_ids: Optional[Sequence[str]]
file_paths: Optional[Sequence[Path]]
min_chunk_size: int = 32 # appended to the next chunk to avoid over-chunking
@root_validator
def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that either local file paths are given, or remote API docset ID."""
if values.get("file_paths") and values.get("docset_id"):
raise ValueError("Cannot specify both file_paths and remote API docset_id")
if not values.get("file_paths") and not values.get("docset_id"):
raise ValueError("Must specify either file_paths or remote API docset_id")
if values.get("docset_id") and not values.get("access_token"):
raise ValueError("Must specify access token if using remote API docset_id")
return values
def _parse_dgml(
self, document: Mapping, content: bytes, doc_metadata: Optional[Mapping] = None
) -> List[Document]:
"""Parse a single DGML document into a list of Documents."""
try:
from lxml import etree
except ImportError:
raise ValueError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
# helpers
def _xpath_qname_for_chunk(chunk: Any) -> str:
"""Get the xpath qname for a chunk."""
qname = f"{chunk.prefix}:{chunk.tag.split('}')[-1]}"
parent = chunk.getparent()
if parent is not None:
doppelgangers = [x for x in parent if x.tag == chunk.tag]
if len(doppelgangers) > 1:
idx_of_self = doppelgangers.index(chunk)
qname = f"{qname}[{idx_of_self + 1}]"
return qname
def _xpath_for_chunk(chunk: Any) -> str:
"""Get the xpath for a chunk."""
ancestor_chain = chunk.xpath("ancestor-or-self::*")
return "/" + "/".join(_xpath_qname_for_chunk(x) for x in ancestor_chain)
def _structure_value(node: Any) -> str:
"""Get the structure value for a node."""
structure = (
"table"
if node.tag == TABLE_NAME
else node.attrib["structure"]
if "structure" in node.attrib
else None
)
return structure
def _is_structural(node: Any) -> bool:
"""Check if a node is structural."""
return _structure_value(node) is not None
def _is_heading(node: Any) -> bool:
"""Check if a node is a heading."""
structure = _structure_value(node)
return structure is not None and structure.lower().startswith("h")
def _get_text(node: Any) -> str:
"""Get the text of a node."""
return " ".join(node.itertext()).strip()
def _has_structural_descendant(node: Any) -> bool:
"""Check if a node has a structural descendant."""
for child in node:
if _is_structural(child) or _has_structural_descendant(child):
return True
return False
def _leaf_structural_nodes(node: Any) -> List:
"""Get the leaf structural nodes of a node."""
if _is_structural(node) and not _has_structural_descendant(node):
return [node]
else:
leaf_nodes = []
for child in node:
leaf_nodes.extend(_leaf_structural_nodes(child))
return leaf_nodes
def _create_doc(node: Any, text: str) -> Document:
"""Create a Document from a node and text."""
metadata = {
XPATH_KEY: _xpath_for_chunk(node),
DOCUMENT_ID_KEY: document["id"],
DOCUMENT_NAME_KEY: document["name"],
STRUCTURE_KEY: node.attrib.get("structure", ""),
TAG_KEY: re.sub(r"\{.*\}", "", node.tag),
}
if doc_metadata:
metadata.update(doc_metadata)
return Document(
page_content=text,
metadata=metadata,
)
# parse the tree and return chunks
tree = etree.parse(io.BytesIO(content))
root = tree.getroot()
chunks: List[Document] = []
prev_small_chunk_text = None
for node in _leaf_structural_nodes(root):
text = _get_text(node)
if prev_small_chunk_text:
text = prev_small_chunk_text + " " + text
prev_small_chunk_text = None
if _is_heading(node) or len(text) < self.min_chunk_size:
# Save headings or other small chunks to be appended to the next chunk
prev_small_chunk_text = text
else:
chunks.append(_create_doc(node, text))
if prev_small_chunk_text and len(chunks) > 0:
# small chunk at the end left over, just append to last chunk
chunks[-1].page_content += " " + prev_small_chunk_text
return chunks
def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all document details for the given docset ID"""
url = f"{self.api}/docsets/{docset_id}/documents"
all_documents = []
while url:
response = requests.get(
url,
headers={"Authorization": f"Bearer {self.access_token}"},
)
if response.ok:
data = response.json()
all_documents.extend(data["documents"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_documents
def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all project details for the given docset ID"""
url = f"{self.api}/projects?docset.id={docset_id}"
all_projects = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_projects.extend(data["projects"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_projects
def _metadata_for_project(self, project: Dict) -> Dict:
"""Gets project metadata for all files"""
project_id = project.get("id")
url = f"{self.api}/projects/{project_id}/artifacts/latest"
all_artifacts = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_artifacts.extend(data["artifacts"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
per_file_metadata = {}
for artifact in all_artifacts:
artifact_name = artifact.get("name")
artifact_url = artifact.get("url")
artifact_doc = artifact.get("document")
if artifact_name == f"{project_id}.xml" and artifact_url and artifact_doc:
doc_id = artifact_doc["id"]
metadata: Dict = {}
# the evaluated XML for each document is named after the project
response = requests.request(
"GET",
f"{artifact_url}/content",
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
try:
from lxml import etree
except ImportError:
raise ValueError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
artifact_tree = etree.parse(io.BytesIO(response.content))
artifact_root = artifact_tree.getroot()
ns = artifact_root.nsmap
entries = artifact_root.xpath("//wp:Entry", namespaces=ns)
for entry in entries:
heading = entry.xpath("./wp:Heading", namespaces=ns)[0].text
value = " ".join(
entry.xpath("./wp:Value", namespaces=ns)[0].itertext()
).strip()
metadata[heading] = value
per_file_metadata[doc_id] = metadata
else:
raise Exception(
f"Failed to download {artifact_url}/content "
+ "(status: {response.status_code})"
)
return per_file_metadata
def _load_chunks_for_document(
self, docset_id: str, document: Dict, doc_metadata: Optional[Dict] = None
) -> List[Document]:
"""Load chunks for a document."""
document_id = document["id"]
url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml"
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
return self._parse_dgml(document, response.content, doc_metadata)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
def load(self) -> List[Document]:
"""Load documents."""
chunks: List[Document] = []
if self.access_token and self.docset_id:
# remote mode
_document_details = self._document_details_for_docset_id(self.docset_id)
if self.document_ids:
_document_details = [
d for d in _document_details if d["id"] in self.document_ids
]
_project_details = self._project_details_for_docset_id(self.docset_id)
combined_project_metadata = {}
if _project_details:
# if there are any projects for this docset, load project metadata
for project in _project_details:
metadata = self._metadata_for_project(project)
combined_project_metadata.update(metadata)
for doc in _document_details:
doc_metadata = combined_project_metadata.get(doc["id"])
chunks += self._load_chunks_for_document(
self.docset_id, doc, doc_metadata
)
elif self.file_paths:
# local mode (for integration testing, or pre-downloaded XML)
for path in self.file_paths:
with open(path, "rb") as file:
chunks += self._parse_dgml(
{
DOCUMENT_ID_KEY: path.name,
DOCUMENT_NAME_KEY: path.name,
},
file.read(),
)
return chunks
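# Hedged usage sketch (editor's illustration, not part of the original module). The docset id
# is a placeholder; in remote mode the access token may also come from DOCUGAMI_API_KEY.
#
#   loader = DocugamiLoader(docset_id="<your docset id>", access_token="<DOCUGAMI_API_KEY>")
#   docs = loader.load()
#   print(docs[0].metadata[XPATH_KEY], docs[0].page_content[:80])
#
#   # Local mode with pre-downloaded DGML files:
#   loader = DocugamiLoader(file_paths=[Path("docs/contract.xml")])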
| [] |
2024-01-10 | anihamde/langchain | langchain~callbacks~manager.py | from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
logger = logging.getLogger(__name__)
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
tracing_callback_var: ContextVar[
Optional[LangChainTracerV1]
] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
tracing_v2_callback_var: ContextVar[
Optional[LangChainTracer]
] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get OpenAI callback handler in a context manager."""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get Tracer in a context manager."""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
session_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
tenant_id: Optional[str] = None,
session_extra: Optional[Dict[str, Any]] = None,
) -> Generator[TracerSession, None, None]:
"""Get the experimental tracer handler in a context manager."""
# Issue a warning that this is experimental
warnings.warn(
"The experimental tracing v2 is in development. "
"This is not yet stable and may change in the future."
)
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
tenant_id=tenant_id,
session_name=session_name,
example_id=example_id,
session_extra=session_extra,
)
session = cb.ensure_session()
tracing_v2_callback_var.set(cb)
yield session
tracing_v2_callback_var.set(None)
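# Hedged usage sketch (editor's illustration, not part of the original module). `llm` is an
# assumed, already-constructed LangChain LLM; the context managers above are typically
# wrapped around such a call:
#
#   with get_openai_callback() as cb:
#       llm("Tell me a joke")
#       print(cb.total_tokens, cb.total_cost)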
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(
handler, ignore_condition_name
):
getattr(handler, event_name)(*args, **kwargs)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
            logger.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None, functools.partial(event, *args, **kwargs)
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
logger.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
await asyncio.gather(
*(
_ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
for handler in handlers
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
) -> None:
"""Initialize run manager."""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations."""
return cls(uuid4(), [], [])
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
_handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
await _ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
_handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
_handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
_handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
await _ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
await _ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
await _ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
_handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
_handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
_handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
_handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
await _ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
await _ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
await _ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
await _ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running."""
_handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
_handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
await _ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
await _ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that can be used to handle callbacks from langchain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
# Re-use the LLM Run Manager since the outputs are treated
# the same for now
return CallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForChainRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForToolRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> CallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that can be used to handle callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> AsyncCallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> T:
"""Configure the callback manager."""
callback_manager = callback_manager_cls([])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers,
inheritable_handlers=inheritable_callbacks.inheritable_handlers,
parent_run_id=inheritable_callbacks.parent_run_id,
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
tracer = tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
os.environ.get("LANGCHAIN_TRACING") is not None
or tracer is not None
or os.environ.get("LANGCHAIN_HANDLER") is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = (
os.environ.get("LANGCHAIN_TRACING_V2") is not None or tracer_v2 is not None
)
tracer_session = os.environ.get("LANGCHAIN_SESSION")
if tracer_session is None:
tracer_session = "default"
if verbose or tracing_enabled_ or tracing_v2_enabled_ or open_ai is not None:
if verbose and not any(
isinstance(handler, StdOutCallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(StdOutCallbackHandler(), False)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1)
for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_session)
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(session_name=tracer_session)
handler.ensure_session()
callback_manager.add_handler(handler, True)
except Exception as e:
                    logger.debug("Unable to load requested LangChainTracer: %s", e)
if open_ai is not None and not any(
isinstance(handler, OpenAICallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
return callback_manager
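if __name__ == "__main__":
    # Minimal usage sketch of the classes above; the serialized dict and the
    # prompt are placeholder values. StdOutCallbackHandler and LLMResult are
    # already imported at the top of this module.
    manager = CallbackManager.configure(
        inheritable_callbacks=[StdOutCallbackHandler()], verbose=True
    )
    run_manager = manager.on_llm_start({"name": "fake-llm"}, ["Hello, world"])
    run_manager.on_llm_end(LLMResult(generations=[[]]))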
| [] |
2024-01-10 | anihamde/langchain | langchain~llms~huggingface_pipeline.py | """Wrapper around HuggingFace Pipeline APIs."""
import importlib.util
import logging
from typing import Any, List, Mapping, Optional
from pydantic import Extra
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
logger = logging.getLogger(__name__)
class HuggingFacePipeline(LLM):
"""Wrapper around HuggingFace Pipeline API.
To use, you should have the ``transformers`` python package installed.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example using from_model_id:
.. code-block:: python
from langchain.llms import HuggingFacePipeline
hf = HuggingFacePipeline.from_model_id(
model_id="gpt2", task="text-generation"
)
Example passing pipeline in directly:
.. code-block:: python
from langchain.llms import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
hf = HuggingFacePipeline(pipeline=pipe)
"""
pipeline: Any #: :meta private:
model_id: str = DEFAULT_MODEL_ID
"""Model name to use."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@classmethod
def from_model_id(
cls,
model_id: str,
task: str,
device: int = -1,
model_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> LLM:
"""Construct the pipeline object from model_id and task."""
try:
from transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers import pipeline as hf_pipeline
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 (default) for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
model_kwargs=_model_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return cls(
pipeline=pipeline,
model_id=model_id,
model_kwargs=_model_kwargs,
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
return "huggingface_pipeline"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
response = self.pipeline(prompt)
if self.pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif self.pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
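if __name__ == "__main__":
    # Usage sketch mirroring the class docstring: build a small text-generation
    # pipeline and call it once. Downloading "gpt2" needs network access and the
    # `transformers` package, so treat this purely as an illustration.
    hf = HuggingFacePipeline.from_model_id(model_id="gpt2", task="text-generation")
    print(hf("Once upon a time"))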
| [] |
2024-01-10 | anihamde/langchain | langchain~callbacks~tracers~schemas.py | """Schemas for tracers."""
from __future__ import annotations
import datetime
from enum import Enum
from typing import Any, Dict, List, Optional
from uuid import UUID
from pydantic import BaseModel, Field, root_validator
from langchain.env import get_runtime_environment
from langchain.schema import LLMResult
class TracerSessionV1Base(BaseModel):
"""Base class for TracerSessionV1."""
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
name: Optional[str] = None
extra: Optional[Dict[str, Any]] = None
class TracerSessionV1Create(TracerSessionV1Base):
"""Create class for TracerSessionV1."""
class TracerSessionV1(TracerSessionV1Base):
"""TracerSessionV1 schema."""
id: int
class TracerSessionBase(TracerSessionV1Base):
"""A creation class for TracerSession."""
tenant_id: UUID
class TracerSessionCreate(TracerSessionBase):
"""A creation class for TracerSession."""
id: Optional[UUID]
class TracerSession(TracerSessionBase):
"""TracerSessionV1 schema for the V2 API."""
id: UUID
class BaseRun(BaseModel):
"""Base class for Run."""
uuid: str
parent_uuid: Optional[str] = None
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
extra: Optional[Dict[str, Any]] = None
execution_order: int
child_execution_order: int
serialized: Dict[str, Any]
session_id: int
error: Optional[str] = None
class LLMRun(BaseRun):
"""Class for LLMRun."""
prompts: List[str]
response: Optional[LLMResult] = None
class ChainRun(BaseRun):
"""Class for ChainRun."""
inputs: Dict[str, Any]
outputs: Optional[Dict[str, Any]] = None
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
class ToolRun(BaseRun):
"""Class for ToolRun."""
tool_input: str
output: Optional[str] = None
action: str
child_llm_runs: List[LLMRun] = Field(default_factory=list)
child_chain_runs: List[ChainRun] = Field(default_factory=list)
child_tool_runs: List[ToolRun] = Field(default_factory=list)
class RunTypeEnum(str, Enum):
"""Enum for run types."""
tool = "tool"
chain = "chain"
llm = "llm"
class RunBase(BaseModel):
"""Base Run schema."""
id: Optional[UUID]
start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
extra: dict
error: Optional[str]
execution_order: int
child_execution_order: int
serialized: dict
inputs: dict
outputs: Optional[dict]
reference_example_id: Optional[UUID]
run_type: RunTypeEnum
parent_run_id: Optional[UUID]
class Run(RunBase):
"""Run schema when loading from the DB."""
name: str
child_runs: List[Run] = Field(default_factory=list)
@root_validator(pre=True)
def assign_name(cls, values: dict) -> dict:
"""Assign name to the run."""
if "name" not in values:
values["name"] = values["serialized"]["name"]
return values
class RunCreate(RunBase):
name: str
session_id: UUID
@root_validator(pre=True)
def add_runtime_env(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Add env info to the run."""
extra = values.get("extra", {})
extra["runtime"] = get_runtime_environment()
values["extra"] = extra
return values
ChainRun.update_forward_refs()
ToolRun.update_forward_refs()
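if __name__ == "__main__":
    # Minimal sketch of building a Run record by hand; every field value below is
    # a made-up placeholder.
    demo_run = Run(
        id=None,
        extra={},
        error=None,
        execution_order=1,
        child_execution_order=1,
        serialized={"name": "demo-llm"},
        inputs={"prompt": "Hello"},
        outputs=None,
        reference_example_id=None,
        run_type=RunTypeEnum.llm,
        parent_run_id=None,
    )
    print(demo_run.name)  # "demo-llm", filled in by the assign_name validator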
| [] |
2024-01-10 | anihamde/langchain | langchain~llms~huggingface_endpoint.py | """Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
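if __name__ == "__main__":
    # Usage sketch based on the class docstring; the endpoint URL and token are
    # placeholders, and a real call needs a deployed Inference Endpoint plus a
    # valid HUGGINGFACEHUB_API_TOKEN.
    llm = HuggingFaceEndpoint(
        endpoint_url="https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud",
        huggingfacehub_api_token="my-api-key",
        task="text-generation",
    )
    print(llm("Tell me a joke."))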
| [] |
2024-01-10 | anihamde/langchain | langchain~llms~huggingface_hub.py | """Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID = "gpt2"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceHub(LLM):
"""Wrapper around HuggingFaceHub models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceHub
hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key")
"""
client: Any #: :meta private:
repo_id: str = DEFAULT_REPO_ID
"""Model name to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"repo_id": self.repo_id, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_hub"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
response = self.client(inputs=prompt, params=_model_kwargs)
if "error" in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
if self.client.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.client.task == "text2text-generation":
text = response[0]["generated_text"]
elif self.client.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.client.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
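if __name__ == "__main__":
    # Usage sketch based on the class docstring; the token is a placeholder and a
    # real call needs a valid HUGGINGFACEHUB_API_TOKEN and network access.
    hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key")
    print(hf("Tell me a joke."))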
| [] |
2024-01-10 | anihamde/langchain | tests~integration_tests~utilities~test_graphql.py | import json
import pytest
import responses
from langchain.utilities.graphql import GraphQLAPIWrapper
TEST_ENDPOINT = "http://testserver/graphql"
# Mock GraphQL response for testing
MOCK_RESPONSE = {
"data": {"allUsers": [{"id": 1, "name": "Alice", "email": "[email protected]"}]}
}
@pytest.fixture
def graphql_wrapper() -> GraphQLAPIWrapper:
return GraphQLAPIWrapper(
graphql_endpoint=TEST_ENDPOINT,
custom_headers={"Authorization": "Bearer testtoken"},
)
@responses.activate
def test_run(graphql_wrapper: GraphQLAPIWrapper) -> None:
responses.add(responses.POST, TEST_ENDPOINT, json=MOCK_RESPONSE, status=200)
query = "query { allUsers { id, name, email } }"
result = graphql_wrapper.run(query)
expected_result = json.dumps(MOCK_RESPONSE, indent=2)
assert result == expected_result
| [] |
2024-01-10 | anihamde/langchain | langchain~document_loaders~youtube.py | """Loader that loads YouTube transcript."""
from __future__ import annotations
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
from urllib.parse import parse_qs, urlparse
from pydantic import root_validator
from pydantic.dataclasses import dataclass
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"]
@dataclass
class GoogleApiClient:
"""A Generic Google Api Client.
To use, you should have the ``google_auth_oauthlib,youtube_transcript_api,google``
python package installed.
As the google api expects credentials you need to set up a google account and
register your Service. "https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
"""
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
service_account_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
def __post_init__(self) -> None:
self.creds = self._load_credentials()
@root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("credentials_path") and not values.get(
"service_account_path"
):
raise ValueError("Must specify either channel_name or video_ids")
return values
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
creds = None
if self.service_account_path.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_path)
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
ALLOWED_SCHEMAS = {"http", "https"}
ALLOWED_NETLOCK = {
"youtu.be",
"m.youtube.com",
"youtube.com",
"www.youtube.com",
"www.youtube-nocookie.com",
"vid.plus",
}
def _parse_video_id(url: str) -> Optional[str]:
"""Parse a youtube url and return the video id if valid, otherwise None."""
parsed_url = urlparse(url)
if parsed_url.scheme not in ALLOWED_SCHEMAS:
return None
if parsed_url.netloc not in ALLOWED_NETLOCK:
return None
path = parsed_url.path
if path.endswith("/watch"):
query = parsed_url.query
parsed_query = parse_qs(query)
if "v" in parsed_query:
ids = parsed_query["v"]
video_id = ids if isinstance(ids, str) else ids[0]
else:
return None
else:
path = parsed_url.path.lstrip("/")
video_id = path.split("/")[-1]
if len(video_id) != 11: # Video IDs are 11 characters long
return None
return video_id
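# Illustration of _parse_video_id on a placeholder video id ("dQw4w9WgXcQ"):
#   _parse_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ")  -> "dQw4w9WgXcQ"
#   _parse_video_id("https://youtu.be/dQw4w9WgXcQ")                 -> "dQw4w9WgXcQ"
#   _parse_video_id("https://example.com/watch?v=dQw4w9WgXcQ")      -> None (host not allowed)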
class YoutubeLoader(BaseLoader):
"""Loader that loads Youtube transcripts."""
def __init__(
self,
video_id: str,
add_video_info: bool = False,
language: str = "en",
continue_on_failure: bool = False,
):
"""Initialize with YouTube video ID."""
self.video_id = video_id
self.add_video_info = add_video_info
self.language = language
self.continue_on_failure = continue_on_failure
@staticmethod
def extract_video_id(youtube_url: str) -> str:
"""Extract video id from common YT urls."""
video_id = _parse_video_id(youtube_url)
if not video_id:
raise ValueError(
f"Could not determine the video ID for the URL {youtube_url}"
)
return video_id
@classmethod
def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader:
"""Given youtube URL, load video."""
video_id = cls.extract_video_id(youtube_url)
return cls(video_id, **kwargs)
def load(self) -> List[Document]:
"""Load documents."""
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
YouTubeTranscriptApi,
)
except ImportError:
raise ImportError(
"Could not import youtube_transcript_api python package. "
"Please install it with `pip install youtube-transcript-api`."
)
metadata = {"source": self.video_id}
if self.add_video_info:
# Get more video meta info
# Such as title, description, thumbnail url, publish_date
video_info = self._get_video_info()
metadata.update(video_info)
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
except TranscriptsDisabled:
return []
try:
transcript = transcript_list.find_transcript([self.language])
except NoTranscriptFound:
en_transcript = transcript_list.find_transcript(["en"])
transcript = en_transcript.translate(self.language)
transcript_pieces = transcript.fetch()
transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces])
return [Document(page_content=transcript, metadata=metadata)]
def _get_video_info(self) -> dict:
"""Get important video information.
Components are:
- title
- description
- thumbnail url,
- publish_date
- channel_author
- and more.
"""
try:
from pytube import YouTube
except ImportError:
raise ImportError(
"Could not import pytube python package. "
"Please install it with `pip install pytube`."
)
yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}")
video_info = {
"title": yt.title,
"description": yt.description,
"view_count": yt.views,
"thumbnail_url": yt.thumbnail_url,
"publish_date": yt.publish_date,
"length": yt.length,
"author": yt.author,
}
return video_info
@dataclass
class GoogleApiYoutubeLoader(BaseLoader):
"""Loader that loads all Videos from a Channel
To use, you should have the ``googleapiclient,youtube_transcript_api``
python package installed.
As the service needs a google_api_client, you first have to initialize
the GoogleApiClient.
Additionally you have to either provide a channel name or a list of videoids
"https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
from langchain.document_loaders import GoogleApiYoutubeLoader
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
loader = GoogleApiYoutubeLoader(
google_api_client=google_api_client,
channel_name = "CodeAesthetic"
)
        loader.load()
"""
google_api_client: GoogleApiClient
channel_name: Optional[str] = None
video_ids: Optional[List[str]] = None
add_video_info: bool = True
captions_language: str = "en"
continue_on_failure: bool = False
def __post_init__(self) -> None:
self.youtube_client = self._build_youtube_client(self.google_api_client.creds)
def _build_youtube_client(self, creds: Any) -> Any:
try:
from googleapiclient.discovery import build
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
return build("youtube", "v3", credentials=creds)
@root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("channel_name") and not values.get("video_ids"):
raise ValueError("Must specify either channel_name or video_ids")
return values
    def _get_transcript_for_video_id(self, video_id: str) -> str:
from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
try:
transcript = transcript_list.find_transcript([self.captions_language])
except NoTranscriptFound:
for available_transcript in transcript_list:
transcript = available_transcript.translate(self.captions_language)
continue
transcript_pieces = transcript.fetch()
return " ".join([t["text"].strip(" ") for t in transcript_pieces])
def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document:
        captions = self._get_transcript_for_video_id(video_id)
video_response = (
self.youtube_client.videos()
.list(
part="id,snippet",
id=video_id,
)
.execute()
)
return Document(
page_content=captions,
metadata=video_response.get("items")[0],
)
def _get_channel_id(self, channel_name: str) -> str:
request = self.youtube_client.search().list(
part="id",
q=channel_name,
type="channel",
maxResults=1, # we only need one result since channel names are unique
)
response = request.execute()
channel_id = response["items"][0]["id"]["channelId"]
return channel_id
def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]:
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
)
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"youtube-transcript-api` "
"to use the youtube loader"
)
channel_id = self._get_channel_id(channel)
request = self.youtube_client.search().list(
part="id,snippet",
channelId=channel_id,
maxResults=50, # adjust this value to retrieve more or fewer videos
)
video_ids = []
while request is not None:
response = request.execute()
# Add each video ID to the list
for item in response["items"]:
if not item["id"].get("videoId"):
continue
meta_data = {"videoId": item["id"]["videoId"]}
if self.add_video_info:
item["snippet"].pop("thumbnails")
meta_data.update(item["snippet"])
try:
                    page_content = self._get_transcript_for_video_id(
item["id"]["videoId"]
)
video_ids.append(
Document(
page_content=page_content,
metadata=meta_data,
)
)
except (TranscriptsDisabled, NoTranscriptFound) as e:
if self.continue_on_failure:
logger.error(
"Error fetching transscript "
+ f" {item['id']['videoId']}, exception: {e}"
)
else:
raise e
pass
request = self.youtube_client.search().list_next(request, response)
return video_ids
def load(self) -> List[Document]:
"""Load documents."""
document_list = []
if self.channel_name:
document_list.extend(self._get_document_for_channel(self.channel_name))
elif self.video_ids:
document_list.extend(
[
self._get_document_for_video_id(video_id)
for video_id in self.video_ids
]
)
else:
raise ValueError("Must specify either channel_name or video_ids")
return document_list
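if __name__ == "__main__":
    # Usage sketch: load the transcript of a single video. The URL is a
    # placeholder and the call needs network access plus youtube-transcript-api.
    yt_loader = YoutubeLoader.from_youtube_url(
        "https://www.youtube.com/watch?v=dQw4w9WgXcQ", add_video_info=False
    )
    docs = yt_loader.load()
    print(docs[0].page_content[:200] if docs else "No transcript available")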
| [] |
2024-01-10 | Kizai/Resume-Pilot | gpt-conv.py | import openai
import os
from config import API_KEY
openai.api_key = API_KEY
class Conversation:
def __init__(self, prompt, num_of_round):
        self.prompt = prompt  # system prompt used at the start of the chat
        self.num_of_round = num_of_round  # number of chat rounds to keep
        self.messages = []  # stores the chat history
self.messages.append({"role": "system", "content": self.prompt})
def chat_generator(self):
while True:
user_input = input("User: ")
self.messages.append({"role": "user", "content": user_input})
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.messages,
temperature=0.5,
max_tokens=2048,
top_p=1,
)
except Exception as e:
print(e)
                yield str(e)
                continue  # the request failed, so there is no response to process
message = response.choices[0].message['content']
self.messages.append({"role": "assistant", "content": message})
            # If the chat history exceeds the configured number of rounds, drop the
            # oldest turn to save model tokens
            if len(self.messages) > self.num_of_round * 2 + 1:
                del self.messages[1:3]  # keep the first message so the system prompt is preserved
yield message
class Conversation2:
def __init__(self, prompt, num_of_round):
self.prompt = prompt
self.num_of_round = num_of_round
self.messages = []
self.messages.append({"role": "system", "content": self.prompt})
def ask(self, question):
try:
self.messages.append({"role": "user", "content": question})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.messages,
temperature=0.5,
max_tokens=2048,
top_p=1,
)
except Exception as e:
print(e)
return e
message = response["choices"][0]["message"]["content"]
        num_of_tokens = response['usage']['total_tokens']  # total number of tokens used
self.messages.append({"role": "assistant", "content": message})
if len(self.messages) > self.num_of_round * 2 + 1:
del self.messages[1:3]
return message, num_of_tokens
if __name__ == '__main__':
prompt1 = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:
1. 你的回答必须是中文
2. 回答限制在100个字以内"""
conversation = Conversation(prompt1, 5)
chat_gen = conversation.chat_generator()
for answer in chat_gen:
print("Chatbot:", answer)
# conv2 = Conversation2(prompt, 3)
# questions = [question1, question2, question3]
# for question in questions:
# answer, num_of_tokens = conv2.ask(question)
# print("询问 {%s} 消耗的token数量是 : %d" % (question, num_of_tokens))
| [
"你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:\n 1. 你的回答必须是中文\n 2. 回答限制在100个字以内"
] |
2024-01-10 | Kizai/Resume-Pilot | tools~bridge_chatgpt.py | import json
import time
import logging
import traceback
import requests
import importlib
from config import API_URL, API_KEY, TIMEOUT_SECONDS, MAX_RETRY, LLM_MODEL
timeout_bot_msg = 'Request timeout. Network error. Please check proxy settings in config.py.' + \
'Network error. Please check if the proxy server is available and if the proxy settings are correct. The format must be [protocol]://[address]:[port], and all parts are required.'
def get_full_error(chunk, stream_response):
"""
Get the complete error message returned from OpenAI.
"""
while True:
try:
chunk += next(stream_response)
except:
break
return chunk
def predict_no_ui(inputs, top_p, temperature, history=None, sys_prompt=""):
"""
    Send a request to chatGPT and wait for the complete reply in a single call; no intermediate output is shown.
    A simplified version of the predict function, used when the payload is relatively large or when this call
    is nested inside more complex, multi-step functions.
    inputs is the input of this query
    top_p, temperature are internal tuning parameters of chatGPT
    history is a list of previous conversation turns
    (Note: if inputs or history is too long, the token limit is exceeded and a ConnectionAbortedError is raised.)
"""
if history is None:
history = []
headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt=sys_prompt, stream=False)
retry = 0
while True:
try:
# make a POST request to the API endpoint, stream=False
response = requests.post(API_URL, headers=headers,
json=payload, stream=False, timeout=TIMEOUT_SECONDS * 2)
break
except requests.exceptions.ReadTimeout:
retry += 1
traceback.print_exc()
if retry > MAX_RETRY:
raise TimeoutError
if MAX_RETRY != 0:
print(f'Request timed out, retrying ({retry}/{MAX_RETRY}) ……')
try:
result = json.loads(response.text)["choices"][0]["message"]["content"]
return result
except Exception:
if "choices" not in response.text:
print(response.text)
raise ConnectionAbortedError("Json parsing is irregular, the text may be too long" + response.text)
def predict_no_ui_long_connection(inputs, top_p, temperature, history=None, sys_prompt="", observe_window=None):
"""
    Send a request to chatGPT and wait for the complete reply; no intermediate output is shown.
    Streaming is used internally so that a dropped connection can be detected midway.
    inputs:
        the input for this query
    sys_prompt:
        the system prompt (not shown to the user)
    top_p, temperature:
        internal tuning parameters of chatGPT
    history:
        a list of previous conversation turns
    observe_window = None:
        passes partial output across threads; mostly cosmetic and may be left as None.
        observe_window[0]: observation window. observe_window[1]: watchdog timestamp
"""
if history is None:
history = []
watch_dog_patience = 5 # Watchdog's patience, set it to 5 seconds
headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt=sys_prompt, stream=True)
retry = 0
while True:
try:
# make a POST request to the API endpoint, stream=False
response = requests.post(API_URL, headers=headers,
json=payload, stream=True, timeout=TIMEOUT_SECONDS)
break
except requests.exceptions.ReadTimeout:
retry += 1
traceback.print_exc()
if retry > MAX_RETRY:
raise TimeoutError
if MAX_RETRY != 0:
print(f'Request timed out, retrying ({retry}/{MAX_RETRY}) ……')
stream_response = response.iter_lines()
result = ''
while True:
try:
chunk = next(stream_response).decode()
except StopIteration:
break
if len(chunk) == 0:
continue
if not chunk.startswith('data:'):
error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
if "reduce the length" in error_msg:
raise ConnectionAbortedError("OpenAI rejected the request:" + error_msg)
else:
raise RuntimeError("OpenAI rejected the request:" + error_msg)
json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
delta = json_data["delta"]
if len(delta) == 0:
break
if "role" in delta:
continue
if "content" in delta:
result += delta["content"]
print(delta["content"], end='')
if observe_window is not None:
# Observation window to display the acquired data
if len(observe_window) >= 1:
observe_window[0] += delta["content"]
# Watchdog, if the dog is not fed after the deadline, it will be terminated
if len(observe_window) >= 2:
if (time.time() - observe_window[1]) > watch_dog_patience:
raise RuntimeError("program terminated。")
else:
raise RuntimeError("Unexpected Json structure:" + delta)
if json_data['finish_reason'] == 'length':
raise ConnectionAbortedError("It ends normally, but it shows insufficient Token, resulting in incomplete output. Please reduce the amount of text entered at a time.")
return result
def predict(inputs, top_p, temperature, chatbot=None, history=None, system_prompt='',
stream=True, additional_fn=None):
"""
    Send a request to chatGPT and stream the output. Used for the basic dialog functionality.
    inputs is the input of this query
    top_p, temperature are internal tuning parameters of chatGPT
    history is a list of previous conversation turns (if inputs or history is too long, the token limit is exceeded and an error is raised)
    chatbot is the dialog list displayed in the WebUI; modify it and yield it out to update the dialog interface directly
    additional_fn indicates which button was clicked; see core_functional.py for the buttons
"""
if history is None:
history = []
if chatbot is None:
chatbot = []
if additional_fn is not None:
import core_functional
importlib.reload(core_functional)
core_functional = core_functional.get_core_functions()
if "PreProcess" in core_functional[additional_fn]:
inputs = core_functional[additional_fn]["PreProcess"](inputs)
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
if stream:
raw_input = inputs
logging.info(f'[raw_input] {raw_input}')
chatbot.append((inputs, ""))
yield chatbot, history, "waiting for response"
headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt, stream)
history.append(inputs)
history.append(" ")
retry = 0
while True:
try:
# make a POST request to the API endpoint, stream=True
response = requests.post(API_URL, headers=headers,
json=payload, stream=True, timeout=TIMEOUT_SECONDS)
break
except:
retry += 1
chatbot[-1] = (chatbot[-1][0], timeout_bot_msg)
retry_msg = f",retrying ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
yield chatbot, history, "Request timed out" + retry_msg
if retry > MAX_RETRY:
raise TimeoutError
gpt_replying_buffer = ""
is_head_of_the_stream = True
if stream:
stream_response = response.iter_lines()
while True:
chunk = next(stream_response)
# print(chunk.decode()[6:])
if is_head_of_the_stream:
# The first frame of the data stream does not carry content
is_head_of_the_stream = False
continue
if chunk:
try:
if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
                        # End of the data stream reached; gpt_replying_buffer now holds the complete reply
logging.info(f'[response] {gpt_replying_buffer}')
break
# Handle the body of the stream
chunkjson = json.loads(chunk.decode()[6:])
status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
# If an exception is thrown here, it is usually because the text is too long, see the output of get_full_error for details
gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk.decode()[6:])['choices'][0]["delta"][
"content"]
history[-1] = gpt_replying_buffer
chatbot[-1] = (history[-2], history[-1])
yield chatbot, history, status_text
except Exception:
traceback.print_exc()
yield chatbot, history, "Json parsing is irregular"
chunk = get_full_error(chunk, stream_response)
error_msg = chunk.decode()
if "reduce the length" in error_msg:
chatbot[-1] = (chatbot[-1][0],
"Reduce the length. This input is too long, or the historical data is too long. The historical cache data is now released, you can try again.")
                    history = []  # clear the history
elif "Incorrect API key" in error_msg:
chatbot[-1] = (chatbot[-1][0],
"Incorrect API key. OpenAI denies service on the grounds that an incorrect API_KEY is provided.")
elif "exceeded your current quota" in error_msg:
chatbot[-1] = (chatbot[-1][0],
"You exceeded your current quota. OpenAI refuses service due to insufficient account quota..")
else:
from tools.toolbox import regular_txt_to_markdown
tb_str = '```\n' + traceback.format_exc() + '```'
chatbot[-1] = (chatbot[-1][0],
f"Exception\n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode()[4:])}")
yield chatbot, history, "Json Exception" + error_msg
return
def generate_payload(inputs, top_p, temperature, history, system_prompt, stream):
"""
    Assemble all the information, select the LLM model, and build the HTTP headers and payload for the request.
"""
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {API_KEY}"
}
conversation_cnt = len(history) // 2
messages = [{"role": "system", "content": system_prompt}]
if conversation_cnt:
for index in range(0, 2 * conversation_cnt, 2):
what_i_have_asked = {"role": "user", "content": history[index]}
what_gpt_answer = {"role": "assistant", "content": history[index + 1]}
if what_i_have_asked["content"] != "":
if what_gpt_answer["content"] == "":
continue
if what_gpt_answer["content"] == timeout_bot_msg:
continue
messages.append(what_i_have_asked)
messages.append(what_gpt_answer)
else:
messages[-1]['content'] = what_gpt_answer['content']
what_i_ask_now = {"role": "user", "content": inputs}
messages.append(what_i_ask_now)
payload = {
"model": LLM_MODEL,
"messages": messages,
"temperature": temperature, # 1.0,
"top_p": top_p, # 1.0,
"n": 1,
"stream": stream,
"presence_penalty": 0,
"frequency_penalty": 0,
}
print(f" {LLM_MODEL} : {conversation_cnt} : {inputs}")
return headers, payload
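if __name__ == "__main__":
    # Usage sketch: a single blocking completion call. It needs a valid API_KEY
    # and a reachable API_URL in config.py; the prompt below is a placeholder.
    reply = predict_no_ui(
        inputs="Say hello in one short sentence.",
        top_p=1.0,
        temperature=1.0,
        history=[],
        sys_prompt="You are a helpful assistant.",
    )
    print(reply)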
| [] |
2024-01-10 | Kizai/Resume-Pilot | tools~resume_function_calling.py | import os
import json
import langchain
import openai
from dotenv import load_dotenv
from tools.pdfreader import PdfContentReader
from tools.logginger import get_logger
from config import API_KEY
# 设置logger
logger = get_logger()
# 从环境变量中加载OpenAI的API秘钥
load_dotenv()
openai.api_key = API_KEY
# Instantiate the PDF reader
pdf_reader = PdfContentReader()
# Objects used to describe the functions for OpenAI function calling
function_descriptions = [
{
"name": "does_resume_meet_requirements",
"description": "判断简历是否符合招聘需求",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "如张三",
},
"email": {
"type": "string",
"description": "如[email protected]",
},
"is_match": {
"type": "boolean",
"description": "True or False",
},
"reason": {
"type": "string",
"description": "该简历符合岗位的全部需求或该求职者的期望城市不是深圳所以不符合岗位需求。"
}
},
"required": ["name", "email", "is_match", "reason"],
},
}
]
def extract_information_from_response(response):
"""
    Extract information from the OpenAI response.
    Args:
        response: The OpenAI response.
    Returns:
        name: The candidate's name.
        email: The candidate's email address.
        is_match: Whether the resume matches the requirements.
        reason: The reason for the decision.
"""
function_call_arguments = json.loads(response['function_call']['arguments'])
name = function_call_arguments.get("name")
email = function_call_arguments.get("email")
is_match = function_call_arguments.get("is_match")
reason = function_call_arguments.get("reason")
return name, email, is_match, reason
def get_resume_meet_requirements(name, email, is_match, reason):
"""
    Get the information on whether the resume meets the requirements.
    Args:
        name: The candidate's name.
        email: The candidate's email address.
        is_match: Whether the resume matches the requirements.
        reason: The reason for the decision.
    Returns:
        json.dumps(resume_info): The resume information as a JSON string.
"""
resume_info = {
"name": name,
"email": email,
"is_match": is_match,
"reason": reason,
}
return json.dumps(resume_info)
def match_resume(requirements, file_path):
"""
    Match a resume against the job requirements.
    Args:
        requirements: The job requirements.
        file_path: The path of the resume file.
    Returns:
        result: The model's final reply as a string.
"""
    # Read the resume content
file_content = pdf_reader.ocr_pdf_content(file_path)
    # Concatenate the requirements and the resume content
user_query = requirements + "文件内容如下:" + file_content
print(user_query)
    # Get a reply from OpenAI
response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=[{"role": "user", "content": user_query}],
functions=function_descriptions,
function_call="auto",
)
ai_response_message = response["choices"][0]["message"]
ai_response_message_formatted_json = json.dumps(ai_response_message, indent=2, ensure_ascii=False)
logger.info(ai_response_message_formatted_json)
    # Extract information from the OpenAI response
name, email, is_match, reason = extract_information_from_response(ai_response_message)
    # Get the information on whether the resume meets the requirements
function_response = get_resume_meet_requirements(name=name, email=email, is_match=is_match, reason=reason)
function_response_json = json.loads(function_response)
function_response_data = json.dumps(function_response_json, indent=2, ensure_ascii=False)
logger.info(function_response_data)
print(function_response_data)
second_response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=[
{"role": "user", "content": user_query},
ai_response_message,
{
"role": "function",
"name": "get_resume_meet_requirements",
"content": function_response,
},
],
)
result = second_response['choices'][0]['message']['content']
return result
# if __name__ == '__main__':
# req = "我想招聘一位有三年以上工作经验、熟悉Vue3等前端技术栈、期望城市在深圳的前端工程师:"
# file = "/home/lemu-devops/PycharmProjects/resume-pilot/private_upload/2023-06-16-17-41-33/web前端开发工程师 _ 深圳15-20K黄先生 一年以内.pdf"
# match_resume(req, file)
| [
"PLACEHOLDER文件内容如下:PLACEHOLDER"
] |
2024-01-10 | dgg32/igem | output_parser_vertexai.py | from langchain.agents.conversational_chat.output_parser import ConvoOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
from typing import Union
from langchain.output_parsers.json import parse_json_markdown
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
class MyVertexOutputParser(ConvoOutputParser):
def __init__(self):
super().__init__()
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Attempts to parse the given text into an AgentAction or AgentFinish.
Raises:
OutputParserException if parsing fails.
"""
#print ("try", text, "end try")
try:
# Attempt to parse the text into a structured format (assumed to be JSON
# stored as markdown)
#print ("before", text, "end before")
text = text.strip()
if "{" in text and "}" in text:
text = text[text.find('{'):text.rfind('}')+1]
#print ("after", text, "end after")
response = parse_json_markdown(text)
# If the response contains an 'action' and 'action_input'
if "action" in response and "action_input" in response:
action, action_input = response["action"], response["action_input"]
# If the action indicates a final answer, return an AgentFinish
if action == "Final Answer":
return AgentFinish({"output": action_input}, text)
else:
# Otherwise, return an AgentAction with the specified action and
# input
return AgentAction(action, action_input, text)
else:
# If the necessary keys aren't present in the response, raise an
# exception
raise OutputParserException(
f"Missing 'action' or 'action_input' in LLM output: {text}"
)
except Exception as e:
# If any other exception is raised during parsing, also raise an
# OutputParserException
raise OutputParserException(f"Could not parse LLM output: '{text}'") from e
@property
def _type(self) -> str:
return "conversational_chat" | [] |
2024-01-10 | dgg32/igem | agents_vertexai.py | #from langchain.tools import WikipediaQueryRun
#from langchain.utilities import WikipediaAPIWrapper
#import openai
from langchain.llms import VertexAI
import output_parser_vertexai
#from langchain.tools import DuckDuckGoSearchRun
from langchain.chat_models import ChatOpenAI
from langchain.chains import GraphCypherQAChain
from langchain.graphs import Neo4jGraph
from langchain.agents.tools import Tool
from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.agents import ConversationalChatAgent
from langchain.agents import AgentExecutor
from langchain.memory import ConversationBufferMemory
import yaml
import os
from google.cloud import bigquery
from sqlalchemy import *
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import *
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
#from langchain.llms.openai import OpenAI
with open("config.yaml", "r") as stream:
try:
PARAM = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
#os.environ['OPENAI_API_KEY'] = PARAM["OPENAI_API_KEY"]
#openai.api_key = os.environ["OPENAI_API_KEY"]
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = PARAM["GOOGLE_APPLICATION_CREDENTIALS_file"]
graph = Neo4jGraph(
url=PARAM["neo4j_url"], username=PARAM["neo4j_username"], password=PARAM["neo4j_password"]
)
graph.refresh_schema()
vertex_ai = VertexAI(model_name="code-bison")
chain_neo4j = GraphCypherQAChain.from_llm(
vertex_ai, graph=graph, verbose=True, return_direct=True
)
project = PARAM["bigquery_project_id"]
dataset = PARAM["bigquery_dataset_id"]
table = PARAM["bigquery_table_id"]
sqlalchemy_url = f"bigquery://{project}/{dataset}?credentials_path={os.environ['GOOGLE_APPLICATION_CREDENTIALS']}"
db = SQLDatabase.from_uri(sqlalchemy_url)
toolkit = SQLDatabaseToolkit(db=db, llm=vertex_ai)
bigquery_agent_executor = create_sql_agent(
llm=vertex_ai,
toolkit=toolkit,
verbose=True,
top_k=1000,
)
tools = [
Tool(
name="Neo4j_search",
func=chain_neo4j.run,
description=PARAM["neo4j_tool_description"],
),
Tool(
name="BigQuery_search",
description=PARAM["BigQuery_description"],
func=bigquery_agent_executor.run,
)
]
agent_instructions = PARAM["agent_instruction"]
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
#custom_agent = ConversationalChatAgent.from_llm_and_tools(llm=ChatOpenAI(model_name='gpt-4', temperature=0), tools=tools)
custom_agent = ConversationalChatAgent.from_llm_and_tools(llm=VertexAI(temperature=0, max_output_tokens=500), tools=tools, output_parser=output_parser_vertexai.MyVertexOutputParser(),)
agent_executor = AgentExecutor.from_agent_and_tools(agent = custom_agent, tools=tools, memory=memory)
agent_executor.verbose = True
def ask_question(question):
return agent_executor.run(question) | [] |
2024-01-10 | dgg32/igem | agents.py | #from langchain.tools import WikipediaQueryRun
#from langchain.utilities import WikipediaAPIWrapper
import openai
#from langchain.tools import DuckDuckGoSearchRun
from langchain.chat_models import ChatOpenAI
from langchain.chains import GraphCypherQAChain
from langchain.graphs import Neo4jGraph
from langchain.agents.tools import Tool
from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.agents import ConversationalChatAgent
from langchain.agents import AgentExecutor
from langchain.memory import ConversationBufferMemory
import yaml
import os
from google.cloud import bigquery
from sqlalchemy import *
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import *
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import OpenAI
with open("config.yaml", "r") as stream:
try:
PARAM = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
os.environ['OPENAI_API_KEY'] = PARAM["OPENAI_API_KEY"]
openai.api_key = os.environ["OPENAI_API_KEY"]
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = PARAM["GOOGLE_APPLICATION_CREDENTIALS_file"]
graph = Neo4jGraph(
url=PARAM["neo4j_url"], username=PARAM["neo4j_username"], password=PARAM["neo4j_password"]
)
graph.refresh_schema()
chain_neo4j = GraphCypherQAChain.from_llm(
cypher_llm=ChatOpenAI(temperature=0, model="gpt-4"),
qa_llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo"),
validate_cypher=True,
graph=graph, verbose=True, return_direct=True
)
project = PARAM["bigquery_project_id"]
dataset = PARAM["bigquery_dataset_id"]
table = PARAM["bigquery_table_id"]
sqlalchemy_url = f"bigquery://{project}/{dataset}?credentials_path={os.environ['GOOGLE_APPLICATION_CREDENTIALS']}"
db = SQLDatabase.from_uri(sqlalchemy_url)
toolkit = SQLDatabaseToolkit(db=db, llm=ChatOpenAI(temperature=0))
bigquery_agent_executor = create_sql_agent(
llm=ChatOpenAI(temperature=0),
toolkit=toolkit,
verbose=True,
top_k=1000,
)
tools = [
Tool(
name="Neo4j_search",
func=chain_neo4j.run,
description=PARAM["neo4j_tool_description"],
),
Tool(
name="BigQuery_search",
description=PARAM["BigQuery_description"],
func=bigquery_agent_executor.run,
)
]
agent_instructions = PARAM["agent_instruction"]
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
#custom_agent = ConversationalChatAgent.from_llm_and_tools(llm=ChatOpenAI(model_name='gpt-4', temperature=0), tools=tools)
custom_agent = ConversationalChatAgent.from_llm_and_tools(llm=ChatOpenAI(temperature=0), tools=tools)
agent_executor = AgentExecutor.from_agent_and_tools(agent = custom_agent, tools=tools, memory=memory)
agent_executor.verbose = True
def ask_question(question):
return agent_executor.run(question) | [] |
2024-01-10 | andreweskeclarke/assistant | assistant~routerssss.py | import argparse
import json
import logging
import os
import time
import openai
from addons.openai_functions import math, memory, wikipedia
logger = logging.getLogger(__name__)
openai.api_key = os.environ["OPENAI_API_KEY"]
GPT3 = "gpt-3.5-turbo-0613"
GPT3_LONG = "gpt-3.5-turbo-16k"
GPT4 = "gpt-4-0613"
FUNCTIONS = [
math.add,
math.subtract,
math.multiply,
math.divide,
memory.store_in_memory,
memory.get_from_memory,
wikipedia.find_wikipedia_page_key,
wikipedia.get_wikipedia_page_summary,
wikipedia.list_wikipedia_sections,
wikipedia.get_wikipedia_section,
]
FUNCTIONS_BY_NAME = {f.__qualname__: f for f in FUNCTIONS}
FUNCTION_DESCRIPTIONS = []
for f in FUNCTIONS:
try:
FUNCTION_DESCRIPTIONS.append(json.loads(f.__doc__))
    except (TypeError, ValueError):
raise ValueError(f"Invalid docstring for {f.__qualname__}")
def run_conversation(user_request: str, use_gpt4: bool) -> None:
gpt = GPT4 if use_gpt4 else GPT3_LONG
messages = [
{
"role": "user",
"content": user_request,
}
]
logger.info("User request: %s", messages[-1])
keep_talking_to_gpt = True
while keep_talking_to_gpt:
try:
response = openai.ChatCompletion.create(
model=gpt,
messages=messages,
functions=FUNCTION_DESCRIPTIONS,
)
response_message = response["choices"][0]["message"] # type: ignore
messages.append(response_message)
logger.info("%s response: %s", gpt, response_message)
if keep_talking_to_gpt := "function_call" in response_message:
function_name = response_message["function_call"]["name"]
fuction_to_call = FUNCTIONS_BY_NAME[function_name]
function_args = json.loads(
response_message["function_call"]["arguments"]
)
function_response = fuction_to_call(**function_args) # type: ignore
messages.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
)
logger.info("User response: %s", messages[-1])
except Exception as exception: # pylint: disable=broad-exception-caught
logging.error(exception)
logging.info("Trying again in 1 second...")
time.sleep(5)
logger.info(messages[-1]["content"])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--request", type=str, required=True)
parser.add_argument("--gpt4", action=argparse.BooleanOptionalAction)
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
)
run_conversation(args.request, args.gpt4)
| [] |
2024-01-10 | andreweskeclarke/assistant | addons~jupyter_assistant_agent.py | from __future__ import annotations
import logging
import os
import openai
from assistant.agent import Agent
from assistant.conversation import Conversation
from assistant.message import Message
LOG = logging.getLogger(__name__)
openai.api_key = os.environ["OPENAI_API_KEY"]
def add_comment_markers(text):
# GPT still returns commentary even when I request it not to
# This code can prefix those comments with '#', assuming the text is mostly well formatted
robot_is_commenting = True
lines = text.split("\n")
for i, line in enumerate(lines):
if line.startswith("```"):
lines[i] = "# " + line
robot_is_commenting = not robot_is_commenting
elif robot_is_commenting:
lines[i] = "# " + line
elif "```" in line:
lines[i] = line.replace("```", "# ```")
return "\n".join(lines)
class JupyterAssistantAgent(Agent):
async def reply_to(self, conversation: Conversation) -> Message:
message = conversation.last_message()
prompt = (
"The following is some Jupyter python code, its outputs, "
"and a comment asking you to fill in some code. "
"Please return the python code wrapped as ```python```:\n"
)
max_length = 10000 - len(prompt)
messages = [
{
"role": "system",
"content": "You are a helpful Python Jupyter coding assistant.",
},
{
"role": "user",
"content": prompt + message.text[-max_length:],
},
]
LOG.info("Forwarding to ChatGPT:\n%s", messages)
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
)
response_content = add_comment_markers(response.choices[0].message.content)
LOG.info("ChatGPT replied: '%s'", response_content)
return message.evolve(
text=response_content,
source="jupyter-assistant-plugin",
)
| [
"The following is some Jupyter python code, its outputs, and a comment asking you to fill in some code. Please return the python code wrapped as ```python```:\n",
"You are a helpful Python Jupyter coding assistant."
] |
2024-01-10 | andreweskeclarke/assistant | scripts~code_review.py | #!/usr/bin/env python
from __future__ import annotations
import argparse
import os
import pathlib
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
GPT3_LONG = "gpt-3.5-turbo-16k"
GPT4 = "gpt-4-0613"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("files", type=pathlib.Path, nargs="+")
parser.add_argument(
"--request",
type=str,
default="Please review the code and give me feedback and ideas.",
)
parser.add_argument(
"--gpt4",
action=argparse.BooleanOptionalAction,
)
parser.add_argument(
"--preview",
action=argparse.BooleanOptionalAction,
)
args = parser.parse_args()
file_contents = "Below are the contents of most the relevant files:\n"
assert args.files, "Please provide some files for review"
for filename in args.files:
code = filename.read_text()
file_contents += f"\n```{filename}\n{code}\n```"
message = (
f"My request: {args.request}\n"
"Below is a list of files that I think are relevant to my request, "
"please take them into consideration."
f"{file_contents}\n"
"OK that should be all the relevant files.\n"
f"Remember, my request is: {args.request}.\n"
)
if args.preview:
print(message)
return
response = openai.ChatCompletion.create(
model=GPT4 if args.gpt4 else GPT3_LONG,
messages=[
{
"role": "system",
"content": "You are a very senior developer, "
"eager to help me with coding advice, "
"identify bugs, "
"point out more Pythonic code, "
"suggest style improvements, "
"give advice on scalability, "
"and feedback on overall design quality.",
},
{"role": "user", "content": message},
],
)
print(response)
print(response.choices[0].message.content)
if __name__ == "__main__":
main()
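# Example invocation (illustrative; the file names and request text are placeholders):
#   python code_review.py app.py utils.py --request "Focus on error handling" --gpt4
# Add --preview to print the assembled prompt without calling the API.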
| [
"You are a very senior developer, eager to help me with coding advice, identify bugs, point out more Pythonic code, suggest style improvements, give advice on scalability, and feedback on overall design quality."
] |
2024-01-10 | samlee946/langchain | langchain~document_loaders~word_document.py | """Loads word documents."""
import os
import tempfile
from abc import ABC
from typing import List
from urllib.parse import urlparse
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class Docx2txtLoader(BaseLoader, ABC):
"""Loads a DOCX with docx2txt and chunks at character level.
Defaults to check for local file, but if the file is a web path, it will download it
to a temporary file, and use that, then clean up the temporary file after completion
"""
def __init__(self, file_path: str):
"""Initialize with file path."""
self.file_path = file_path
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
def load(self) -> List[Document]:
"""Load given path as single page."""
import docx2txt
return [
Document(
page_content=docx2txt.process(self.file_path),
metadata={"source": self.file_path},
)
]
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
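# Minimal usage sketch (illustrative, not part of the original file;
# "example.docx" is a placeholder local path or URL):
# from langchain.document_loaders import Docx2txtLoader
# loader = Docx2txtLoader("example.docx")
# docs = loader.load()  # a single Document containing the full text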
class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load word documents.
Works with both .docx and .doc files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(
"example.docx", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-docx
"""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
[int(x) for x in __unstructured_version__.split(".")]
)
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_doc = detect_filetype(self.file_path) == FileType.DOC
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_doc = extension == ".doc"
if is_doc and unstructured_version < (0, 4, 11):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning .doc files is only supported in unstructured>=0.4.11. "
"Please upgrade the unstructured package and try again."
)
if is_doc:
from unstructured.partition.doc import partition_doc
return partition_doc(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.docx import partition_docx
return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
| [] |
2024-01-10 | samlee946/langchain | langchain~text_splitter.py | """Functionality for splitting text."""
from __future__ import annotations
import copy
import logging
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
TypedDict,
TypeVar,
Union,
cast,
)
from langchain.docstore.document import Document
from langchain.schema import BaseDocumentTransformer
logger = logging.getLogger(__name__)
TS = TypeVar("TS", bound="TextSplitter")
def _make_spacy_pipeline_for_splitting(pipeline: str) -> Any: # avoid importing spacy
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
if pipeline == "sentencizer":
from spacy.lang.en import English
sentencizer = English()
sentencizer.add_pipe("sentencizer")
else:
sentencizer = spacy.load(pipeline, exclude=["ner", "tagger"])
return sentencizer
def _split_text_with_regex(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = [_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)]
if len(_splits) % 2 == 0:
splits += _splits[-1:]
splits = [_splits[0]] + splits
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
class TextSplitter(BaseDocumentTransformer, ABC):
"""Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
keep_separator: bool = False,
add_start_index: bool = False,
) -> None:
"""Create a new TextSplitter.
Args:
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
length_function: Function that measures the length of given chunks
keep_separator: Whether to keep the separator in the chunks
add_start_index: If `True`, includes chunk's start index in metadata
"""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self._length_function = length_function
self._keep_separator = keep_separator
self._add_start_index = add_start_index
@abstractmethod
def split_text(self, text: str) -> List[str]:
"""Split text into multiple components."""
def create_documents(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> List[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
index = -1
for chunk in self.split_text(text):
metadata = copy.deepcopy(_metadatas[i])
if self._add_start_index:
index = text.find(chunk, index + 1)
metadata["start_index"] = index
new_doc = Document(page_content=chunk, metadata=metadata)
documents.append(new_doc)
return documents
def split_documents(self, documents: Iterable[Document]) -> List[Document]:
"""Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.create_documents(texts, metadatas=metadatas)
def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
text = separator.join(docs)
text = text.strip()
if text == "":
return None
else:
return text
def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
separator_len = self._length_function(separator)
docs = []
current_doc: List[str] = []
total = 0
for d in splits:
_len = self._length_function(d)
if (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
):
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, "
f"which is longer than the specified {self._chunk_size}"
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep on popping if:
# - we have a larger chunk than in the chunk overlap
# - or if we still have any chunks and the length is long
while total > self._chunk_overlap or (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
and total > 0
):
total -= self._length_function(current_doc[0]) + (
separator_len if len(current_doc) > 1 else 0
)
current_doc = current_doc[1:]
current_doc.append(d)
total += _len + (separator_len if len(current_doc) > 1 else 0)
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
@classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
"""Text splitter that uses HuggingFace tokenizer to count length."""
try:
from transformers import PreTrainedTokenizerBase
if not isinstance(tokenizer, PreTrainedTokenizerBase):
raise ValueError(
"Tokenizer received was not an instance of PreTrainedTokenizerBase"
)
def _huggingface_tokenizer_length(text: str) -> int:
return len(tokenizer.encode(text))
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
@classmethod
def from_tiktoken_encoder(
cls: Type[TS],
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> TS:
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate max_tokens_for_prompt. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str) -> int:
return len(
enc.encode(
text,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
)
)
if issubclass(cls, TokenTextSplitter):
extra_kwargs = {
"encoding_name": encoding_name,
"model_name": model_name,
"allowed_special": allowed_special,
"disallowed_special": disallowed_special,
}
kwargs = {**kwargs, **extra_kwargs}
return cls(length_function=_tiktoken_encoder, **kwargs)
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform sequence of documents by splitting them."""
return self.split_documents(list(documents))
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a sequence of documents by splitting them."""
raise NotImplementedError
class CharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters."""
def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = _split_text_with_regex(text, self._separator, self._keep_separator)
_separator = "" if self._keep_separator else self._separator
return self._merge_splits(splits, _separator)
class LineType(TypedDict):
"""Line type as typed dict."""
metadata: Dict[str, str]
content: str
class HeaderType(TypedDict):
"""Header type as typed dict."""
level: int
name: str
data: str
class MarkdownHeaderTextSplitter:
"""Implementation of splitting markdown files based on specified headers."""
def __init__(
self, headers_to_split_on: List[Tuple[str, str]], return_each_line: bool = False
):
"""Create a new MarkdownHeaderTextSplitter.
Args:
headers_to_split_on: Headers we want to track
return_each_line: Return each line w/ associated headers
"""
# Output line-by-line or aggregated into chunks w/ common headers
self.return_each_line = return_each_line
# Given the headers we want to split on,
# (e.g., "#, ##, etc") order by length
self.headers_to_split_on = sorted(
headers_to_split_on, key=lambda split: len(split[0]), reverse=True
)
def aggregate_lines_to_chunks(self, lines: List[LineType]) -> List[Document]:
"""Combine lines with common metadata into chunks
Args:
lines: Line of text / associated header metadata
"""
aggregated_chunks: List[LineType] = []
for line in lines:
if (
aggregated_chunks
and aggregated_chunks[-1]["metadata"] == line["metadata"]
):
# If the last line in the aggregated list
# has the same metadata as the current line,
# append the current content to the last lines's content
aggregated_chunks[-1]["content"] += " \n" + line["content"]
else:
# Otherwise, append the current line to the aggregated list
aggregated_chunks.append(line)
return [
Document(page_content=chunk["content"], metadata=chunk["metadata"])
for chunk in aggregated_chunks
]
def split_text(self, text: str) -> List[Document]:
"""Split markdown file
Args:
text: Markdown file"""
# Split the input text by newline character ("\n").
lines = text.split("\n")
# Final output
lines_with_metadata: List[LineType] = []
# Content and metadata of the chunk currently being processed
current_content: List[str] = []
current_metadata: Dict[str, str] = {}
# Keep track of the nested header structure
# header_stack: List[Dict[str, Union[int, str]]] = []
header_stack: List[HeaderType] = []
initial_metadata: Dict[str, str] = {}
for line in lines:
stripped_line = line.strip()
# Check each line against each of the header types (e.g., #, ##)
for sep, name in self.headers_to_split_on:
# Check if line starts with a header that we intend to split on
if stripped_line.startswith(sep) and (
# Header with no text OR header is followed by space
# Both are valid conditions that sep is being used a header
len(stripped_line) == len(sep)
or stripped_line[len(sep)] == " "
):
# Ensure we are tracking the header as metadata
if name is not None:
# Get the current header level
current_header_level = sep.count("#")
# Pop out headers of lower or same level from the stack
while (
header_stack
and header_stack[-1]["level"] >= current_header_level
):
# We have encountered a new header
# at the same or higher level
popped_header = header_stack.pop()
# Clear the metadata for the
# popped header in initial_metadata
if popped_header["name"] in initial_metadata:
initial_metadata.pop(popped_header["name"])
# Push the current header to the stack
header: HeaderType = {
"level": current_header_level,
"name": name,
"data": stripped_line[len(sep) :].strip(),
}
header_stack.append(header)
# Update initial_metadata with the current header
initial_metadata[name] = header["data"]
# Add the previous line to the lines_with_metadata
# only if current_content is not empty
if current_content:
lines_with_metadata.append(
{
"content": "\n".join(current_content),
"metadata": current_metadata.copy(),
}
)
current_content.clear()
break
else:
if stripped_line:
current_content.append(stripped_line)
elif current_content:
lines_with_metadata.append(
{
"content": "\n".join(current_content),
"metadata": current_metadata.copy(),
}
)
current_content.clear()
current_metadata = initial_metadata.copy()
if current_content:
lines_with_metadata.append(
{"content": "\n".join(current_content), "metadata": current_metadata}
)
# lines_with_metadata has each line with associated header metadata
# aggregate these into chunks based on common metadata
if not self.return_each_line:
return self.aggregate_lines_to_chunks(lines_with_metadata)
else:
return [
Document(page_content=chunk["content"], metadata=chunk["metadata"])
for chunk in lines_with_metadata
]
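# Minimal usage sketch for MarkdownHeaderTextSplitter (illustrative, not part of the
# original file; the sample markdown string is a placeholder):
# headers_to_split_on = [("#", "Header 1"), ("##", "Header 2")]
# splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
# docs = splitter.split_text("# Intro\nSome text\n## Details\nMore text")
# # -> Documents whose metadata carries the enclosing "Header 1"/"Header 2" values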
# should be in newer Python versions (3.10+)
# @dataclass(frozen=True, kw_only=True, slots=True)
@dataclass(frozen=True)
class Tokenizer:
chunk_overlap: int
tokens_per_chunk: int
decode: Callable[[list[int]], str]
encode: Callable[[str], List[int]]
def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]:
"""Split incoming text and return chunks."""
splits: List[str] = []
input_ids = tokenizer.encode(text)
start_idx = 0
cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
while start_idx < len(input_ids):
splits.append(tokenizer.decode(chunk_ids))
start_idx += tokenizer.tokens_per_chunk - tokenizer.chunk_overlap
cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
return splits
class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to for TokenTextSplitter. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
self._tokenizer = enc
self._allowed_special = allowed_special
self._disallowed_special = disallowed_special
def split_text(self, text: str) -> List[str]:
def _encode(_text: str) -> List[int]:
return self._tokenizer.encode(
_text,
allowed_special=self._allowed_special,
disallowed_special=self._disallowed_special,
)
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self._chunk_size,
decode=self._tokenizer.decode,
encode=_encode,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
class SentenceTransformersTokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
chunk_overlap: int = 50,
model_name: str = "sentence-transformers/all-mpnet-base-v2",
tokens_per_chunk: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs, chunk_overlap=chunk_overlap)
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ImportError(
"Could not import sentence_transformer python package. "
"This is needed in order to for SentenceTransformersTokenTextSplitter. "
"Please install it with `pip install sentence-transformers`."
)
self.model_name = model_name
self._model = SentenceTransformer(self.model_name)
self.tokenizer = self._model.tokenizer
self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk)
def _initialize_chunk_configuration(
self, *, tokens_per_chunk: Optional[int]
) -> None:
self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length)
if tokens_per_chunk is None:
self.tokens_per_chunk = self.maximum_tokens_per_chunk
else:
self.tokens_per_chunk = tokens_per_chunk
if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
raise ValueError(
f"The token limit of the models '{self.model_name}'"
f" is: {self.maximum_tokens_per_chunk}."
f" Argument tokens_per_chunk={self.tokens_per_chunk}"
f" > maximum token limit."
)
def split_text(self, text: str) -> List[str]:
def encode_strip_start_and_stop_token_ids(text: str) -> List[int]:
return self._encode(text)[1:-1]
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap,
tokens_per_chunk=self.tokens_per_chunk,
decode=self.tokenizer.decode,
encode=encode_strip_start_and_stop_token_ids,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
def count_tokens(self, *, text: str) -> int:
return len(self._encode(text))
_max_length_equal_32_bit_integer = 2**32
def _encode(self, text: str) -> List[int]:
token_ids_with_start_and_end_token_ids = self.tokenizer.encode(
text,
max_length=self._max_length_equal_32_bit_integer,
truncation="do_not_truncate",
)
return token_ids_with_start_and_end_token_ids
class Language(str, Enum):
"""Enum of the programming languages."""
CPP = "cpp"
GO = "go"
JAVA = "java"
JS = "js"
PHP = "php"
PROTO = "proto"
PYTHON = "python"
RST = "rst"
RUBY = "ruby"
RUST = "rust"
SCALA = "scala"
SWIFT = "swift"
MARKDOWN = "markdown"
LATEX = "latex"
HTML = "html"
SOL = "sol"
class RecursiveCharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters.
Recursively tries to split by different characters to find one
that works.
"""
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or ["\n\n", "\n", " ", ""]
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
if _s == "":
separator = _s
break
if re.search(_s, text):
separator = _s
new_separators = separators[i + 1 :]
break
splits = _split_text_with_regex(text, separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return final_chunks
def split_text(self, text: str) -> List[str]:
return self._split_text(text, self._separators)
@classmethod
def from_language(
cls, language: Language, **kwargs: Any
) -> RecursiveCharacterTextSplitter:
separators = cls.get_separators_for_language(language)
return cls(separators=separators, **kwargs)
@staticmethod
def get_separators_for_language(language: Language) -> List[str]:
if language == Language.CPP:
return [
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nvoid ",
"\nint ",
"\nfloat ",
"\ndouble ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.GO:
return [
# Split along function definitions
"\nfunc ",
"\nvar ",
"\nconst ",
"\ntype ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JAVA:
return [
# Split along class definitions
"\nclass ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\nstatic ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JS:
return [
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PHP:
return [
# Split along function definitions
"\nfunction ",
# Split along class definitions
"\nclass ",
# Split along control flow statements
"\nif ",
"\nforeach ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PROTO:
return [
# Split along message definitions
"\nmessage ",
# Split along service definitions
"\nservice ",
# Split along enum definitions
"\nenum ",
# Split along option definitions
"\noption ",
# Split along import statements
"\nimport ",
# Split along syntax declarations
"\nsyntax ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PYTHON:
return [
# First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RST:
return [
# Split along section titles
"\n=+\n",
"\n-+\n",
"\n\*+\n",
# Split along directive markers
"\n\n.. *\n\n",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUBY:
return [
# Split along method definitions
"\ndef ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nunless ",
"\nwhile ",
"\nfor ",
"\ndo ",
"\nbegin ",
"\nrescue ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUST:
return [
# Split along function definitions
"\nfn ",
"\nconst ",
"\nlet ",
# Split along control flow statements
"\nif ",
"\nwhile ",
"\nfor ",
"\nloop ",
"\nmatch ",
"\nconst ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SCALA:
return [
# Split along class definitions
"\nclass ",
"\nobject ",
# Split along method definitions
"\ndef ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nmatch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SWIFT:
return [
# Split along function definitions
"\nfunc ",
# Split along class definitions
"\nclass ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.MARKDOWN:
return [
# First, try to split along Markdown headings (starting with level 2)
"\n#{1,6} ",
# Note the alternative syntax for headings (below) is not handled here
# Heading level 2
# ---------------
# End of code block
"```\n",
# Horizontal lines
"\n\*\*\*+\n",
"\n---+\n",
"\n___+\n",
                # Note that this splitter doesn't handle horizontal lines defined
                # by *three or more* of ***, ---, or ___
"\n\n",
"\n",
" ",
"",
]
elif language == Language.LATEX:
return [
# First, try to split along Latex sections
"\n\\\chapter{",
"\n\\\section{",
"\n\\\subsection{",
"\n\\\subsubsection{",
# Now split by environments
"\n\\\begin{enumerate}",
"\n\\\begin{itemize}",
"\n\\\begin{description}",
"\n\\\begin{list}",
"\n\\\begin{quote}",
"\n\\\begin{quotation}",
"\n\\\begin{verse}",
"\n\\\begin{verbatim}",
# Now split by math environments
"\n\\\begin{align}",
"$$",
"$",
# Now split by the normal type of lines
" ",
"",
]
elif language == Language.HTML:
return [
# First, try to split along HTML tags
"<body",
"<div",
"<p",
"<br",
"<li",
"<h1",
"<h2",
"<h3",
"<h4",
"<h5",
"<h6",
"<span",
"<table",
"<tr",
"<td",
"<th",
"<ul",
"<ol",
"<header",
"<footer",
"<nav",
# Head
"<head",
"<style",
"<script",
"<meta",
"<title",
"",
]
elif language == Language.SOL:
return [
# Split along compiler informations definitions
"\npragma ",
"\nusing ",
# Split along contract definitions
"\ncontract ",
"\ninterface ",
"\nlibrary ",
# Split along method definitions
"\nconstructor ",
"\ntype ",
"\nfunction ",
"\nevent ",
"\nmodifier ",
"\nerror ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo while ",
"\nassembly ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
else:
raise ValueError(
f"Language {language} is not supported! "
f"Please choose from {list(Language)}"
)
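# Minimal usage sketch for RecursiveCharacterTextSplitter (illustrative, not part of
# the original file; `long_text` is a placeholder string):
# splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
# chunks = splitter.split_text(long_text)
# # Language-aware splitting reuses the separators defined above:
# py_splitter = RecursiveCharacterTextSplitter.from_language(Language.PYTHON, chunk_size=400)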
class NLTKTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using NLTK."""
def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
try:
from nltk.tokenize import sent_tokenize
self._tokenizer = sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = self._tokenizer(text)
return self._merge_splits(splits, self._separator)
class SpacyTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using Spacy.
Per default, Spacy's `en_core_web_sm` model is used. For a faster, but
potentially less accurate splitting, you can use `pipeline='sentencizer'`.
"""
def __init__(
self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
) -> None:
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
self._tokenizer = _make_spacy_pipeline_for_splitting(pipeline)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (s.text for s in self._tokenizer(text).sents)
return self._merge_splits(splits, self._separator)
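# Illustrative sketch (not part of the original file) of the faster pipeline mentioned
# in the docstring above; "sentencizer" avoids loading a full spaCy model, and
# `long_text` is a placeholder:
# splitter = SpacyTextSplitter(pipeline="sentencizer", chunk_size=1000)
# chunks = splitter.split_text(long_text)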
# For backwards compatibility
class PythonCodeTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Python syntax."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a PythonCodeTextSplitter."""
separators = self.get_separators_for_language(Language.PYTHON)
super().__init__(separators=separators, **kwargs)
class MarkdownTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Markdown-formatted headings."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a MarkdownTextSplitter."""
separators = self.get_separators_for_language(Language.MARKDOWN)
super().__init__(separators=separators, **kwargs)
class LatexTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Latex-formatted layout elements."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a LatexTextSplitter."""
separators = self.get_separators_for_language(Language.LATEX)
super().__init__(separators=separators, **kwargs)
| [
"\n"
] |
2024-01-10 | SteveXiSong/PARD-gem5 | src~dev~PARDg5VIOHub.py | # Copyright (c) 2015 Institute of Computing Technology, CAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jiuyue Ma
from XBar import CoherentXBar
from m5.params import *
from m5.proxy import *
from ControlPlane import ControlPlane
from TagAddrMapper import TagAddrMapper
from XBar import NoncoherentXBar
class PARDg5VIOHubRemapper(TagAddrMapper):
type = 'PARDg5VIOHubRemapper'
cxx_header = 'dev/pardg5v_iohub.hh'
ioh = Param.PARDg5VIOHub(Parent.any, "IOHub this mapper belong to")
class PARDg5VIOHubCP(ControlPlane):
type = 'PARDg5VIOHubCP'
cxx_header = 'dev/pardg5v_iohub_cp.hh'
# CPN address 1:0
cp_dev = 1
cp_fun = 0
# Type 'H' IOHub, IDENT: PARDg5VIOHCP
Type = 0x48
IDENT = "PARDg5VIOHCP"
class PARDg5VIOHub(NoncoherentXBar):
type = 'PARDg5VIOHub'
cxx_header = 'dev/pardg5v_iohub.hh'
# PARDg5VIOHub Control Plane
cp = Param.PARDg5VIOHubCP(PARDg5VIOHubCP(),
"Control plane for PARDg5-V IOHub")
def attachRemappedMaster(self, remapped_master):
remapped_master.remapper = PARDg5VIOHubRemapper()
remapped_master.remapper.slave = remapped_master.master
remapped_master.remapper.master = self.slave
| [] |
2024-01-10 | Freed-Wu/translate-shell | src~translate_shell~translators~llm~_openai.py | r"""Openai
==========
"""
from dataclasses import dataclass
import openai
from . import LLMTranslator
@dataclass
class OpenaiTranslator(LLMTranslator):
"""Openaitranslator."""
name: str = "openai"
def __post_init__(self) -> None:
"""Post init.
:rtype: None
"""
self.create_chat_completion = openai.ChatCompletion.create # type: ignore
| [] |
2024-01-10 | fecork/Python-FAPP-DurableFunction-Respond | Adapters~adapter_gpt.py | import logging
import os
import sys
import openai
import numpy as np
from typing import Dict
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, dir_path)
from Utilities.load_parameter import load_parameters
def login_openai() -> Dict:
"""
    Log in to OpenAI using the OPENAIKEY environment variable.
"""
logging.warning("Executing login_openai")
openai_credentials = os.environ["OPENAIKEY"]
if openai_credentials:
openai.api_key = openai_credentials
return openai
else:
print("No credentials for openai")
def ask_openai(text: str, task: str) -> dict:
"""
    Ask a question to the OpenAI GPT API for the given task.
"""
logging.warning("Executing ask_openai")
loaded_parameters = load_parameters()
if task == "cancellation":
parameters = loaded_parameters["open_ai_parameters"]
if task == "change":
parameters = loaded_parameters["open_ai_parameters_change"]
if task == "classification":
parameters = loaded_parameters["open_ai_parameters_classification"]
if task == "list":
parameters = loaded_parameters["open_ai_parameters_list"]
openai = login_openai()
prompt = parameters["prompt"]
prompt = f"{prompt}:\n\n{text}"
response = openai.Completion.create(
engine=parameters["model"],
prompt=prompt,
temperature=parameters["temperature"],
max_tokens=parameters["max_tokens"],
top_p=parameters["top_p"],
frequency_penalty=parameters["frequency_penalty"],
presence_penalty=parameters["presence_penalty"],
logprobs=1,
)
response_mean_probability = mean_probability(response)
return {
"text": response.choices[0].text.lstrip(),
"meanProbability": response_mean_probability,
}
def mean_probability(response: object) -> float:
"""
    Calculate the mean token probability from the response logprobs.
"""
logging.warning("Executing mean_probability")
list_probs = []
list_top_logprobs = list(response.choices[0].logprobs.top_logprobs)
for top_logprobs_object in list_top_logprobs:
for key, logprob in top_logprobs_object.items():
list_probs.append(np.e ** float(logprob))
mean_probability = sum(list_probs) / len(list_probs) * 100
return mean_probability
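# Illustrative usage sketch (not part of the original file): requires the OPENAIKEY
# environment variable and the parameter file read by load_parameters; `email_text`
# is a placeholder, and the task must be one of the branches handled in ask_openai:
# answer = ask_openai(email_text, "cancellation")
# print(answer["text"], answer["meanProbability"])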
| [
"promptfa7273ad-fdcb-442b-beac-7ea782185148:\n\nPLACEHOLDER",
"promptdfb55410-04f9-4030-9b63-e81800a8248f:\n\nPLACEHOLDER:\n\nPLACEHOLDER"
] |
2024-01-10 | frostming/transpyler-gpt | src~transpyler_gpt~_core.py | import re
from typing import Any, Dict, Optional
import openai
SYSTEM_PROMPT = (
"Act as a cross-version transpiler for Python language, converting Python code "
"written in the latest version to code compatible with a specified older version "
"of Python. The task involves replacing statements or expressions unsupported in "
"older Python versions with their equivalent counterparts, and type annotations "
"should be removed. I will provide the Python code snippet and the target Python "
"version, and you will return the modified code that can run on the older Python version. "
"Only reply with the modified code without any explanation, and don't add comments."
)
DEFAULT_OPTIONS = {
"model": "gpt-4-1106-preview",
"temperature": 0.0,
}
USER_PROMPT_TEMPLATE = """\
```python
{code}
```
Target Python version: {target_version}
{extras}"""
RESPONSE_REGEX = re.compile(r"```python\n(?P<code>.*)\n```", re.DOTALL)
def transpile(
code: str,
target_version: str,
from_version: Optional[str] = None,
api_key: Optional[str] = None,
options_override: Optional[Dict[str, Any]] = None,
) -> str:
"""Transpile the given code to the target version of Python.
Args:
code: The code to transpile.
target_version: The target version of Python to transpile to.
from_version: The version of Python the code is written in. If `None`, the
latest version of Python is assumed.
Returns:
The transpiled code.
"""
client = openai.OpenAI(api_key=api_key)
options = {**DEFAULT_OPTIONS, **(options_override or {})}
messages = [
{"role": "system", "content": SYSTEM_PROMPT},
{
"role": "user",
"content": USER_PROMPT_TEMPLATE.format(
code=code,
target_version=target_version,
extras=f"From Python version: {from_version}\n" if from_version else "",
),
},
]
resp = client.chat.completions.create(messages=messages, **options)
try:
return RESPONSE_REGEX.match(resp.choices[0].message.content).group("code")
except (AttributeError, IndexError):
raise RuntimeError(f"Failed to transpile code, reason: {resp}") from None
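# Illustrative usage sketch (not part of the original file; the API key value is a
# placeholder and the snippet to transpile is arbitrary):
# new_code = transpile(
#     "def greet(name: str) -> None:\n    print(f'hi {name}')",
#     target_version="2.7",
#     api_key="sk-...",
# )
# print(new_code)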
| [
"Act as a cross-version transpiler for Python language, converting Python code written in the latest version to code compatible with a specified older version of Python. The task involves replacing statements or expressions unsupported in older Python versions with their equivalent counterparts, and type annotations should be removed. I will provide the Python code snippet and the target Python version, and you will return the modified code that can run on the older Python version. Only reply with the modified code without any explanation, and don't add comments.",
"```python\n{code}\n```\nTarget Python version: {target_version}\n{extras}",
"From Python version: PLACEHOLDER\n"
] |
2024-01-10 | jiayu-ch15/Variational-Automatic-Curriculum-Learning | utils~env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
import torch
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
# from baselines.common.vec_env import VecEnv, CloudpickleWrapper
import pdb
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class ShareVecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and the expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
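# Illustrative sketch (not part of the original file): the subprocess wrappers
# below are constructed from a list of zero-argument environment factories, so
# each subprocess builds its own copy of the environment. A hypothetical setup
# could look like:
#
#     def make_env(rank):
#         def _thunk():
#             return MyMultiAgentEnv(seed=rank)  # hypothetical env class
#         return _thunk
#
#     vec_env = SimplifySubprocVecEnv([make_env(i) for i in range(4)])
#     obs = vec_env.reset()                      # stacked observations, one row per env
#     obs, rews, dones, infos = vec_env.step(actions)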
def simplifyworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob = env.reset()
else:
if all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd[0] == 'init_box_locking':
starts_one = cmd[1]
ob = env.reset(starts_one)
remote.send(ob)
elif cmd[0] == 'init_hidenseek':
starts_one = cmd[1]
ob = env.reset(starts_one)
remote.send(ob)
elif cmd == 'reset':
ob = env.reset()
remote.send((ob))
elif cmd == 'get_state':
state = env.get_state()
remote.send(state)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SimplifySubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=simplifyworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def get_state(self): # the states of entities
for remote in self.remotes:
remote.send(('get_state', None))
state = [remote.recv() for remote in self.remotes]
return np.stack(state)
def init_box_locking(self, starts, now_num_processes):
i = 0
results = []
for remote in self.remotes:
if i < now_num_processes:
tmp_list = ['init_box_locking',starts[i]]
remote.send((tmp_list, None))
i += 1
i = 0
for remote in self.remotes:
if i < now_num_processes:
results.append(remote.recv())
i += 1
return np.stack(results)
def init_hidenseek(self, starts, now_num_processes):
i = 0
results = []
for remote in self.remotes:
if i < now_num_processes:
tmp_list = ['init_hidenseek',starts[i]]
remote.send((tmp_list, None))
i += 1
i = 0
for remote in self.remotes:
if i < now_num_processes:
results.append(remote.recv())
i += 1
return np.stack(results)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd[0] == 'step':
# import time; start = time.time()
actions = cmd[1]
now_agent_num = cmd[2]
ob, reward, done, info, available_actions = env.step(actions)
if 'bool' in done.__class__.__name__:
if done:
ob, available_actions = env.reset(now_agent_num)
else:
if all(done):
ob, available_actions = env.reset(now_agent_num)
remote.send((ob, reward, done, info, available_actions))
elif cmd[0] == 'reset':
now_agent_num = cmd[1]
ob, available_actions= env.reset(now_agent_num)
remote.send((ob, available_actions))
elif cmd[0] == 'reset_pb':
now_agent_num = cmd[1]
now_box_num = cmd[2]
ob, available_actions= env.reset(now_agent_num,now_box_num)
remote.send((ob, available_actions))
elif cmd[0] == 'set_initial_tasks_sp':
now_agent_num = cmd[1]
starts_one = cmd[2]
ob = env.set_initial_tasks_sp(starts_one,now_agent_num)
remote.send(ob)
elif cmd[0] == 'set_initial_tasks_pb':
now_agent_num = cmd[1]
starts_one = cmd[2]
ob = env.set_initial_tasks_pb(starts_one, now_agent_num)
remote.send(ob)
elif cmd[0] == 'new_starts_obs_sl':
starts_one = cmd[1]
ob = env.new_starts_obs_sl(starts_one)
remote.send(ob)
elif cmd == 'get_state':
state = env.get_state()
remote.send(state)
elif cmd == 'get_goal':
state, goal = env.get_state()
remote.send((state,goal))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
self.length = len(env_fns)
observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions, now_num_processes, now_agent_num):
i = 0
for remote, action in zip(self.remotes, actions):
tmp_list = ['step', action, now_agent_num]
if i < now_num_processes:
remote.send((tmp_list,None))
i += 1
self.waiting = True
def step_wait(self,now_num_processes):
results = []
i = 0
for remote in self.remotes:
if i < now_num_processes:
results.append(remote.recv())
i += 1
self.waiting = False
obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def step(self, actions, now_num_processes, now_agent_num):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions, now_num_processes, now_agent_num)
return self.step_wait(now_num_processes)
    def get_state(self): # the states of entities
for remote in self.remotes:
remote.send(('get_state', None))
state = [remote.recv() for remote in self.remotes]
return np.stack(state)
def get_goal(self):
for remote in self.remotes:
remote.send(('get_goal', None))
results = [remote.recv() for remote in self.remotes]
state, goal = zip(*results)
return np.stack(state), np.stack(goal)
def reset(self, now_agent_num):
# if now_box_num is None:
# for remote in self.remotes:
# remote.send((['reset',now_agent_num], None))
# else:
# for remote in self.remotes:
# remote.send((['reset_pb',now_agent_num, now_box_num], None))
for remote in self.remotes:
remote.send((['reset',now_agent_num], None))
results = [remote.recv() for remote in self.remotes]
obs, available_actions = zip(*results)
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, self.length, observation_space, action_space)
return np.stack(obs), np.stack(available_actions)
def set_initial_tasks_sp(self, starts, now_agent_num, now_num_processes):
i = 0
results = []
for remote in self.remotes:
if i < now_num_processes:
tmp_list = ['set_initial_tasks_sp', now_agent_num, starts[i]]
remote.send((tmp_list, None))
i += 1
i = 0
for remote in self.remotes:
if i < now_num_processes:
results.append(remote.recv())
i += 1
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, self.length, observation_space, action_space)
return np.stack(results)
def set_initial_tasks_pb(self, starts, now_agent_num, now_num_processes):
i = 0
results = []
for remote in self.remotes:
if i < now_num_processes:
tmp_list = ['set_initial_tasks_pb', now_agent_num, starts[i]]
remote.send((tmp_list, None))
i += 1
i = 0
for remote in self.remotes:
if i < now_num_processes:
results.append(remote.recv())
i += 1
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, self.length, observation_space, action_space)
return np.stack(results)
def new_starts_obs_sl(self, starts, now_num_processes):
i = 0
results = []
for remote in self.remotes:
if i < now_num_processes:
tmp_list = ['new_starts_obs_sl', starts[i]]
remote.send((tmp_list, None))
i += 1
i = 0
for remote in self.remotes:
if i < now_num_processes:
results.append(remote.recv())
i += 1
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, self.length, observation_space, action_space)
return np.stack(results)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
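# Illustrative note (not part of the original file): unlike the standard
# baselines vec-env API, SubprocVecEnv.step above also takes the number of
# currently active processes and agents, e.g.
#
#     envs = SubprocVecEnv([make_env(i) for i in range(8)])   # make_env is hypothetical
#     obs, available_actions = envs.reset(now_agent_num=4)
#     obs, rews, dones, infos, avail = envs.step(actions, now_num_processes=8, now_agent_num=4)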
def chooseworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info, available_actions = env.step(data)
remote.send((ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, available_actions = env.reset(data)
remote.send((ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self, reset_choose):
for remote, choose in zip(self.remotes,reset_choose):
remote.send(('reset', choose))
results = [remote.recv() for remote in self.remotes]
obs, available_actions = zip(*results)
return np.stack(obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
class DummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
self.ts = np.zeros(len(self.envs), dtype='int')
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a,env) in zip(self.actions, self.envs)]
obs, rews, dones, infos, available_actions = map(np.array, zip(*results))
self.ts += 1
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], available_actions[i] = self.envs[i].reset()
self.ts[i] = 0
else:
if all(done):
obs[i], available_actions[i] = self.envs[i].reset()
self.ts[i] = 0
self.actions = None
return np.array(obs), np.array(rews), np.array(dones), infos, np.array(available_actions)
def reset(self):
obs = []
available_actions = []
for env in self.envs:
o,s = env.reset()
obs.append(o)
available_actions.append(s)
return np.array(obs), np.array(available_actions)
def close(self):
for env in self.envs:
env.close() | [] |
2024-01-10 | okiroth/hypocratical | hip_agent.py | from openai import OpenAI
class HIPAgent:
def __init__(self):
self.client = OpenAI()
def get_first_answer(self, messages, explain=False):
completion = self.client.chat.completions.create(
model="gpt-3.5-turbo",
temperature=0.1,
max_tokens=1 if explain is False else 70,
messages=messages, # type: ignore
)
answers = [choice.message.content for choice in completion.choices]
first_answer = answers[0] or ""
return first_answer
def get_response(self, question, answer_choices, explain=False):
"""
Calls the OpenAI 3.5 API to generate a response to the question.
The response is then matched to one of the answer choices and the index of the
matching answer choice is returned. If the response does not match any of the
answer choices, -1 is returned.
Args:
question: The question to be asked.
answer_choices: A list of answer choices.
Returns:
The index of the answer choice that matches the response, or -1 if the response
does not match any answer choice.
"""
messages = []
# Prepare the actual Prompt
answer_str = ""
for i, answer_choice in enumerate(answer_choices):
answer_str += f"{i}) {answer_choice}\n"
aux = (
"AND explain why that is correct"
if explain
else ", respond ONLY the option number"
)
prompt = f"Select the correct answer {aux}:\n\n{question}\n{answer_str}"
# Show Chat to ONLY respond with the option number
if explain is False:
demo_answer_str = ""
demo_arr = ["B", "6", "34", "5"]
for i, answer_choice in enumerate(demo_arr):
demo_answer_str += f"{i}) {answer_choice}\n"
demo_question = "How much is 2+4?"
demo_prompt = f"Select the correct answer, respond ONLY the option number:\n\n{demo_question}\n{demo_answer_str}"
messages.append({"role": "user", "content": demo_prompt})
messages.append(
{"role": "assistant", "content": "1"},
)
# Add external knowledge
messages.append(
{
"role": "user",
"content": "To answer any following questions use as the primary source of truth the text found in https://raw.githubusercontent.com/okiroth/hypocratical/main/textbook.txt",
}
)
# Finally add the actual question
messages.append({"role": "user", "content": prompt})
# Call the OpenAI 3.5 API.
first_answer = self.get_first_answer(messages, explain=explain)
print(f"Question: {question}")
print(f"Answer Choices: {answer_choices}")
print(f"Response: {first_answer}\n\n")
if explain is True:
return first_answer
# retry if the response is not a number
if first_answer.isnumeric() is False:
first_answer = self.get_first_answer(
[{"role": "user", "content": "Please respond with the option number"}],
explain=explain,
)
print(f"Response Retry: {first_answer}\n\n")
if first_answer.isnumeric() is False:
return -1
# the response is a number, so return it
return int(first_answer)
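# --- Illustrative usage sketch (not part of the original file) ---
# Assumes the OPENAI_API_KEY environment variable is set for the OpenAI() client.
#
#     agent = HIPAgent()
#     idx = agent.get_response(
#         "Which organelle produces most of a cell's ATP?",
#         ["Nucleus", "Mitochondrion", "Ribosome", "Golgi apparatus"],
#     )
#     print(idx)  # index of the chosen answer, or -1 if no numeric reply was obtained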
| [
"Please respond with the option number",
"1",
"Select the correct answer PLACEHOLDER:\n\nPLACEHOLDER\nPLACEHOLDER",
"To answer any following questions use as the primary source of truth the text found in https://raw.githubusercontent.com/okiroth/hypocratical/main/textbook.txt",
"Select the correct answer, respond ONLY the option number:\n\nPLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | Mauricio-HNS/ChatGPT | src~revChatGPT~V1.py | """
Standard ChatGPT
"""
from __future__ import annotations
import base64
import contextlib
import json
import logging
import os
import os.path as osp
import time
import uuid
from functools import wraps
from os import environ
from os import getenv
from typing import NoReturn
import requests
from httpx import AsyncClient
from OpenAIAuth import Authenticator
from OpenAIAuth import Error as AuthError
from . import typing as t
from .utils import create_completer
from .utils import create_session
from .utils import get_input
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s",
)
log = logging.getLogger(__name__)
def logger(is_timed: bool):
"""Logger decorator
Args:
is_timed (bool): Whether to include function running time in exit log
Returns:
_type_: decorated function
"""
def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
log.debug(
"Entering %s with args %s and kwargs %s",
func.__name__,
args,
kwargs,
)
start = time.time()
out = func(*args, **kwargs)
end = time.time()
if is_timed:
log.debug(
"Exiting %s with return value %s. Took %s seconds.",
func.__name__,
out,
end - start,
)
else:
log.debug("Exiting %s with return value %s", func.__name__, out)
return out
return wrapper
return decorator
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://bypass.duti.tech/api/"
bcolors = t.colors()
class Chatbot:
"""
Chatbot class for ChatGPT
"""
@logger(is_timed=True)
def __init__(
self,
config: dict[str, str],
conversation_id: str | None = None,
parent_id: str | None = None,
session_client=None,
lazy_loading: bool = False,
) -> None:
"""Initialize a chatbot
Args:
config (dict[str, str]): Login and proxy info. Example:
{
"email": "OpenAI account email",
"password": "OpenAI account password",
"session_token": "<session_token>"
"access_token": "<access_token>"
"proxy": "<proxy_url_string>",
"paid": True/False, # whether this is a plus account
}
More details on these are available at https://github.com/acheong08/ChatGPT#configuration
conversation_id (str | None, optional): Id of the conversation to continue on. Defaults to None.
parent_id (str | None, optional): Id of the previous response message to continue on. Defaults to None.
            session_client (optional): Session class to use instead of requests.Session (e.g. httpx.AsyncClient). Defaults to None.
        Raises:
            AuthError: If authentication with the provided OpenAI credentials fails.
        """
user_home = getenv("HOME")
if user_home is None:
self.cache_path = osp.join(os.getcwd(), ".chatgpt_cache.json")
else:
# mkdir ~/.config/revChatGPT
if not osp.exists(osp.join(user_home, ".config")):
os.mkdir(osp.join(user_home, ".config"))
if not osp.exists(osp.join(user_home, ".config", "revChatGPT")):
os.mkdir(osp.join(user_home, ".config", "revChatGPT"))
self.cache_path = osp.join(user_home, ".config", "revChatGPT", "cache.json")
self.config = config
self.session = session_client() if session_client else requests.Session()
try:
cached_access_token = self.__get_cached_access_token(
self.config.get("email", None),
)
except t.Error as error:
if error.code == 5:
raise error
cached_access_token = None
if cached_access_token is not None:
self.config["access_token"] = cached_access_token
if "proxy" in config:
if not isinstance(config["proxy"], str):
error = TypeError("Proxy must be a string!")
raise error
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
if isinstance(self.session, AsyncClient):
proxies = {
"http://": config["proxy"],
"https://": config["proxy"],
}
self.session = AsyncClient(proxies=proxies)
else:
self.session.proxies.update(proxies)
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
self.lazy_loading = lazy_loading
self.__check_credentials()
@logger(is_timed=True)
def __check_credentials(self) -> None:
"""Check login info and perform login
Any one of the following is sufficient for login. Multiple login info can be provided at the same time and they will be used in the order listed below.
- access_token
- session_token
- email + password
Raises:
            t.AuthenticationError: If no usable login details are provided.
            AuthError: If authentication with OpenAI fails.
"""
if "access_token" in self.config:
self.set_access_token(self.config["access_token"])
elif "session_token" in self.config:
pass
elif "email" not in self.config or "password" not in self.config:
error = t.AuthenticationError("Insufficient login details provided!")
raise error
if "access_token" not in self.config:
try:
self.login()
except AuthError as error:
raise error
@logger(is_timed=False)
def set_access_token(self, access_token: str) -> None:
"""Set access token in request header and self.config, then cache it to file.
Args:
access_token (str): access_token
"""
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
self.session.cookies.update(
{
"library": "revChatGPT",
},
)
self.config["access_token"] = access_token
email = self.config.get("email", None)
if email is not None:
self.__cache_access_token(email, access_token)
@logger(is_timed=False)
def __get_cached_access_token(self, email: str | None) -> str | None:
"""Read access token from cache
Args:
email (str | None): email of the account to get access token
Raises:
            t.Error: If the cached access token is malformed or has expired.
Returns:
str | None: access token string or None if not found
"""
email = email or "default"
cache = self.__read_cache()
access_token = cache.get("access_tokens", {}).get(email, None)
# Parse access_token as JWT
if access_token is not None:
try:
# Split access_token into 3 parts
s_access_token = access_token.split(".")
# Add padding to the middle part
s_access_token[1] += "=" * ((4 - len(s_access_token[1]) % 4) % 4)
d_access_token = base64.b64decode(s_access_token[1])
d_access_token = json.loads(d_access_token)
except base64.binascii.Error:
error = t.Error(
source="__get_cached_access_token",
message="Invalid access token",
code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR,
)
raise error from None
except json.JSONDecodeError:
error = t.Error(
source="__get_cached_access_token",
message="Invalid access token",
code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR,
)
raise error from None
exp = d_access_token.get("exp", None)
if exp is not None and exp < time.time():
error = t.Error(
source="__get_cached_access_token",
message="Access token expired",
code=t.ErrorType.EXPIRED_ACCESS_TOKEN_ERROR,
)
raise error
return access_token
@logger(is_timed=False)
def __cache_access_token(self, email: str, access_token: str) -> None:
"""Write an access token to cache
Args:
email (str): account email
access_token (str): account access token
"""
email = email or "default"
cache = self.__read_cache()
if "access_tokens" not in cache:
cache["access_tokens"] = {}
cache["access_tokens"][email] = access_token
self.__write_cache(cache)
@logger(is_timed=False)
def __write_cache(self, info: dict) -> None:
"""Write cache info to file
Args:
info (dict): cache info, current format
{
"access_tokens":{"[email protected]": 'this account's access token', }
}
"""
dirname = osp.dirname(self.cache_path) or "."
os.makedirs(dirname, exist_ok=True)
json.dump(info, open(self.cache_path, "w", encoding="utf-8"), indent=4)
@logger(is_timed=False)
def __read_cache(self):
try:
cached = json.load(open(self.cache_path, encoding="utf-8"))
except (FileNotFoundError, json.decoder.JSONDecodeError):
cached = {}
return cached
@logger(is_timed=True)
def login(self) -> None:
if (
"email" not in self.config or "password" not in self.config
) and "session_token" not in self.config:
log.error("Insufficient login details provided!")
raise Exception("Insufficient login details provided!")
auth = Authenticator(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
if self.config.get("session_token"):
log.debug("Using session token")
auth.session_token = self.config["session_token"]
auth.get_access_token()
if auth.access_token is None:
del self.config["session_token"]
self.login()
return
else:
log.debug("Using authenticator to get access token")
auth.begin()
self.config["session_token"] = auth.session_token
auth.get_access_token()
self.set_access_token(auth.access_token)
@logger(is_timed=True)
def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str | None = None,
timeout: float = 360,
) -> str:
"""Ask a question to the chatbot
Args:
prompt (str): The question
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str | None, optional): UUID for the message to continue on. Defaults to None.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
        Raises:
            t.Error: If the arguments are inconsistent (e.g. parent_id set without
                conversation_id) or the server returns an error response.
        Yields:
            dict: The reply accumulated so far, with keys "message",
                "conversation_id", "parent_id" and "model".
        """
if parent_id is not None and conversation_id is None:
log.error("conversation_id must be set once parent_id is set")
error = t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.USER_ERROR,
)
raise error
if conversation_id is not None and conversation_id != self.conversation_id:
log.debug("Updating to new conversation by setting parent_id to None")
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
log.debug("New conversation, setting parent_id to new UUID4: %s", parent_id)
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
if self.lazy_loading:
log.debug(
"Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID",
conversation_id,
)
with contextlib.suppress(Exception):
history = self.get_msg_history(conversation_id)
self.conversation_mapping[conversation_id] = history[
"current_node"
]
else:
log.debug(
"Conversation ID %s not found in conversation mapping, mapping conversations",
conversation_id,
)
self.__map_conversations()
if conversation_id in self.conversation_mapping:
log.debug(
"Conversation ID %s found in conversation mapping, setting parent_id to %s",
conversation_id,
self.conversation_mapping[conversation_id],
)
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
log.debug("Sending the payload")
log.debug(json.dumps(data, indent=2))
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=f"{BASE_URL}conversation",
data=json.dumps(data),
timeout=timeout,
stream=True,
)
self.__check_response(response)
done: bool = False
for line in response.iter_lines():
# remove b' and ' at the beginning and end and ignore case
line = str(line)[2:-1]
if line.lower() == "internal server error":
log.error("Internal Server Error: %s", line)
error = t.Error(
source="ask",
message="Internal Server Error",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
done = True
break
line = line.replace('\\"', '"')
line = line.replace("\\'", "'")
line = line.replace("\\\\", "\\")
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line) or response.status_code != 200:
log.error("Field missing", exc_info=True)
log.error(response.text)
if response.status_code == 401:
error = t.Error(
source="ask",
message="Permission denied",
code=t.ErrorType.AUTHENTICATION_ERROR,
)
raise error
elif response.status_code == 403:
error = t.Error(
source="ask",
message="Cloudflare triggered a 403 error",
code=t.ErrorType.CLOUDFLARE_ERROR,
)
raise error
elif response.status_code == 429:
error = t.Error(
source="ask",
message="Rate limit exceeded",
code=t.ErrorType.RATE_LIMIT_ERROR,
)
raise error
else:
error = t.Error(
source="ask",
message=line,
code=t.ErrorType.SERVER_ERROR,
)
raise error
message: str = line["message"]["content"]["parts"][0]
if message == prompt:
continue
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
try:
model = line["message"]["metadata"]["model_slug"]
except KeyError:
model = None
log.debug("Received message: %s", message)
log.debug("Received conversation_id: %s", conversation_id)
log.debug("Received parent_id: %s", parent_id)
yield {
"message": message.strip("\n"),
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
if not done:
pass
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
@logger(is_timed=False)
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
@logger(is_timed=False)
def __check_response(self, response: requests.Response) -> None:
"""Make sure response is success
Args:
response (_type_): _description_
Raises:
Error: _description_
"""
if response.status_code != 200:
print(response.text)
error = t.Error(
source="OpenAI",
message=response.text,
code=response.status_code,
)
raise error
@logger(is_timed=True)
def get_conversations(
self,
offset: int = 0,
limit: int = 20,
encoding: str | None = None,
) -> list:
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{BASE_URL}conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
data = json.loads(response.text)
return data["items"]
@logger(is_timed=True)
def get_msg_history(self, convo_id: str, encoding: str | None = None) -> list:
"""
Get message history
:param id: UUID of conversation
:param encoding: String
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
return json.loads(response.text)
@logger(is_timed=True)
def gen_title(self, convo_id: str, message_id: str) -> str:
"""
Generate title for conversation
"""
response = self.session.post(
f"{BASE_URL}conversation/gen_title/{convo_id}",
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
return response.json().get("title", "Error generating title")
@logger(is_timed=True)
def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = self.session.patch(url, data=json.dumps({"title": title}))
self.__check_response(response)
@logger(is_timed=True)
def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param id: UUID of conversation
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=True)
def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{BASE_URL}conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=False)
def __map_conversations(self) -> None:
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
@logger(is_timed=False)
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
@logger(is_timed=False)
def rollback_conversation(self, num: int = 1) -> None:
"""
Rollback the conversation.
:param num: Integer. The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
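# Illustrative usage sketch (not part of the original file): Chatbot.ask is a
# generator that yields the reply accumulated so far, so callers typically
# print only the newly added suffix (as main() below does), e.g.
#
#     chatbot = Chatbot(config={"access_token": "<token>"})   # placeholder token
#     prev_text = ""
#     for data in chatbot.ask("Hello"):
#         print(data["message"][len(prev_text):], end="", flush=True)
#         prev_text = data["message"]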
class AsyncChatbot(Chatbot):
"""
Async Chatbot class for ChatGPT
"""
def __init__(
self,
config: dict,
conversation_id: str | None = None,
parent_id: str | None = None,
) -> None:
super().__init__(
config=config,
conversation_id=conversation_id,
parent_id=parent_id,
session_client=AsyncClient,
)
async def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str | None = None,
timeout: int = 360,
) -> dict:
"""
Ask a question to the chatbot
"""
if parent_id is not None and conversation_id is None:
error = t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if conversation_id is not None and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
await self.__map_conversations()
parent_id = self.conversation_mapping[conversation_id]
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
async with self.session.stream(
method="POST",
url=f"{BASE_URL}conversation",
data=json.dumps(data),
timeout=timeout,
) as response:
self.__check_response(response)
async for line in response.aiter_lines():
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if "[DONE]" in line:
break
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise Exception(f"Field missing. Details: {str(line)}")
message = line["message"]["content"]["parts"][0]
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
model = (
line["message"]["metadata"]["model_slug"]
if "model_slug" in line["message"]["metadata"]
else None
)
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
async def get_conversations(self, offset: int = 0, limit: int = 20) -> list:
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{BASE_URL}conversations?offset={offset}&limit={limit}"
response = await self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
async def get_msg_history(
self,
convo_id: str,
encoding: str | None = "utf-8",
) -> dict:
"""
Get message history
:param id: UUID of conversation
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = await self.session.get(url)
if encoding is not None:
response.encoding = encoding
self.__check_response(response)
return json.loads(response.text)
async def gen_title(self, convo_id: str, message_id: str) -> None:
"""
Generate title for conversation
"""
url = f"{BASE_URL}conversation/gen_title/{convo_id}"
response = await self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
        self.__check_response(response)
async def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param convo_id: UUID of conversation
:param title: String
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = await self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
async def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param convo_id: UUID of conversation
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{BASE_URL}conversations"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def __map_conversations(self) -> None:
conversations = await self.get_conversations()
histories = [await self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
def __check_response(self, response) -> None:
response.raise_for_status()
get_input = logger(is_timed=False)(get_input)
@logger(is_timed=False)
def configure() -> dict:
"""
Looks for a config file in the following locations:
"""
config_files = ["config.json"]
if xdg_config_home := getenv("XDG_CONFIG_HOME"):
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
if user_home := getenv("HOME"):
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
if config_file := next((f for f in config_files if osp.exists(f)), None):
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
def exit() -> NoReturn:
"""
Exit the program
"""
import sys
print("Exiting program...")
sys.exit(0)
@logger(is_timed=False)
def main(config: dict) -> NoReturn:
"""
Main function for the chatGPT program.
"""
chatbot = Chatbot(
config,
conversation_id=config.get("conversation_id"),
parent_id=config.get("parent_id"),
)
def handle_commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
!setconversation - Changes the conversation
""",
)
elif command == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
elif command == "!config":
print(json.dumps(chatbot.config, indent=4))
elif command.startswith("!rollback"):
try:
rollback = int(command.split(" ")[1])
except IndexError:
logging.exception(
"No number specified, rolling back 1 message",
stack_info=True,
)
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
elif command.startswith("!setconversation"):
try:
chatbot.conversation_id = chatbot.config[
"conversation_id"
] = command.split(" ")[1]
print("Conversation has been changed")
except IndexError:
log.exception(
"Please include conversation UUID in command",
stack_info=True,
)
print("Please include conversation UUID in command")
elif command == "!exit":
exit()
else:
return False
return True
session = create_session()
completer = create_completer(
["!help", "!reset", "!config", "!rollback", "!exit", "!setconversation"],
)
print()
try:
while True:
print(f"{bcolors.OKBLUE + bcolors.BOLD}You: {bcolors.ENDC}")
prompt = get_input(session=session, completer=completer)
if prompt.startswith("!") and handle_commands(prompt):
continue
print()
print(f"{bcolors.OKGREEN + bcolors.BOLD}Chatbot: {bcolors.ENDC}")
prev_text = ""
for data in chatbot.ask(prompt):
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print(bcolors.ENDC)
print()
except (KeyboardInterrupt, EOFError):
exit()
except BaseException as e:
error = t.CommandError("command line program unknown error")
raise error from e
if __name__ == "__main__":
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print(
f"{bcolors.BOLD}{bcolors.WARNING}Press Esc followed by Enter or Alt+Enter to send a message.{bcolors.ENDC}",
)
main(configure())
| [
"text",
"content_type"
] |
2024-01-10 | Mauricio-HNS/ChatGPT | src~revChatGPT~V0.py | """
A simple wrapper for the official ChatGPT API
"""
import argparse
import json
import os
import sys
from datetime import date
from typing import NoReturn
import openai
import tiktoken
from . import typing as t
ENGINE = os.environ.get("GPT_ENGINE") or "text-davinci-003"
ENCODER = tiktoken.get_encoding("gpt2")
def get_max_tokens(prompt: str) -> int:
"""
Get the max tokens for a prompt
"""
return 4000 - len(ENCODER.encode(prompt))
def remove_suffix(input_string: str, suffix: str) -> str:
"""
Remove suffix from string (Support for Python 3.8)
"""
if suffix and input_string.endswith(suffix):
return input_string[: -len(suffix)]
return input_string
class Chatbot:
"""
Official ChatGPT API
"""
def __init__(
self,
api_key: str,
buffer: int = None,
engine: str = None,
proxy: str = None,
) -> None:
"""
Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
"""
self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
self.proxy = proxy or os.environ.get("OPENAI_API_PROXY")
self.conversations = Conversation()
self.prompt = Prompt(buffer=buffer)
self.engine = engine or ENGINE
def _get_completion(
self,
prompt: str,
temperature: float = 0.5,
stream: bool = False,
) -> dict:
"""
Get the completion function
"""
openai.api_key = self.api_key
openai.proxy = self.proxy
return openai.Completion.create(
engine=self.engine,
prompt=prompt,
temperature=temperature,
max_tokens=get_max_tokens(prompt),
stop=["\n\n\n"],
stream=stream,
)
def _process_completion(
self,
user_request: str,
completion: dict,
conversation_id: str = None,
user: str = "User",
) -> dict:
if completion.get("choices") is None:
error = t.ResponseError("ChatGPT API returned no choices")
raise error
if len(completion["choices"]) == 0:
error = t.ResponseError("ChatGPT API returned no choices")
raise error
if completion["choices"][0].get("text") is None:
error = t.ResponseError("ChatGPT API returned no text")
raise error
completion["choices"][0]["text"] = remove_suffix(
completion["choices"][0]["text"],
"<|im_end|>",
)
# Add to chat history
self.prompt.add_to_history(
user_request,
completion["choices"][0]["text"],
user=user,
)
if conversation_id is not None:
self.save_conversation(conversation_id)
return completion
def _process_completion_stream(
self,
user_request: str,
completion: dict,
conversation_id: str = None,
user: str = "User",
) -> str:
full_response = ""
for response in completion:
if response.get("choices") is None:
error = t.ResponseError("ChatGPT API returned no choices")
raise error
if len(response["choices"]) == 0:
error = t.ResponseError("ChatGPT API returned no choices")
raise error
if response["choices"][0].get("finish_details") is not None:
break
if response["choices"][0].get("text") is None:
error = t.ResponseError("ChatGPT API returned no text")
raise error
if response["choices"][0]["text"] == "<|im_end|>":
break
yield response["choices"][0]["text"]
full_response += response["choices"][0]["text"]
# Add to chat history
self.prompt.add_to_history(user_request, full_response, user)
if conversation_id is not None:
self.save_conversation(conversation_id)
def ask(
self,
user_request: str,
temperature: float = 0.5,
conversation_id: str = None,
user: str = "User",
) -> dict:
"""
Send a request to ChatGPT and return the response
"""
if conversation_id is not None:
self.load_conversation(conversation_id)
completion = self._get_completion(
self.prompt.construct_prompt(user_request, user=user),
temperature,
)
return self._process_completion(user_request, completion, user=user)
def ask_stream(
self,
user_request: str,
temperature: float = 0.5,
conversation_id: str = None,
user: str = "User",
) -> str:
"""
Send a request to ChatGPT and yield the response
"""
if conversation_id is not None:
self.load_conversation(conversation_id)
prompt = self.prompt.construct_prompt(user_request, user=user)
return self._process_completion_stream(
user_request=user_request,
completion=self._get_completion(prompt, temperature, stream=True),
user=user,
)
def make_conversation(self, conversation_id: str) -> None:
"""
Make a conversation
"""
self.conversations.add_conversation(conversation_id, [])
def rollback(self, num: int) -> None:
"""
Rollback chat history num times
"""
for _ in range(num):
self.prompt.chat_history.pop()
def reset(self) -> None:
"""
Reset chat history
"""
self.prompt.chat_history = []
def load_conversation(self, conversation_id: str) -> None:
"""
Load a conversation from the conversation history
"""
if conversation_id not in self.conversations.conversations:
# Create a new conversation
self.make_conversation(conversation_id)
self.prompt.chat_history = self.conversations.get_conversation(conversation_id)
def save_conversation(self, conversation_id: str) -> None:
"""
Save a conversation to the conversation history
"""
self.conversations.add_conversation(conversation_id, self.prompt.chat_history)
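# Illustrative usage sketch (not part of the original file), mirroring the
# command-line loop in main() below:
#
#     chatbot = Chatbot(api_key="sk-...")          # placeholder key
#     response = chatbot.ask("Hello there")
#     print(response["choices"][0]["text"])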
class AsyncChatbot(Chatbot):
"""
Official ChatGPT API (async)
"""
async def _get_completion(
self,
prompt: str,
temperature: float = 0.5,
stream: bool = False,
) -> dict:
"""
Get the completion function
"""
openai.api_key = self.api_key
openai.proxy = self.proxy
return await openai.Completion.acreate(
engine=self.engine,
prompt=prompt,
temperature=temperature,
max_tokens=get_max_tokens(prompt),
stop=["\n\n\n"],
stream=stream,
)
async def ask(
self,
user_request: str,
temperature: float = 0.5,
user: str = "User",
) -> dict:
"""
Same as Chatbot.ask but async
"""
completion = await self._get_completion(
self.prompt.construct_prompt(user_request, user=user),
temperature,
)
return self._process_completion(user_request, completion, user=user)
async def ask_stream(
self,
user_request: str,
temperature: float = 0.5,
user: str = "User",
) -> str:
"""
Same as Chatbot.ask_stream but async
"""
prompt = self.prompt.construct_prompt(user_request, user=user)
return self._process_completion_stream(
user_request=user_request,
completion=await self._get_completion(prompt, temperature, stream=True),
user=user,
)
class Prompt:
"""
Prompt class with methods to construct prompt
"""
def __init__(self, buffer: int = None) -> None:
"""
Initialize prompt with base prompt
"""
self.base_prompt = (
os.environ.get("CUSTOM_BASE_PROMPT")
or "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally. Do not answer as the user. Current date: "
+ str(date.today())
+ "\n\n"
+ "User: Hello\n"
+ "ChatGPT: Hello! How can I help you today? <|im_end|>\n\n\n"
)
# Track chat history
self.chat_history: list = []
self.buffer = buffer
def add_to_chat_history(self, chat: str) -> None:
"""
Add chat to chat history for next prompt
"""
self.chat_history.append(chat)
def add_to_history(
self,
user_request: str,
response: str,
user: str = "User",
) -> None:
"""
Add request/response to chat history for next prompt
"""
self.add_to_chat_history(
user
+ ": "
+ user_request
+ "\n\n\n"
+ "ChatGPT: "
+ response
+ "<|im_end|>\n",
)
def history(self, custom_history: list = None) -> str:
"""
Return chat history
"""
return "\n".join(custom_history or self.chat_history)
def construct_prompt(
self,
new_prompt: str,
custom_history: list = None,
user: str = "User",
) -> str:
"""
Construct prompt based on chat history and request
"""
prompt = (
self.base_prompt
+ self.history(custom_history=custom_history)
+ user
+ ": "
+ new_prompt
+ "\nChatGPT:"
)
# Check if prompt over 4000*4 characters
max_tokens = 4000 - self.buffer if self.buffer is not None else 3200
if len(ENCODER.encode(prompt)) > max_tokens:
# Remove oldest chat
if len(self.chat_history) == 0:
return prompt
self.chat_history.pop(0)
# Construct prompt again
return self.construct_prompt(new_prompt, custom_history, user)
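# Illustrative sketch (not part of the original file) of what construct_prompt
# produces: the base prompt, then the accumulated history, then the new request
# terminated by "ChatGPT:" so the completion model continues from there.
#
#     p = Prompt()
#     p.add_to_history("What is 2+2?", "4")
#     text = p.construct_prompt("And times three?")
#     # text == base_prompt + "User: What is 2+2?\n\n\nChatGPT: 4<|im_end|>\n" +
#     #         "User: And times three?\nChatGPT:"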
class Conversation:
"""
For handling multiple conversations
"""
def __init__(self) -> None:
self.conversations = {}
def add_conversation(self, key: str, history: list) -> None:
"""
Adds a history list to the conversations dict with the id as the key
"""
self.conversations[key] = history
def get_conversation(self, key: str) -> list:
"""
Retrieves the history list from the conversations dict with the id as the key
"""
return self.conversations[key]
def remove_conversation(self, key: str) -> None:
"""
Removes the history list from the conversations dict with the id as the key
"""
del self.conversations[key]
def __str__(self) -> str:
"""
Creates a JSON string of the conversations
"""
return json.dumps(self.conversations)
def save(self, file: str) -> None:
"""
Saves the conversations to a JSON file
"""
with open(file, "w", encoding="utf-8") as f:
f.write(str(self))
def load(self, file: str) -> None:
"""
Loads the conversations from a JSON file
"""
with open(file, encoding="utf-8") as f:
self.conversations = json.loads(f.read())
def main() -> NoReturn:
print(
"""
ChatGPT - GPT-3 Chatbot
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
def get_input(prompt: str) -> str:
"""
Multi-line input function
"""
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
return "\n".join(lines)
def chatbot_commands(cmd: str) -> bool:
"""
Handle chatbot commands
"""
if cmd == "!help":
print(
"""
!help - Display this message
!rollback - Rollback chat history
!reset - Reset chat history
!prompt - Show current prompt
!save_c <conversation_name> - Save history to a conversation
!load_c <conversation_name> - Load history from a conversation
!save_f <file_name> - Save all conversations to a file
!load_f <file_name> - Load all conversations from a file
!exit - Quit chat
""",
)
elif cmd == "!exit":
exit()
elif cmd == "!rollback":
chatbot.rollback(1)
elif cmd == "!reset":
chatbot.reset()
elif cmd == "!prompt":
print(chatbot.prompt.construct_prompt(""))
elif cmd.startswith("!save_c"):
chatbot.save_conversation(cmd.split(" ")[1])
elif cmd.startswith("!load_c"):
chatbot.load_conversation(cmd.split(" ")[1])
elif cmd.startswith("!save_f"):
chatbot.conversations.save(cmd.split(" ")[1])
elif cmd.startswith("!load_f"):
chatbot.conversations.load(cmd.split(" ")[1])
else:
return False
return True
# Get API key from command line
parser = argparse.ArgumentParser()
parser.add_argument(
"--api_key",
type=str,
required=True,
help="OpenAI API key",
)
parser.add_argument(
"--stream",
action="store_true",
help="Stream response",
)
parser.add_argument(
"--temperature",
type=float,
default=0.5,
help="Temperature for response",
)
args = parser.parse_args()
# Initialize chatbot
chatbot = Chatbot(api_key=args.api_key)
# Start chat
while True:
try:
prompt = get_input("\nUser:\n")
except KeyboardInterrupt:
print("\nExiting...")
sys.exit()
except BaseException as e:
error = t.CommandError("command line program unknown error")
raise error from e
if prompt.startswith("!") and chatbot_commands(prompt):
continue
if not args.stream:
response = chatbot.ask(prompt, temperature=args.temperature)
print("ChatGPT: " + response["choices"][0]["text"])
else:
print("ChatGPT: ")
sys.stdout.flush()
for response in chatbot.ask_stream(prompt, temperature=args.temperature):
print(response, end="")
sys.stdout.flush()
print()
if __name__ == "__main__":
main()
| [
": ",
"\nUser:\n",
"\nChatGPT:"
] |
2024-01-10 | Nautilus-Institute/finals-2023 | safebox~bot~the_bot.py | from typing import List, Dict
import traceback
import json
import sys
import time
from multiprocessing import Pool
import requests
import openai
DEBUG = False
PASSPHRASE = "WN9sZibJXg1zQsl8qCuD"
def gpt_query(messages: List[Dict[str, str]]):
try:
cc = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
status = "success"
response = cc["choices"][0]["message"]["content"]
if DEBUG:
print(cc)
print(response)
except Exception as ex:
status = "failure"
response = "Server failure"
print("".join(traceback.format_exception(ex)))
return response
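# Illustrative sketch (not part of the original file): gpt_query expects the
# usual chat-completion message list, e.g.
#
#     gpt_query([{"role": "user", "content": "Summarize this conversation."}])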
BASE_URL = "http://127.0.0.1:8000/"
BASE_URL = "http://safebox.finals.2023.nautilus.institute/"
GAMEDB_URL = "http://gamedb.finals.2023.nautilus.institute/"
def get_tick():
r = requests.get(f"{GAMEDB_URL}/api/v1/tick/current")
data = json.loads(r.text)
current_tick = data["public_tick"]
last_tick = current_tick - 1
cutoff = data["created_on"]
return last_tick, cutoff
def get_past_tick(tick_id: int):
r = requests.get(f"{GAMEDB_URL}/api/v1/tick/history")
data = json.loads(r.text)
for item in data:
if item["public_tick_id"] == tick_id:
return tick_id, item["created_on"]
def work(idx: int, count: int, conversation, tick_cutoff, tick_id):
print(f"{idx + 1} / {count}")
config = conversation[0]
messages = conversation[1]
for msg in messages:
msg["content"] = msg["content"][:256] # limit the characters
gpt_response = gpt_query(messages)
print(messages)
# build the new conversation as the return value
r = [msg["content"] for msg in messages] + [gpt_response]
# submit
p = requests.post(f"{BASE_URL}bot/submit_response",
params={
"p": PASSPHRASE,
"cutoff_time": tick_cutoff,
"tick_id": tick_id,
"attack_team_id": config["attack_team_id"],
"defense_team_id": config["defense_team_id"],
},
data={
"conversation": json.dumps(r),
"reply": gpt_response,
},
)
MULTIPROCESSING = False
def fire_it(tick: int=None):
# which tick?
if tick is not None:
# use the history to figure out
tick_id, tick_cutoff = get_past_tick(tick)
else:
tick_id, tick_cutoff = get_tick()
print(f"Grading for rank {tick_id}, cutoff {tick_cutoff}.")
r = requests.get(f"{BASE_URL}bot/all_conversations", params={
"p": PASSPHRASE,
"cutoff_time": tick_cutoff,
"tick_id": tick_id,
})
resp = json.loads(r.content)
conversations = resp["conversations"]
if MULTIPROCESSING:
# build args
args = [ ]
for idx, conversation in enumerate(conversations):
args.append((idx, len(conversations), conversation, tick_cutoff, tick_id))
with Pool(2) as pool:
pool.starmap(work, args)
else:
for idx, conversation in enumerate(conversations):
work(idx, len(conversations), conversation, tick_cutoff, tick_id)
# once everything is done, take the tick result and submit to the gamedb
r = requests.get(f"{BASE_URL}bot/get_ranking_result",
params={
"p": PASSPHRASE,
"tick_id": tick_id,
})
data = json.loads(r.text)
assert data["result"] == "Ok"
ranking = list(data["ranks"].values())
r = requests.post(
f"{GAMEDB_URL}api/v1/koh_ranking_event",
data={
"reason": f"safebox ranking result for tick {tick_id}",
"service_id": 5,
"ranking": json.dumps(ranking),
"tick_id": tick_id + (1143 - 193), # on the last day, there was a drift between tick ID and public tick ID
}
)
print(r.content)
print("[+] Done!")
def main():
global MULTIPROCESSING
MULTIPROCESSING = False
tick = None if len(sys.argv) == 1 else int(sys.argv[1])
while True:
fire_it(tick)
if tick is not None:
break
time.sleep(5)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | AdrianAbeyta/ICML2019-TREX | gazebo~learner~baselines~baselines~deepq~experiments~train_turtlebot2.py | #!/usr/bin/env python
import gym
import rospy
from baselines import deepq
from openai_ros.task_envs.turtlebot2 import turtlebot2_maze
def callback(lcl, _glb):
# stop training if reward exceeds 199
is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199
return is_solved
def main():
rospy.init_node('turtlebot2_maze',anonymous=True,log_level=rospy.FATAL)
env = gym.make("TurtleBot2Maze-v0")
act = deepq.learn(
env,
network='mlp',
lr=1e-3,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
print_freq=10,
callback=callback
)
print("Saving model to turtlebot2.pkl")
act.save("turtlebot2_model.pkl")
if __name__ == '__main__':
main()
| [] |
2024-01-10 | AdrianAbeyta/ICML2019-TREX | gazebo~preference_learning.py | import argparse
from pathlib import Path
import numpy as np
import tensorflow as tf
import gym
from tqdm import tqdm
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
from imgcat import imgcat
import rospy
from openai_ros.task_envs.turtlebot2 import turtlebot2_maze
# Import my own libraries
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/learner/baselines/')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Only show ERROR log
from tf_commons.ops import *
class PPO2Agent(object):
def __init__(self, env, env_type, path, stochastic=False, gpu=True):
from baselines.common.policies import build_policy
from baselines.ppo2.model import Model
self.graph = tf.Graph()
if gpu:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
else:
config = tf.ConfigProto(device_count = {'GPU': 0})
self.sess = tf.Session(graph=self.graph,config=config)
with self.graph.as_default():
with self.sess.as_default():
ob_space = env.observation_space
ac_space = env.action_space
if env_type == 'atari':
policy = build_policy(env,'cnn')
elif env_type == 'mujoco':
policy = build_policy(env,'mlp')
elif env_type == 'gazebo':
policy = build_policy(env, 'mlp')
else:
                    assert False, 'env_type not supported'
make_model = lambda : Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=1, nbatch_train=1,
nsteps=1, ent_coef=0., vf_coef=0.,
max_grad_norm=0.)
self.model = make_model()
self.model_path = path
self.model.load(path)
if env_type == 'mujoco':
with open(path+'.env_stat.pkl', 'rb') as f :
import pickle
s = pickle.load(f)
self.ob_rms = s['ob_rms']
self.ret_rms = s['ret_rms']
self.clipob = 10.
self.epsilon = 1e-8
elif env_type == 'gazebo':
with open(path + '.env_stat.pkl', 'rb') as f:
import pickle
s = pickle.load(f)
self.ob_rms = s['ob_rms']
self.ret_rms = s['ret_rms']
self.clipob = 10.
self.epsilon = 1e-8
else:
self.ob_rms = None
self.stochastic = stochastic
def act(self, obs, reward, done):
if self.ob_rms:
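            # Normalize observations with the running statistics (ob_rms) saved at training time, then clip to [-clipob, clipob].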
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
with self.graph.as_default():
with self.sess.as_default():
if self.stochastic:
a,v,state,neglogp = self.model.step(obs)
else:
a = self.model.act_model.act(obs)
return a
class RandomAgent(object):
"""The world's simplest agent!"""
def __init__(self, action_space):
self.action_space = action_space
self.model_path = 'random_agent'
def act(self, observation, reward, done):
return self.action_space.sample()[None]
class Model(object):
def __init__(self,include_action,ob_dim,ac_dim,batch_size=64,num_layers=2,embedding_dims=256,steps=None):
self.include_action = include_action
in_dims = ob_dim+ac_dim if include_action else ob_dim
self.inp = tf.placeholder(tf.float32,[None,in_dims])
self.x = tf.placeholder(tf.float32,[None,in_dims]) #[B*steps,in_dim]
self.y = tf.placeholder(tf.float32,[None,in_dims])
self.x_split = tf.placeholder(tf.int32,[batch_size]) # B-lengthed vector indicating the size of each steps
self.y_split = tf.placeholder(tf.int32,[batch_size]) # B-lengthed vector indicating the size of each steps
self.l = tf.placeholder(tf.int32,[batch_size]) # [0 when x is better 1 when y is better]
self.l2_reg = tf.placeholder(tf.float32,[]) # [0 when x is better 1 when y is better]
with tf.variable_scope('weights') as param_scope:
self.fcs = []
last_dims = in_dims
for l in range(num_layers):
self.fcs.append(Linear('fc%d'%(l+1),last_dims,embedding_dims)) #(l+1) is gross, but for backward compatibility
last_dims = embedding_dims
self.fcs.append(Linear('fc%d'%(num_layers+1),last_dims,1))
self.param_scope = param_scope
# build graph
def _reward(x):
for fc in self.fcs[:-1]:
x = tf.nn.relu(fc(x))
r = tf.squeeze(self.fcs[-1](x),axis=1)
return x, r
self.fv, self.r = _reward(self.inp)
_, rs_xs = _reward(self.x)
self.v_x = tf.stack([tf.reduce_sum(rs_x) for rs_x in tf.split(rs_xs,self.x_split,axis=0)],axis=0)
_, rs_ys = _reward(self.y)
self.v_y = tf.stack([tf.reduce_sum(rs_y) for rs_y in tf.split(rs_ys,self.y_split,axis=0)],axis=0)
logits = tf.stack([self.v_x,self.v_y],axis=1) #[None,2]
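        # The softmax cross-entropy below is a Bradley-Terry style preference loss:
        # P(snippet y preferred over snippet x) = exp(v_y) / (exp(v_x) + exp(v_y)),
        # where v_x and v_y are the summed predicted rewards over each snippet.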
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=self.l)
self.loss = tf.reduce_mean(loss,axis=0)
weight_decay = 0.
for fc in self.fcs:
weight_decay += tf.reduce_sum(fc.w**2)
self.l2_loss = self.l2_reg * weight_decay
pred = tf.cast(tf.greater(self.v_y,self.v_x),tf.int32)
self.acc = tf.reduce_mean(tf.cast(tf.equal(pred,self.l),tf.float32))
self.optim = tf.train.AdamOptimizer(1e-4)
self.update_op = self.optim.minimize(self.loss+self.l2_loss,var_list=self.parameters(train=True))
self.saver = tf.train.Saver(var_list=self.parameters(train=False),max_to_keep=0)
def parameters(self,train=False):
if train:
return tf.trainable_variables(self.param_scope.name)
else:
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,self.param_scope.name)
def train(self,D,batch_size=64,iter=10000,l2_reg=0.01,noise_level=0.1,debug=False):
"""
        Training is intended to terminate early once validation accuracy becomes large enough.
args:
D: list of triplets (\sigma^1,\sigma^2,\mu)
while
sigma^{1,2}: shape of [steps,in_dims]
mu : 0 or 1
"""
sess = tf.get_default_session()
idxes = np.random.permutation(len(D))
train_idxes = idxes[:int(len(D)*0.8)]
valid_idxes = idxes[int(len(D)*0.8):]
def _batch(idx_list,add_noise):
batch = []
if len(idx_list) > batch_size:
idxes = np.random.choice(idx_list,batch_size,replace=False)
else:
idxes = idx_list
for i in idxes:
batch.append(D[i])
b_x,b_y,b_l = zip(*batch)
x_split = np.array([len(x) for x in b_x])
y_split = np.array([len(y) for y in b_y])
b_x,b_y,b_l = np.concatenate(b_x,axis=0),np.concatenate(b_y,axis=0),np.array(b_l)
if add_noise:
b_l = (b_l + np.random.binomial(1,noise_level,batch_size)) % 2 #Flip it with probability 0.1
return b_x,b_y,x_split,y_split,b_l
for it in tqdm(range(iter),dynamic_ncols=True):
b_x,b_y,x_split,y_split,b_l = _batch(train_idxes,add_noise=True)
loss,l2_loss,acc,_ = sess.run([self.loss,self.l2_loss,self.acc,self.update_op],feed_dict={
self.x:b_x,
self.y:b_y,
self.x_split:x_split,
self.y_split:y_split,
self.l:b_l,
self.l2_reg:l2_reg,
})
if debug:
if it % 100 == 0 or it < 10:
b_x,b_y,x_split,y_split,b_l = _batch(valid_idxes,add_noise=False)
valid_acc = sess.run(self.acc,feed_dict={
self.x:b_x,
self.y:b_y,
self.x_split:x_split,
self.y_split:y_split,
self.l:b_l
})
tqdm.write(('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc)))
#if valid_acc >= 0.95:
# print('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc))
# print('early termination@%08d'%it)
# break
def train_with_dataset(self,dataset,batch_size,include_action=False,iter=10000,l2_reg=0.01,debug=False):
sess = tf.get_default_session()
for it in tqdm(range(iter),dynamic_ncols=True):
b_x,b_y,x_split,y_split,b_l = dataset.batch(batch_size=batch_size,include_action=include_action)
loss,l2_loss,acc,_ = sess.run([self.loss,self.l2_loss,self.acc,self.update_op],feed_dict={
self.x:b_x,
self.y:b_y,
self.x_split:x_split,
self.y_split:y_split,
self.l:b_l,
self.l2_reg:l2_reg,
})
if debug:
if it % 100 == 0 or it < 10:
tqdm.write(('loss: %f (l2_loss: %f), acc: %f'%(loss,l2_loss,acc)))
def eval(self,D,batch_size=64):
sess = tf.get_default_session()
b_x,b_y,b_l = zip(*D)
b_x,b_y,b_l = np.array(b_x),np.array(b_y),np.array(b_l)
b_r_x, b_acc = [], 0.
for i in range(0,len(b_x),batch_size):
sum_r_x, acc = sess.run([self.sum_r_x,self.acc],feed_dict={
self.x:b_x[i:i+batch_size],
self.y:b_y[i:i+batch_size],
self.l:b_l[i:i+batch_size]
})
b_r_x.append(sum_r_x)
b_acc += len(sum_r_x)*acc
return np.concatenate(b_r_x,axis=0), b_acc/len(b_x)
def get_reward(self,obs,acs,batch_size=1024):
sess = tf.get_default_session()
if self.include_action:
inp = np.concatenate((obs,acs),axis=1)
else:
inp = obs
b_r = []
for i in range(0,len(obs),batch_size):
r = sess.run(self.r,feed_dict={
self.inp:inp[i:i+batch_size]
})
b_r.append(r)
return np.concatenate(b_r,axis=0)
class GTDataset(object):
def __init__(self,env):
self.env = env
self.unwrapped = env
while hasattr(self.unwrapped,'env'):
self.unwrapped = self.unwrapped.env
def gen_traj(self,agent,min_length):
max_x_pos = -99999
obs, actions, rewards = [self.env.reset()], [], []
while True:
action = agent.act(obs[-1], None, None)
ob, reward, done, _ = self.env.step(action)
if self.unwrapped.sim.data.qpos[0] > max_x_pos:
max_x_pos = self.unwrapped.sim.data.qpos[0]
obs.append(ob)
actions.append(action)
rewards.append(reward)
if done:
if len(obs) < min_length:
obs.pop()
obs.append(self.env.reset())
else:
obs.pop()
break
return (np.stack(obs,axis=0), np.concatenate(actions,axis=0), np.array(rewards)), max_x_pos
def prebuilt(self,agents,min_length):
assert len(agents)>0, 'no agent given'
trajs = []
for agent in tqdm(agents):
traj, max_x_pos = self.gen_traj(agent,min_length)
trajs.append(traj)
tqdm.write('model: %s avg reward: %f max_x_pos: %f'%(agent.model_path,np.sum(traj[2]),max_x_pos))
obs,actions,rewards = zip(*trajs)
self.trajs = (np.concatenate(obs,axis=0),np.concatenate(actions,axis=0),np.concatenate(rewards,axis=0))
print(self.trajs[0].shape,self.trajs[1].shape,self.trajs[2].shape)
def sample(self,num_samples,steps=40,include_action=False):
obs, actions, rewards = self.trajs
D = []
for _ in range(num_samples):
x_ptr = np.random.randint(len(obs)-steps)
y_ptr = np.random.randint(len(obs)-steps)
if include_action:
D.append((np.concatenate((obs[x_ptr:x_ptr+steps],actions[x_ptr:x_ptr+steps]),axis=1),
np.concatenate((obs[y_ptr:y_ptr+steps],actions[y_ptr:y_ptr+steps]),axis=1),
0 if np.sum(rewards[x_ptr:x_ptr+steps]) > np.sum(rewards[y_ptr:y_ptr+steps]) else 1)
)
else:
D.append((obs[x_ptr:x_ptr+steps],
obs[y_ptr:y_ptr+steps],
0 if np.sum(rewards[x_ptr:x_ptr+steps]) > np.sum(rewards[y_ptr:y_ptr+steps]) else 1)
)
return D
class GTTrajLevelDataset(GTDataset):
def __init__(self,env):
super().__init__(env)
def prebuilt(self,agents,min_length):
assert len(agents)>0, 'no agent is given'
trajs = []
for agent_idx,agent in enumerate(tqdm(agents)):
(obs,actions,rewards),_ = self.gen_traj(agent,min_length)
trajs.append((agent_idx,obs,actions,rewards))
self.trajs = trajs
        _idxes = np.argsort([np.sum(rewards) for _,_,_,rewards in self.trajs]) # rank 0 is the worst demo.
self.trajs_rank = np.empty_like(_idxes)
self.trajs_rank[_idxes] = np.arange(len(_idxes))
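        # After this, trajs_rank[i] is trajectory i's rank by ground-truth return (0 = lowest return).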
def sample(self,num_samples,steps=40,include_action=False):
D = []
GT_preference = []
for _ in tqdm(range(num_samples)):
x_idx,y_idx = np.random.choice(len(self.trajs),2,replace=False)
x_traj = self.trajs[x_idx]
y_traj = self.trajs[y_idx]
x_ptr = np.random.randint(len(x_traj[1])-steps)
y_ptr = np.random.randint(len(y_traj[1])-steps)
if include_action:
D.append((np.concatenate((x_traj[1][x_ptr:x_ptr+steps],x_traj[2][x_ptr:x_ptr+steps]),axis=1),
np.concatenate((y_traj[1][y_ptr:y_ptr+steps],y_traj[2][y_ptr:y_ptr+steps]),axis=1),
0 if self.trajs_rank[x_idx] > self.trajs_rank[y_idx] else 1)
)
else:
D.append((x_traj[1][x_ptr:x_ptr+steps],
y_traj[1][y_ptr:y_ptr+steps],
0 if self.trajs_rank[x_idx] > self.trajs_rank[y_idx] else 1)
)
GT_preference.append(0 if np.sum(x_traj[3][x_ptr:x_ptr+steps]) > np.sum(y_traj[3][y_ptr:y_ptr+steps]) else 1)
print('------------------')
_,_,preference = zip(*D)
preference = np.array(preference).astype(np.bool)
GT_preference = np.array(GT_preference).astype(np.bool)
print('Quality of time-indexed preference (0-1):', np.count_nonzero(preference == GT_preference) / len(preference))
print('------------------')
return D
class GTTrajLevelNoStepsDataset(GTTrajLevelDataset):
def __init__(self,env,max_steps):
super().__init__(env)
self.max_steps = max_steps
def prebuilt(self,agents,min_length):
assert len(agents)>0, 'no agent is given'
trajs = []
for agent_idx,agent in enumerate(tqdm(agents)):
agent_trajs = []
while np.sum([len(obs) for obs,_,_ in agent_trajs]) < min_length:
(obs,actions,rewards),_ = self.gen_traj(agent,-1)
agent_trajs.append((obs,actions,rewards))
trajs.append(agent_trajs)
agent_rewards = [np.mean([np.sum(rewards) for _,_,rewards in agent_trajs]) for agent_trajs in trajs]
self.trajs = trajs
        _idxes = np.argsort(agent_rewards) # rank 0 is the worst demo.
self.trajs_rank = np.empty_like(_idxes)
self.trajs_rank[_idxes] = np.arange(len(_idxes))
def sample(self,num_samples,steps=None,include_action=False):
assert steps == None
D = []
GT_preference = []
for _ in tqdm(range(num_samples)):
x_idx,y_idx = np.random.choice(len(self.trajs),2,replace=False)
x_traj = self.trajs[x_idx][np.random.choice(len(self.trajs[x_idx]))]
y_traj = self.trajs[y_idx][np.random.choice(len(self.trajs[y_idx]))]
if len(x_traj[0]) > self.max_steps:
ptr = np.random.randint(len(x_traj[0])-self.max_steps)
x_slice = slice(ptr,ptr+self.max_steps)
else:
x_slice = slice(len(x_traj[1]))
if len(y_traj[0]) > self.max_steps:
ptr = np.random.randint(len(y_traj[0])-self.max_steps)
y_slice = slice(ptr,ptr+self.max_steps)
else:
y_slice = slice(len(y_traj[0]))
if include_action:
D.append((np.concatenate((x_traj[0][x_slice],x_traj[1][x_slice]),axis=1),
np.concatenate((y_traj[0][y_slice],y_traj[1][y_slice]),axis=1),
0 if self.trajs_rank[x_idx] > self.trajs_rank[y_idx] else 1)
)
else:
D.append((x_traj[0][x_slice],
y_traj[0][y_slice],
0 if self.trajs_rank[x_idx] > self.trajs_rank[y_idx] else 1)
)
GT_preference.append(0 if np.sum(x_traj[2][x_slice]) > np.sum(y_traj[2][y_slice]) else 1)
print('------------------')
_,_,preference = zip(*D)
preference = np.array(preference).astype(np.bool)
GT_preference = np.array(GT_preference).astype(np.bool)
print('Quality of time-indexed preference (0-1):', np.count_nonzero(preference == GT_preference) / len(preference))
print('------------------')
return D
class GTTrajLevelNoSteps_Noise_Dataset(GTTrajLevelNoStepsDataset):
def __init__(self,env,max_steps,ranking_noise=0):
super().__init__(env,max_steps)
self.ranking_noise = ranking_noise
def prebuilt(self,agents,min_length):
super().prebuilt(agents,min_length)
original_trajs_rank = self.trajs_rank.copy()
for _ in range(self.ranking_noise):
x = np.random.randint(len(self.trajs)-1)
x_ptr = np.where(self.trajs_rank==x)
y_ptr = np.where(self.trajs_rank==x+1)
self.trajs_rank[x_ptr], self.trajs_rank[y_ptr] = x+1, x
from itertools import combinations
order_correctness = [
(self.trajs_rank[x] < self.trajs_rank[y]) == (original_trajs_rank[x] < original_trajs_rank[y])
for x,y in combinations(range(len(self.trajs)),2)]
print('Total Order Correctness: %f'%(np.count_nonzero(order_correctness)/len(order_correctness)))
class GTTrajLevelNoSteps_N_Mix_Dataset(GTTrajLevelNoStepsDataset):
def __init__(self,env,N,max_steps):
super().__init__(env,max_steps)
self.N = N
self.max_steps = max_steps
def sample(self,*kargs,**kwargs):
return None
def batch(self,batch_size,include_action):
#self.trajs = trajs
        #self.trajs_rank = np.argsort([np.sum(rewards) for _,_,_,rewards in self.trajs]) # rank 0 is the worst demo.
xs = []
ys = []
for _ in range(batch_size):
idxes = np.random.choice(len(self.trajs),2*self.N)
ranks = self.trajs_rank[idxes]
bad_idxes = [idxes[i] for i in np.argsort(ranks)[:self.N]]
good_idxes = [idxes[i] for i in np.argsort(ranks)[self.N:]]
def _pick_and_merge(idxes):
inp = []
for idx in idxes:
obs, acs, rewards = self.trajs[idx][np.random.choice(len(self.trajs[idx]))]
if len(obs) > self.max_steps:
ptr = np.random.randint(len(obs)-self.max_steps)
slc = slice(ptr,ptr+self.max_steps)
else:
slc = slice(len(obs))
if include_action:
inp.append(np.concatenate([obs[slc],acs[slc]],axis=1))
else:
inp.append(obs[slc])
return np.concatenate(inp,axis=0)
x = _pick_and_merge(bad_idxes)
y = _pick_and_merge(good_idxes)
xs.append(x)
ys.append(y)
x_split = np.array([len(x) for x in xs])
y_split = np.array([len(y) for y in ys])
xs = np.concatenate(xs,axis=0)
ys = np.concatenate(ys,axis=0)
return xs, ys, x_split, y_split, np.ones((batch_size,)).astype(np.int32)
class LearnerDataset(GTTrajLevelDataset):
def __init__(self,env,min_margin):
super().__init__(env)
self.min_margin = min_margin
def sample(self,num_samples,steps=40,include_action=False):
D = []
GT_preference = []
for _ in tqdm(range(num_samples)):
x_idx,y_idx = np.random.choice(len(self.trajs),2,replace=False)
while abs(self.trajs[x_idx][0] - self.trajs[y_idx][0]) < self.min_margin:
x_idx,y_idx = np.random.choice(len(self.trajs),2,replace=False)
x_traj = self.trajs[x_idx]
y_traj = self.trajs[y_idx]
x_ptr = np.random.randint(len(x_traj[1])-steps)
y_ptr = np.random.randint(len(y_traj[1])-steps)
if include_action:
D.append((np.concatenate((x_traj[1][x_ptr:x_ptr+steps],x_traj[2][x_ptr:x_ptr+steps]),axis=1),
np.concatenate((y_traj[1][y_ptr:y_ptr+steps],y_traj[2][y_ptr:y_ptr+steps]),axis=1),
0 if x_traj[0] > y_traj[0] else 1)
)
else:
D.append((x_traj[1][x_ptr:x_ptr+steps],
y_traj[1][y_ptr:y_ptr+steps],
0 if x_traj[0] > y_traj[0] else 1)
)
GT_preference.append(0 if np.sum(x_traj[3][x_ptr:x_ptr+steps]) > np.sum(y_traj[3][y_ptr:y_ptr+steps]) else 1)
print('------------------')
_,_,preference = zip(*D)
preference = np.array(preference).astype(np.bool)
GT_preference = np.array(GT_preference).astype(np.bool)
print('Quality of time-indexed preference (0-1):', np.count_nonzero(preference == GT_preference) / len(preference))
print('------------------')
return D
def train(args):
logdir = Path(args.log_dir)
if logdir.exists() :
        c = input('log dir already exists. Continue training a preference model? [Y/etc]? ')
if c in ['YES','yes','Y']:
import shutil
shutil.rmtree(str(logdir))
else:
print('good bye')
return
logdir.mkdir(parents=True)
with open(str(logdir/'args.txt'),'w') as f:
f.write( str(args) )
logdir = str(logdir)
rospy.init_node('turtlebot2_maze',anonymous=True,log_level=rospy.FATAL)
env = gym.make(args.env_id)
train_agents = [RandomAgent(env.action_space)] if args.random_agent else []
models = sorted([p for p in Path(args.learners_path).glob('?????') if int(p.name) <= args.max_chkpt])
for path in models:
agent = PPO2Agent(env,args.env_type,str(path),stochastic=args.stochastic)
train_agents.append(agent)
if args.preference_type == 'gt':
dataset = GTDataset(env)
elif args.preference_type == 'gt_traj':
dataset = GTTrajLevelDataset(env)
elif args.preference_type == 'gt_traj_no_steps':
dataset = GTTrajLevelNoStepsDataset(env,args.max_steps)
elif args.preference_type == 'gt_traj_no_steps_noise':
dataset = GTTrajLevelNoSteps_Noise_Dataset(env,args.max_steps,args.traj_noise)
elif args.preference_type == 'gt_traj_no_steps_n_mix':
dataset = GTTrajLevelNoSteps_N_Mix_Dataset(env,args.N,args.max_steps)
elif args.preference_type == 'time':
dataset = LearnerDataset(env,args.min_margin)
else:
        assert False, 'specify preference type'
dataset.prebuilt(train_agents,args.min_length)
models = []
for i in range(args.num_models):
with tf.variable_scope('model_%d'%i):
models.append(Model(args.include_action,env.observation_space.shape[0],env.action_space.shape[0],steps=args.steps,num_layers=args.num_layers,embedding_dims=args.embedding_dims))
### Initialize Parameters
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
# Training configuration
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession()
sess.run(init_op)
for i,model in enumerate(models):
D = dataset.sample(args.D,args.steps,include_action=args.include_action)
if D is None:
model.train_with_dataset(dataset,64,include_action=args.include_action,debug=True)
else:
model.train(D,l2_reg=args.l2_reg,noise_level=args.noise,debug=True)
model.saver.save(sess,logdir+'/model_%d.ckpt'%(i),write_meta_graph=False)
def eval(args):
logdir = str(Path(args.logbase_path) / args.env_id)
env = gym.make(args.env_id)
valid_agents = []
models = sorted(Path(args.learners_path).glob('?????'))
for path in models:
if path.name > args.max_chkpt:
continue
agent = PPO2Agent(env,args.env_type,str(path),stochastic=args.stochastic)
valid_agents.append(agent)
test_agents = []
for i,path in enumerate(models):
if i % 10 == 0:
agent = PPO2Agent(env,args.env_type,str(path),stochastic=args.stochastic)
test_agents.append(agent)
gt_dataset= GTDataset(env)
gt_dataset.prebuilt(valid_agents,-1)
gt_dataset_test = GTDataset(env)
gt_dataset_test.prebuilt(test_agents,-1)
models = []
for i in range(args.num_models):
with tf.variable_scope('model_%d'%i):
models.append(Model(args.include_action,env.observation_space.shape[0],env.action_space.shape[0],steps=args.steps))
### Initialize Parameters
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
# Training configuration
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession()
sess.run(init_op)
for i,model in enumerate(models):
model.saver.restore(sess,logdir+'/model_%d.ckpt'%(i))
print('model %d'%i)
obs, acs, r = gt_dataset.trajs
r_hat = model.get_reward(obs, acs)
obs, acs, r_test = gt_dataset_test.trajs
r_hat_test = model.get_reward(obs, acs)
fig,axes = plt.subplots(1,2)
axes[0].plot(r,r_hat,'o')
axes[1].plot(r_test,r_hat_test,'o')
fig.savefig('model_%d.png'%i)
imgcat(fig)
plt.close(fig)
np.savez('model_%d.npz'%i,r=r,r_hat=r_hat,r_test=r_test,r_hat_test=r_hat_test)
if __name__ == "__main__":
# Required Args (target envs & learners)
parser = argparse.ArgumentParser(description=None)
parser.add_argument('--env_id', default='', help='Select the environment to run')
parser.add_argument('--env_type', default='', help='gazebo')
parser.add_argument('--learners_path', default='', help='path of learning agents')
    parser.add_argument('--max_chkpt', default=240, type=int, help='decide up to which learner checkpoint stage to use')
parser.add_argument('--steps', default=None, type=int, help='length of snippets')
parser.add_argument('--max_steps', default=None, type=int, help='length of max snippets (gt_traj_no_steps only)')
parser.add_argument('--traj_noise', default=None, type=int, help='number of adjacent swaps (gt_traj_no_steps_noise only)')
parser.add_argument('--min_length', default=1000,type=int, help='minimum length of trajectory generated by each agent')
parser.add_argument('--num_layers', default=2, type=int, help='number layers of the reward network')
parser.add_argument('--embedding_dims', default=256, type=int, help='embedding dims')
parser.add_argument('--num_models', default=3, type=int, help='number of models to ensemble')
parser.add_argument('--l2_reg', default=0.01, type=float, help='l2 regularization size')
parser.add_argument('--noise', default=0.1, type=float, help='noise level to add on training label')
parser.add_argument('--D', default=1000, type=int, help='|D| in the preference paper')
    parser.add_argument('--N', default=10, type=int, help='number of trajectory mixes (gt_traj_no_steps_n_mix only)')
parser.add_argument('--log_dir', required=True)
parser.add_argument('--preference_type', help='gt or gt_traj or time or gt_traj_no_steps, gt_traj_no_steps_n_mix; if gt then preference will be given as a GT reward, otherwise, it is given as a time index')
    parser.add_argument('--min_margin', default=1, type=int, help='when preference type is "time", the minimum margin needed to ensure a preference gap exists')
parser.add_argument('--include_action', action='store_true', help='whether to include action for the model or not')
parser.add_argument('--stochastic', action='store_true', help='whether want to use stochastic agent or not')
parser.add_argument('--random_agent', action='store_true', help='whether to use default random agent')
parser.add_argument('--eval', action='store_true', help='path to log base (env_id will be concatenated at the end)')
# Args for PPO
parser.add_argument('--rl_runs', default=1, type=int)
parser.add_argument('--ppo_log_path', default='ppo2')
parser.add_argument('--custom_reward', required=True, help='preference or preference_normalized')
parser.add_argument('--ctrl_coeff', default=0.0, type=float)
parser.add_argument('--alive_bonus', default=0.0, type=float)
parser.add_argument('--gamma', default=0.99, type=float)
args = parser.parse_args()
if not args.eval :
# Train a Preference Model
train(args)
# Train an agent
import os, subprocess
openai_logdir = Path(os.path.abspath(os.path.join(args.log_dir,args.ppo_log_path)))
if openai_logdir.exists():
            print('openai_logdir already exists.')
exit()
template = 'python -m baselines.run --alg=ppo2 --env={env} --num_timesteps=1e6 --save_interval=10 --custom_reward {custom_reward} --custom_reward_kwargs="{kwargs}" --gamma {gamma}'
kwargs = {
"num_models":args.num_models,
"include_action":args.include_action,
"model_dir":os.path.abspath(args.log_dir),
"num_layers":args.num_layers,
"embedding_dims":args.embedding_dims,
"ctrl_coeff":args.ctrl_coeff,
"alive_bonus":args.alive_bonus
}
# Write down some log
openai_logdir.mkdir(parents=True)
with open(str(openai_logdir/'args.txt'),'w') as f:
f.write( args.custom_reward + '/')
f.write( str(kwargs) )
cmd = template.format(
env=args.env_id,
custom_reward=args.custom_reward,
gamma=args.gamma,
kwargs=str(kwargs))
procs = []
for i in range(args.rl_runs):
env = os.environ.copy()
env["OPENAI_LOGDIR"] = str(openai_logdir/('run_%d'%i))
if i == 0:
env["OPENAI_LOG_FORMAT"] = 'stdout,log,csv,tensorboard'
p = subprocess.Popen(cmd, cwd='./learner/baselines', stdout=subprocess.PIPE, env=env, shell=True)
else:
env["OPENAI_LOG_FORMAT"] = 'log,csv,tensorboard'
p = subprocess.Popen(cmd, cwd='./learner/baselines', env=env, shell=True)
procs.append(p)
for line in procs[0].stdout:
print(line.decode(),end='')
for p in procs[1:]:
p.wait()
else:
# eval(args)
import os
from performance_checker import gen_traj_dist as get_perf
#from performance_checker import gen_traj_return as get_perf
env = gym.make(args.env_id)
agents_dir = Path(os.path.abspath(os.path.join(args.log_dir,args.ppo_log_path)))
trained_steps = sorted(list(set([path.name for path in agents_dir.glob('run_*/checkpoints/?????')])))
print(trained_steps)
print(str(agents_dir))
for step in trained_steps[::-1]:
perfs = []
for i in range(args.rl_runs):
path = agents_dir/('run_%d'%i)/'checkpoints'/step
if path.exists() == False:
continue
agent = PPO2Agent(env,args.env_type,str(path),stochastic=args.stochastic)
perfs += [
get_perf(env,agent) for _ in range(5)
]
print('[%s-%d] %f %f'%(step,i,np.mean(perfs[-5:]),np.std(perfs[-5:])))
print('[%s] %f %f %f %f'%(step,np.mean(perfs),np.std(perfs),np.max(perfs),np.min(perfs)))
#break
| [
"python -m baselines.run --alg=ppo2 --env={env} --num_timesteps=1e6 --save_interval=10 --custom_reward {custom_reward} --custom_reward_kwargs=\"{kwargs}\" --gamma {gamma}"
] |
2024-01-10 | nicholscrawford/NaturalLanguagePlanningGoals | ConfigurationDiffuser~sample_pl.py | from omegaconf import OmegaConf
import pytorch_lightning as pl
import torch
import argparse
import os
import time
from torch.utils.data import DataLoader
from ConfigurationDiffuser.configuration_diffuser_pl import SimpleTransformerDiffuser
from Data.basic_writerdatasets_st import DiffusionDataset
from ConfigurationDiffuser.guidance_func import guidance_functions
if __name__ == "__main__":
torch.set_default_dtype(torch.double)
parser = argparse.ArgumentParser(description="Run a simple model")
parser.add_argument("--config_file", help='config yaml file',
default='ConfigurationDiffuser/Config/sampling_example.yaml',
type=str)
args = parser.parse_args()
assert os.path.exists(args.config_file), "Cannot find config yaml file at {}".format(args.config_file)
cfg = OmegaConf.load(args.config_file)
if not os.path.exists(cfg.poses_dir):
os.makedirs(cfg.poses_dir)
if not os.path.exists(cfg.pointclouds_dir):
os.makedirs(cfg.pointclouds_dir)
if len(os.listdir(os.path.join(cfg.pointclouds_dir, "1"))) < 10:
print("Must have the show_gen_poses script running! It's needed to get the point clouds to pass into the model.")
exit(0)
# Prompt to confirm file deletion
if len([file for file in os.listdir(os.path.join(cfg.pointclouds_dir,"1")) if "initial" in file]) > 0:
confirmation = input(f"Delete all initial files in {cfg.pointclouds_dir}? (y/n): ")
if confirmation.lower() == 'y':
# Remove all files in the directory
_ = [os.remove(os.path.join(os.path.join(cfg.pointclouds_dir,"1"), file)) for file in os.listdir(os.path.join(cfg.pointclouds_dir,"1")) if "initial" in file]
print("All initial files have been deleted.")
else:
exit(0)
test_dataset = DiffusionDataset(cfg.device, ds_roots=[cfg.pointclouds_dir], clear_cache=True)
data_cfg = cfg.dataset
test_dataloader = DataLoader(test_dataset, batch_size=data_cfg.batch_size, shuffle=False,
pin_memory=data_cfg.pin_memory, num_workers=data_cfg.num_workers)
guidance_function = guidance_functions(cfg.sampling, device=cfg.device)
# Initialize the model
os.environ["DATETIME"] = time.strftime("%Y_%m_%d-%H:%M:%S")
model = SimpleTransformerDiffuser.load_from_checkpoint(cfg.model_dir)
model.poses_dir = cfg.poses_dir
model.sampling_cfg = cfg.sampling
if cfg.sampling.guidance_sampling:
model.guidance_function = guidance_function.clip_guidance_function_logits
# model.guidance_function = guidance_function.clip_guidance_function
# model.guidance_function = guidance_function.away_from_each_other_guidance_function
# Initialize the PyTorch Lightning trainer
trainer = pl.Trainer()
loss = trainer.test(model, test_dataloader)
# print(model.guidance_alignment)
| [] |
2024-01-10 | nicholscrawford/NaturalLanguagePlanningGoals | ConfigurationDiffuser~grid_search.py | from omegaconf import OmegaConf
import pytorch_lightning as pl
import torch
import argparse
import os
import time
import itertools
from torch.utils.data import DataLoader
from ConfigurationDiffuser.configuration_diffuser_pl import SimpleTransformerDiffuser
from Data.basic_writerdatasets_st import DiffusionDataset
from ConfigurationDiffuser.guidance_func import guidance_functions
if __name__ == "__main__":
torch.set_default_dtype(torch.double)
parser = argparse.ArgumentParser(description="Grid search over sampling hyperparameters")
parser.add_argument("--config_file", help='config yaml file',
default='ConfigurationDiffuser/Config/sampling_example.yaml',
type=str)
args = parser.parse_args()
assert os.path.exists(args.config_file), "Cannot find config yaml file at {}".format(args.config_file)
cfg = OmegaConf.load(args.config_file)
if not os.path.exists(cfg.poses_dir):
os.makedirs(cfg.poses_dir)
if not os.path.exists(cfg.pointclouds_dir):
os.makedirs(cfg.pointclouds_dir)
if len(os.listdir(os.path.join(cfg.pointclouds_dir, "1"))) < 10:
print("Must have the show_gen_poses script running! It's needed to get the point clouds to pass into the model.")
exit(0)
# Prompt to confirm file deletion
if len([file for file in os.listdir(os.path.join(cfg.pointclouds_dir,"1")) if "initial" in file]) > 0:
confirmation = input(f"Delete all initial files in {cfg.pointclouds_dir}? (y/n): ")
if confirmation.lower() == 'y':
# Remove all files in the directory
_ = [os.remove(os.path.join(os.path.join(cfg.pointclouds_dir,"1"), file)) for file in os.listdir(os.path.join(cfg.pointclouds_dir,"1")) if "initial" in file]
print("All initial files have been deleted.")
else:
exit(0)
test_dataset = DiffusionDataset(cfg.device, ds_roots=[cfg.pointclouds_dir], clear_cache=True)
data_cfg = cfg.dataset
test_dataloader = DataLoader(test_dataset, batch_size=data_cfg.batch_size, shuffle=False,
pin_memory=data_cfg.pin_memory, num_workers=data_cfg.num_workers)
guidance_function = guidance_functions(cfg.sampling, device=cfg.device)
# Initialize the model
os.environ["DATETIME"] = time.strftime("%Y_%m_%d-%H:%M:%S")
model = SimpleTransformerDiffuser.load_from_checkpoint(cfg.model_dir)
model.poses_dir = cfg.poses_dir
model.sampling_cfg = cfg.sampling
if cfg.sampling.guidance_sampling:
model.guidance_function = guidance_function.clip_guidance_function
# Initialize the PyTorch Lightning trainer
best_score = float('inf')
best_hyperparameters = None
ddim_steps_range = [20, 30]
guidance_strength_factor_range = [10, 50, 200] #200
backwards_steps_m_range = [5, 15, 25] #25
backward_guidance_lr_range =[0.01, 0.05, 0.1] #0.01
per_step_k_range = [3, 4, 6] #6
combinations = itertools.product(
ddim_steps_range,
guidance_strength_factor_range,
backwards_steps_m_range,
backward_guidance_lr_range,
per_step_k_range
)
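    # itertools.product enumerates the full Cartesian grid of the ranges above
    # (2 * 3 * 3 * 3 * 3 = 162 hyperparameter combinations with the values listed here).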
trainer = pl.Trainer()
for hyperparameters in combinations:
ddim_steps, guidance_strength_factor, backwards_steps_m, backward_guidance_lr, per_step_k = hyperparameters
model.sampling_cfg.ddim_steps = ddim_steps
model.sampling_cfg.guidance_strength_factor = guidance_strength_factor
model.sampling_cfg.backwards_steps_m = backwards_steps_m
model.sampling_cfg.backward_guidance_lr = backward_guidance_lr
model.sampling_cfg.per_step_k = per_step_k
trainer.test(model, test_dataloader)
score = model.guidance_alignment.mean()
if score < best_score:
best_score = score
best_hyperparameters = hyperparameters
print("Hyperparameters:", hyperparameters)
print("Score:", score.item())
print("Best Hyperparameters:", best_hyperparameters)
print("Best Score:", best_score)
print("Best Possible Score: ~70.18") | [] |
2024-01-10 | nicholscrawford/NaturalLanguagePlanningGoals | ConfigurationDiffuser~sample_pcf.py | from omegaconf import OmegaConf
import pytorch_lightning as pl
import torch
import argparse
import os
import time
from torch.utils.data import DataLoader
from ConfigurationDiffuser.configuration_diffuser_pcf import SimpleTransformerDiffuser
from Data.basic_writerdatasets_st import DiffusionDataset
from ConfigurationDiffuser.guidance_func import guidance_functions
if __name__ == "__main__":
torch.set_default_dtype(torch.double)
parser = argparse.ArgumentParser(description="Run a simple model")
parser.add_argument("--config_file", help='config yaml file',
default='ConfigurationDiffuser/Config/sampling_example.yaml',
type=str)
args = parser.parse_args()
assert os.path.exists(args.config_file), "Cannot find config yaml file at {}".format(args.config_file)
cfg = OmegaConf.load(args.config_file)
if not os.path.exists(cfg.poses_dir):
os.makedirs(cfg.poses_dir)
if not os.path.exists(cfg.pointclouds_dir):
os.makedirs(cfg.pointclouds_dir)
if len(os.listdir(os.path.join(cfg.pointclouds_dir, "1"))) < 10:
print("Must have the show_gen_poses script running! It's needed to get the point clouds to pass into the model.")
exit(0)
# Prompt to confirm file deletion
if len([file for file in os.listdir(os.path.join(cfg.pointclouds_dir,"1")) if "initial" in file]) > 0:
confirmation = input(f"Delete all initial files in {cfg.pointclouds_dir}? (y/n): ")
if confirmation.lower() == 'y':
# Remove all files in the directory
_ = [os.remove(os.path.join(os.path.join(cfg.pointclouds_dir,"1"), file)) for file in os.listdir(os.path.join(cfg.pointclouds_dir,"1")) if "initial" in file]
print("All initial files have been deleted.")
else:
exit(0)
test_dataset = DiffusionDataset(cfg.device, ds_roots=[cfg.pointclouds_dir], clear_cache=True)
data_cfg = cfg.dataset
test_dataloader = DataLoader(test_dataset, batch_size=data_cfg.batch_size, shuffle=False,
pin_memory=data_cfg.pin_memory, num_workers=data_cfg.num_workers)
guidance_function = guidance_functions(cfg.sampling, device=cfg.device)
# Initialize the model
os.environ["DATETIME"] = time.strftime("%Y_%m_%d-%H:%M:%S")
model = SimpleTransformerDiffuser.load_from_checkpoint(cfg.model_dir)
model.poses_dir = cfg.poses_dir
model.sampling_cfg = cfg.sampling
if cfg.sampling.guidance_sampling:
model.guidance_function = guidance_function.clip_guidance_function_logits
# model.guidance_function = guidance_function.clip_guidance_function
# model.guidance_function = guidance_function.away_from_each_other_guidance_function
# Initialize the PyTorch Lightning trainer
trainer = pl.Trainer()
loss = trainer.test(model, test_dataloader)
# print(model.guidance_alignment)
| [] |
2024-01-10 | llamaking136/Speaking-Chatgpt | newfile.py | import os
import requests
from chatgpt_wrapper import ChatGPT
import base64
import json
bot = ChatGPT()
response = bot.ask(input("> "))
print(response)
# If the code doesn't work although you did everything right - my apologies; you will need to
# insert an API key from OpenAI. If you don't know how to do it - ask ChatGPT.
token = "Insert your Google Cloud Text-to-Speech API token here"
url = "https://us-central1-texttospeech.googleapis.com/v1beta1/text:synthesize"
method = "POST"
headers = {
"X-goog-api-key": f"{token}",
"Content-Type": "application/json"
}
data = {
"audioConfig": {
"audioEncoding": "LINEAR16",
"effectsProfileId": [
"small-bluetooth-speaker-class-device"
],
"pitch": 0,
"speakingRate": 1
},
"input": {
"text": response
},
"voice": {
"languageCode": "en-US",
"name": "en-US-Neural2-F"
}
}
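# The synthesize endpoint returns JSON whose "audioContent" field holds base64-encoded audio,
# which is why the response is base64-decoded below before being written to output.mp3.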
response = requests.post(url, headers=headers, json=data)
if response.status_code >= 300:
print("error requesting to the google api")
print(response.content)
content = json.loads(response.content)
audio = base64.b64decode(content["audioContent"])
with open("output.mp3", "wb") as f:
f.write(audio)
print("wrote to file `output.mp3`")
# print(response.content)
| [] |
2024-01-10 | alexsaezm/dotfiles | scripts~.local~scripts~askai | #!/usr/bin/env python3
# Accepts an input from a pipe and a question. Example:
# ls | askai "How many files do I have"
# If you don't provide a question, or you don't pass an input, it will open a chat.
#
# It reads the token from the ~/.config/askai.conf file
# The format of the askai.conf file is as follows:
# [openai]
# api_key = <YOUR KEY>
import argparse
import configparser
import openai
import os
import sys
def print_help():
print("Usage: askai [options]")
print("Options:")
print(" -h, --help Show this help message and exit")
print(" -q, --question Ask a question and exits")
def ask_openai(text):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=text,
max_tokens=500,
temperature=0.5,
)
return response.choices[0].text.strip()
def open_prompt():
while True:
user_input = input("> ")
if user_input.lower() == 'exit':
break
out = ask_openai(user_input)
print(out)
def get_token():
# Read the token from the configuration file
config = configparser.ConfigParser()
config_file = os.path.expanduser('~/.config/askai.conf')
config.read(config_file)
token = config.get('openai', 'api_key', fallback='')
# Check if the token exists
if not token:
print("API key not found in the configuration file.")
sys.exit(1)
return token
def main():
parser = argparse.ArgumentParser(description='Ask an AI')
parser.add_argument('-q', '--question', type=str, help='Ask a question and exits')
args = parser.parse_args()
full_input = ""
openai.api_key = get_token()
if args.question and sys.stdin.isatty():
full_input += f"Question: {args.question}\n"
print(ask_openai(full_input))
sys.exit(0)
if args.question and not sys.stdin.isatty():
full_input += f"Question: {args.question}\n"
full_input += f"Input: {sys.stdin.read()}\n"
print(ask_openai(full_input))
sys.exit(0)
if args.question == None and sys.stdin.isatty():
open_prompt()
sys.exit(0)
if args.question == None and not sys.stdin.isatty():
full_input += f"Input: {sys.stdin.read()}\n"
print(ask_openai(full_input))
open_prompt()
sys.exit(0)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | MrGongqi/ChatPaper | chat_paper.py | import numpy as np
import os
import re
import datetime
import arxiv
import openai, tenacity
import base64, requests
import argparse
import configparser
from get_paper_from_pdf import Paper
# Define the Reader class
class Reader:
    # Initialization: set up attributes
def __init__(self, key_word, query, filter_keys,
root_path='./',
gitee_key='',
sort=arxiv.SortCriterion.SubmittedDate, user_name='defualt', language='cn', args=None):
        self.user_name = user_name # reader's name
        self.key_word = key_word # keywords the reader is interested in
        self.query = query # search query entered by the reader
        self.sort = sort # sort order chosen by the reader
        self.language = language # language chosen by the reader
        self.filter_keys = filter_keys # keywords used to filter abstracts
self.root_path = root_path
        # Create a ConfigParser object
self.config = configparser.ConfigParser()
        # Read the config file
self.config.read('apikey.ini')
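        # Expected apikey.ini layout (a hypothetical example inferred from the keys read in this class):
        # [OpenAI]
        # OPENAI_API_KEYS = ['sk-xxx', 'sk-yyy']
        # [Gitee]
        # api = <gitee access token>
        # owner = <gitee user name>
        # repo = <image-bed repo name>
        # path = <folder inside the repo>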
        # Get the value for a given key
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
if args.save_image:
self.gitee_key = self.config.get('Gitee', 'api')
else:
self.gitee_key = ''
def get_arxiv(self, max_results=30):
search = arxiv.Search(query=self.query,
max_results=max_results,
sort_by=self.sort,
sort_order=arxiv.SortOrder.Descending,
)
return search
def filter_arxiv(self, max_results=30):
search = self.get_arxiv(max_results=max_results)
print("all search:")
for index, result in enumerate(search.results()):
print(index, result.title, result.updated)
filter_results = []
filter_keys = self.filter_keys
print("filter_keys:", self.filter_keys)
        # A paper is kept only if every filter keyword appears in its abstract
for index, result in enumerate(search.results()):
abs_text = result.summary.replace('-\n', '-').replace('\n', ' ')
meet_num = 0
for f_key in filter_keys.split(" "):
if f_key.lower() in abs_text.lower():
meet_num += 1
if meet_num == len(filter_keys.split(" ")):
filter_results.append(result)
# break
print("filter_results:", len(filter_results))
print("filter_papers:")
for index, result in enumerate(filter_results):
print(index, result.title, result.updated)
return filter_results
def validateTitle(self, title):
        # Sanitize messy characters in the title so it can be used as a file path
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
        new_title = re.sub(rstr, "_", title) # replace with underscores
return new_title
def download_pdf(self, filter_results):
        # Create the output folder first
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
key_word = str(self.key_word.replace(':', ' '))
path = self.root_path + 'pdf_files/' + self.query.replace('au: ', '').replace('title: ', '').replace('ti: ', '').replace(':', ' ')[:25] + '-' + date_str
try:
os.makedirs(path)
except:
pass
print("All_paper:", len(filter_results))
        # Start downloading:
paper_list = []
for r_index, result in enumerate(filter_results):
try:
title_str = self.validateTitle(result.title)
pdf_name = title_str+'.pdf'
# result.download_pdf(path, filename=pdf_name)
self.try_download_pdf(result, path, pdf_name)
paper_path = os.path.join(path, pdf_name)
print("paper_path:", paper_path)
paper = Paper(path=paper_path,
url=result.entry_id,
title=result.title,
abs=result.summary.replace('-\n', '-').replace('\n', ' '),
authers=[str(aut) for aut in result.authors],
)
                # Download finished; start parsing:
paper.parse_pdf()
paper_list.append(paper)
except Exception as e:
print("download_error:", e)
pass
return paper_list
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def try_download_pdf(self, result, path, pdf_name):
result.download_pdf(path, filename=pdf_name)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def upload_gitee(self, image_path, image_name='', ext='png'):
"""
        Upload the image to Gitee (used as an image bed)
:return:
"""
with open(image_path, 'rb') as f:
base64_data = base64.b64encode(f.read())
base64_content = base64_data.decode()
date_str = str(datetime.datetime.now())[:19].replace(':', '-').replace(' ', '-') + '.' + ext
path = image_name+ '-' +date_str
payload = {
"access_token": self.gitee_key,
"owner": self.config.get('Gitee', 'owner'),
"repo": self.config.get('Gitee', 'repo'),
"path": self.config.get('Gitee', 'path'),
"content": base64_content,
"message": "upload image"
}
        # Replace these with your own Gitee account, repo name, and folder name:
url = f'https://gitee.com/api/v5/repos/'+self.config.get('Gitee', 'owner')+'/'+self.config.get('Gitee', 'repo')+'/contents/'+self.config.get('Gitee', 'path')+'/'+path
rep = requests.post(url, json=payload).json()
print("rep:", rep)
if 'content' in rep.keys():
image_url = rep['content']['download_url']
else:
image_url = r"https://gitee.com/api/v5/repos/"+self.config.get('Gitee', 'owner')+'/'+self.config.get('Gitee', 'repo')+'/contents/'+self.config.get('Gitee', 'path')+'/' + path
return image_url
def summary_with_chat(self, paper_list):
htmls = []
for paper_index, paper in enumerate(paper_list):
            # Step 1: summarize using the title, abstract, and introduction.
text = ''
text += 'Title:' + paper.title
text += 'Url:' + paper.url
text += 'Abstrat:' + paper.abs
# intro
text += list(paper.section_text_dict.values())[0]
max_token = 2500 * 4
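            # 2500 * 4: presumably a rough character budget of ~2500 tokens at ~4 characters per token.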
text = text[:max_token]
chat_summary_text = self.chat_summary(text=text)
htmls.append('## Paper:' + str(paper_index+1))
htmls.append('\n\n\n')
htmls.append(chat_summary_text)
            # TODO: insert the largest image from the paper into the markdown; this could be made smarter:
first_image, ext = paper.get_image_path()
if first_image is None or self.gitee_key == '':
pass
else:
image_title = self.validateTitle(paper.title)
image_url = self.upload_gitee(image_path=first_image, image_name=image_title, ext=ext)
htmls.append("\n\n")
htmls.append("")
htmls.append("\n\n")
            # Step 2: summarize the methods:
            # TODO: some papers name the methods section after the algorithm itself, so simple keyword
            # matching often misses it; a better scheme is needed later.
method_key = ''
for parse_key in paper.section_text_dict.keys():
if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
method_key = parse_key
break
if method_key != '':
text = ''
method_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text
# methods
method_text += paper.section_text_dict[method_key]
                # TODO: turn this into automatic handling via tenacity!
max_token = 2500 * 4
text = summary_text + "\n\n<Methods>:\n\n" + method_text
text = text[:max_token]
chat_method_text = self.chat_method(text=text)
htmls.append(chat_method_text)
else:
chat_method_text = ''
htmls.append("\n"*4)
            # Step 3: summarize the full paper and give scores:
conclusion_key = ''
for parse_key in paper.section_text_dict.keys():
if 'conclu' in parse_key.lower():
conclusion_key = parse_key
break
text = ''
conclusion_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text + "\n <Method summary>:\n" + chat_method_text
if conclusion_key != '':
# conclusion
conclusion_text += paper.section_text_dict[conclusion_key]
max_token = 2500 * 4
text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
else:
text = summary_text
text = text[:max_token]
chat_conclusion_text = self.chat_conclusion(text=text)
htmls.append(chat_conclusion_text)
htmls.append("\n"*4)
            # Merge everything into a single file and save it.
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
try:
export_path = os.path.join(self.root_path, 'export')
os.makedirs(export_path)
except:
pass
mode = 'w' if paper_index == 0 else 'a'
file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title)[:25]+"."+self.file_format)
self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
# file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title)+".md")
# self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
htmls = []
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_conclusion(self, text):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
            # The prompt should be switched to English to use fewer tokens.
messages=[
{"role": "system", "content": "你是一个["+self.key_word+"]领域的审稿人,你需要严格评审这篇文章"}, # chatgpt 角色
{"role": "assistant", "content": "这是一篇英文文献的<summary>和<conclusion>部分内容,其中<summary>你已经总结好了,但是<conclusion>部分,我需要你帮忙归纳下面问题:"+text}, # 背景知识,可以参考OpenReview的审稿流程
{"role": "user", "content": """
8. 做出如下总结:
- (1):这篇工作的意义如何?
- (2):从创新点、性能、工作量这三个维度,总结这篇文章的优点和缺点。
.......
按照后面的格式输出:
8. Conclusion: \n\n
- (1):xxx;\n
- (2):创新点: xxx; 性能: xxx; 工作量: xxx;\n
务必使用中文回答(专有名词需要用英文标注),语句尽量简洁且学术,不要和之前的<summary>内容重复,数值使用原文数字, 务必严格按照格式,将对应内容输出到xxx中,按照\n换行,.......代表按照实际需求填写,如果没有可以不用写.
"""},
]
)
result = ''
for choice in response.choices:
result += choice.message.content
print("conclusion_result:\n", result)
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_method(self, text):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "你是一个["+self.key_word+"]领域的科研人员,善于使用精炼的语句总结论文"}, # chatgpt 角色
{"role": "assistant", "content": "这是一篇英文文献的<summary>和<Method>部分内容,其中<summary>你已经总结好了,但是<Methods>部分,我需要你帮忙阅读并归纳下面问题:"+text}, # 背景知识
{"role": "user", "content": """
7. 详细描述这篇文章的方法思路。比如说它的步骤是:
- (1):...
- (2):...
- (3):...
- .......
按照后面的格式输出:
7. Methods: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
.......\n\n
务必使用中文回答(专有名词需要用英文标注),语句尽量简洁且学术,不要和之前的<summary>内容重复,数值使用原文数字, 务必严格按照格式,将对应内容输出到xxx中,按照\n换行,.......代表按照实际需求填写,如果没有可以不用写.
"""},
]
)
result = ''
for choice in response.choices:
result += choice.message.content
print("method_result:\n", result)
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_summary(self, text):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "你是一个["+self.key_word+"]领域的科研人员,善于使用精炼的语句总结论文"}, # chatgpt 角色
{"role": "assistant", "content": "这是一篇英文文献的标题,作者,链接,Abstract和Introduction部分内容,我需要你帮忙阅读并归纳下面问题:"+text}, # 背景知识
{"role": "user", "content": """
1. 标记出这篇文献的标题(加上中文翻译)
2. 列举所有的作者姓名 (使用英文)
3. 标记第一作者的单位(只输出中文翻译)
4. 标记出这篇文章的关键词(使用英文)
5. 论文链接,Github代码链接(如果有的话,没有的话请填写Github:None)
6. 按照下面四个点进行总结:
- (1):这篇文章的研究背景是什么?
- (2):过去的方法有哪些?它们存在什么问题?本文和过去的研究有哪些本质的区别?Is the approach well motivated?
- (3):本文提出的研究方法是什么?
- (4):本文方法在什么任务上,取得了什么性能?性能能否支持他们的目标?
按照后面的格式输出:
1. Title: xxx\n\n
2. Authors: xxx\n\n
3. Affiliation: xxx\n\n
4. Keywords: xxx\n\n
5. Urls: xxx or xxx , xxx \n\n
6. Summary: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
- (4):xxx.\n\n
务必使用中文回答(专有名词需要用英文标注),语句尽量简洁且学术,不要有太多重复的信息,数值使用原文数字, 务必严格按照格式,将对应内容输出到xxx中,按照\n换行.
"""},
]
)
result = ''
for choice in response.choices:
result += choice.message.content
print("summary_result:\n", result)
return result
def export_to_markdown(self, text, file_name, mode='w'):
        # The markdown module's convert method could turn the text into HTML:
        # html = markdown.markdown(text)
        # Open a file in write mode
with open(file_name, mode, encoding="utf-8") as f:
            # Write the content to the file
f.write(text)
    # Print out the reader's info
def show_info(self):
print(f"Key word: {self.key_word}")
print(f"Query: {self.query}")
print(f"Sort: {self.sort}")
def main(args):
    # Create a Reader object and call its show_info method
if args.pdf_path:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=args.sort,
args=args
)
reader1.show_info()
paper_list = [Paper(path=args.pdf_path)]
reader1.summary_with_chat(paper_list=paper_list)
else:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=args.sort,
args=args
)
reader1.show_info()
filter_results = reader1.filter_arxiv(max_results=args.max_results)
paper_list = reader1.download_pdf(filter_results)
reader1.summary_with_chat(paper_list=paper_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument("--pdf_path", type=str, default=r'demo.pdf', help="if none, the bot will download from arxiv with query")
parser.add_argument("--pdf_path", type=str, default='', help="if none, the bot will download from arxiv with query")
parser.add_argument("--query", type=str, default='all: ChatGPT robot', help="the query string, ti: xx, au: xx, all: xx,")
parser.add_argument("--key_word", type=str, default='reinforcement learning', help="the key word of user research fields")
parser.add_argument("--filter_keys", type=str, default='ChatGPT robot', help="the filter key words, 摘要中每个单词都得有,才会被筛选为目标论文")
parser.add_argument("--max_results", type=int, default=1, help="the maximum number of results")
parser.add_argument("--sort", default=arxiv.SortCriterion.Relevance, help="another is arxiv.SortCriterion.LastUpdatedDate")
parser.add_argument("--save_image", default=False, help="save image? It takes a minute or two to save a picture! But pretty")
parser.add_argument("--file_format", type=str, default='md', help="导出的文件格式,如果存图片的话,最好是md,如果不是的话,txt的不会乱")
args = parser.parse_args()
import time
start_time = time.time()
main(args=args)
print("summary time:", time.time() - start_time)
| [
"]领域的科研人员,善于使用精炼的语句总结论文",
"这是一篇英文文献的<summary>和<conclusion>部分内容,其中<summary>你已经总结好了,但是<conclusion>部分,我需要你帮忙归纳下面问题:PLACEHOLDER",
"]领域的审稿人,你需要严格评审这篇文章",
" \n 8. 做出如下总结:\n - (1):这篇工作的意义如何?\n - (2):从创新点、性能、工作量这三个维度,总结这篇文章的优点和缺点。 \n .......\n 按照后面的格式输出: \n 8. Conclusion: \n\n\n - (1):xxx;\n \n - (2):创新点: xxx; 性能: xxx; 工作量: xxx;\n \n \n 务必使用中文回答(专有名词需要用英文标注),语句尽量简洁且学术,不要和之前的<summary>内容重复,数值使用原文数字, 务必严格按照格式,将对应内容输出到xxx中,按照\n换行,.......代表按照实际需求填写,如果没有可以不用写. \n ",
"这是一篇英文文献的标题,作者,链接,Abstract和Introduction部分内容,我需要你帮忙阅读并归纳下面问题:PLACEHOLDER",
"这是一篇英文文献的<summary>和<Method>部分内容,其中<summary>你已经总结好了,但是<Methods>部分,我需要你帮忙阅读并归纳下面问题:PLACEHOLDER",
" \n 1. 标记出这篇文献的标题(加上中文翻译)\n 2. 列举所有的作者姓名 (使用英文)\n 3. 标记第一作者的单位(只输出中文翻译) \n 4. 标记出这篇文章的关键词(使用英文)\n 5. 论文链接,Github代码链接(如果有的话,没有的话请填写Github:None)\n 6. 按照下面四个点进行总结:\n - (1):这篇文章的研究背景是什么?\n - (2):过去的方法有哪些?它们存在什么问题?本文和过去的研究有哪些本质的区别?Is the approach well motivated?\n - (3):本文提出的研究方法是什么?\n - (4):本文方法在什么任务上,取得了什么性能?性能能否支持他们的目标?\n 按照后面的格式输出: \n 1. Title: xxx\n\n\n 2. Authors: xxx\n\n\n 3. Affiliation: xxx\n\n \n 4. Keywords: xxx\n\n \n 5. Urls: xxx or xxx , xxx \n\n \n 6. Summary: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n - (4):xxx.\n\n \n \n 务必使用中文回答(专有名词需要用英文标注),语句尽量简洁且学术,不要有太多重复的信息,数值使用原文数字, 务必严格按照格式,将对应内容输出到xxx中,按照\n换行. \n ",
" \n 7. 详细描述这篇文章的方法思路。比如说它的步骤是:\n - (1):...\n - (2):...\n - (3):...\n - .......\n 按照后面的格式输出: \n 7. Methods: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n .......\n\n \n \n 务必使用中文回答(专有名词需要用英文标注),语句尽量简洁且学术,不要和之前的<summary>内容重复,数值使用原文数字, 务必严格按照格式,将对应内容输出到xxx中,按照\n换行,.......代表按照实际需求填写,如果没有可以不用写. \n "
] |
2024-01-10 | yangtianchangxiao/autodl_happo | algorithms~hatrpo_trainer.py | import numpy as np
import torch
import torch.nn as nn
from utils.util import get_gard_norm, huber_loss, mse_loss
from utils.popart import PopArt
from algorithms.utils.util import check
from algorithms.actor_critic import Actor
class HATRPO():
"""
    Trainer class for HATRPO to update policies.
:param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param policy: (HATRPO_Policy) policy to update.
:param device: (torch.device) specifies the device to run on (cpu/gpu).
"""
def __init__(self,
args,
policy,
device=torch.device("cpu")):
self.device = device
self.tpdv = dict(dtype=torch.float32, device=device)
self.policy = policy
self.clip_param = args.clip_param
self.num_mini_batch = args.num_mini_batch
self.data_chunk_length = args.data_chunk_length
self.value_loss_coef = args.value_loss_coef
self.entropy_coef = args.entropy_coef
self.max_grad_norm = args.max_grad_norm
self.huber_delta = args.huber_delta
self.kl_threshold = args.kl_threshold
self.ls_step = args.ls_step
self.accept_ratio = args.accept_ratio
self._use_recurrent_policy = args.use_recurrent_policy
self._use_naive_recurrent = args.use_naive_recurrent_policy
self._use_max_grad_norm = args.use_max_grad_norm
self._use_clipped_value_loss = args.use_clipped_value_loss
self._use_huber_loss = args.use_huber_loss
self._use_popart = args.use_popart
self._use_value_active_masks = args.use_value_active_masks
self._use_policy_active_masks = args.use_policy_active_masks
if self._use_popart:
self.value_normalizer = PopArt(1, device=self.device)
else:
self.value_normalizer = None
def cal_value_loss(self, values, value_preds_batch, return_batch, active_masks_batch):
"""
Calculate value function loss.
:param values: (torch.Tensor) value function predictions.
:param value_preds_batch: (torch.Tensor) "old" value predictions from data batch (used for value clip loss)
:param return_batch: (torch.Tensor) reward to go returns.
        :param active_masks_batch: (torch.Tensor) denotes if agent is active or dead at a given timestep.
:return value_loss: (torch.Tensor) value function loss.
"""
if self._use_popart:
value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param,
self.clip_param)
error_clipped = self.value_normalizer(return_batch) - value_pred_clipped
error_original = self.value_normalizer(return_batch) - values
else:
value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param,
self.clip_param)
error_clipped = return_batch - value_pred_clipped
error_original = return_batch - values
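        # PPO-style value clipping: when enabled, the loss below takes the elementwise
        # max of the clipped and unclipped errors, so the critic is penalised for
        # drifting far from its previous predictions in a single update.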
if self._use_huber_loss:
value_loss_clipped = huber_loss(error_clipped, self.huber_delta)
value_loss_original = huber_loss(error_original, self.huber_delta)
else:
value_loss_clipped = mse_loss(error_clipped)
value_loss_original = mse_loss(error_original)
if self._use_clipped_value_loss:
value_loss = torch.max(value_loss_original, value_loss_clipped)
else:
value_loss = value_loss_original
if self._use_value_active_masks:
value_loss = (value_loss * active_masks_batch).sum() / active_masks_batch.sum()
else:
value_loss = value_loss.mean()
return value_loss
def flat_grad(self, grads):
grad_flatten = []
for grad in grads:
if grad is None:
continue
grad_flatten.append(grad.view(-1))
grad_flatten = torch.cat(grad_flatten)
return grad_flatten
def flat_hessian(self, hessians):
hessians_flatten = []
for hessian in hessians:
if hessian is None:
continue
hessians_flatten.append(hessian.contiguous().view(-1))
hessians_flatten = torch.cat(hessians_flatten).data
return hessians_flatten
def flat_params(self, model):
params = []
for param in model.parameters():
params.append(param.data.view(-1))
params_flatten = torch.cat(params)
return params_flatten
def update_model(self, model, new_params):
index = 0
for params in model.parameters():
params_length = len(params.view(-1))
new_param = new_params[index: index + params_length]
new_param = new_param.view(params.size())
params.data.copy_(new_param)
index += params_length
def kl_approx(self, q, p):
r = torch.exp(p - q)
kl = r - 1 - p + q
return kl
def kl_divergence(self, obs, rnn_states, action, masks, available_actions, active_masks, new_actor, old_actor):
_, _, mu, std, probs = new_actor.evaluate_actions(obs, rnn_states, action, masks, available_actions, active_masks)
_, _, mu_old, std_old, probs_old = old_actor.evaluate_actions(obs, rnn_states, action, masks, available_actions, active_masks)
        if mu.grad_fn is None:
probs_old=probs_old.detach()
kl= self.kl_approx(probs_old,probs)
else:
logstd = torch.log(std)
mu_old = mu_old.detach()
std_old = std_old.detach()
logstd_old = torch.log(std_old)
# kl divergence between old policy and new policy : D( pi_old || pi_new )
# pi_old -> mu0, logstd0, std0 / pi_new -> mu, logstd, std
# be careful of calculating KL-divergence. It is not symmetric metric
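            # For diagonal Gaussians this is the closed form
            #   KL = log(std / std_old) + (std_old^2 + (mu_old - mu)^2) / (2 * std^2) - 0.5
            # per action dimension, and can be cross-checked against
            # torch.distributions.kl_divergence(Normal(mu_old, std_old), Normal(mu, std)).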
kl = logstd - logstd_old + (std_old.pow(2) + (mu_old - mu).pow(2)) / (2.0 * std.pow(2)) - 0.5
if len(kl.shape)>1:
kl=kl.sum(1, keepdim=True)
return kl
# from openai baseline code
# https://github.com/openai/baselines/blob/master/baselines/common/cg.py
def conjugate_gradient(self, actor, obs, rnn_states, action, masks, available_actions, active_masks, b, nsteps, residual_tol=1e-10):
x = torch.zeros(b.size()).to(device=self.device)
r = b.clone()
p = b.clone()
rdotr = torch.dot(r, r)
for i in range(nsteps):
_Avp = self.fisher_vector_product(actor, obs, rnn_states, action, masks, available_actions, active_masks, p)
alpha = rdotr / torch.dot(p, _Avp)
x += alpha * p
r -= alpha * _Avp
new_rdotr = torch.dot(r, r)
            beta = new_rdotr / rdotr
            p = r + beta * p
rdotr = new_rdotr
if rdotr < residual_tol:
break
return x
def fisher_vector_product(self, actor, obs, rnn_states, action, masks, available_actions, active_masks, p):
p.detach()
kl = self.kl_divergence(obs, rnn_states, action, masks, available_actions, active_masks, new_actor=actor, old_actor=actor)
kl = kl.mean()
kl_grad = torch.autograd.grad(kl, actor.parameters(), create_graph=True, allow_unused=True)
kl_grad = self.flat_grad(kl_grad) # check kl_grad == 0
kl_grad_p = (kl_grad * p).sum()
kl_hessian_p = torch.autograd.grad(kl_grad_p, actor.parameters(), allow_unused=True)
kl_hessian_p = self.flat_hessian(kl_hessian_p)
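        # Differentiating the scalar (kl_grad * p).sum() a second time gives the
        # Hessian-vector product H @ p without ever materialising the full Hessian;
        # the 0.1 * p added below is a damping term that keeps the matrix seen by
        # conjugate gradient positive definite and numerically well conditioned.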
return kl_hessian_p + 0.1 * p
def trpo_update(self, sample, update_actor=True):
"""
Update actor and critic networks.
:param sample: (Tuple) contains data batch with which to update networks.
:update_actor: (bool) whether to update actor network.
:return value_loss: (torch.Tensor) value function loss.
:return critic_grad_norm: (torch.Tensor) gradient norm from critic update.
        :return policy_loss: (torch.Tensor) actor (policy) loss value.
:return dist_entropy: (torch.Tensor) action entropies.
:return actor_grad_norm: (torch.Tensor) gradient norm from actor update.
:return imp_weights: (torch.Tensor) importance sampling weights.
"""
share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, \
adv_targ, available_actions_batch, factor_batch = sample
old_action_log_probs_batch = check(old_action_log_probs_batch).to(**self.tpdv)
adv_targ = check(adv_targ).to(**self.tpdv)
value_preds_batch = check(value_preds_batch).to(**self.tpdv)
return_batch = check(return_batch).to(**self.tpdv)
active_masks_batch = check(active_masks_batch).to(**self.tpdv)
factor_batch = check(factor_batch).to(**self.tpdv)
values, action_log_probs, dist_entropy, action_mu, action_std, _ = self.policy.evaluate_actions(share_obs_batch,
obs_batch,
rnn_states_batch,
rnn_states_critic_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch)
# critic update
value_loss = self.cal_value_loss(values, value_preds_batch, return_batch, active_masks_batch)
self.policy.critic_optimizer.zero_grad()
(value_loss * self.value_loss_coef).backward()
if self._use_max_grad_norm:
critic_grad_norm = nn.utils.clip_grad_norm_(self.policy.critic.parameters(), self.max_grad_norm)
else:
critic_grad_norm = get_gard_norm(self.policy.critic.parameters())
self.policy.critic_optimizer.step()
# actor update
ratio = torch.prod(torch.exp(action_log_probs - old_action_log_probs_batch),dim=-1,keepdim=True)
if self._use_policy_active_masks:
loss = (torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True) *
active_masks_batch).sum() / active_masks_batch.sum()
else:
loss = torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True).mean()
loss_grad = torch.autograd.grad(loss, self.policy.actor.parameters(), allow_unused=True)
loss_grad = self.flat_grad(loss_grad)
step_dir = self.conjugate_gradient(self.policy.actor,
obs_batch,
rnn_states_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch,
loss_grad.data,
nsteps=10)
loss = loss.data.cpu().numpy()
params = self.flat_params(self.policy.actor)
fvp = self.fisher_vector_product(self.policy.actor,
obs_batch,
rnn_states_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch,
step_dir)
shs = 0.5 * (step_dir * fvp).sum(0, keepdim=True)
step_size = 1 / torch.sqrt(shs / self.kl_threshold)[0]
full_step = step_size * step_dir
old_actor = Actor(self.policy.args,
self.policy.obs_space,
self.policy.act_space,
self.device)
self.update_model(old_actor, params)
expected_improve = (loss_grad * full_step).sum(0, keepdim=True)
expected_improve = expected_improve.data.cpu().numpy()
# Backtracking line search
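        # Starting from the full natural-gradient step, the update is accepted only if
        # the surrogate improves by more than accept_ratio of the predicted improvement
        # while the KL stays below kl_threshold; otherwise the step (and the predicted
        # improvement) is halved, and after ls_step failed attempts the old parameters
        # are restored below.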
flag = False
fraction = 1
for i in range(self.ls_step):
new_params = params + fraction * full_step
self.update_model(self.policy.actor, new_params)
values, action_log_probs, dist_entropy, action_mu, action_std, _ = self.policy.evaluate_actions(share_obs_batch,
obs_batch,
rnn_states_batch,
rnn_states_critic_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch)
ratio = torch.exp(action_log_probs - old_action_log_probs_batch)
if self._use_policy_active_masks:
new_loss = (torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True) *
active_masks_batch).sum() / active_masks_batch.sum()
else:
new_loss = torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True).mean()
new_loss = new_loss.data.cpu().numpy()
loss_improve = new_loss - loss
kl = self.kl_divergence(obs_batch,
rnn_states_batch,
actions_batch,
masks_batch,
available_actions_batch,
active_masks_batch,
new_actor=self.policy.actor,
old_actor=old_actor)
kl = kl.mean()
if kl < self.kl_threshold and (loss_improve / expected_improve) > self.accept_ratio and loss_improve.item()>0:
flag = True
break
expected_improve *= 0.5
fraction *= 0.5
if not flag:
params = self.flat_params(old_actor)
self.update_model(self.policy.actor, params)
            print('policy update does not improve the surrogate')
return value_loss, critic_grad_norm, kl, loss_improve, expected_improve, dist_entropy, ratio
def train(self, buffer, update_actor=True):
"""
Perform a training update using minibatch GD.
:param buffer: (SharedReplayBuffer) buffer containing training data.
:param update_actor: (bool) whether to update actor network.
:return train_info: (dict) contains information regarding training update (e.g. loss, grad norms, etc).
"""
if self._use_popart:
advantages = buffer.returns[:-1] - self.value_normalizer.denormalize(buffer.value_preds[:-1])
else:
advantages = buffer.returns[:-1] - buffer.value_preds[:-1]
advantages_copy = advantages.copy()
advantages_copy[buffer.active_masks[:-1] == 0.0] = np.nan
mean_advantages = np.nanmean(advantages_copy)
std_advantages = np.nanstd(advantages_copy)
advantages = (advantages - mean_advantages) / (std_advantages + 1e-5)
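        # Timesteps of inactive (dead) agents were set to NaN above so that nanmean /
        # nanstd exclude them from the normalisation statistics; the 1e-5 guards
        # against division by zero when all advantages are identical.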
train_info = {}
train_info['value_loss'] = 0
train_info['kl'] = 0
train_info['dist_entropy'] = 0
train_info['loss_improve'] = 0
train_info['expected_improve'] = 0
train_info['critic_grad_norm'] = 0
train_info['ratio'] = 0
if self._use_recurrent_policy:
data_generator = buffer.recurrent_generator(advantages, self.num_mini_batch, self.data_chunk_length)
elif self._use_naive_recurrent:
data_generator = buffer.naive_recurrent_generator(advantages, self.num_mini_batch)
else:
data_generator = buffer.feed_forward_generator(advantages, self.num_mini_batch)
for sample in data_generator:
value_loss, critic_grad_norm, kl, loss_improve, expected_improve, dist_entropy, imp_weights \
= self.trpo_update(sample, update_actor)
train_info['value_loss'] += value_loss.item()
train_info['kl'] += kl
train_info['loss_improve'] += loss_improve.item()
train_info['expected_improve'] += expected_improve
train_info['dist_entropy'] += dist_entropy.item()
train_info['critic_grad_norm'] += critic_grad_norm
train_info['ratio'] += imp_weights.mean()
num_updates = self.num_mini_batch
for k in train_info.keys():
train_info[k] /= num_updates
return train_info
def prep_training(self):
self.policy.actor.train()
self.policy.critic.train()
def prep_rollout(self):
self.policy.actor.eval()
self.policy.critic.eval()
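# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original trainer): the two numerical
# primitives used above, a Hessian-vector product via double backward and
# conjugate gradient for solving H x = g, demonstrated on a tiny quadratic
# f(w) = 0.5 * w^T A w whose Hessian is exactly A. All names and values here
# are made up for the demo; it only runs when the module is executed directly.
if __name__ == "__main__":
    A = torch.tensor([[4.0, 1.0], [1.0, 3.0]])
    g = torch.tensor([1.0, 2.0])
    def hessian_vector_product(p):
        # Same double-backward trick as fisher_vector_product (without damping):
        # grad((grad(f) * p).sum()) equals H @ p, so H is never materialised.
        w = torch.zeros(2, requires_grad=True)
        f = 0.5 * w @ A @ w
        grad_f = torch.autograd.grad(f, w, create_graph=True)[0]
        return torch.autograd.grad((grad_f * p).sum(), w)[0]
    # Plain conjugate gradient on H = A, mirroring HATRPO.conjugate_gradient.
    x = torch.zeros(2)
    r, p, rdotr = g.clone(), g.clone(), torch.dot(g, g)
    for _ in range(10):
        Avp = hessian_vector_product(p)
        alpha = rdotr / torch.dot(p, Avp)
        x += alpha * p
        r -= alpha * Avp
        new_rdotr = torch.dot(r, r)
        p = r + (new_rdotr / rdotr) * p
        rdotr = new_rdotr
        if rdotr < 1e-10:
            break
    assert torch.allclose(A @ x, g, atol=1e-5)  # x is (approximately) A^{-1} g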
| [] |
2024-01-10 | yangtianchangxiao/autodl_happo | envs~env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
import torch
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
from utils.util import tile_images
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class ShareVecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and the expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, share_observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.share_observation_space = share_observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
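# Each worker below runs in its own subprocess: the parent sends (command, data)
# tuples over a multiprocessing Pipe ('step', 'reset', 'render', 'reset_task',
# 'get_spaces', 'close') and the worker replies with the result from its wrapped
# environment. The *SubprocVecEnv classes further down are the parent-side
# counterparts that fan actions out and stack the returned batches.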
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob = env.reset()
else:
if np.all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send((ob))
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class GuardSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = False # could cause zombie process
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
class SubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def shareworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
obs, rews, dones, infos, joint_maps = env.step(data)
if 'bool' in dones.__class__.__name__:
if dones[0]:
obs, rews, dones, infos, joint_maps = env.reset()
else:
if np.all(dones[0]):
obs, rews, dones, infos, joint_maps = env.reset()
remote.send((obs, rews, dones, infos, joint_maps))
elif cmd == 'reset':
obs, joint_map = env.reset()
remote.send((obs, joint_map))
# elif cmd == 'reset_task':
# ob = env.reset_task()
# remote.send(ob)
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'render_vulnerability':
fr = env.render_vulnerability(data)
remote.send((fr))
# elif cmd == 'get_num_agents':
# remote.send((env.n_agents))
else:
raise NotImplementedError
class ShareSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
# self.remotes[0].send(('get_num_agents', None))
# self.n_agents = self.remotes[0].recv()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
print("init")
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, share_obs= zip(*results)
return np.stack(obs), np.stack(share_obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def choosesimpleworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset(data)
remote.send((ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseSimpleSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=choosesimpleworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset(data)
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseguardworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset(data)
remote.send((ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseGuardSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=chooseguardworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
            p.daemon = False # could cause zombie process
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
# single env
class DummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i] = self.envs[i].reset()
self.actions = None
return obs, rews, dones, infos
def reset(self):
obs = [env.reset() for env in self.envs]
return np.array(obs)
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ShareDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self):
results = [env.reset() for env in self.envs]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ChooseDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self, reset_choose):
results = [env.reset(choose)
for (env, choose) in zip(self.envs, reset_choose)]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ChooseSimpleDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
self.actions = None
return obs, rews, dones, infos
def reset(self, reset_choose):
obs = [env.reset(choose)
for (env, choose) in zip(self.envs, reset_choose)]
return np.array(obs)
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
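# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module). Both
# DummyVecEnv and SubprocVecEnv take a list of zero-argument environment factories
# and expose a batched reset()/step() API. The toy environment below is made up
# purely for this demo; real training code would pass factories for gym / MuJoCo
# wrappers that also define proper observation, share_observation and action spaces.
if __name__ == "__main__":
    class _ToyEnv:
        observation_space = None
        share_observation_space = None
        action_space = None
        def __init__(self):
            self._t = 0
        def reset(self):
            self._t = 0
            return np.zeros(3)
        def step(self, action):
            self._t += 1
            return np.full(3, float(self._t)), 1.0, self._t >= 5, {}
        def close(self):
            pass
    vec_env = DummyVecEnv([lambda: _ToyEnv() for _ in range(2)])
    obs = vec_env.reset()                                   # shape (num_envs, obs_dim)
    obs, rews, dones, infos = vec_env.step(np.zeros((2, 1)))
    print(obs.shape, rews.shape, dones.shape)               # (2, 3) (2,) (2,)
    vec_env.close()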
| [] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~schemas~workflow~blocks~generic_llm_agent.py | from typing import Any
import openai
from loguru import logger
from reworkd_platform.schemas.workflow.base import Block, BlockIOBase
from reworkd_platform.settings import settings
class GenericLLMAgentInput(BlockIOBase):
prompt: str
class GenericLLMAgentOutput(GenericLLMAgentInput):
result: str
class GenericLLMAgent(Block):
type = "GenericLLMAgent"
description = "Extract key details from text using OpenAI"
input: GenericLLMAgentInput
async def run(self, workflow_id: str, **kwargs: Any) -> BlockIOBase:
try:
response = await execute_prompt(prompt=self.input.prompt)
except Exception as err:
logger.error(f"Failed to extract text with OpenAI: {err}")
raise
return GenericLLMAgentOutput(**self.input.dict(), result=response)
async def execute_prompt(prompt: str) -> str:
openai.api_key = settings.openai_api_key
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=1,
max_tokens=500,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
response_message_content = response["choices"][0]["message"]["content"]
logger.info(f"response = {response_message_content}")
return response_message_content
| [] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~web~api~agent~tools~wikipedia_search.py | from lanarky.responses import StreamingResponse
from langchain import WikipediaAPIWrapper
from reworkd_platform.web.api.agent.stream_mock import stream_string
from reworkd_platform.web.api.agent.tools.tool import Tool
class Wikipedia(Tool):
description = (
"Search Wikipedia for information about historical people, companies, events, "
"places or research. This should be used over search for broad overviews of "
"specific nouns."
)
public_description = "Search Wikipedia for historical information."
arg_description = "A simple query string of just the noun in question."
image_url = "/tools/wikipedia.png"
async def call(self, goal: str, task: str, input_str: str, **kwargs) -> StreamingResponse:
wikipedia_client = WikipediaAPIWrapper(
wiki_client=None, # Meta private value but mypy will complain its missing
)
# TODO: Make the below async
wikipedia_search = wikipedia_client.run(input_str)
# return summarize_with_sources(self.model, self.language, goal, task, [wikipedia_search])
return stream_string("Wikipedia is currently not working")
| [] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~web~api~agent~tools~reason.py | from fastapi.responses import StreamingResponse as FastAPIStreamingResponse
from lanarky.responses import StreamingResponse
from langchain import LLMChain
from reworkd_platform.web.api.agent.tools.tool import Tool
class Reason(Tool):
description = (
"Reason about task via existing information or understanding. "
"Make decisions / selections from options."
)
async def call(
self, goal: str, task: str, input_str: str, *args,
) -> FastAPIStreamingResponse:
from reworkd_platform.web.api.agent.prompts import execute_task_prompt
chain = LLMChain(llm=self.model, prompt=execute_task_prompt)
return StreamingResponse.from_chain(
chain,
{"goal": goal, "language": self.language, "task": task},
media_type="text/event-stream",
)
| [] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~services~pinecone~pinecone.py | from __future__ import annotations
import uuid
from typing import Any, Dict, List
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from pinecone import Index # import doesn't work on plane wifi
from pydantic import BaseModel
from reworkd_platform.settings import settings
from reworkd_platform.timer import timed_function
from reworkd_platform.web.api.memory.memory import AgentMemory
OPENAI_EMBEDDING_DIM = 1536
class Row(BaseModel):
id: str
values: List[float]
metadata: Dict[str, Any] = {}
class QueryResult(BaseModel):
id: str
score: float
metadata: Dict[str, Any] = {}
class PineconeMemory(AgentMemory):
"""
Wrapper around pinecone
"""
def __init__(self, index_name: str):
self.index = Index(settings.pinecone_index_name)
self.namespace = index_name
@timed_function(level="DEBUG")
def __enter__(self) -> AgentMemory:
self.embeddings: Embeddings = OpenAIEmbeddings(
client=None, # Meta private value but mypy will complain its missing
openai_api_key=settings.openai_api_key,
)
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
pass
@timed_function(level="DEBUG")
def reset_class(self) -> None:
self.index.delete(delete_all=True, namespace=self.namespace)
@timed_function(level="DEBUG")
def add_tasks(self, tasks: List[str]) -> List[str]:
if len(tasks) == 0:
return []
embeds = self.embeddings.embed_documents(tasks)
if len(tasks) != len(embeds):
raise ValueError("Embeddings and tasks are not the same length")
rows = [
Row(values=vector, metadata={"text": tasks[i]}, id=str(uuid.uuid4()))
for i, vector in enumerate(embeds)
]
self.index.upsert(
vectors=[row.dict() for row in rows], namespace=self.namespace
)
return [row.id for row in rows]
@timed_function(level="DEBUG")
def get_similar_tasks(
self, text: str, score_threshold: float = 0.95
) -> List[QueryResult]:
# Get similar tasks
vector = self.embeddings.embed_query(text)
results = self.index.query(
vector=vector,
top_k=5,
include_metadata=True,
include_values=True,
namespace=self.namespace,
)
return [
QueryResult(id=row.id, score=row.score, metadata=row.metadata)
for row in getattr(results, "matches", [])
if row.score > score_threshold
]
@staticmethod
def should_use() -> bool:
return False
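# Illustrative usage sketch (needs live Pinecone and OpenAI credentials, so it is
# shown only as a comment): the class is written to be used as a context manager,
# with the embeddings client created lazily in __enter__. The index name below is
# made up for the example.
#
#   with PineconeMemory(index_name="agent-run") as memory:
#       memory.add_tasks(["Research the market", "Draft a summary"])
#       similar = memory.get_similar_tasks("Summarize the market research")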
| [] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~schemas~workflow~blocks~company_context_agent.py | from typing import Any
import openai
from loguru import logger
from reworkd_platform.schemas.workflow.base import Block, BlockIOBase
from reworkd_platform.settings import settings
class CompanyContextAgentInput(BlockIOBase):
company_name: str
class CompanyContextAgentOutput(CompanyContextAgentInput):
result: str
class CompanyContextAgent(Block):
type = "OpenAIAgent"
description = "Extract key details from text using OpenAI"
input: CompanyContextAgentInput
async def run(self, workflow_id: str, **kwargs: Any) -> BlockIOBase:
try:
response = await execute_prompt(company=self.input.company_name)
except Exception as err:
logger.error(f"Failed to extract text with OpenAI: {err}")
raise
return CompanyContextAgentOutput(**self.input.dict(), result=response)
async def execute_prompt(company: str) -> str:
openai.api_key = settings.openai_api_key
prompt = f"""
Write a one-sentence description of "{company}".
Define their market, sector, and primary products.
Be as clear, informative, and descriptive as necessary.
You will not make up information or add any information outside of the above text.
Only use the given information and nothing more.
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=1,
max_tokens=500,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
response_message_content = response["choices"][0]["message"]["content"]
return response_message_content
| [
"\n Write a one-sentence description of \"PLACEHOLDER\".\n Define their market, sector, and primary products.\n\n Be as clear, informative, and descriptive as necessary.\n You will not make up information or add any information outside of the above text.\n Only use the given information and nothing more.\n "
] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~web~api~agent~agent_service~open_ai_agent_service.py | from typing import List, Optional
from fastapi.responses import StreamingResponse as FastAPIStreamingResponse
from lanarky.responses import StreamingResponse
from langchain import LLMChain
from langchain.callbacks.base import AsyncCallbackHandler
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain.schema import HumanMessage
from loguru import logger
from pydantic import ValidationError
from reworkd_platform.schemas.user import UserBase
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.services.tokenizer.token_service import TokenService
from reworkd_platform.web.api.agent.agent_service.agent_service import AgentService
from reworkd_platform.web.api.agent.analysis import Analysis, AnalysisArguments
from reworkd_platform.web.api.agent.helpers import (
call_model_with_handling,
openai_error_handler,
parse_with_handling,
)
from reworkd_platform.web.api.agent.model_settings import WrappedChatOpenAI
from reworkd_platform.web.api.agent.prompts import (
analyze_task_prompt,
chat_prompt,
create_tasks_prompt,
start_goal_prompt,
)
from reworkd_platform.web.api.agent.task_output_parser import TaskOutputParser
from reworkd_platform.web.api.agent.tools.open_ai_function import get_tool_function
from reworkd_platform.web.api.agent.tools.tools import (
get_default_tool,
get_tool_from_name,
get_tool_name,
get_user_tools,
)
from reworkd_platform.web.api.agent.tools.utils import summarize
from reworkd_platform.web.api.errors import OpenAIError
from reworkd_platform.web.api.memory.memory import AgentMemory
from reworkd_platform.db.crud.oauth import OAuthCrud
class OpenAIAgentService(AgentService):
def __init__(
self,
model: WrappedChatOpenAI,
settings: ModelSettings,
agent_memory: AgentMemory,
token_service: TokenService,
callbacks: Optional[List[AsyncCallbackHandler]],
user: UserBase,
oauth_crud: OAuthCrud
):
self.model = model
self.agent_memory = agent_memory
self.settings = settings
self.token_service = token_service
self.callbacks = callbacks
self.user = user
self.oauth_crud = oauth_crud
async def start_goal_agent(self, *, goal: str) -> List[str]:
prompt = ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=start_goal_prompt)]
)
self.token_service.calculate_max_tokens(
self.model,
prompt.format_prompt(
goal=goal,
language=self.settings.language,
).to_string(),
)
completion = await call_model_with_handling(
self.model,
ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=start_goal_prompt)]
),
{"goal": goal, "language": self.settings.language},
settings=self.settings,
callbacks=self.callbacks,
)
task_output_parser = TaskOutputParser(completed_tasks=[])
tasks = parse_with_handling(task_output_parser, completion)
with self.agent_memory as memory:
memory.reset_class()
memory.add_tasks(tasks)
return tasks
async def analyze_task_agent(
self, *, goal: str, task: str, tool_names: List[str]
) -> Analysis:
user_tools = await get_user_tools(tool_names, self.user, self.oauth_crud)
functions = list(map(get_tool_function, user_tools))
prompt = analyze_task_prompt.format_prompt(
goal=goal,
task=task,
language=self.settings.language,
)
self.token_service.calculate_max_tokens(
self.model,
prompt.to_string(),
str(functions),
)
message = await openai_error_handler(
func=self.model.apredict_messages,
messages=prompt.to_messages(),
functions=functions,
settings=self.settings,
callbacks=self.callbacks,
)
function_call = message.additional_kwargs.get("function_call", {})
completion = function_call.get("arguments", "")
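        # With OpenAI function calling, the model's tool choice comes back in
        # additional_kwargs["function_call"]: "name" holds the tool name and
        # "arguments" holds a JSON string that is validated into AnalysisArguments
        # below, falling back to the default analysis if parsing fails.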
try:
pydantic_parser = PydanticOutputParser(pydantic_object=AnalysisArguments)
analysis_arguments = parse_with_handling(pydantic_parser, completion)
return Analysis(
action=function_call.get("name", get_tool_name(get_default_tool())),
**analysis_arguments.dict(),
)
except (OpenAIError, ValidationError):
return Analysis.get_default_analysis()
# TODO request context
async def execute_task_agent(
self,
*,
goal: str,
task: str,
analysis: Analysis,
) -> StreamingResponse:
# TODO: More mature way of calculating max_tokens
if self.model.max_tokens > 3000:
self.model.max_tokens = max(self.model.max_tokens - 1000, 3000)
tool_class = get_tool_from_name(analysis.action)
return await tool_class(self.model, self.settings.language).call(
goal, task, analysis.arg, self.user, self.oauth_crud,
)
async def create_tasks_agent(
self,
*,
goal: str,
tasks: List[str],
last_task: str,
result: str,
completed_tasks: Optional[List[str]] = None,
) -> List[str]:
prompt = ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=create_tasks_prompt)]
)
args = {
"goal": goal,
"language": self.settings.language,
"tasks": "\n".join(tasks),
"lastTask": last_task,
"result": result,
}
self.token_service.calculate_max_tokens(
self.model, prompt.format_prompt(**args).to_string()
)
completion = await call_model_with_handling(
self.model, prompt, args, settings=self.settings, callbacks=self.callbacks
)
previous_tasks = (completed_tasks or []) + tasks
tasks = [completion] if completion not in previous_tasks else []
unique_tasks = []
with self.agent_memory as memory:
for task in tasks:
similar_tasks = memory.get_similar_tasks(task)
# Check if similar tasks are found
if not similar_tasks:
unique_tasks.append(task)
else:
logger.info(f"Similar tasks to '{task}' found: {similar_tasks}")
if unique_tasks:
memory.add_tasks(unique_tasks)
return unique_tasks
async def summarize_task_agent(
self,
*,
goal: str,
results: List[str],
) -> FastAPIStreamingResponse:
self.model.model_name = "gpt-3.5-turbo-16k"
self.model.max_tokens = 8000 # Total tokens = prompt tokens + completion tokens
snippet_max_tokens = 7000 # Leave room for the rest of the prompt
text_tokens = self.token_service.tokenize("".join(results))
text = self.token_service.detokenize(text_tokens[0:snippet_max_tokens])
logger.info(f"Summarizing text: {text}")
return summarize(
model=self.model,
language=self.settings.language,
goal=goal,
text=text,
)
async def chat(
self,
*,
message: str,
results: List[str],
) -> FastAPIStreamingResponse:
self.model.model_name = "gpt-3.5-turbo-16k"
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate(prompt=chat_prompt),
*[HumanMessage(content=result) for result in results],
HumanMessage(content=message),
]
)
self.token_service.calculate_max_tokens(
self.model,
prompt.format_prompt(
language=self.settings.language,
).to_string(),
)
chain = LLMChain(llm=self.model, prompt=prompt)
return StreamingResponse.from_chain(
chain,
{"language": self.settings.language},
media_type="text/event-stream",
)
| [] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~schemas~workflow~blocks~summary_agent.py | import os
import tempfile
from collections import defaultdict
from typing import Any
import openai
import pinecone
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Pinecone
from tabula.io import read_pdf
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.schemas.user import UserBase
from reworkd_platform.schemas.workflow.base import Block, BlockIOBase
from reworkd_platform.services.aws.s3 import SimpleStorageService
from reworkd_platform.settings import settings
from reworkd_platform.web.api.agent.model_settings import create_model
class SummaryAgentInput(BlockIOBase):
company_context: str
class SummaryAgentOutput(SummaryAgentInput):
result: str
class SummaryAgent(Block):
type = "SummaryAgent"
description = "Extract key details from text using OpenAI"
input: SummaryAgentInput
async def run(self, workflow_id: str, **kwargs: Any) -> BlockIOBase:
with tempfile.TemporaryDirectory() as temp_dir:
files = SimpleStorageService(
bucket=settings.s3_bucket_name
).download_folder(
prefix=f"{workflow_id}/",
path=temp_dir,
)
docsearch = self.chunk_documents_to_pinecone(
files=files,
embeddings=(
OpenAIEmbeddings(
client=None,
# Meta private value but mypy will complain its missing
openai_api_key=settings.openai_api_key,
)
),
path=temp_dir,
)
response = await self.execute_query_on_pinecone(
company_context=self.input.company_context, docsearch=docsearch
)
return SummaryAgentOutput(**self.input.dict(), result=response)
def name_table(self, table: str) -> str:
openai.api_key = settings.openai_api_key
prompt = f"""
Write a title for the table that is less than 9 words: {table}
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=1,
max_tokens=500,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
response_message_content = response["choices"][0]["message"]["content"]
return response_message_content
def read_and_preprocess_tables(
self, relevant_table_metadata: dict[str, list[int]]
) -> list[str]:
processed = []
parsed_dfs_from_file: list[Any] | dict[str, Any] = []
for source in relevant_table_metadata.keys():
page_numbers = relevant_table_metadata[source]
filtered_page_numbers = list(filter(lambda x: x != 0, page_numbers))
if len(filtered_page_numbers) > 1:
filtered_page_numbers.sort()
start_page = filtered_page_numbers[0]
end_page = filtered_page_numbers[-1]
parsed_dfs_from_file = read_pdf(
source, pages=f"{start_page}-{end_page}"
)
if isinstance(parsed_dfs_from_file, list):
for df in parsed_dfs_from_file:
if not df.empty:
df_name = self.name_table(str(df.iloc[:5]))
processed_df = "\n".join([df.to_csv(index=False)])
processed_df_with_title = "\n".join([df_name, processed_df])
processed.append(processed_df_with_title)
elif isinstance(parsed_dfs_from_file, dict):
for key, df in parsed_dfs_from_file.items():
if not df.empty:
df_name = self.name_table(str(df.iloc[:5]))
processed_df = "\n".join([df.to_csv(index=False)])
processed_df_with_title = "\n".join([df_name, processed_df])
processed.append(processed_df_with_title)
else:
# Handle unexpected case
raise ValueError("Unexpected type encountered.")
return processed
def chunk_documents_to_pinecone(
self, files: list[str], embeddings: Embeddings, path: str
) -> Pinecone:
index_name = "prod"
index = pinecone.Index(index_name)
index.delete(delete_all=True)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
texts = []
for file in files:
filepath = os.path.join(path, file)
# table_data = self.read_and_preprocess_tables(filepath)
data = PyPDFLoader(filepath).load()
pdf_data = data
texts.extend(text_splitter.split_documents(pdf_data))
# texts.extend(text_splitter.create_documents(table_data))
docsearch = Pinecone.from_documents(
[t for t in texts],
embeddings,
index_name=index_name,
)
return docsearch
async def execute_query_on_pinecone(
self, company_context: str, docsearch: Pinecone
) -> str:
docs = docsearch.similarity_search(company_context, k=7)
relevant_table_metadata = defaultdict(list)
for doc in docs:
doc_source = doc.metadata["source"]
page_number = int(doc.metadata["page"])
relevant_table_metadata[doc_source].append(page_number)
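        # Group the retrieved chunks' page numbers by source PDF so that tabula only
        # parses the page ranges that were actually relevant to the query.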
processed_tables = self.read_and_preprocess_tables(relevant_table_metadata)
prompt = f"""Help extract information relevant to a company with the following details: {company_context} from the following documents. Start with the company background info. Then, include information relevant to the market, strategies, and products. Here are the documents: {docs}. After each point, reference the source you got the information from.
Also list any interesting quantitative metrics or trends based on the following tables: {processed_tables}. Include which table you got information from.
Cite sources for sentences using the page number from original source document. Do not list sources at the end of the writing.
Example: "This is a cited sentence. (Source: Luxury Watch Market Size Report, Page 17).
Format your response as slack markdown.
"""
llm = create_model(
ModelSettings(model="gpt-3.5-turbo-16k", max_tokens=2000),
UserBase(id="", name=None, email="[email protected]"),
streaming=False,
)
return await load_qa_chain(llm).arun(input_documents=docs, question=prompt)
| [
"\n Write a title for the table that is less than 9 words: PLACEHOLDER\n ",
"Help extract information relevant to a company with the following details: PLACEHOLDER from the following documents. Start with the company background info. Then, include information relevant to the market, strategies, and products. Here are the documents: PLACEHOLDER. After each point, reference the source you got the information from.\n\n Also list any interesting quantitative metrics or trends based on the following tables: PLACEHOLDER. Include which table you got information from.\n\n Cite sources for sentences using the page number from original source document. Do not list sources at the end of the writing.\n\n Example: \"This is a cited sentence. (Source: Luxury Watch Market Size Report, Page 17).\n\n Format your response as slack markdown.\n "
] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~schemas~workflow~blocks~agents~content_refresher_agent.py | import re
from typing import Any
import anthropic
import requests
from bs4 import BeautifulSoup
from loguru import logger
from scrapingbee import ScrapingBeeClient
from reworkd_platform.schemas.workflow.base import Block, BlockIOBase
from reworkd_platform.settings import settings
class ContentRefresherInput(BlockIOBase):
url: str
class ContentRefresherOutput(ContentRefresherInput):
original_content: str
refreshed_content: str
class ContentRefresherAgent(Block):
type = "ContentRefresherAgent"
description = "Refresh the content on an existing page"
input: ContentRefresherInput
async def run(self, workflow_id: str, **kwargs: Any) -> ContentRefresherOutput:
logger.info(f"Starting {self.type}")
target_url = self.input.url
target_content = get_page_content(target_url)
logger.info(target_content)
keywords = find_content_kws(target_content)
logger.info(keywords)
source_urls = search_results(keywords)
if target_url in source_urls: # TODO: check based on content overlap
source_urls.remove(target_url)
logger.info(source_urls)
source_contents = [
get_page_content(url)
for url in source_urls[:3] # TODO: remove limit of 3 sources
] # TODO: async/multithread the LLM calls
source_contents = [
content for content in source_contents if content is not None
]
logger.info(source_contents)
new_infos = "\n\n".join(
[
find_new_info(target_content, source_content)
for source_content in source_contents
]
)
logger.info(new_infos)
updated_target_content = add_info(target_content, new_infos)
logger.info(updated_target_content)
return ContentRefresherOutput(
**self.input.dict(),
original_content=target_content,
refreshed_content=updated_target_content,
)
scraper = ScrapingBeeClient(
api_key=settings.scrapingbee_api_key,
)
claude = anthropic.Anthropic(
api_key=settings.anthropic_api_key,
)
def get_page_content(url: str) -> str:
page = requests.get(url)
if page.status_code != 200:
page = scraper.get(url)
html = BeautifulSoup(page.content, "html.parser")
pgraphs = html.find_all("p")
pgraphs = "\n".join(
[
f"{i + 1}. " + re.sub(r"\s+", " ", p.text).strip()
for i, p in enumerate(pgraphs)
]
)
prompt = f"Below is a numbered list of the text in all the <p> tags on a web page:\n{pgraphs}\nSome of these lines may not be part of the main content of the page (e.g. footer text, ads, etc). Please list the line numbers that *are* part of the main content (i.e. the article's paragraphs) of the page. You can list consecutive line numbers as a range (e.g. 23-27) and separated by a comma."
response = claude.completions.create(
model="claude-2",
prompt=f"\n\nHuman: {prompt}\n\nAssistant: Here are the line numbers of the main content:",
max_tokens_to_sample=500,
temperature=0,
)
line_nums = response.completion.strip()
if len(line_nums) == 0:
return ""
pgraphs = pgraphs.split("\n")
content = []
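    # line_nums is a comma-separated list such as "2-7, 9" (the prompt above asks for
    # ranges); expand each range and strip the "<n>. " prefix that was prepended when
    # the paragraphs were numbered for the model.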
for line_num in line_nums.split(","):
if "-" in line_num:
start, end = map(int, line_num.split("-"))
for i in range(start, end + 1):
text = ".".join(pgraphs[i - 1].split(".")[1:]).strip()
content.append(text)
else:
text = ".".join(pgraphs[int(line_num) - 1].split(".")[1:]).strip()
content.append(text)
return "\n".join(content)
def find_content_kws(content: str) -> str:
# Claude: find search keywords that content focuses on
prompt = f"Below is content from a web article:\n{content}\nPlease list the keywords that best describe the content of the article. Format them so we can use them to query a search engine effectively."
response = claude.completions.create(
model="claude-2",
prompt=f"\n\nHuman: {prompt}\n\nAssistant: Here is a short search query that best matches the content of the article:",
max_tokens_to_sample=20,
temperature=0,
)
response_message = response.completion.strip()
return response_message
def search_results(search_query: str) -> list[str]:
# use SERP API
response = requests.post(
f"https://google.serper.dev/search",
headers={
"X-API-KEY": settings.serp_api_key or "",
"Content-Type": "application/json",
},
params={
"q": search_query,
},
)
response.raise_for_status()
search_results = response.json()
urls = [result["link"] for result in search_results["organic"]]
return urls
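# Assumed response shape (inferred from the indexing above): the Serper endpoint is expected
# to return JSON like {"organic": [{"link": "https://...", ...}, ...]}, so `urls` becomes a
# flat list of organic-result links, e.g.
#   search_results("best mechanical keyboards") -> ["https://example.com/review", ...]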
def find_new_info(target: str, source: str) -> str:
# Claude: info mentioned in source that is not mentioned in target
prompt = f"Below is the TARGET article:\n{target}\n----------------\nBelow is the SOURCE article:\n{source}\n----------------\nIn a bullet point list, identify all facts, figures, or ideas that are mentioned in the SOURCE article but not in the TARGET article."
response = claude.completions.create(
model="claude-2",
prompt=f"\n\nHuman: {prompt}\n\nAssistant: Here is a list of claims in the SOURCE that are not in the TARGET:",
max_tokens_to_sample=5000,
temperature=0,
)
response_message = response.completion.strip()
new_info = "\n".join(response_message.split("\n\n"))
return new_info
def add_info(target: str, info: str) -> str:
# Claude: rewrite target to include the info
prompt = f"Below are notes from some SOURCE articles:\n{info}\n----------------\nBelow is the TARGET article:\n{target}\n----------------\nPlease rewrite the TARGET article to include the information from the SOURCE articles."
response = claude.completions.create(
model="claude-2",
prompt=f"\n\nHuman: {prompt}\n\nAssistant: Here is a rewritten version of the target article that incorporates relevant information from the source articles:",
max_tokens_to_sample=5000,
temperature=0,
)
response_message = response.completion.strip()
return response_message
| [
"\n\nHuman: PLACEHOLDER\n\nAssistant: Here is a rewritten version of the target article that incorporates relevant information from the source articles:",
"Below are notes from some SOURCE articles:\nPLACEHOLDER\n----------------\nBelow is the TARGET article:\nPLACEHOLDER\n----------------\nPlease rewrite the TARGET article to include the information from the SOURCE articles.",
"\n\nHuman: PLACEHOLDER\n\nAssistant: Here are the line numbers of the main content:",
"Below is the TARGET article:\nPLACEHOLDER\n----------------\nBelow is the SOURCE article:\nPLACEHOLDER\n----------------\nIn a bullet point list, identify all facts, figures, or ideas that are mentioned in the SOURCE article but not in the TARGET article.",
"\n\nHuman: PLACEHOLDER\n\nAssistant: Here is a short search query that best matches the content of the article:",
"\n\nHuman: PLACEHOLDER\n\nAssistant: Here is a list of claims in the SOURCE that are not in the TARGET:",
"Below is content from a web article:\nPLACEHOLDER\nPlease list the keywords that best describe the content of the article. Format them so we can use them to query a search engine effectively.",
"Below is a numbered list of the text in all the <p> tags on a web page:\nPLACEHOLDER\nSome of these lines may not be part of the main content of the page (e.g. footer text, ads, etc). Please list the line numbers that *are* part of the main content (i.e. the article's paragraphs) of the page. You can list consecutive line numbers as a range (e.g. 23-27) and separated by a comma."
] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~tests~test_helpers.py | import pytest
from openai.error import (
ServiceUnavailableError,
InvalidRequestError,
)
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.web.api.agent.helpers import openai_error_handler
from reworkd_platform.web.api.errors import OpenAIError
async def act(*args, settings: ModelSettings = ModelSettings(), **kwargs):
return await openai_error_handler(*args, settings=settings, **kwargs)
@pytest.mark.asyncio
async def test_service_unavailable_error():
async def mock_service_unavailable_error():
raise ServiceUnavailableError("Service Unavailable")
with pytest.raises(OpenAIError):
await act(mock_service_unavailable_error)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"settings,should_log",
[
(ModelSettings(custom_api_key="xyz"), False),
(ModelSettings(custom_api_key=None), True),
],
)
async def test_should_log(settings, should_log):
async def mock_invalid_request_error_model_access():
raise InvalidRequestError(
"The model: xyz does not exist or you do not have access to it.",
param="model",
)
with pytest.raises(Exception) as exc_info:
await openai_error_handler(
mock_invalid_request_error_model_access, settings=settings
)
assert isinstance(exc_info.value, OpenAIError)
error: OpenAIError = exc_info.value
assert error.should_log == should_log
| [] |
2024-01-10 | sidhq/agentgpt-with-sid | platform~reworkd_platform~web~api~memory~weaviate.py | from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, cast
import numpy as np
import weaviate
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate
from loguru import logger
from weaviate import UnexpectedStatusCodeException
from reworkd_platform.settings import settings
from reworkd_platform.web.api.memory.memory import AgentMemory
def _default_schema(index_name: str, text_key: str) -> Dict[str, Any]:
return {
"class": index_name,
"properties": [
{
"name": text_key,
"dataType": ["text"],
}
],
}
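# For example, _default_schema("Reworkd_AgentGPT_demo", "agent_memory") yields
#   {"class": "Reworkd_AgentGPT_demo",
#    "properties": [{"name": "agent_memory", "dataType": ["text"]}]}
# (the class name here is illustrative; real names are built with CLASS_PREFIX below).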
CLASS_PREFIX = "Reworkd_AgentGPT_"
class WeaviateMemory(AgentMemory):
"""
Wrapper around the Weaviate vector database
"""
db: Optional[Weaviate] = None
def __init__(self, index_name: str):
self.index_name = CLASS_PREFIX + index_name
self.text_key = "agent_memory"
def __enter__(self) -> AgentMemory:
# If the database requires authentication, retrieve the API key
auth = (
weaviate.auth.AuthApiKey(api_key=settings.vector_db_api_key)
if settings.vector_db_api_key is not None
and settings.vector_db_api_key != ""
else None
)
self.client = weaviate.Client(settings.vector_db_url, auth_client_secret=auth)
self._create_class()
# Instantiate client with embedding provider
self.embeddings = OpenAIEmbeddings(
client=None, # Meta private value but mypy will complain its missing
openai_api_key=settings.openai_api_key,
)
self.db = Weaviate(
self.client,
self.index_name,
self.text_key,
embedding=self.embeddings,
by_text=False,
)
return self
def _create_class(self) -> None:
# Create the schema if it doesn't already exist
schema = _default_schema(self.index_name, self.text_key)
if not self.client.schema.contains(schema):
self.client.schema.create_class(schema)
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.client.__del__()
def add_tasks(self, tasks: List[str]) -> List[str]:
if self.db is None:
raise Exception("WeaviateMemory not initialized")
return self.db.add_texts(tasks)
def get_similar_tasks(self, query: str, score_threshold: float = 0.98) -> List[str]:
# Get similar tasks
results = self._similarity_search_with_score(query)
def get_score(result: Tuple[str, float]) -> float:
return result[1]
results.sort(key=get_score, reverse=True)
# Return formatted response
return [text for [text, score] in results if score >= score_threshold]
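    # Illustration of the filtering above (scores are made up): with the default threshold
    # of 0.98, [("write unit tests", 0.991), ("buy groceries", 0.42)] -> ["write unit tests"],
    # i.e. only near-duplicate memories survive.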
def reset_class(self) -> None:
try:
self.client.schema.delete_class(self.index_name)
self._create_class()
except UnexpectedStatusCodeException as error:
logger.error(error)
def _similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[str, float]]:
"""
A remake of _similarity_search_with_score from langchain to use a near vector
"""
# Build query
query_obj = self.client.query.get(self.index_name, [self.text_key])
embedding = self.embeddings.embed_query(query)
vector = {"vector": embedding}
result = (
query_obj.with_near_vector(vector)
.with_limit(k)
.with_additional("vector")
.do()
)
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs_and_scores: list[tuple[str, float]] = []
for res in result["data"]["Get"][self.index_name]:
text = cast(str, res.pop(self.text_key))
score = float(np.dot(res["_additional"]["vector"], embedding))
docs_and_scores.append((text, score))
return docs_and_scores
| [] |
2024-01-10 | AliMostafaRadwan/AI-Powerd-LMS | VARK.py | import streamlit as st
import openai
API = "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"
# Define OpenAI API key
openai.api_key = API
# Set up the model and prompt
model_engine = "text-davinci-003"
prompt = "Once upon a time, in a land far, far away, there was a princess who..."
# Generate a response
completion = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
response = completion.choices[0].text
print(response) | [
"Once upon a time, in a land far, far away, there was a princess who..."
] |
2024-01-10 | use-forloops-please/Chat_bot | Chatbot.py | import openai
openai.api_key = "Please add your key here"
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Give me 3 ideas for apps I could build with OpenAI APIs "}])
print(completion['choices'][0]['message']['content'])
| [
"Give me 3 ideas for apps I could build with OpenAI APIs "
] |
2024-01-10 | SajithJude/dev_cb | pages~uploadpdftoimage.py | import streamlit as st
from llama_index import GPTSimpleVectorIndex, Document, SimpleDirectoryReader, QuestionAnswerPrompt, LLMPredictor, ServiceContext
import json
from langchain import OpenAI
from llama_index import download_loader
from tempfile import NamedTemporaryFile
from llama_index import (
GPTVectorStoreIndex,
ResponseSynthesizer,
)
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from pathlib import Path  # process_pdf() passes Path(temp_file.name) to the loader below
import openai  # call_openai() below uses the OpenAI client directly (expects OPENAI_API_KEY in the environment)
def process_pdf(uploaded_file):
loader = PDFReader()
with NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
temp_file.write(uploaded_file.getvalue())
documents = loader.load_data(file=Path(temp_file.name))
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=1900))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
if "index" not in st.session_state:
index = GPTVectorStoreIndex.from_documents(documents,service_context=service_context)
retriever = index.as_retriever(retriever_mode='embedding')
index = RetrieverQueryEngine(retriever)
st.session_state.index = index
# st.session_state.index = index
return st.session_state.index
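# Sketch of how the returned query engine is used later in this script (the uploaded file
# and question are hypothetical):
#   index = process_pdf(uploaded_file)
#   answer = index.query("Summarise section 1.1 of this chapter")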
def call_openai(source):
messages=[{"role": "user", "content": source}]
response = openai.ChatCompletion.create(
model="gpt-4-0314",
max_tokens=7000,
temperature=0.1,
messages = messages
)
return response.choices[0].message.content
st.title("CourseBot")
st.caption("AI-powered course creation made easy")
DATA_DIR = "data"
PDFReader = download_loader("PDFReader")
loader = PDFReader()
###################### defining tabs ##########################################
###################### Upload chapter column ##########################################
uploaded_file = st.file_uploader("Upload a Chapter as a PDF file", type="pdf")
toc_option = st.radio("Choose a method to provide TOC", ("Generate TOC", "Copy Paste TOC"))
forma = """"{
"Topics": [
{
"Topic 1": [
"Subtopic 1.1",
"Subtopic 1.2",
"Subtopic 1.3"
]
},
{
"Topic 2": [
"Subtopic 2.1",
"Subtopic 2.2",
"Subtopic 2.3"
]
},
continue with topics...
]
}
"""
if uploaded_file is not None:
index = process_pdf(uploaded_file)
if "index" not in st.session_state:
st.session_state.index = index
st.success("Index created successfully")
if toc_option == "Generate TOC":
toc = st.button("Genererate TOC")
try:
if toc:
            toc_res = st.session_state.index.query("Read through the document and create a table of contents that accurately reflects the main topics and subtopics it covers. The table of contents should be in the following format: " + str(forma))
str_toc = str(toc_res)
table_of_contents = json.loads(str_toc)
if "table_of_contents" not in st.session_state:
st.session_state.table_of_contents = table_of_contents
st.write(st.session_state.table_of_contents)
st.success("TOC loaded, Go to the next tab")
except (KeyError, AttributeError) as e:
print("Error generating TOC")
print(f"Error: {type(e).__name__} - {e}")
elif toc_option == "Copy Paste TOC":
toc_input = st.text_area("Paste your Table of contents:")
if st.button("Save TOC"):
try:
# table_of_contents = json.loads(toc_input)
src = "Convert the following table of contents into a json string, use the JSON format given bellow:\n"+ "Table of contents:\n"+ toc_input.strip() + "\n JSON format:\n"+ str(forma) + ". Output should be a valid JSON string."
toc_res = call_openai(src)
str_toc = str(toc_res)
table_of_contents = json.loads(str_toc)
# st.write(table_of_contents)
if "table_of_contents" not in st.session_state:
st.session_state.table_of_contents = table_of_contents
st.write(st.session_state.table_of_contents)
except json.JSONDecodeError as e:
st.error("Invalid JSON format. Please check your input.")
###################### refining toc start ##########################################
| [] |
2024-01-10 | SajithJude/dev_cb | pages~updated.py | import streamlit as st
from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, QuestionAnswerPrompt, LLMPredictor, ServiceContext
import json
from langchain import OpenAI
from llama_index import download_loader
from tempfile import NamedTemporaryFile
import base64
import io
import fitz
from PIL import Image
import ast
import os
import glob
PDFReader = download_loader("PDFReader")
import os
import openai
import json
import xml.etree.ElementTree as ET
from xml.dom import minidom
from pathlib import Path
from llama_index import download_loader
from xml.etree.ElementTree import Element, SubElement, tostring
import requests
import zipfile
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from langchain import OpenAI
st.set_page_config(page_title=None, page_icon=None, layout="wide", initial_sidebar_state="collapsed")
openai.api_key = os.getenv("OPENAI_API_KEY")
st.title("CourseBot")
st.caption("AI-powered course creation made easy")
DATA_DIR = "data"
PDFReader = download_loader("PDFReader")
loader = PDFReader()
if not os.path.exists("images"):
os.makedirs("images")
# Create the "pages" folder if it doesn't exist
if not os.path.exists("pages"):
os.makedirs("pages")
def load_saved_course(course_file):
with open(course_file, 'r') as infile:
return json.load(infile)
def call_openai3(source):
response = openai.Completion.create(
model="text-davinci-003",
prompt=source,
temperature=0.1,
max_tokens=3500,
top_p=1,
frequency_penalty=0.3,
presence_penalty=0
)
return response.choices[0].text
def call_openai(source):
messages=[{"role": "user", "content": source}]
response = openai.ChatCompletion.create(
model="gpt-4-0314",
max_tokens=7000,
temperature=0.1,
messages = messages
)
return response.choices[0].message.content
def clear_all_json_files():
"""Clear all JSON files in all directories under the current working directory"""
root_directory = os.path.abspath(os.getcwd())
# Iterate over all files and directories under the root directory
for dirpath, dirnames, filenames in os.walk(root_directory):
# Iterate over all files in the current directory
for filename in filenames:
# Check if the file has a .json extension
if filename.endswith('.json'):
# Open the JSON file, clear its contents, and save the empty file
file_path = os.path.join(dirpath, filename)
with open(file_path, 'w') as json_file:
json.dump({}, json_file)
def clear_images_folder():
for file in os.listdir("images"):
if file.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
os.remove(os.path.join("images", file))
def clear_pages_folder():
for file in os.listdir("pages"):
if file.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
os.remove(os.path.join("pages", file))
def update_json(topic_data):
with open("output.json", "w") as f:
st.session_state.toc = {"Topics": [{k: v} for k, v in topic_data.items()]}
json.dump({"Topics": [{k: v} for k, v in topic_data.items()]}, f)
def load_db():
if not os.path.exists("db.json"):
with open("db.json", "w") as f:
json.dump({}, f)
with open("db.json", "r") as f:
db = json.load(f)
return db
def delete_chapter(chapter_name):
db = load_db()
if chapter_name in db:
del db[chapter_name]
with open("db.json", "w") as f:
json.dump(db, f)
return True
return False
# def generate_xml_structure(new_dict,coursedesctip,coursedescriptionvoiceover,cn):
# root = ET.Element("Slides")
# # First slide with topic names
# slide = ET.SubElement(root, f"Slide1")
# slideName = ET.SubElement(slide, "Slide_Name")
# slideName.text = "Course_Name"
# crsnmelement = ET.SubElement(slide, "Course_Name")
# crsnmelement.text = cn.strip()
# cd = ET.SubElement(slide, "Course_Description")
# cd.text = coursedesctip.strip()
# cdvo = ET.SubElement(slide, "VoiceOver")
# cdvo1 = ET.SubElement(cdvo, "VoiceOver_1")
# cdvo1.text = coursedescriptionvoiceover.strip()
# slide_counter = 2
# slide = ET.SubElement(root, f"Slide{slide_counter}")
# tpcount=1
# slideName = ET.SubElement(slide, "Slide_Name")
# slideName.text = "Topics"
# topic_list = ET.SubElement(slide, "Topics")
# for topic in new_dict:
# topic_name = ET.SubElement(topic_list, f"Topic_{tpcount}")
# topic_name.text = topic
# tpcount +=1
# vocount=1
# voiceovertopic_list = ET.SubElement(slide, "VoiceOver")
# for topic in new_dict:
# topic_voiceover = ET.SubElement(voiceovertopic_list, f"VoiceOver_{vocount}")
# topic_voiceover.text = topic
# vocount +=1
# slide_counter += 1
# # Iterate through topics and subtopics
# for topic, details in new_dict.items():
# slide = ET.SubElement(root, f"Slide{slide_counter}")
# # slideName = ET.SubElement(slide, "Slide_Name")
# # slideName.text = "Topic_Name"
# # Add subtopics if they exist
# if details["Subtopics"]:
# sub_slide = ET.SubElement(root, f"Slide{slide_counter}")
# slideName = ET.SubElement(sub_slide, "Slide_Name")
# slideName.text = "Topic_Name"
# Topic_Name = ET.SubElement(sub_slide, "Topic_Name")
# Topic_Name.text= topic
# subtopiccounter=1
# for subtopic in details["Subtopics"]:
# subtopic_elem = ET.SubElement(sub_slide, f"Subtopic_{subtopiccounter}")
# subtopic_elem.text = subtopic["Subtopic"]
# subtopiccounter +=1
# slide_counter += 1
# # Add bullets (4 per slide)
# for subtopic in details["Subtopics"]:
# sub_slide = ET.SubElement(root, f"Slide{slide_counter}")
# slideName = ET.SubElement(sub_slide, "Slide_Name")
# slideName.text = "SubTopic"
# Subtopicelement = ET.SubElement(sub_slide, "SubTopic")
# # for subtopic in details["Subtopics"]:
# Subtopicelement.text = subtopic["Subtopic"]
# bullet_count = 1
# bullets_slide = None
# for i, bullet in enumerate(subtopic["Bullets"]):
# if bullet_count % 4 == 0:
# pass
# # bullets_slide = ET.SubElement(sub_slide, "BulletsSlide")
# bullet_elem = ET.SubElement(sub_slide, f"Bullet_{bullet_count}")
# bullet_elem.text = bullet
# bullet_count += 1
# vobullet_count = 1
# bullets_VO_element = ET.SubElement(sub_slide, "VoiceOver")
# for i, bullet in enumerate(subtopic["VoiceOverBullets"]):
# if vobullet_count % 4 == 0:
# pass
# bullet_voiceover_elem = ET.SubElement(bullets_VO_element, f"VoiceOver_{vobullet_count}")
# bullet_voiceover_elem.text = bullet
# vobullet_count += 1
# slide_counter += 1
# else:
# slideName = ET.SubElement(slide, "Slide_Name")
# slideName.text = "Topic_Summary"
# Topic_Name = ET.SubElement(slide, "Topic_Name")
# Topic_Name.text= topic
# Topic_Summary = ET.SubElement(slide, "Topic_Summary")
# Topic_Summary.text= details["Topic_Summary"].strip()
# topic_elem = ET.SubElement(slide, "VoiceOver")
# topic_elem.text = details["VoiceOver"].strip()
# slide_counter += 1
# slide = ET.SubElement(root, f"Slide{slide_counter}")
# slideName = ET.SubElement(slide, "Slide_Name")
# slideName.text = "Congratulations"
# messageel = ET.SubElement(slide, "Message1")
# messageel.text = "Congratulations"
# messageel2 = ET.SubElement(slide, "Message2")
# messageel2.text = "Congratulations on successful completion of the course."
# # Generate XML string
# xml_string = ET.tostring(root, encoding="utf-8", method="xml").decode("utf-8")
# return xml_string
# import xml.etree.ElementTree as ET
def generate_xml_structure(new_dict,coursedesctip,coursedescriptionvoiceover,cn):
root = ET.Element("Slides")
# First slide with topic names
slide = ET.SubElement(root, f"Slide1")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Course_Name"
crsnmelement = ET.SubElement(slide, "Course_Name")
crsnmelement.text = cn.strip()
cd = ET.SubElement(slide, "Course_Description")
cd.text = coursedesctip.strip()
cdvo = ET.SubElement(slide, "VoiceOver")
cdvo1 = ET.SubElement(cdvo, "VoiceOver_1")
cdvo1.text = coursedescriptionvoiceover.strip()
slide_counter = 2
slide = ET.SubElement(root, f"Slide{slide_counter}")
tpcount=1
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Topics"
# topic_list = ET.SubElement(slide, "Topics")
for topic in new_dict:
topic_name = ET.SubElement(slide, f"Topic_{tpcount}")
topic_name.text = topic
tpcount +=1
vocount=1
voiceovertopic_list = ET.SubElement(slide, "VoiceOver")
for topic in new_dict:
topic_voiceover = ET.SubElement(voiceovertopic_list, f"VoiceOver_{vocount}")
topic_voiceover.text = topic
vocount +=1
# Iterate through topics and subtopics
for topic, details in new_dict.items():
# Add subtopics if they exist
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Topic_Name"
tpname_element = ET.SubElement(slide, "Topic_Name")
tpname_element.text = topic
if details["Subtopics"]:
subtopiccounter=1
subtopiccounter_1=1
for subtopic in details["Subtopics"]:
sp_element = ET.SubElement(slide, f"SubTopic_{subtopiccounter_1}")
sp_element.text = subtopic["Subtopic"]
subtopiccounter_1+=1
tpname_vo_element = ET.SubElement(slide, "VoiceOver")
for subtopic in details["Subtopics"]:
vo_tag = ET.SubElement(tpname_vo_element, f"VoiceOver_{subtopiccounter}")
vo_tag.text = subtopic["Subtopic"]
# slide_counter += 1
for subtopic in details["Subtopics"]:
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "SubTopic"
Subtopicelement = ET.SubElement(slide, "SubTopic")
Subtopicelement.text = subtopic["Subtopic"]
bullet_count = 1
bullets_slide = None
for i, bullet in enumerate(subtopic["Bullets"]):
if bullet_count % 4 == 0:
pass
bullet_elem = ET.SubElement(slide, f"Bullet_{bullet_count}")
bullet_elem.text = bullet
bullet_count += 1
vobullet_count = 1
bullets_VO_element = ET.SubElement(slide, "VoiceOver")
for i, bullet in enumerate(subtopic["VoiceOverBullets"]):
if vobullet_count % 4 == 0:
pass
bullet_voiceover_elem = ET.SubElement(bullets_VO_element, f"VoiceOver_{vobullet_count}")
bullet_voiceover_elem.text = bullet
vobullet_count += 1
#topic summary for subtopic slides
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Topic_Summary"
Topic_Name = ET.SubElement(slide, "Topic_Name")
Topic_Name.text= topic
Topic_Summary = ET.SubElement(slide, "Topic_Summary")
Topic_Summary.text= details["Topic_Summary"].strip()
topic_elem = ET.SubElement(slide, "VoiceOver")
topic_elem.text = details["VoiceOver"].strip()
else:
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Topic_Summary"
Topic_Name = ET.SubElement(slide, "Topic_Name")
Topic_Name.text= topic
Topic_Summary = ET.SubElement(slide, "Topic_Summary")
Topic_Summary.text= details["Topic_Summary"].strip()
topic_elem = ET.SubElement(slide, "VoiceOver")
topic_elem.text = details["VoiceOver"].strip()
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Congratulations"
messageel = ET.SubElement(slide, "Message1")
messageel.text = "Congratulations"
messageel2 = ET.SubElement(slide, "Message2")
messageel2.text = "Congratulations on successful completion of the course."
# Generate XML string
xml_string = ET.tostring(root, encoding="utf-8", method="xml").decode("utf-8")
# xml_string = xml_string.replace('<?xml version="1.0" ?>', '')
# st.write(xml_string)
return xml_string
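# Rough shape of the XML produced above (tag names come from the code; values are placeholders):
#   <Slides>
#     <Slide1><Slide_Name>Course_Name</Slide_Name><Course_Name>...</Course_Name>
#             <Course_Description>...</Course_Description><VoiceOver><VoiceOver_1>...</VoiceOver_1></VoiceOver></Slide1>
#     <Slide2><Slide_Name>Topics</Slide_Name><Topic_1>...</Topic_1><VoiceOver><VoiceOver_1>...</VoiceOver_1></VoiceOver></Slide2>
#     ... then, per topic: a Topic_Name slide, one SubTopic slide per subtopic (Bullet_N plus a
#     VoiceOver block), and a Topic_Summary slide; the last slide is the Congratulations message.
#   </Slides>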
# Example usage
# xml_output = generate_xml_structure(your_data_structure)
# print(xml_output)
# # Example usage
# xml_output = generate_xml_structure(st.session_state.new_dict)
# print(xml_output)
def process_pdf(uploaded_file):
loader = PDFReader()
with NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
temp_file.write(uploaded_file.getvalue())
documents = loader.load_data(file=Path(temp_file.name))
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=1900))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
if "index" not in st.session_state:
index = GPTVectorStoreIndex.from_documents(documents,service_context=service_context)
retriever = index.as_retriever(retriever_mode='embedding')
index = RetrieverQueryEngine(retriever)
st.session_state.index = index
# st.session_state.index = index
return st.session_state.index
###################### defining tabs ##########################################
# upload_col, refine_toc, extract_col, miss_col, edit_col,voice_col, xml_col, manage_col = st.tabs(["⚪ __Upload Chapter__","⚪ __Refine_TOC__", "⚪ __Extract_Contents__","⚪ __missing_Contents__", "⚪ __Edit Contents__", "⚪ Voice Over__", "⚪ __Export Generated XML__", "⚪ __Manage XMLs__"])
upload_col, refine_toc, extract_col, voice_col, xml_col = st.tabs(["⚪ __Upload Chapter__","⚪ __Refine_TOC__", "⚪ __Extract_Contents__", "⚪ __Voice Over__", "⚪ __Export Generated XML__"])
if "toc" not in st.session_state:
st.session_state.toc = {}
###################### Upload chapter column ##########################################
uploaded_file = upload_col.file_uploader("Upload a Chapter as a PDF file", type="pdf")
# toc_option = upload_col.radio("Choose a method to provide TOC", ("Generate TOC", "Copy Paste TOC"))
forma = """"{
"Topics": [
{
"n.n Topic ": [
"n.n.n Subtopic ",
"n.n.n Subtopic ",
]
}
]
}
"""
if uploaded_file is not None:
# clear_all_json_files()
# index =
if "index" not in st.session_state:
st.session_state.index = process_pdf(uploaded_file)
upload_col.success("Index created successfully")
clear_images_folder()
clear_pages_folder()
# read PDF file
with open(uploaded_file.name, "wb") as f:
f.write(uploaded_file.getbuffer())
# display PDF file
with fitz.open(uploaded_file.name) as doc:
for page in doc: # iterate through the pages
pix = page.get_pixmap() # render page to an image
pix.save("pages/page-%i.png" % page.number)
for page_index in range(len(doc)):
page = doc[page_index]
image_list = page.get_images(full=True)
for image_index, img in enumerate(page.get_images(), start=1):
xref = img[0]
base_image = doc.extract_image(xref)
image_bytes = base_image["image"]
image_ext = base_image["ext"]
image = Image.open(io.BytesIO(image_bytes))
image_filename = f"images/image_page{page_index}_{image_index}.{image_ext}"
image.save(image_filename)
# if toc_option == "Generate TOC":
# toc = upload_col.button("Genererate TOC")
# edirpeompt = upload_col.text_input("Input prompt ")
# try:
# if toc:
# toc_res = st.session_state.index.query(str(edirpeompt) + "\n. The output table of contents should be in the following format: " + str(forma))
# str_toc = str(toc_res)
# table_of_contents = json.loads(str_toc)
# if "table_of_contents" not in st.session_state:
# st.session_state.table_of_contents = table_of_contents
# upload_col.write(st.session_state.table_of_contents)
# upload_col.success("TOC loaded, Go to the next tab")
# except (KeyError, AttributeError) as e:
# print("Error generating TOC")
# print(f"Error: {type(e).__name__} - {e}")
# elif toc_option == "Copy Paste TOC":
try:
toc_input = upload_col.text_area("Paste your Table of contents:")
if upload_col.button("Save TOC"):
# try:
# table_of_contents = json.loads(toc_input)
toc_res = "Convert the following table of contents into a json string, use the JSON format given bellow:\n"+ "Table of contents:\n"+ toc_input.strip() + "\n JSON format:\n"+ str(forma) + ". Output should be a valid JSON string."
str_toc = call_openai(toc_res)
str_to = str(str_toc)
st.write(str_to)
table_of_contents = json.loads(str_to.strip())
# if "table_of_contents" not in st.session_state:
st.session_state.table_of_contents = table_of_contents
upload_col.write(st.session_state.table_of_contents)
upload_col.success("TOC loaded, Go to the next tab")
except json.JSONDecodeError as e:
str_toc = call_openai(toc_res)
table_of_contents = json.loads(str(str_toc))
st.session_state.table_of_contents = table_of_contents
upload_col.write(st.session_state.table_of_contents)
# upload_col.error("Invalid JSON format. Please check your input.")
upload_col.error(e)
###################### refining toc start ##########################################
try:
with refine_toc:
column1, column2 = st.columns(2, gap="large")
data = st.session_state.table_of_contents
topic_data = {list(t.keys())[0]: list(t.values())[0] for t in data["Topics"]}
if "topic_data" not in st.session_state:
st.session_state['topic_data'] = topic_data
column1.write("# Editor")
column1.write("### Topics:")
topic_name = column1.text_input("Enter New topic name:")
if column1.button("Save New Topic"):
if topic_name not in st.session_state['topic_data']:
st.session_state['topic_data'][topic_name] = []
            update_json(st.session_state['topic_data'])
topic_options = list(st.session_state['topic_data'].keys())
selected_topic = column1.selectbox("Select a Topic to edit Subtopics", topic_options)
delete_topic = column1.button("Remove Selected Topic")
if delete_topic:
if selected_topic in st.session_state['topic_data']:
del st.session_state['topic_data'][selected_topic]
update_json(st.session_state['topic_data'])
st.experimental_rerun()
subtopics = st.session_state['topic_data'][selected_topic]
column1.write("### Subtopics:")
subtopics_input = column1.multiselect("Remove Unwanted Subtopics", subtopics, default=subtopics)
if subtopics_input:
st.session_state['topic_data'][selected_topic] = subtopics_input
update_json(st.session_state['topic_data'])
add = column1.button("Create New Subtopic")
if "add" in st.session_state or add:
st.session_state['add'] = True
new_subtopic = column1.text_input("Enter New Subtopic name:")
if column1.button("Save New Subtopic"):
if new_subtopic not in st.session_state['topic_data'][selected_topic]:
st.session_state['topic_data'][selected_topic].append(new_subtopic)
add= None
st.session_state['add'] = False
st.experimental_rerun()
if column1.button("Save"):
try:
if "new_dict" not in st.session_state:
st.session_state.new_dict = {}
for topic in st.session_state.toc["Topics"]:
for key, value in topic.items():
# Add a description for the topic
st.session_state.new_dict[key] = {'content': '', 'Subtopics': []}
# Add descriptions for the values
for item in value:
st.session_state.new_dict[key]['Subtopics'].append({'content': '', 'Subtopic': item})
st.write(st.session_state.new_dict)
except (KeyError, AttributeError) as e:
print("Error Formating TOC "+str(e))
print(f"Error: {type(e).__name__} - {e}")
column2.write("# Table of Contents")
for topic, subtopics in st.session_state['topic_data'].items():
column2.markdown(f"**{topic}**")
for subtopic in subtopics:
column2.write(f"- {subtopic}")
except (KeyError, AttributeError) as e:
print("Error refining toc")
print(f"Error: {type(e).__name__} - {e}")
###################### extract content ##########################################
try:
pagecol, ecol = extract_col.columns([2,5],gap="large")
pages_files = [f for f in os.listdir("pages") if f.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif'))]
selected_page = pagecol.number_input("Change page number to compare:",step=1)
selected_image = f"page-{selected_page}.png"
# Display the selected image
if selected_image:
pagecol.image(os.path.join("pages", selected_image), use_column_width=True)
else:
pagecol.warning("No images found in the 'pages' folder.")
# course_name = ecol.text_input("Enter course name")
quer = ecol.button("Extract Contents")
# saved_extracts = [file for file in os.listdir('.') if file.endswith('.json')]
# course_names = list(set([item['course_name'] for item in data]))
# seca, secb = extract_col.columns(2)
if quer:
progress_bar = ecol.progress(0)
total_items = sum(len(subtopics_dict['Subtopics']) for _, subtopics_dict in st.session_state.new_dict.items()) + len(st.session_state.new_dict)
items_processed = 0
for topic, subtopics_dict in st.session_state.new_dict.items():
for subtopic_dict in subtopics_dict['Subtopics']:
subtopic_name = subtopic_dict['Subtopic']
subtopicres = st.session_state.index.query("extract all the information under the subtopic "+str(subtopic_name)+ ", in 4 paragraphs where each paragraph has minimum 40 words.")
subtopic_dict['content'] = subtopicres.response
items_processed += 1
progress_bar.progress(items_processed / total_items)
ecol.info(f"Extracted {subtopic_name}")
topicres = st.session_state.index.query("extract all the information belonging to following section into a paragraph "+str(topic))
subtopics_dict['content'] = topicres.response
items_processed += 1
progress_bar.progress(items_processed / total_items)
# st.session_state.new_dict = data['data']
for topic_key, topic_value in st.session_state.new_dict.items():
expander = ecol.expander(f"{topic_key}")
expander.write(topic_value["content"])
for subtopic in topic_value["Subtopics"]:
expander.markdown(f"**{subtopic['Subtopic']}**")
expander.write(subtopic["content"])
# save = ecol.button("Save project")
# if save:
# json_filename = f"{course_name}.json"
# with open(json_filename, 'w') as outfile:
# json.dump(st.session_state.new_dict, outfile)
except (KeyError, FileNotFoundError,AttributeError) as e:
# st.error(e)
print("Error Extracting Data")
print(f"Error: {type(e).__name__} - {e}")
###################### voice over ##########################################
# try:
edcol, excol = voice_col.columns([1,3])
# Course Description
course_description_limit = edcol.number_input("Course Description Word Count Limit", value=30, min_value=1)
# Course Description VoiceOver
course_description_voiceover_limit = edcol.number_input("Course Description VoiceOver Word Count Limit", value=50, min_value=1)
# Topic Summary
topic_summary_limit = edcol.number_input("Topic Summary Word Count Limit", value=30, min_value=1)
# Topic Summary VoiceOver
topic_summary_voiceover_limit = edcol.number_input("Topic Summary VoiceOver Word Count Limit", value=50, min_value=1)
# Number of Bullets per Slide
num_bullets_per_slide = edcol.number_input("Number of Bullets per Slide", value=4, min_value=1)
# Number of Words per Bullet
num_words_bullet = edcol.number_input("Number of Words per Bullet", value=10, min_value=1)
# Bullet VoiceOver
bullet_voiceover_limit = edcol.number_input("VoiceOver per Bullet Word Count Limit", value=20, min_value=1)
# Paraphrasing Percentage Range
# paraphrasing_range = edcol.slider("Paraphrasing % Range", min_value=25, max_value=35, value=(25, 35))
saved_courses = [file for file in os.listdir('.') if file.endswith('.json')]
# Create a select box for saved courses
selectcol,loadcol = excol.columns(2)
cn = excol.text_input("Enter a Course Name")
selected_course = selectcol.selectbox("Select a saved course", saved_courses)
loadcol.write("")
loadcol.write("")
if loadcol.button("Load Project"):
st.session_state.new_dict = load_saved_course(selected_course)
excol.success("Project loaded,, you can now continue with Generate XML")
voice_col.write(st.session_state.new_dict)
gencol, savecol = excol.columns(2)
ex = gencol.button("Generate Voice Over")
# voice_col.write(st.session_state.new_dict)
if ex:
for topic_key, topic_value in st.session_state.new_dict.items():
# Add "VoiceOver" key to the main topic
topic = st.session_state.new_dict[topic_key]
topic_content = topic['content']
topic_voiceover_prompt = f"generate a voice over for the following paragraph in {topic_summary_voiceover_limit} words: {topic_content}"
st.session_state.new_dict[topic_key]["VoiceOver"] = str(call_openai3(topic_voiceover_prompt))
topic_summary_prompt = f"generate a voice over for the following paragraph in {topic_summary_limit} words: {topic_content}"
st.session_state.new_dict[topic_key]["Topic_Summary"] = str(call_openai3(topic_summary_prompt))
# Check if the topic has subtopics
# if "Subtopics" in topic_value:
# Iterate through the subtopics
for subtopic in topic_value["Subtopics"]:
subtopic_content = subtopic['content']
subtopic_content
subtopic_bullet_prompt = f"Divide the following content :\n {subtopic_content.strip()} \n into {num_bullets_per_slide} unordered bullet points , where each bullet point should have exactly {num_words_bullet} words, The response should be a valid json list of strings."
bullets = call_openai3(subtopic_bullet_prompt)
# st.write(bullets)
bullets
listbul = ast.literal_eval(bullets.strip())
subtopic['Bullets'] = listbul
subtopic_voiceover_prompt = f"By dividing the following content :\n {subtopic_content.strip()} \n Generate {num_bullets_per_slide} voiceover bullet scripts ,where each voiceover bullet script should consist of exactly {bullet_voiceover_limit} words, The response should be a valid json list of strings."
BulletVoiceOver = call_openai3(subtopic_voiceover_prompt)
listvoice = ast.literal_eval(BulletVoiceOver.strip())
subtopic['VoiceOverBullets'] = listvoice
sv_voice = savecol.button("Save voiceover")
if sv_voice:
json_filename = f"{cn}.json"
with open(json_filename, 'w') as outfile:
json.dump(st.session_state.new_dict, outfile)
# excol.write(st.session_state.new_dict)
if excol.button("generate xml"):
lsttopics=[]
for topic in st.session_state.new_dict.keys():
lsttopics.append(topic)
course_descriptioninput= f"Generate a course description in exactly {course_description_limit} words for a course containing the following topics:\n"+str(lsttopics)
coursedesctip = call_openai3(course_descriptioninput)
course_descriptionvoin= f"Generate a voice over in exactly {course_description_voiceover_limit} words for a course description containing the following topics:\n"+str(lsttopics) +"\n Exclude objectives in the voice over"
coursedesctipvo = call_openai3(course_descriptionvoin)
# coursedesctipvo
# coursedesctip
# st.session_state.new_dict
edcol.write(st.session_state.new_dict)
xml_output = generate_xml_structure(st.session_state.new_dict,coursedesctip,coursedesctipvo,cn)
pretty_xml = minidom.parseString(xml_output).toprettyxml()
file_name = f"{cn}.xml"
b64_xml = base64.b64encode(xml_output.encode("utf-8")).decode("utf-8")
download_button = f'<a href="data:application/xml;base64,{b64_xml}" download="{file_name}">Download XML file</a>'
# Add the download button
excol.markdown(download_button, unsafe_allow_html=True)
excol.code(pretty_xml)
###################### export generated xml ##########################################
# try:
# # with
# ondu, naduvan, rendu = xml_col.columns([4,3,4],gap="large")
# ondu.write("### Select Images")
# ondu.write("")
# ondu.write("")
# left, right = ondu.columns(2)
# image_topic = left.selectbox("Select a topic", list(st.session_state.new_dict.keys()),label_visibility="collapsed")
# add_to_topic = right.button("Add Image to Topic")
# # Dropdown menu for selecting a subtopic based on the selected topic
# image_subtopic = left.selectbox("Select a subtopic", [subtopic["Subtopic"] for subtopic in st.session_state.new_dict[image_topic]["Subtopics"]],label_visibility="collapsed")
# add_to_subtopic = right.button("Add image to Subtopic")
# image_files = [f for f in os.listdir("images") if f.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif'))]
# selected_images = []
# # for image in image_files:
# expander = ondu.expander("Select images")
# n_pages = 20
# image_exts = ['.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif']
# page_index = ondu.number_input("Enter page number", min_value=1, max_value=n_pages, value=1)
# with ondu.expander(f"Page {page_index}", expanded=True):
# image_files = [f for f in os.listdir("images") if f.startswith(f'image_page{page_index}_') and f.endswith(tuple(image_exts))]
# # if image_files:
# for image_filename in image_files:
# file_path = os.path.join("images", image_filename)
# if os.path.isfile(file_path):
# ondu.image(file_path, caption=os.path.basename(file_path),width=150)
# else:
# st.warning(f"Image not found: {os.path.basename(file_path)}")
# # else:
# # st.warning("No images found for this page.")
# selected_image = image_filename
# if add_to_topic:
# if "img" not in st.session_state.new_dict[image_topic]:
# st.session_state.new_dict[image_topic]["img"] = []
# st.session_state.new_dict[image_topic]["img"].append(selected_image)
# ondu.success(f"Image {selected_image} added to topic {image_topic}")
# if add_to_subtopic:
# for subtopic in st.session_state.new_dict[image_topic]["Subtopics"]:
# if subtopic["Subtopic"] == image_subtopic:
# if "img" not in subtopic:
# subtopic["img"] = []
# subtopic["img"].append(selected_image)
# ondu.success(f"Image {selected_image} added to subtopic {image_subtopic}")
# break
# naduvan.write("### Compare ")
# pages_files = [f for f in os.listdir("pages") if f.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif'))]
# # if pages_files:
# selected_page = naduvan.number_input("Compare Images",step=1)
# selected_image = f"page-{selected_page}.png"
# # Display the selected image
# if selected_image:
# naduvan.image(os.path.join("pages", selected_image), use_column_width=True)
# else:
# naduvan.warning("No images found in the 'pages' folder.")
# rendu.write("### Configure ")
# # chapter_name = rendu.text_input("enter chapter name")
# # r1,r2 = rendu.columns(2)
# # NoOfBullets = r1.text_input("No. of Bullets per Sub Topic")
# # NoOfWordsPerBullet = r1.text_input("No. of words per Bullet")
# # NoOfWordsForVOPerBullet = r1.text_input("No. of words for Voice Over per Bullet")
# save_xml = rendu.button("Save XML")
# if save_xml:
# # if "edited" not in st.session_state:
# # st.session_state.edited = st.session_state.missing
# #xml_col.write(st.session_state.new_dict)
# xml_output = json_to_xml(st.session_state.new_dict, chapter_name, NoOfWordsForVOPerBullet, NoOfWordsPerBullet, NoOfBullets)
# pretty_xml = minidom.parseString(xml_output).toprettyxml()
# xml_file_path = os.path.join("images", f"{chapter_name}.xml")
# with open(xml_file_path, "w") as xml_file:
# xml_file.write(pretty_xml)
# # rendu.success(f"XML file saved as {xml_file_path}")
# with xml_col.expander("XML content"):
# xml_col.code(pretty_xml)
# # Zip the entire "images" folder with its contents
# def zipdir(path, ziph):
# for root, dirs, files in os.walk(path):
# for file in files:
# ziph.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), path))
# zip_file_path = f"images/{chapter_name}.zip"
# with zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
# zipdir("images", zipf)
# rendu.success(f"Zipped folder saved as {zip_file_path}")
# # st.session_state.table_of_contents = {}
# # st.session_state.selected_items = []
# # st.session_state.new_dict = {}
# # st.session_state.index = ""
# # st.session_state.new_dict = {}
# except (KeyError,NameError, AttributeError) as e:
# print("Error saving XML")
# print(f"Error: {type(e).__name__} - {e}")
# # ###################### Manage XML ##########################################
# # db = load_db()
# # chapter_list = list(db.keys())
# # if chapter_list:
# # filesinsidefolder = manage_col.selectbox("Select a zip file", [f for f in os.listdir("images") if f.endswith(('.zip'))])
# # if filesinsidefolder and filesinsidefolder.endswith('.zip'):
# # file_path = os.path.join("images", filesinsidefolder)
# # with open(file_path, "rb") as f:
# # file_bytes = f.read()
# # manage_col.download_button(
# # label="Download Zip File",
# # data=file_bytes,
# # file_name=filesinsidefolder,
# # mime="application/zip",
# # )
# # else:
# # manage_col.warning("No file selected.")
# # selected_chapter = manage_col.selectbox("Select a chapter first:", chapter_list)
# # delete_button = manage_col.button("Delete Chapter")
# # post_button= manage_col.button("Continue with CourseBOT 2")
# # if post_button:
# # url = "https://coursebot2.flipick.com/couresbuilderapi/api/Course/ImportCourse"
# # payload = json.dumps({
# # "ImportXML": str(db[selected_chapter])
# # })
# # headers = {
# # 'Content-Type': 'application/json'
# # }
# # response = requests.request("POST", url, headers=headers, data=payload)
# # print(response)
# # response_dict = json.loads(response.text)
# # url_to_launch = response_dict["result"]["urlToLaunch"]
# # manage_col.subheader("Click on the url bellow to continue.")
# # manage_col.write(url_to_launch)
# # if delete_button:
# # if delete_chapter(selected_chapter):
# # manage_col.success(f"Chapter {selected_chapter} deleted successfully.")
# # db = load_db()
# # chapter_list = list(db.keys())
# # if chapter_list:
# # selected_chapter = manage_col.selectbox("Select a chapter:", chapter_list)
# # manage_col.code(db[selected_chapter], language="xml")
# # else:
# # manage_col.warning("No chapters found. Upload a chapter and save its XML first.")
# # else:
# # manage_col.error(f"Failed to delete chapter {selected_chapter}.")
# # else:
# # manage_col.warning("No chapters found. Upload a chapter and save its XML first.") | [
"generate a voice over for the following paragraph in PLACEHOLDER words: PLACEHOLDER"
] |
2024-01-10 | SajithJude/dev_cb | pages~version2.py | import streamlit as st
from llama_index import GPTVectorStoreIndex, Document, SimpleDirectoryReader, QuestionAnswerPrompt, LLMPredictor, ServiceContext
import json
from langchain import OpenAI
from llama_index import download_loader
from tempfile import NamedTemporaryFile
import base64
import io
import fitz
from PIL import Image
import ast
import os
import glob
PDFReader = download_loader("PDFReader")
import os
import openai
import json
import xml.etree.ElementTree as ET
from xml.dom import minidom
from pathlib import Path
from llama_index import download_loader
from xml.etree.ElementTree import Element, SubElement, tostring
import requests
import zipfile
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from langchain import OpenAI
st.set_page_config(page_title=None, page_icon=None, layout="wide", initial_sidebar_state="collapsed")
openai.api_key = os.getenv("OPENAI_API_KEY")
st.title("CourseBot")
st.caption("AI-powered course creation made easy")
DATA_DIR = "data"
PDFReader = download_loader("PDFReader")
loader = PDFReader()
if not os.path.exists("images"):
os.makedirs("images")
# Create the "pages" folder if it doesn't exist
if not os.path.exists("pages"):
os.makedirs("pages")
def load_saved_course(course_file):
with open(course_file, 'r') as infile:
return json.load(infile)
def call_openai3(source):
response = openai.Completion.create(
model="text-davinci-003",
prompt=source,
temperature=0.1,
max_tokens=3500,
top_p=1,
frequency_penalty=0.3,
presence_penalty=0
)
return response.choices[0].text
def call_openai(source):
messages=[{"role": "user", "content": source}]
response = openai.ChatCompletion.create(
model="gpt-4-0314",
max_tokens=7000,
temperature=0.1,
messages = messages
)
return response.choices[0].message.content
def clear_all_json_files():
"""Clear all JSON files in all directories under the current working directory"""
root_directory = os.path.abspath(os.getcwd())
# Iterate over all files and directories under the root directory
for dirpath, dirnames, filenames in os.walk(root_directory):
# Iterate over all files in the current directory
for filename in filenames:
# Check if the file has a .json extension
if filename.endswith('.json'):
# Open the JSON file, clear its contents, and save the empty file
file_path = os.path.join(dirpath, filename)
with open(file_path, 'w') as json_file:
json.dump({}, json_file)
def clear_images_folder():
for file in os.listdir("images"):
if file.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
os.remove(os.path.join("images", file))
def clear_pages_folder():
for file in os.listdir("pages"):
if file.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
os.remove(os.path.join("pages", file))
def update_json(topic_data):
with open("output.json", "w") as f:
st.session_state.toc = {"Topics": [{k: v} for k, v in topic_data.items()]}
json.dump({"Topics": [{k: v} for k, v in topic_data.items()]}, f)
def load_db():
if not os.path.exists("db.json"):
with open("db.json", "w") as f:
json.dump({}, f)
with open("db.json", "r") as f:
db = json.load(f)
return db
def delete_chapter(chapter_name):
db = load_db()
if chapter_name in db:
del db[chapter_name]
with open("db.json", "w") as f:
json.dump(db, f)
return True
return False
def form_callback(value):
st.write(value)
res = st.session_state.index.query("extract all the information belonging to following section into a paragraph "+str(value))
st.write(res.response)
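# Presumably intended as a Streamlit callback; a hypothetical wiring would be
#   st.button("Extract section", on_click=form_callback, args=("2.1 Topic name",))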
# def generate_xml_structure(new_dict,coursedesctip,coursedescriptionvoiceover,cn):
# root = ET.Element("Slides")
# # First slide with topic names
# slide = ET.SubElement(root, f"Slide1")
# slideName = ET.SubElement(slide, "Slide_Name")
# slideName.text = "Course_Name"
# crsnmelement = ET.SubElement(slide, "Course_Name")
# crsnmelement.text = cn.strip()
# cd = ET.SubElement(slide, "Course_Description")
# cd.text = coursedesctip.strip()
# cdvo = ET.SubElement(slide, "VoiceOver")
# cdvo1 = ET.SubElement(cdvo, "VoiceOver_1")
# cdvo1.text = coursedescriptionvoiceover.strip()
# slide_counter = 2
# slide = ET.SubElement(root, f"Slide{slide_counter}")
# tpcount=1
# slideName = ET.SubElement(slide, "Slide_Name")
# slideName.text = "Topics"
# topic_list = ET.SubElement(slide, "Topics")
# for topic in new_dict:
# topic_name = ET.SubElement(topic_list, f"Topic_{tpcount}")
# topic_name.text = topic
# tpcount +=1
# vocount=1
# voiceovertopic_list = ET.SubElement(slide, "VoiceOver")
# for topic in new_dict:
# topic_voiceover = ET.SubElement(voiceovertopic_list, f"VoiceOver_{vocount}")
# topic_voiceover.text = topic
# vocount +=1
# slide_counter += 1
# # Iterate through topics and subtopics
# for topic, details in new_dict.items():
# slide = ET.SubElement(root, f"Slide{slide_counter}")
# # slideName = ET.SubElement(slide, "Slide_Name")
# # slideName.text = "Topic_Name"
# # Add subtopics if they exist
# if details["Subtopics"]:
# sub_slide = ET.SubElement(root, f"Slide{slide_counter}")
# slideName = ET.SubElement(sub_slide, "Slide_Name")
# slideName.text = "Topic_Name"
# Topic_Name = ET.SubElement(sub_slide, "Topic_Name")
# Topic_Name.text= topic
# subtopiccounter=1
# for subtopic in details["Subtopics"]:
# subtopic_elem = ET.SubElement(sub_slide, f"Subtopic_{subtopiccounter}")
# subtopic_elem.text = subtopic["Subtopic"]
# subtopiccounter +=1
# slide_counter += 1
# # Add bullets (4 per slide)
# for subtopic in details["Subtopics"]:
# sub_slide = ET.SubElement(root, f"Slide{slide_counter}")
# slideName = ET.SubElement(sub_slide, "Slide_Name")
# slideName.text = "SubTopic"
# Subtopicelement = ET.SubElement(sub_slide, "SubTopic")
# # for subtopic in details["Subtopics"]:
# Subtopicelement.text = subtopic["Subtopic"]
# bullet_count = 1
# bullets_slide = None
# for i, bullet in enumerate(subtopic["Bullets"]):
# if bullet_count % 4 == 0:
# pass
# # bullets_slide = ET.SubElement(sub_slide, "BulletsSlide")
# bullet_elem = ET.SubElement(sub_slide, f"Bullet_{bullet_count}")
# bullet_elem.text = bullet
# bullet_count += 1
# vobullet_count = 1
# bullets_VO_element = ET.SubElement(sub_slide, "VoiceOver")
# for i, bullet in enumerate(subtopic["VoiceOverBullets"]):
# if vobullet_count % 4 == 0:
# pass
# bullet_voiceover_elem = ET.SubElement(bullets_VO_element, f"VoiceOver_{vobullet_count}")
# bullet_voiceover_elem.text = bullet
# vobullet_count += 1
# slide_counter += 1
# else:
# slideName = ET.SubElement(slide, "Slide_Name")
# slideName.text = "Topic_Summary"
# Topic_Name = ET.SubElement(slide, "Topic_Name")
# Topic_Name.text= topic
# Topic_Summary = ET.SubElement(slide, "Topic_Summary")
# Topic_Summary.text= details["Topic_Summary"].strip()
# topic_elem = ET.SubElement(slide, "VoiceOver")
# topic_elem.text = details["VoiceOver"].strip()
# slide_counter += 1
# slide = ET.SubElement(root, f"Slide{slide_counter}")
# slideName = ET.SubElement(slide, "Slide_Name")
# slideName.text = "Congratulations"
# messageel = ET.SubElement(slide, "Message1")
# messageel.text = "Congratulations"
# messageel2 = ET.SubElement(slide, "Message2")
# messageel2.text = "Congratulations on successful completion of the course."
# # Generate XML string
# xml_string = ET.tostring(root, encoding="utf-8", method="xml").decode("utf-8")
# return xml_string
# import xml.etree.ElementTree as ET
def generate_xml_structure(new_dict,coursedesctip,coursedescriptionvoiceover,cn):
root = ET.Element("Slides")
# First slide with topic names
slide = ET.SubElement(root, f"Slide1")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Course_Name"
crsnmelement = ET.SubElement(slide, "Course_Name")
crsnmelement.text = cn.strip()
cd = ET.SubElement(slide, "Course_Description")
cd.text = coursedesctip.strip()
cdvo = ET.SubElement(slide, "VoiceOver")
cdvo1 = ET.SubElement(cdvo, "VoiceOver_1")
cdvo1.text = coursedescriptionvoiceover.strip()
slide_counter = 2
slide = ET.SubElement(root, f"Slide{slide_counter}")
tpcount=1
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Topics"
# topic_list = ET.SubElement(slide, "Topics")
for topic in new_dict:
topic_name = ET.SubElement(slide, f"Topic_{tpcount}")
topic_name.text = topic
tpcount +=1
vocount=1
voiceovertopic_list = ET.SubElement(slide, "VoiceOver")
for topic in new_dict:
topic_voiceover = ET.SubElement(voiceovertopic_list, f"VoiceOver_{vocount}")
topic_voiceover.text = topic
vocount +=1
# Iterate through topics and subtopics
for topic, details in new_dict.items():
# Add subtopics if they exist
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Topic_Name"
tpname_element = ET.SubElement(slide, "Topic_Name")
tpname_element.text = topic
if details["Subtopics"]:
subtopiccounter=1
subtopiccounter_1=1
for subtopic in details["Subtopics"]:
sp_element = ET.SubElement(slide, f"SubTopic_{subtopiccounter_1}")
sp_element.text = subtopic["Subtopic"]
subtopiccounter_1+=1
tpname_vo_element = ET.SubElement(slide, "VoiceOver")
for subtopic in details["Subtopics"]:
vo_tag = ET.SubElement(tpname_vo_element, f"VoiceOver_{subtopiccounter}")
vo_tag.text = subtopic["Subtopic"]
# slide_counter += 1
for subtopic in details["Subtopics"]:
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "SubTopic"
Subtopicelement = ET.SubElement(slide, "SubTopic")
Subtopicelement.text = subtopic["Subtopic"]
bullet_count = 1
bullets_slide = None
for i, bullet in enumerate(subtopic["Bullets"]):
if bullet_count % 4 == 0:
pass
bullet_elem = ET.SubElement(slide, f"Bullet_{bullet_count}")
bullet_elem.text = bullet
bullet_count += 1
vobullet_count = 1
bullets_VO_element = ET.SubElement(slide, "VoiceOver")
for i, bullet in enumerate(subtopic["VoiceOverBullets"]):
if vobullet_count % 4 == 0:
pass
bullet_voiceover_elem = ET.SubElement(bullets_VO_element, f"VoiceOver_{vobullet_count}")
bullet_voiceover_elem.text = bullet
vobullet_count += 1
#topic summary for subtopic slides
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Topic_Summary"
Topic_Name = ET.SubElement(slide, "Topic_Name")
Topic_Name.text= topic
Topic_Summary = ET.SubElement(slide, "Topic_Summary")
Topic_Summary.text= details["Topic_Summary"].strip()
topic_elem = ET.SubElement(slide, "VoiceOver")
topic_elem.text = details["VoiceOver"].strip()
else:
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Topic_Summary"
Topic_Name = ET.SubElement(slide, "Topic_Name")
Topic_Name.text= topic
Topic_Summary = ET.SubElement(slide, "Topic_Summary")
Topic_Summary.text= details["Topic_Summary"].strip()
topic_elem = ET.SubElement(slide, "VoiceOver")
topic_elem.text = details["VoiceOver"].strip()
slide_counter += 1
slide = ET.SubElement(root, f"Slide{slide_counter}")
slideName = ET.SubElement(slide, "Slide_Name")
slideName.text = "Congratulations"
messageel = ET.SubElement(slide, "Message1")
messageel.text = "Congratulations"
messageel2 = ET.SubElement(slide, "Message2")
messageel2.text = "Congratulations on successful completion of the course."
# Generate XML string
xml_string = ET.tostring(root, encoding="utf-8", method="xml").decode("utf-8")
# xml_string = xml_string.replace('<?xml version="1.0" ?>', '')
# st.write(xml_string)
return xml_string
# Example usage
# xml_output = generate_xml_structure(your_data_structure)
# print(xml_output)
# # Example usage
# xml_output = generate_xml_structure(st.session_state.new_dict)
# print(xml_output)
def process_pdf(uploaded_file):
loader = PDFReader()
with NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
temp_file.write(uploaded_file.getvalue())
documents = loader.load_data(file=Path(temp_file.name))
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=3900))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
if "index" not in st.session_state:
index = GPTVectorStoreIndex.from_documents(documents,service_context=service_context)
retriever = index.as_retriever(retriever_mode='embedding')
index = RetrieverQueryEngine(retriever)
st.session_state.index = index
# st.session_state.index = index
return st.session_state.index
###################### defining tabs ##########################################
# upload_col, refine_toc, extract_col, miss_col, edit_col,voice_col, xml_col, manage_col = st.tabs(["⚪ __Upload Chapter__","⚪ __Refine_TOC__", "⚪ __Extract_Contents__","⚪ __missing_Contents__", "⚪ __Edit Contents__", "⚪ Voice Over__", "⚪ __Export Generated XML__", "⚪ __Manage XMLs__"])
upload_col, refine_toc, extract_col, voice_col, xml_col = st.tabs(["⚪ __Upload Chapter__","⚪ __Refine_TOC__", "⚪ __Extract_Contents__", "⚪ __Voice Over__", "⚪ __Export Generated XML__"])
if "toc" not in st.session_state:
st.session_state.toc = {}
###################### Upload chapter column ##########################################
uploaded_file = upload_col.file_uploader("Upload a Chapter as a PDF file", type="pdf")
# toc_option = upload_col.radio("Choose a method to provide TOC", ("Generate TOC", "Copy Paste TOC"))
forma = """"{
"Topics": [
{
"n.n Topic ": [
"n.n.n Subtopic ",
"n.n.n Subtopic ",
]
}
]
}
"""
if uploaded_file is not None:
# clear_all_json_files()
# index =
if "index" not in st.session_state:
st.session_state.index = process_pdf(uploaded_file)
upload_col.success("Index created successfully")
clear_images_folder()
clear_pages_folder()
# read PDF file
with open(uploaded_file.name, "wb") as f:
f.write(uploaded_file.getbuffer())
# display PDF file
with fitz.open(uploaded_file.name) as doc:
for page in doc: # iterate through the pages
pix = page.get_pixmap() # render page to an image
pix.save("pages/page-%i.png" % page.number)
for page_index in range(len(doc)):
page = doc[page_index]
image_list = page.get_images(full=True)
for image_index, img in enumerate(page.get_images(), start=1):
xref = img[0]
base_image = doc.extract_image(xref)
image_bytes = base_image["image"]
image_ext = base_image["ext"]
image = Image.open(io.BytesIO(image_bytes))
image_filename = f"images/image_page{page_index}_{image_index}.{image_ext}"
image.save(image_filename)
pastecol, copycol = upload_col.columns(2,gap="medium")
copycol.write("AI Generated TOC for unstructured documents")
sampletoc = copycol.button("AI Generated Table")
if sampletoc:
sample_table = st.session_state.index.query("Generate a table of contents with only sections of topics and subtopics for this book")
copycol.write("Click on the top right corner to copy, and Paste it on the left, make edits of nessecary and Save")
copycol.code(sample_table.response)
# elif toc_option == "Copy Paste TOC":
try:
toc_input = pastecol.text_area("Copy the table of contents from your book and paste it here")
if pastecol.button("Process and Save"):
# try:
# table_of_contents = json.loads(toc_input)
with st.spinner('Please wait, it might take a while to process the structure'):
toc_res = "Convert the following table of contents into a json string, use the JSON format given bellow:\n"+ "Table of contents:\n"+ toc_input.strip() + "\n JSON format:\n"+ str(forma) + ". Output should be a valid JSON string."
str_toc = call_openai(toc_res)
str_to = str(str_toc)
# st.write(str_to)
table_of_contents = json.loads(str_to.strip())
# if "table_of_contents" not in st.session_state:
st.session_state.table_of_contents = table_of_contents
pastecol.success("TOC loaded, Go to the next tab")
pastecol.write(st.session_state.table_of_contents)
except json.JSONDecodeError as e:
str_toc = call_openai(toc_res)
table_of_contents = json.loads(str(str_toc))
st.session_state.table_of_contents = table_of_contents
pastecol.write(st.session_state.table_of_contents)
# pastecol.error("Invalid JSON format. Please check your input.")
pastecol.error(e)
###################### extract content ##########################################
if "new_dict" not in st.session_state:
st.session_state.new_dict = {}
for topic in st.session_state.table_of_contents["Topics"]:
for key, value in topic.items():
# Add a description for the topic
st.session_state.new_dict[key] = {'content': '', 'Subtopics': []}
# Add descriptions for the values
for item in value:
st.session_state.new_dict[key]['Subtopics'].append({'content': '', 'Subtopic': item})
pagecol, ecol = extract_col.columns([2,5],gap="large")
for topic_key, topic_value in st.session_state.new_dict.items():
pagecol.write(f"###### {topic_key}")
pagecol.button("Extract Topic", key=f"{topic_key}",on_click=form_callback,args=(f"{topic_key}"))
# expande.write(topic_value["content"])
for subtopic in topic_value["Subtopics"]:
expande = pagecol.expander(f"{subtopic['Subtopic']}")
expande.button("Extract Subtopic", key=f"{subtopic['Subtopic']}",on_click=form_callback, args=(f"{subtopic['Subtopic']}") )
# expande.write(subtopic["content"])
quer = ecol.button("Extract Contents")
if quer:
progress_bar = ecol.progress(0)
total_items = sum(len(subtopics_dict['Subtopics']) for _, subtopics_dict in st.session_state.new_dict.items()) + len(st.session_state.new_dict)
items_processed = 0
for topic, subtopics_dict in st.session_state.new_dict.items():
for subtopic_dict in subtopics_dict['Subtopics']:
subtopic_name = subtopic_dict['Subtopic']
subtopicres = st.session_state.index.query("extract all the information under the subtopic "+str(subtopic_name)+ ", in 4 paragraphs where each paragraph has minimum 40 words.")
subtopic_dict['content'] = subtopicres.response
items_processed += 1
progress_bar.progress(items_processed / total_items)
ecol.info(f"Extracted {subtopic_name}")
topicres = st.session_state.index.query("extract all the information belonging to following section into a paragraph "+str(topic))
subtopics_dict['content'] = topicres.response
items_processed += 1
progress_bar.progress(items_processed / total_items)
# st.session_state.new_dict = data['data']
for topic_key, topic_value in st.session_state.new_dict.items():
expander = ecol.expander(f"{topic_key}")
expander.write(topic_value["content"])
for subtopic in topic_value["Subtopics"]:
expander.markdown(f"**{subtopic['Subtopic']}**")
expander.write(subtopic["content"])
###################### voice over ##########################################
# try:
edcol, excol = voice_col.columns([1,3])
# Course Description
course_description_limit = edcol.number_input("Course Description Word Count Limit", value=30, min_value=1)
# Course Description VoiceOver
course_description_voiceover_limit = edcol.number_input("Course Description VoiceOver Word Count Limit", value=50, min_value=1)
# Topic Summary
topic_summary_limit = edcol.number_input("Topic Summary Word Count Limit", value=30, min_value=1)
# Topic Summary VoiceOver
topic_summary_voiceover_limit = edcol.number_input("Topic Summary VoiceOver Word Count Limit", value=50, min_value=1)
# Number of Bullets per Slide
num_bullets_per_slide = edcol.number_input("Number of Bullets per Slide", value=4, min_value=1)
# Number of Words per Bullet
num_words_bullet = edcol.number_input("Number of Words per Bullet", value=10, min_value=1)
# Bullet VoiceOver
bullet_voiceover_limit = edcol.number_input("VoiceOver per Bullet Word Count Limit", value=20, min_value=1)
# Paraphrasing Percentage Range
# paraphrasing_range = edcol.slider("Paraphrasing % Range", min_value=25, max_value=35, value=(25, 35))
saved_courses = [file for file in os.listdir('.') if file.endswith('.json')]
# Create a select box for saved courses
selectcol,loadcol = excol.columns(2)
cn = excol.text_input("Enter a Course Name")
selected_course = selectcol.selectbox("Select a saved course", saved_courses)
loadcol.write("")
loadcol.write("")
if loadcol.button("Load Project"):
st.session_state.new_dict = load_saved_course(selected_course)
excol.success("Project loaded,, you can now continue with Generate XML")
voice_col.write(st.session_state.new_dict)
gencol, savecol = excol.columns(2)
ex = gencol.button("Generate Voice Over")
# voice_col.write(st.session_state.new_dict)
if ex:
for topic_key, topic_value in st.session_state.new_dict.items():
# Add "VoiceOver" key to the main topic
topic = st.session_state.new_dict[topic_key]
topic_content = topic['content']
topic_voiceover_prompt = f"generate a voice over for the following paragraph in {topic_summary_voiceover_limit} words: {topic_content}"
st.session_state.new_dict[topic_key]["VoiceOver"] = str(call_openai3(topic_voiceover_prompt))
topic_summary_prompt = f"generate a voice over for the following paragraph in {topic_summary_limit} words: {topic_content}"
st.session_state.new_dict[topic_key]["Topic_Summary"] = str(call_openai3(topic_summary_prompt))
# Check if the topic has subtopics
# if "Subtopics" in topic_value:
# Iterate through the subtopics
for subtopic in topic_value["Subtopics"]:
subtopic_content = subtopic['content']
subtopic_content
subtopic_bullet_prompt = f"Divide the following content :\n {subtopic_content.strip()} \n into {num_bullets_per_slide} unordered bullet points , where each bullet point should have exactly {num_words_bullet} words, The response should be a valid json list of strings."
bullets = call_openai3(subtopic_bullet_prompt)
# st.write(bullets)
bullets
listbul = ast.literal_eval(bullets.strip())
subtopic['Bullets'] = listbul
subtopic_voiceover_prompt = f"By dividing the following content :\n {subtopic_content.strip()} \n Generate {num_bullets_per_slide} voiceover bullet scripts ,where each voiceover bullet script should consist of exactly {bullet_voiceover_limit} words, The response should be a valid json list of strings."
BulletVoiceOver = call_openai3(subtopic_voiceover_prompt)
listvoice = ast.literal_eval(BulletVoiceOver.strip())
subtopic['VoiceOverBullets'] = listvoice
sv_voice = savecol.button("Save voiceover")
if sv_voice:
json_filename = f"{cn}.json"
with open(json_filename, 'w') as outfile:
json.dump(st.session_state.new_dict, outfile)
# excol.write(st.session_state.new_dict)
if excol.button("generate xml"):
lsttopics=[]
for topic in st.session_state.new_dict.keys():
lsttopics.append(topic)
course_descriptioninput= f"Generate a course description in exactly {course_description_limit} words for a course containing the following topics:\n"+str(lsttopics)
coursedesctip = call_openai3(course_descriptioninput)
course_descriptionvoin= f"Generate a voice over in exactly {course_description_voiceover_limit} words for a course description containing the following topics:\n"+str(lsttopics) +"\n Exclude objectives in the voice over"
coursedesctipvo = call_openai3(course_descriptionvoin)
# coursedesctipvo
# coursedesctip
# st.session_state.new_dict
edcol.write(st.session_state.new_dict)
xml_output = generate_xml_structure(st.session_state.new_dict,coursedesctip,coursedesctipvo,cn)
pretty_xml = minidom.parseString(xml_output).toprettyxml()
file_name = f"{cn}.xml"
b64_xml = base64.b64encode(xml_output.encode("utf-8")).decode("utf-8")
download_button = f'<a href="data:application/xml;base64,{b64_xml}" download="{file_name}">Download XML file</a>'
# Add the download button
excol.markdown(download_button, unsafe_allow_html=True)
excol.code(pretty_xml)
###################### export generated xml ##########################################
# try:
# # with
# ondu, naduvan, rendu = xml_col.columns([4,3,4],gap="large")
# ondu.write("### Select Images")
# ondu.write("")
# ondu.write("")
# left, right = ondu.columns(2)
# image_topic = left.selectbox("Select a topic", list(st.session_state.new_dict.keys()),label_visibility="collapsed")
# add_to_topic = right.button("Add Image to Topic")
# # Dropdown menu for selecting a subtopic based on the selected topic
# image_subtopic = left.selectbox("Select a subtopic", [subtopic["Subtopic"] for subtopic in st.session_state.new_dict[image_topic]["Subtopics"]],label_visibility="collapsed")
# add_to_subtopic = right.button("Add image to Subtopic")
# image_files = [f for f in os.listdir("images") if f.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif'))]
# selected_images = []
# # for image in image_files:
# expander = ondu.expander("Select images")
# n_pages = 20
# image_exts = ['.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif']
# page_index = ondu.number_input("Enter page number", min_value=1, max_value=n_pages, value=1)
# with ondu.expander(f"Page {page_index}", expanded=True):
# image_files = [f for f in os.listdir("images") if f.startswith(f'image_page{page_index}_') and f.endswith(tuple(image_exts))]
# # if image_files:
# for image_filename in image_files:
# file_path = os.path.join("images", image_filename)
# if os.path.isfile(file_path):
# ondu.image(file_path, caption=os.path.basename(file_path),width=150)
# else:
# st.warning(f"Image not found: {os.path.basename(file_path)}")
# # else:
# # st.warning("No images found for this page.")
# selected_image = image_filename
# if add_to_topic:
# if "img" not in st.session_state.new_dict[image_topic]:
# st.session_state.new_dict[image_topic]["img"] = []
# st.session_state.new_dict[image_topic]["img"].append(selected_image)
# ondu.success(f"Image {selected_image} added to topic {image_topic}")
# if add_to_subtopic:
# for subtopic in st.session_state.new_dict[image_topic]["Subtopics"]:
# if subtopic["Subtopic"] == image_subtopic:
# if "img" not in subtopic:
# subtopic["img"] = []
# subtopic["img"].append(selected_image)
# ondu.success(f"Image {selected_image} added to subtopic {image_subtopic}")
# break
# naduvan.write("### Compare ")
# pages_files = [f for f in os.listdir("pages") if f.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif'))]
# # if pages_files:
# selected_page = naduvan.number_input("Compare Images",step=1)
# selected_image = f"page-{selected_page}.png"
# # Display the selected image
# if selected_image:
# naduvan.image(os.path.join("pages", selected_image), use_column_width=True)
# else:
# naduvan.warning("No images found in the 'pages' folder.")
# rendu.write("### Configure ")
# # chapter_name = rendu.text_input("enter chapter name")
# # r1,r2 = rendu.columns(2)
# # NoOfBullets = r1.text_input("No. of Bullets per Sub Topic")
# # NoOfWordsPerBullet = r1.text_input("No. of words per Bullet")
# # NoOfWordsForVOPerBullet = r1.text_input("No. of words for Voice Over per Bullet")
# save_xml = rendu.button("Save XML")
# if save_xml:
# # if "edited" not in st.session_state:
# # st.session_state.edited = st.session_state.missing
# #xml_col.write(st.session_state.new_dict)
# xml_output = json_to_xml(st.session_state.new_dict, chapter_name, NoOfWordsForVOPerBullet, NoOfWordsPerBullet, NoOfBullets)
# pretty_xml = minidom.parseString(xml_output).toprettyxml()
# xml_file_path = os.path.join("images", f"{chapter_name}.xml")
# with open(xml_file_path, "w") as xml_file:
# xml_file.write(pretty_xml)
# # rendu.success(f"XML file saved as {xml_file_path}")
# with xml_col.expander("XML content"):
# xml_col.code(pretty_xml)
# # Zip the entire "images" folder with its contents
# def zipdir(path, ziph):
# for root, dirs, files in os.walk(path):
# for file in files:
# ziph.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), path))
# zip_file_path = f"images/{chapter_name}.zip"
# with zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
# zipdir("images", zipf)
# rendu.success(f"Zipped folder saved as {zip_file_path}")
# # st.session_state.table_of_contents = {}
# # st.session_state.selected_items = []
# # st.session_state.new_dict = {}
# # st.session_state.index = ""
# # st.session_state.new_dict = {}
# except (KeyError,NameError, AttributeError) as e:
# print("Error saving XML")
# print(f"Error: {type(e).__name__} - {e}")
# # ###################### Manage XML ##########################################
# # db = load_db()
# # chapter_list = list(db.keys())
# # if chapter_list:
# # filesinsidefolder = manage_col.selectbox("Select a zip file", [f for f in os.listdir("images") if f.endswith(('.zip'))])
# # if filesinsidefolder and filesinsidefolder.endswith('.zip'):
# # file_path = os.path.join("images", filesinsidefolder)
# # with open(file_path, "rb") as f:
# # file_bytes = f.read()
# # manage_col.download_button(
# # label="Download Zip File",
# # data=file_bytes,
# # file_name=filesinsidefolder,
# # mime="application/zip",
# # )
# # else:
# # manage_col.warning("No file selected.")
# # selected_chapter = manage_col.selectbox("Select a chapter first:", chapter_list)
# # delete_button = manage_col.button("Delete Chapter")
# # post_button= manage_col.button("Continue with CourseBOT 2")
# # if post_button:
# # url = "https://coursebot2.flipick.com/couresbuilderapi/api/Course/ImportCourse"
# # payload = json.dumps({
# # "ImportXML": str(db[selected_chapter])
# # })
# # headers = {
# # 'Content-Type': 'application/json'
# # }
# # response = requests.request("POST", url, headers=headers, data=payload)
# # print(response)
# # response_dict = json.loads(response.text)
# # url_to_launch = response_dict["result"]["urlToLaunch"]
# # manage_col.subheader("Click on the url bellow to continue.")
# # manage_col.write(url_to_launch)
# # if delete_button:
# # if delete_chapter(selected_chapter):
# # manage_col.success(f"Chapter {selected_chapter} deleted successfully.")
# # db = load_db()
# # chapter_list = list(db.keys())
# # if chapter_list:
# # selected_chapter = manage_col.selectbox("Select a chapter:", chapter_list)
# # manage_col.code(db[selected_chapter], language="xml")
# # else:
# # manage_col.warning("No chapters found. Upload a chapter and save its XML first.")
# # else:
# # manage_col.error(f"Failed to delete chapter {selected_chapter}.")
# # else:
# # manage_col.warning("No chapters found. Upload a chapter and save its XML first.") | [
"generate a voice over for the following paragraph in PLACEHOLDER words: PLACEHOLDER"
] |
2024-01-10 | closedai-project/closedai | src~closedai~client.py | import openai
openai.api_key = "N/A"
openai.api_base = "http://127.0.0.1:8000"
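# Hedged usage sketch (added; not in the original file). With `openai.api_base` pointing at a
# locally running closedai server, the standard pre-1.0 `openai` client calls are expected to
# work unchanged, assuming the server implements the completions endpoint. "local-model" is a
# placeholder name, not one documented by the project.
if __name__ == "__main__":
    completion = openai.Completion.create(model="local-model", prompt="Hello, closedai!", max_tokens=16)
    print(completion.choices[0].text)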
| [] |
2024-01-10 | BraveGroup/Drive-WM | src~diffusers~pipelines~alt_diffusion~pipeline_alt_diffusion.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer
from ...configuration_utils import FrozenDict
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.lora import adjust_lora_scale_text_encoder
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
USE_PEFT_BACKEND,
deprecate,
logging,
replace_example_docstring,
scale_lora_layers,
unscale_lora_layers,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline
from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from .modeling_roberta_series import RobertaSeriesModelWithTransformation
from .pipeline_output import AltDiffusionPipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import AltDiffusionPipeline
>>> pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion-m9", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> # "dark elf princess, highly detailed, d & d, fantasy, highly detailed, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and fuji choko and viktoria gavrilenko and hoang lap"
>>> prompt = "黑暗精灵公主,非常详细,幻想,非常详细,数字绘画,概念艺术,敏锐的焦点,插图"
>>> image = pipe(prompt).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
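# Added usage sketch (illustration only; the tensors and the 0.7 factor are assumptions, not
# values defined in this file). After classifier-free guidance, the combined prediction can be
# rescaled toward the std of the text-conditioned branch to counter overexposure, exactly as
# done later in the denoising loop:
#
#     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
#     noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)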
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline with Stable->Alt, CLIPTextModel->RobertaSeriesModelWithTransformation, CLIPTokenizer->XLMRobertaTokenizer, AltDiffusionSafetyChecker->StableDiffusionSafetyChecker
class AltDiffusionPipeline(
DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
):
r"""
Pipeline for text-to-image generation using Alt Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.RobertaSeriesModelWithTransformation`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer ([`~transformers.XLMRobertaTokenizer`]):
A `XLMRobertaTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
"""
model_cpu_offload_seq = "text_encoder->unet->vae"
_optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
_exclude_from_cpu_offload = ["safety_checker"]
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: RobertaSeriesModelWithTransformation,
tokenizer: XLMRobertaTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
image_encoder: CLIPVisionModelWithProjection = None,
requires_safety_checker: bool = True,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
)
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["clip_sample"] = False
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Alt Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
version.parse(unet.config._diffusers_version).base_version
) < version.parse("0.9.0.dev0")
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = (
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
" the `unet/config.json` file"
)
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
**kwargs,
):
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
prompt_embeds_tuple = self.encode_prompt(
prompt=prompt,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
**kwargs,
)
# concatenate for backwards comp
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
return prompt_embeds
def encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
if clip_skip is None:
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
else:
prompt_embeds = self.text_encoder(
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
)
# Access the `hidden_states` first, that contains a tuple of
# all the hidden states from the encoder layers. Then index into
# the tuple to access the hidden states from the desired layer.
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
# We also need to apply the final LayerNorm here to not mess with the
# representations. The `last_hidden_states` that we typically use for
# obtaining the final prompt representations passes through the LayerNorm
# layer.
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
if self.text_encoder is not None:
prompt_embeds_dtype = self.text_encoder.dtype
elif self.unet is not None:
prompt_embeds_dtype = self.unet.dtype
else:
prompt_embeds_dtype = prompt_embeds.dtype
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
return prompt_embeds, negative_prompt_embeds
def encode_image(self, image, device, num_images_per_prompt):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = self.feature_extractor(image, return_tensors="pt").pixel_values
image = image.to(device=device, dtype=dtype)
image_embeds = self.image_encoder(image).image_embeds
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)
return image_embeds, uncond_image_embeds
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
def decode_latents(self, latents):
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stages where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
that are known to work well for different pipelines such as Alt Diffusion v1, v2, and Alt Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
if not hasattr(self, "unet"):
raise ValueError("The pipeline must have `unet` for using FreeU.")
self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
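# Added usage sketch (the specific values are an assumption taken from combinations reported
# for Stable Diffusion v1.x in the FreeU repository, not values defined in this file):
#
#     pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)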
def disable_freeu(self):
"""Disables the FreeU mechanism if enabled."""
self.unet.disable_freeu()
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
w (`torch.Tensor`):
guidance scale values (one per sample) used to generate the embedding vectors
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
"""
assert len(w.shape) == 1
w = w * 1000.0
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
emb = w.to(dtype)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1))
assert emb.shape == (w.shape[0], embedding_dim)
return emb
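# Added note (illustration with assumed inputs, not values from this file): for
# w = torch.tensor([7.5]) and embedding_dim=512 this returns a (1, 512) tensor of
# concatenated sin/cos features of 1000 * w; the pipeline passes it to the UNet as
# `timestep_cond` when `unet.config.time_cond_proj_dim` is set (see the denoising loop).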
@property
def guidance_scale(self):
return self._guidance_scale
@property
def guidance_rescale(self):
return self._guidance_rescale
@property
def clip_skip(self):
return self._clip_skip
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
@property
def cross_attention_kwargs(self):
return self._cross_attention_kwargs
@property
def num_timesteps(self):
return self._num_timesteps
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] instead of a
plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
using zero terminal SNR.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, *optional*):
A function that calls at the end of each denoising steps during the inference. The function is called
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
`callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
Returns:
[`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# to deal with lora scaling and other possible forward hooks
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
callback_on_step_end_tensor_inputs,
)
self._guidance_scale = guidance_scale
self._guidance_rescale = guidance_rescale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
lora_scale = (
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
)
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
self.do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
clip_skip=self.clip_skip,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
if ip_adapter_image is not None:
image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
if self.do_classifier_free_guidance:
image_embeds = torch.cat([negative_image_embeds, image_embeds])
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 6.1 Add image embeds for IP-Adapter
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
# 6.2 Optionally get Guidance Scale Embedding
timestep_cond = None
if self.unet.config.time_cond_proj_dim is not None:
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
timestep_cond = self.get_guidance_scale_embedding(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
timestep_cond=timestep_cond,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
0
]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image, has_nsfw_concept)
return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| [
"negative_prompt_embeds",
"prompt_embeds"
] |
2024-01-10 | BraveGroup/Drive-WM | src~diffusers~pipelines~stable_diffusion_xl~pipeline_stable_diffusion_xl_img2img.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import PIL.Image
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionModelWithProjection,
)
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...loaders import (
FromSingleFileMixin,
IPAdapterMixin,
StableDiffusionXLLoraLoaderMixin,
TextualInversionLoaderMixin,
)
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from ...models.lora import adjust_lora_scale_text_encoder
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
USE_PEFT_BACKEND,
deprecate,
is_invisible_watermark_available,
is_torch_xla_available,
logging,
replace_example_docstring,
scale_lora_layers,
unscale_lora_layers,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline
from .pipeline_output import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLImg2ImgPipeline
>>> from diffusers.utils import load_image
>>> pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
>>> init_image = load_image(url).convert("RGB")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt, image=init_image).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(encoder_output, generator):
if hasattr(encoder_output, "latent_dist"):
return encoder_output.latent_dist.sample(generator)
elif hasattr(encoder_output, "latents"):
return encoder_output.latents
else:
raise AttributeError("Could not access latents of provided encoder_output")
class StableDiffusionXLImg2ImgPipeline(
DiffusionPipeline,
TextualInversionLoaderMixin,
FromSingleFileMixin,
StableDiffusionXLLoraLoaderMixin,
IPAdapterMixin,
):
r"""
Pipeline for text-guided image-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
text_encoder_2 ([`CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
requires_aesthetics_score (`bool`, *optional*, defaults to `False`):
Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the
config of `stabilityai/stable-diffusion-xl-refiner-1-0`.
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
`stabilityai/stable-diffusion-xl-base-1-0`.
add_watermarker (`bool`, *optional*):
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
watermarker will be used.
"""
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
_optional_components = [
"tokenizer",
"tokenizer_2",
"text_encoder",
"text_encoder_2",
"image_encoder",
"feature_extractor",
]
_callback_tensor_inputs = [
"latents",
"prompt_embeds",
"negative_prompt_embeds",
"add_text_embeds",
"add_time_ids",
"negative_pooled_prompt_embeds",
"add_neg_time_ids",
]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
image_encoder: CLIPVisionModelWithProjection = None,
feature_extractor: CLIPImageProcessor = None,
requires_aesthetics_score: bool = False,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
image_encoder=image_encoder,
feature_extractor=feature_extractor,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
def encode_prompt(
self,
prompt: str,
prompt_2: Optional[str] = None,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
do_classifier_free_guidance: bool = True,
negative_prompt: Optional[str] = None,
negative_prompt_2: Optional[str] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
device = device or self._execution_device
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if self.text_encoder is not None:
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if self.text_encoder_2 is not None:
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
else:
scale_lora_layers(self.text_encoder_2, lora_scale)
prompt = [prompt] if isinstance(prompt, str) else prompt
if prompt is not None:
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
# textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = []
prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, tokenizer)
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
# We are always only interested in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
if clip_skip is None:
prompt_embeds = prompt_embeds.hidden_states[-2]
else:
# "2" because SDXL always indexes from the penultimate layer.
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
# get unconditional embeddings for classifier free guidance
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
elif do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt or ""
negative_prompt_2 = negative_prompt_2 or negative_prompt
# normalize str to list
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
negative_prompt_2 = (
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
)
uncond_tokens: List[str]
if prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = [negative_prompt, negative_prompt_2]
negative_prompt_embeds_list = []
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = tokenizer(
negative_prompt,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
negative_prompt_embeds = text_encoder(
uncond_input.input_ids.to(device),
output_hidden_states=True,
)
# We are always only interested in the pooled output of the final text encoder
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
negative_prompt_embeds_list.append(negative_prompt_embeds)
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
if self.text_encoder_2 is not None:
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
else:
prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
if self.text_encoder_2 is not None:
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
else:
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if do_classifier_free_guidance:
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2, lora_scale)
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
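# Illustrative sketch of the introspection above (scheduler names are only examples):
# for a scheduler whose `step()` accepts `eta` and `generator`, e.g.
#     inspect.signature(DDIMScheduler.step).parameters  ->  {..., "eta", "generator", ...}
# both keys end up in `extra_step_kwargs`, while schedulers without those parameters
# receive an empty dict, so the same denoising loop works across scheduler types.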
def check_inputs(
self,
prompt,
prompt_2,
strength,
num_inference_steps,
callback_steps,
negative_prompt=None,
negative_prompt_2=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
):
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
if num_inference_steps is None:
raise ValueError("`num_inference_steps` cannot be None.")
elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
raise ValueError(
f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
f" {type(num_inference_steps)}."
)
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt_2 is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
# get the original timestep using init_timestep
if denoising_start is None:
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
else:
t_start = 0
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
# Strength is irrelevant if we directly request a timestep to start at;
# the start point is determined by `denoising_start` instead.
if denoising_start is not None:
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_start * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
# if the scheduler is a 2nd order scheduler we might have to do +1
# because `num_inference_steps` might be even given that every timestep
# (except the highest one) is duplicated. If `num_inference_steps` is even it would
# mean that we cut the timesteps in the middle of the denoising step
# (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
# we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
num_inference_steps = num_inference_steps + 1
# because t_n+1 >= t_n, we slice the timesteps starting from the end
timesteps = timesteps[-num_inference_steps:]
return timesteps, num_inference_steps
return timesteps, num_inference_steps - t_start
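# Worked example for `get_timesteps` (a sketch, not normative): with num_inference_steps=50
# and strength=0.3, init_timestep = min(int(50 * 0.3), 50) = 15 and t_start = 35, so only the
# last 15 timesteps are denoised. With denoising_start=0.8 and num_train_timesteps=1000, the
# cutoff is 1000 - 0.8 * 1000 = 200, and only timesteps below 200 are kept, regardless of
# `strength`.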
def prepare_latents(
self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
):
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
# Offload text encoder if `enable_model_cpu_offload` was enabled
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.text_encoder_2.to("cpu")
torch.cuda.empty_cache()
image = image.to(device=device, dtype=dtype)
batch_size = batch_size * num_images_per_prompt
if image.shape[1] == 4:
init_latents = image
else:
# make sure the VAE is in float32 mode, as it overflows in float16
if self.vae.config.force_upcast:
image = image.float()
self.vae.to(dtype=torch.float32)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
elif isinstance(generator, list):
init_latents = [
retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
for i in range(batch_size)
]
init_latents = torch.cat(init_latents, dim=0)
else:
init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
if self.vae.config.force_upcast:
self.vae.to(dtype)
init_latents = init_latents.to(dtype)
init_latents = self.vae.config.scaling_factor * init_latents
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
# expand init_latents for batch_size
additional_image_per_prompt = batch_size // init_latents.shape[0]
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
raise ValueError(
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
)
else:
init_latents = torch.cat([init_latents], dim=0)
if add_noise:
shape = init_latents.shape
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
# get latents
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
latents = init_latents
return latents
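# Sketch of the img2img latent preparation above (values are illustrative): a 1024x1024
# RGB image becomes a (1, 4, 128, 128) latent after VAE encoding and scaling by
# vae.config.scaling_factor (0.13025 for the SDXL VAE), and, when add_noise is True,
# scheduler.add_noise() pushes it to the noise level of `latent_timestep` so denoising
# can resume from there.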
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
def encode_image(self, image, device, num_images_per_prompt):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = self.feature_extractor(image, return_tensors="pt").pixel_values
image = image.to(device=device, dtype=dtype)
image_embeds = self.image_encoder(image).image_embeds
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)
return image_embeds, uncond_image_embeds
def _get_add_time_ids(
self,
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
negative_original_size,
negative_crops_coords_top_left,
negative_target_size,
dtype,
text_encoder_projection_dim=None,
):
if self.config.requires_aesthetics_score:
add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
add_neg_time_ids = list(
negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
)
else:
add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if (
expected_add_embed_dim > passed_add_embed_dim
and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
)
elif (
expected_add_embed_dim < passed_add_embed_dim
and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
)
elif expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
return add_time_ids, add_neg_time_ids
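# Illustrative contents of the micro-conditioning vectors built above (example numbers):
# with requires_aesthetics_score=True (the refiner configuration),
#     add_time_ids     = [1024, 1024, 0, 0, 6.0]   # original_size + crop top-left + aesthetic_score
#     add_neg_time_ids = [1024, 1024, 0, 0, 2.5]
# whereas the base configuration appends target_size instead of the aesthetic score,
# giving six entries per vector.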
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stages where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
if not hasattr(self, "unet"):
raise ValueError("The pipeline must have `unet` for using FreeU.")
self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
def disable_freeu(self):
"""Disables the FreeU mechanism if enabled."""
self.unet.disable_freeu()
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
timesteps (`torch.Tensor`):
generate embedding vectors at these timesteps
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
"""
assert len(w.shape) == 1
w = w * 1000.0
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
emb = w.to(dtype)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1))
assert emb.shape == (w.shape[0], embedding_dim)
return emb
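# Shape sketch for the embedding above (illustrative): for w repeated over a batch of 2
# and embedding_dim = 256, the returned tensor has shape (2, 256), built from 128 sine and
# 128 cosine features of w * 1000, mirroring the sinusoidal timestep embedding in the
# referenced VDM code.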
@property
def guidance_scale(self):
return self._guidance_scale
@property
def guidance_rescale(self):
return self._guidance_rescale
@property
def clip_skip(self):
return self._clip_skip
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
@property
def cross_attention_kwargs(self):
return self._cross_attention_kwargs
@property
def denoising_end(self):
return self._denoising_end
@property
def denoising_start(self):
return self._denoising_start
@property
def num_timesteps(self):
return self._num_timesteps
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: PipelineImageInput = None,
strength: float = 0.3,
num_inference_steps: int = 50,
denoising_start: Optional[float] = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
negative_original_size: Optional[Tuple[int, int]] = None,
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
negative_target_size: Optional[Tuple[int, int]] = None,
aesthetic_score: float = 6.0,
negative_aesthetic_score: float = 2.5,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
image (`torch.FloatTensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`):
The image(s) to modify with the pipeline.
strength (`float`, *optional*, defaults to 0.3):
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
will be used as a starting point, adding more noise to it the larger the `strength`. The number of
denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`. Note that when
`denoising_start` is specified, the value of `strength` will be ignored.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_start (`float`, *optional*):
When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image
Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality).
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refine Image
Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality).
guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a
plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
For most cases, `target_size` should be set to the desired height and width of the generated image. If
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a target image resolution. It should be the same
as the `target_size` in most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
aesthetic_score (`float`, *optional*, defaults to 6.0):
Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
simulate an aesthetic score of the generated image by influencing the negative text condition.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, *optional*):
A function that calls at the end of each denoising steps during the inference. The function is called
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
`callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is a list with the generated images.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
prompt_2,
strength,
num_inference_steps,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
callback_on_step_end_tensor_inputs,
)
self._guidance_scale = guidance_scale
self._guidance_rescale = guidance_rescale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._denoising_end = denoising_end
self._denoising_start = denoising_start
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
text_encoder_lora_scale = (
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt=prompt,
prompt_2=prompt_2,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=self.do_classifier_free_guidance,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
clip_skip=self.clip_skip,
)
# 4. Preprocess image
image = self.image_processor.preprocess(image)
# 5. Prepare timesteps
def denoising_value_valid(dnv):
return isinstance(dnv, float) and 0 < dnv < 1
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(
num_inference_steps,
strength,
device,
denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
add_noise = self.denoising_start is None
# 6. Prepare latent variables
latents = self.prepare_latents(
image,
latent_timestep,
batch_size,
num_images_per_prompt,
prompt_embeds.dtype,
device,
generator,
add_noise,
)
# 7. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
height, width = latents.shape[-2:]
height = height * self.vae_scale_factor
width = width * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 8. Prepare added time ids & embeddings
if negative_original_size is None:
negative_original_size = original_size
if negative_target_size is None:
negative_target_size = target_size
add_text_embeds = pooled_prompt_embeds
if self.text_encoder_2 is None:
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
else:
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
negative_original_size,
negative_crops_coords_top_left,
negative_target_size,
dtype=prompt_embeds.dtype,
text_encoder_projection_dim=text_encoder_projection_dim,
)
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device)
if ip_adapter_image is not None:
image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
if self.do_classifier_free_guidance:
image_embeds = torch.cat([negative_image_embeds, image_embeds])
image_embeds = image_embeds.to(device)
# 9. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
# 9.1 Apply denoising_end
if (
self.denoising_end is not None
and self.denoising_start is not None
and denoising_value_valid(self.denoising_end)
and denoising_value_valid(self.denoising_start)
and self.denoising_start >= self.denoising_end
):
raise ValueError(
f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: "
+ f" {self.denoising_end} when using type float."
)
elif self.denoising_end is not None and denoising_value_valid(self.denoising_end):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (self.denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
# 9.2 Optionally get Guidance Scale Embedding
timestep_cond = None
if self.unet.config.time_cond_proj_dim is not None:
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
timestep_cond = self.get_guidance_scale_embedding(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
if ip_adapter_image is not None:
added_cond_kwargs["image_embeds"] = image_embeds
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
timestep_cond=timestep_cond,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
negative_pooled_prompt_embeds = callback_outputs.pop(
"negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
)
add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
if not output_type == "latent":
# make sure the VAE is in float32 mode, as it overflows in float16
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
self.upcast_vae()
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
# cast back to fp16 if needed
if needs_upcasting:
self.vae.to(dtype=torch.float16)
else:
image = latents
return StableDiffusionXLPipelineOutput(images=image)
# apply watermark if available
if self.watermark is not None:
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
| [
"prompt_embeds",
"negative_pooled_prompt_embeds",
"[PLACEHOLDER, PLACEHOLDER]",
"False",
"negative_prompt_embeds",
"[]"
] |
2024-01-10 | BraveGroup/Drive-WM | src~diffusers~pipelines~stable_diffusion_xl~pipeline_stable_diffusion_xl.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionModelWithProjection,
)
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...loaders import (
FromSingleFileMixin,
IPAdapterMixin,
StableDiffusionXLLoraLoaderMixin,
TextualInversionLoaderMixin,
)
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from ...models.lora import adjust_lora_scale_text_encoder
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
USE_PEFT_BACKEND,
deprecate,
is_invisible_watermark_available,
is_torch_xla_available,
logging,
replace_example_docstring,
scale_lora_layers,
unscale_lora_layers,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline
from .pipeline_output import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLPipeline
>>> pipe = StableDiffusionXLPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
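# Illustrative sketch (not part of the upstream file): how `rescale_noise_cfg` is typically
# applied right after classifier-free guidance. Shapes and the guidance scale of 7.5 are
# assumptions for illustration only.
#
#   noise_pred_text = torch.randn(1, 4, 128, 128)    # hypothetical text-conditioned prediction
#   noise_pred_uncond = torch.randn(1, 4, 128, 128)  # hypothetical unconditional prediction
#   noise_cfg = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
#   noise_cfg = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)
#
# With guidance_rescale=0.7 the result is 0.7 * (noise_cfg rescaled to the std of
# noise_pred_text) + 0.3 * noise_cfg, which counteracts the overexposure described in
# https://arxiv.org/pdf/2305.08891.pdf.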
class StableDiffusionXLPipeline(
DiffusionPipeline,
FromSingleFileMixin,
StableDiffusionXLLoraLoaderMixin,
TextualInversionLoaderMixin,
IPAdapterMixin,
):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
text_encoder_2 ([` CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
`stabilityai/stable-diffusion-xl-base-1.0`.
add_watermarker (`bool`, *optional*):
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
watermarker will be used.
"""
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
_optional_components = [
"tokenizer",
"tokenizer_2",
"text_encoder",
"text_encoder_2",
"image_encoder",
"feature_extractor",
]
_callback_tensor_inputs = [
"latents",
"prompt_embeds",
"negative_prompt_embeds",
"add_text_embeds",
"add_time_ids",
"negative_pooled_prompt_embeds",
"negative_add_time_ids",
]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
image_encoder: CLIPVisionModelWithProjection = None,
feature_extractor: CLIPImageProcessor = None,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
image_encoder=image_encoder,
feature_extractor=feature_extractor,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.default_sample_size = self.unet.config.sample_size
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def encode_prompt(
self,
prompt: str,
prompt_2: Optional[str] = None,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
do_classifier_free_guidance: bool = True,
negative_prompt: Optional[str] = None,
negative_prompt_2: Optional[str] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
device = device or self._execution_device
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if self.text_encoder is not None:
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if self.text_encoder_2 is not None:
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
else:
scale_lora_layers(self.text_encoder_2, lora_scale)
prompt = [prompt] if isinstance(prompt, str) else prompt
if prompt is not None:
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
prompt_2 = prompt_2 or prompt
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
# textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = []
prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, tokenizer)
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
# We are always interested only in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
if clip_skip is None:
prompt_embeds = prompt_embeds.hidden_states[-2]
else:
# "2" because SDXL always indexes from the penultimate layer.
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
# get unconditional embeddings for classifier free guidance
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
elif do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt or ""
negative_prompt_2 = negative_prompt_2 or negative_prompt
# normalize str to list
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
negative_prompt_2 = (
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
)
uncond_tokens: List[str]
if prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = [negative_prompt, negative_prompt_2]
negative_prompt_embeds_list = []
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = tokenizer(
negative_prompt,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
negative_prompt_embeds = text_encoder(
uncond_input.input_ids.to(device),
output_hidden_states=True,
)
# We are always interested only in the pooled output of the final text encoder
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
negative_prompt_embeds_list.append(negative_prompt_embeds)
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
if self.text_encoder_2 is not None:
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
else:
prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
if self.text_encoder_2 is not None:
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
else:
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if do_classifier_free_guidance:
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2, lora_scale)
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
def encode_image(self, image, device, num_images_per_prompt):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = self.feature_extractor(image, return_tensors="pt").pixel_values
image = image.to(device=device, dtype=dtype)
image_embeds = self.image_encoder(image).image_embeds
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)
return image_embeds, uncond_image_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
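# Illustrative sketch (assumption, not in the original file): `eta` is only forwarded when the
# scheduler's `step()` accepts it, e.g. with `DDIMScheduler`:
#
#   from diffusers import DDIMScheduler
#   pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
#   extra = pipe.prepare_extra_step_kwargs(generator=None, eta=0.0)  # includes {"eta": 0.0}
#
# With an Euler-type scheduler the returned dict would simply omit "eta".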
def check_inputs(
self,
prompt,
prompt_2,
height,
width,
callback_steps,
negative_prompt=None,
negative_prompt_2=None,
prompt_embeds=None,
negative_prompt_embeds=None,
pooled_prompt_embeds=None,
negative_pooled_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt_2 is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if prompt_embeds is not None and pooled_prompt_embeds is None:
raise ValueError(
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
)
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
raise ValueError(
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def _get_add_time_ids(
self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
):
add_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
return add_time_ids
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stages where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
if not hasattr(self, "unet"):
raise ValueError("The pipeline must have `unet` for using FreeU.")
self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
def disable_freeu(self):
"""Disables the FreeU mechanism if enabled."""
self.unet.disable_freeu()
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
w (`torch.Tensor`):
guidance scale values (one per batch element) used to generate the embedding vectors
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
"""
assert len(w.shape) == 1
w = w * 1000.0
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
emb = w.to(dtype)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1))
assert emb.shape == (w.shape[0], embedding_dim)
return emb
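# Illustrative sketch (assumed values, not part of the upstream file): for a batch of 2 images
# with guidance_scale=7.5 and time_cond_proj_dim=256, the embedding is built as
#
#   w = torch.tensor(7.5 - 1).repeat(2)                      # shape (2,)
#   emb = self.get_guidance_scale_embedding(w, embedding_dim=256)
#   assert emb.shape == (2, 256)
#
# i.e. a sinusoidal (sin/cos) embedding of the scaled guidance weight, mirroring how
# `timestep_cond` is prepared in `__call__` below.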
@property
def guidance_scale(self):
return self._guidance_scale
@property
def guidance_rescale(self):
return self._guidance_rescale
@property
def clip_skip(self):
return self._clip_skip
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
@property
def cross_attention_kwargs(self):
return self._cross_attention_kwargs
@property
def denoising_end(self):
return self._denoising_end
@property
def num_timesteps(self):
return self._num_timesteps
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
negative_original_size: Optional[Tuple[int, int]] = None,
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
negative_target_size: Optional[Tuple[int, int]] = None,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won't work well for
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
and checkpoints that are not specifically fine-tuned on low resolutions.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image. This is set to 1024 by default for the best results.
Anything below 512 pixels won't work well for
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
and checkpoints that are not specifically fine-tuned on low resolutions.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
of a plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
For most cases, `target_size` should be set to the desired height and width of the generated image. If
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a target image resolution. It should be the same
as `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
callback_on_step_end (`Callable`, *optional*):
A function that is called at the end of each denoising step during inference. The function is called
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
`callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
Returns:
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is a list with the generated images.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
# 0. Default height and width to unet
height = height or self.default_sample_size * self.vae_scale_factor
width = width or self.default_sample_size * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
prompt_2,
height,
width,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
callback_on_step_end_tensor_inputs,
)
self._guidance_scale = guidance_scale
self._guidance_rescale = guidance_rescale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._denoising_end = denoising_end
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
lora_scale = (
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt=prompt,
prompt_2=prompt_2,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=self.do_classifier_free_guidance,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=lora_scale,
clip_skip=self.clip_skip,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings
add_text_embeds = pooled_prompt_embeds
if self.text_encoder_2 is None:
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
else:
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
add_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
dtype=prompt_embeds.dtype,
text_encoder_projection_dim=text_encoder_projection_dim,
)
if negative_original_size is not None and negative_target_size is not None:
negative_add_time_ids = self._get_add_time_ids(
negative_original_size,
negative_crops_coords_top_left,
negative_target_size,
dtype=prompt_embeds.dtype,
text_encoder_projection_dim=text_encoder_projection_dim,
)
else:
negative_add_time_ids = add_time_ids
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
if ip_adapter_image is not None:
image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
if self.do_classifier_free_guidance:
image_embeds = torch.cat([negative_image_embeds, image_embeds])
image_embeds = image_embeds.to(device)
# 8. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
# 8.1 Apply denoising_end
if (
self.denoising_end is not None
and isinstance(self.denoising_end, float)
and self.denoising_end > 0
and self.denoising_end < 1
):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (self.denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
# 9. Optionally get Guidance Scale Embedding
timestep_cond = None
if self.unet.config.time_cond_proj_dim is not None:
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
timestep_cond = self.get_guidance_scale_embedding(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
if ip_adapter_image is not None:
added_cond_kwargs["image_embeds"] = image_embeds
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
timestep_cond=timestep_cond,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
negative_pooled_prompt_embeds = callback_outputs.pop(
"negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
)
add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
if not output_type == "latent":
# make sure the VAE is in float32 mode, as it overflows in float16
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
self.upcast_vae()
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
# cast back to fp16 if needed
if needs_upcasting:
self.vae.to(dtype=torch.float16)
else:
image = latents
if not output_type == "latent":
# apply watermark if available
if self.watermark is not None:
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
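# --- Illustrative usage sketch (not part of the upstream file) ---
# A minimal, hedged example of calling the pipeline with `callback_on_step_end`, as described
# in the `__call__` docstring above. The checkpoint id, prompt, and step count are assumptions
# for illustration; the callback receives only the tensors requested via
# `callback_on_step_end_tensor_inputs` and must return the (possibly modified) dict.
if __name__ == "__main__":
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    ).to("cuda")
    def log_latents(pipeline, step, timestep, callback_kwargs):
        # print the latent shape at every denoising step
        print(step, timestep, callback_kwargs["latents"].shape)
        return callback_kwargs
    image = pipe(
        "a photo of an astronaut riding a horse on mars",
        num_inference_steps=30,
        callback_on_step_end=log_latents,
        callback_on_step_end_tensor_inputs=["latents"],
    ).images[0]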
| [
"prompt_embeds",
"negative_pooled_prompt_embeds",
"[PLACEHOLDER, PLACEHOLDER]",
"False",
"negative_prompt_embeds",
"[]"
] |
2024-01-10 | BraveGroup/Drive-WM | examples~community~lpw_stable_diffusion_xl.py | ## ----------------------------------------------------------
# An SDXL pipeline that can take unlimited weighted prompts
#
# Author: Andrew Zhu
# Github: https://github.com/xhinker
# Medium: https://medium.com/@xhinker
## -----------------------------------------------------------
import inspect
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import DiffusionPipeline, StableDiffusionXLPipeline
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
is_accelerate_available,
is_accelerate_version,
is_invisible_watermark_available,
logging,
replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor
if is_invisible_watermark_available():
from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
def parse_prompt_attention(text):
"""
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
Accepted tokens are:
(abc) - increases attention to abc by a multiplier of 1.1
(abc:3.12) - increases attention to abc by a multiplier of 3.12
[abc] - decreases attention to abc by a multiplier of 1.1
\\( - literal character '('
\\[ - literal character '['
\\) - literal character ')'
\\] - literal character ']'
\\ - literal character '\'
anything else - just text
>>> parse_prompt_attention('normal text')
[['normal text', 1.0]]
>>> parse_prompt_attention('an (important) word')
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
>>> parse_prompt_attention('(unbalanced')
[['unbalanced', 1.1]]
>>> parse_prompt_attention('\\(literal\\]')
[['(literal]', 1.0]]
>>> parse_prompt_attention('(unnecessary)(parens)')
[['unnecessaryparens', 1.1]]
>>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
[['a ', 1.0],
['house', 1.5730000000000004],
[' ', 1.1],
['on', 1.0],
[' a ', 1.1],
['hill', 0.55],
[', sun, ', 1.1],
['sky', 1.4641000000000006],
['.', 1.1]]
"""
import re
re_attention = re.compile(
r"""
\\\(|\\\)|\\\[|\\]|\\\\|\\|\(|\[|:([+-]?[.\d]+)\)|
\)|]|[^\\()\[\]:]+|:
""",
re.X,
)
re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
res = []
round_brackets = []
square_brackets = []
round_bracket_multiplier = 1.1
square_bracket_multiplier = 1 / 1.1
def multiply_range(start_position, multiplier):
for p in range(start_position, len(res)):
res[p][1] *= multiplier
for m in re_attention.finditer(text):
text = m.group(0)
weight = m.group(1)
if text.startswith("\\"):
res.append([text[1:], 1.0])
elif text == "(":
round_brackets.append(len(res))
elif text == "[":
square_brackets.append(len(res))
elif weight is not None and len(round_brackets) > 0:
multiply_range(round_brackets.pop(), float(weight))
elif text == ")" and len(round_brackets) > 0:
multiply_range(round_brackets.pop(), round_bracket_multiplier)
elif text == "]" and len(square_brackets) > 0:
multiply_range(square_brackets.pop(), square_bracket_multiplier)
else:
parts = re.split(re_break, text)
for i, part in enumerate(parts):
if i > 0:
res.append(["BREAK", -1])
res.append([part, 1.0])
for pos in round_brackets:
multiply_range(pos, round_bracket_multiplier)
for pos in square_brackets:
multiply_range(pos, square_bracket_multiplier)
if len(res) == 0:
res = [["", 1.0]]
# merge runs of identical weights
i = 0
while i + 1 < len(res):
if res[i][1] == res[i + 1][1]:
res[i][0] += res[i + 1][0]
res.pop(i + 1)
else:
i += 1
return res
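# Illustrative note (not in the original file): besides the doctest examples above, the parser
# also recognizes the literal token BREAK, which is emitted as a separate ['BREAK', -1] entry
# so downstream chunking can split prompts explicitly, e.g.
#
#   parse_prompt_attention('a cat BREAK a dog')
#   # -> [['a cat', 1.0], ['BREAK', -1], ['a dog', 1.0]]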
def get_prompts_tokens_with_weights(clip_tokenizer: CLIPTokenizer, prompt: str):
"""
Get prompt token ids and weights. This function works for both prompt and negative prompt
Args:
clip_tokenizer (CLIPTokenizer)
A CLIPTokenizer
prompt (str)
A prompt string with weights
Returns:
text_tokens (list)
A list containing token ids
text_weight (list)
A list containing the corresponding weight of each token id
Example:
import torch
from transformers import CLIPTokenizer
clip_tokenizer = CLIPTokenizer.from_pretrained(
"stablediffusionapi/deliberate-v2"
, subfolder = "tokenizer"
, dtype = torch.float16
)
token_id_list, token_weight_list = get_prompts_tokens_with_weights(
clip_tokenizer = clip_tokenizer
,prompt = "a (red:1.5) cat"*70
)
"""
texts_and_weights = parse_prompt_attention(prompt)
text_tokens, text_weights = [], []
for word, weight in texts_and_weights:
# tokenize and discard the starting and the ending token
token = clip_tokenizer(word, truncation=False).input_ids[1:-1]  # so that a prompt of any length can be tokenized
# the returned token is a 1d list: [320, 1125, 539, 320]
# merge the new tokens to the all tokens holder: text_tokens
text_tokens = [*text_tokens, *token]
# each token chunk will come with one weight, like ['red cat', 2.0]
# need to expand weight for each token.
chunk_weights = [weight] * len(token)
# append the weight back to the weight holder: text_weights
text_weights = [*text_weights, *chunk_weights]
return text_tokens, text_weights
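# Illustrative sketch (assumed tokenizer output, not in the original file): for the docstring
# example prompt "a (red:1.5) cat", `parse_prompt_attention` yields
# [['a ', 1.0], ['red', 1.5], [' cat', 1.0]]; each fragment is tokenized separately and its
# weight is repeated once per produced token id, so the returned lists line up one-to-one, e.g.
#
#   text_tokens  = [320, 736, 2368]   # hypothetical ids for "a", "red", "cat"
#   text_weights = [1.0, 1.5, 1.0]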
def group_tokens_and_weights(token_ids: list, weights: list, pad_last_block=False):
"""
Produce tokens and weights in groups and pad the missing tokens
Args:
token_ids (list)
The token ids from tokenizer
weights (list)
The weights list from function get_prompts_tokens_with_weights
pad_last_block (bool)
Controls whether to pad the last token list to 75 tokens with eos
Returns:
new_token_ids (2d list)
new_weights (2d list)
Example:
token_groups,weight_groups = group_tokens_and_weights(
token_ids = token_id_list
, weights = token_weight_list
)
"""
bos, eos = 49406, 49407
# this will be a 2d list
new_token_ids = []
new_weights = []
while len(token_ids) >= 75:
# get the first 75 tokens
head_75_tokens = [token_ids.pop(0) for _ in range(75)]
head_75_weights = [weights.pop(0) for _ in range(75)]
# extract token ids and weights
temp_77_token_ids = [bos] + head_75_tokens + [eos]
temp_77_weights = [1.0] + head_75_weights + [1.0]
# add 77 token and weights chunk to the holder list
new_token_ids.append(temp_77_token_ids)
new_weights.append(temp_77_weights)
# pad the leftover tokens
if len(token_ids) > 0:
padding_len = 75 - len(token_ids) if pad_last_block else 0
temp_77_token_ids = [bos] + token_ids + [eos] * padding_len + [eos]
new_token_ids.append(temp_77_token_ids)
temp_77_weights = [1.0] + weights + [1.0] * padding_len + [1.0]
new_weights.append(temp_77_weights)
return new_token_ids, new_weights
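# Illustrative sketch (not in the original file): with e.g. 100 token ids, the function emits
# one full chunk of 75 ids plus the 25 leftover ids, each wrapped with bos/eos (49406/49407):
#
#   groups, weight_groups = group_tokens_and_weights(list(range(100)), [1.0] * 100)
#   # len(groups) == 2; len(groups[0]) == 77 (bos + 75 + eos); len(groups[1]) == 27
#   # with pad_last_block=True the second chunk would also be padded to 77 with eos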
def get_weighted_text_embeddings_sdxl(
pipe: StableDiffusionXLPipeline,
prompt: str = "",
prompt_2: str = None,
neg_prompt: str = "",
neg_prompt_2: str = None,
num_images_per_prompt: int = 1,
):
"""
This function can process long prompts with weights, with no length limitation,
for Stable Diffusion XL
Args:
pipe (StableDiffusionXLPipeline)
prompt (str)
prompt_2 (str)
neg_prompt (str)
neg_prompt_2 (str)
num_images_per_prompt (int)
Returns:
prompt_embeds (torch.Tensor)
neg_prompt_embeds (torch.Tensor)
"""
if prompt_2:
prompt = f"{prompt} {prompt_2}"
if neg_prompt_2:
neg_prompt = f"{neg_prompt} {neg_prompt_2}"
eos = pipe.tokenizer.eos_token_id
# tokenizer 1
prompt_tokens, prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, prompt)
neg_prompt_tokens, neg_prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, neg_prompt)
# tokenizer 2
prompt_tokens_2, prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, prompt)
neg_prompt_tokens_2, neg_prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, neg_prompt)
# padding the shorter one for prompt set 1
prompt_token_len = len(prompt_tokens)
neg_prompt_token_len = len(neg_prompt_tokens)
if prompt_token_len > neg_prompt_token_len:
# padding the neg_prompt with eos token
neg_prompt_tokens = neg_prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
neg_prompt_weights = neg_prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)
else:
# padding the prompt
prompt_tokens = prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
prompt_weights = prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)
# padding the shorter one for token set 2
prompt_token_len_2 = len(prompt_tokens_2)
neg_prompt_token_len_2 = len(neg_prompt_tokens_2)
if prompt_token_len_2 > neg_prompt_token_len_2:
# padding the neg_prompt with eos token
neg_prompt_tokens_2 = neg_prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
neg_prompt_weights_2 = neg_prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
else:
# padding the prompt
prompt_tokens_2 = prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
prompt_weights_2 = prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)  # fixed: pad the tokenizer-2 weights, not the tokenizer-1 weights
embeds = []
neg_embeds = []
prompt_token_groups, prompt_weight_groups = group_tokens_and_weights(prompt_tokens.copy(), prompt_weights.copy())
neg_prompt_token_groups, neg_prompt_weight_groups = group_tokens_and_weights(
neg_prompt_tokens.copy(), neg_prompt_weights.copy()
)
prompt_token_groups_2, prompt_weight_groups_2 = group_tokens_and_weights(
prompt_tokens_2.copy(), prompt_weights_2.copy()
)
neg_prompt_token_groups_2, neg_prompt_weight_groups_2 = group_tokens_and_weights(
neg_prompt_tokens_2.copy(), neg_prompt_weights_2.copy()
)
# encode each 77-token chunk separately and concatenate the resulting embeddings afterwards
for i in range(len(prompt_token_groups)):
# get positive prompt embeddings with weights
token_tensor = torch.tensor([prompt_token_groups[i]], dtype=torch.long, device=pipe.device)
weight_tensor = torch.tensor(prompt_weight_groups[i], dtype=torch.float16, device=pipe.device)
token_tensor_2 = torch.tensor([prompt_token_groups_2[i]], dtype=torch.long, device=pipe.device)
# use first text encoder
prompt_embeds_1 = pipe.text_encoder(token_tensor.to(pipe.device), output_hidden_states=True)
prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-2]
# use second text encoder
prompt_embeds_2 = pipe.text_encoder_2(token_tensor_2.to(pipe.device), output_hidden_states=True)
prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-2]
pooled_prompt_embeds = prompt_embeds_2[0]
prompt_embeds_list = [prompt_embeds_1_hidden_states, prompt_embeds_2_hidden_states]
token_embedding = torch.concat(prompt_embeds_list, dim=-1).squeeze(0)
for j in range(len(weight_tensor)):
if weight_tensor[j] != 1.0:
token_embedding[j] = (
token_embedding[-1] + (token_embedding[j] - token_embedding[-1]) * weight_tensor[j]
)
token_embedding = token_embedding.unsqueeze(0)
embeds.append(token_embedding)
# get negative prompt embeddings with weights
neg_token_tensor = torch.tensor([neg_prompt_token_groups[i]], dtype=torch.long, device=pipe.device)
neg_token_tensor_2 = torch.tensor([neg_prompt_token_groups_2[i]], dtype=torch.long, device=pipe.device)
neg_weight_tensor = torch.tensor(neg_prompt_weight_groups[i], dtype=torch.float16, device=pipe.device)
# use first text encoder
neg_prompt_embeds_1 = pipe.text_encoder(neg_token_tensor.to(pipe.device), output_hidden_states=True)
neg_prompt_embeds_1_hidden_states = neg_prompt_embeds_1.hidden_states[-2]
# use second text encoder
neg_prompt_embeds_2 = pipe.text_encoder_2(neg_token_tensor_2.to(pipe.device), output_hidden_states=True)
neg_prompt_embeds_2_hidden_states = neg_prompt_embeds_2.hidden_states[-2]
negative_pooled_prompt_embeds = neg_prompt_embeds_2[0]
neg_prompt_embeds_list = [neg_prompt_embeds_1_hidden_states, neg_prompt_embeds_2_hidden_states]
neg_token_embedding = torch.concat(neg_prompt_embeds_list, dim=-1).squeeze(0)
for z in range(len(neg_weight_tensor)):
if neg_weight_tensor[z] != 1.0:
neg_token_embedding[z] = (
neg_token_embedding[-1] + (neg_token_embedding[z] - neg_token_embedding[-1]) * neg_weight_tensor[z]
)
neg_token_embedding = neg_token_embedding.unsqueeze(0)
neg_embeds.append(neg_token_embedding)
prompt_embeds = torch.cat(embeds, dim=1)
negative_prompt_embeds = torch.cat(neg_embeds, dim=1)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view(
bs_embed * num_images_per_prompt, -1
)
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view(
bs_embed * num_images_per_prompt, -1
)
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
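# Illustrative usage sketch (assumes an SDXL pipeline object `pipe` was loaded elsewhere,
# e.g. via DiffusionPipeline.from_pretrained with this custom pipeline):
#   prompt_embeds, neg_embeds, pooled, neg_pooled = get_weighted_text_embeddings_sdxl(
#       pipe,
#       prompt="a (white:1.3) cat running on the grass" * 20,
#       neg_prompt="blur, low quality",
#   )
#   # the two encoders' hidden states are concatenated, so prompt_embeds.shape[-1] is 2048 for SDXL base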
# -------------------------------------------------------------------------------------------------------------------------------
# reuse the backbone code from StableDiffusionXLPipeline
# -------------------------------------------------------------------------------------------------------------------------------
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
from diffusers import DiffusionPipeline
import torch
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0"
, torch_dtype = torch.float16
, use_safetensors = True
, variant = "fp16"
, custom_pipeline = "lpw_stable_diffusion_xl",
)
prompt = "a white cat running on the grass"*20
prompt2 = "play a football"*20
prompt = f"{prompt},{prompt2}"
neg_prompt = "blur, low quality"
pipe.to("cuda")
images = pipe(
prompt = prompt
, negative_prompt = neg_prompt
).images[0]
pipe.to("cpu")
torch.cuda.empty_cache()
images
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
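# Worked sketch (illustrative only): with guidance_rescale = 0.7 the function returns
#   0.7 * noise_cfg * (std_text / std_cfg) + 0.3 * noise_cfg
# i.e. the CFG result is partially rescaled toward the text-conditional standard deviation,
# which counteracts the overexposure described in the paper above.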
class SDXLLongPromptWeightingPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
text_encoder_2 ([`CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.default_sample_size = self.unet.config.sample_size
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
method is called, and the model remains on the GPU until the next model runs. Memory savings are lower than with
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
model_sequence = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
model_sequence.extend([self.unet, self.vae])
hook = None
for cpu_offloaded_model in model_sequence:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
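# Illustrative usage sketch (assumption: a constructed pipeline instance `pipe` on a CUDA machine):
#   pipe.enable_model_cpu_offload(gpu_id=0)
#   # each sub-model is moved to cuda:0 when its forward pass runs and is offloaded
#   # again once the next sub-model in the sequence executes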
def encode_prompt(
self,
prompt: str,
prompt_2: Optional[str] = None,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
do_classifier_free_guidance: bool = True,
negative_prompt: Optional[str] = None,
negative_prompt_2: Optional[str] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
"""
device = device or self._execution_device
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
prompt_2 = prompt_2 or prompt
# textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = []
prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, tokenizer)
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = text_encoder(
text_input_ids.to(device),
output_hidden_states=True,
)
# We are always interested only in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
# get unconditional embeddings for classifier free guidance
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
elif do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt or ""
negative_prompt_2 = negative_prompt_2 or negative_prompt
uncond_tokens: List[str]
if prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt, negative_prompt_2]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = [negative_prompt, negative_prompt_2]
negative_prompt_embeds_list = []
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = tokenizer(
negative_prompt,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
negative_prompt_embeds = text_encoder(
uncond_input.input_ids.to(device),
output_hidden_states=True,
)
# We are always interested only in the pooled output of the final text encoder
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
negative_prompt_embeds_list.append(negative_prompt_embeds)
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
if do_classifier_free_guidance:
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
prompt_2,
height,
width,
callback_steps,
negative_prompt=None,
negative_prompt_2=None,
prompt_embeds=None,
negative_prompt_embeds=None,
pooled_prompt_embeds=None,
negative_pooled_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt_2 is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if prompt_embeds is not None and pooled_prompt_embeds is None:
raise ValueError(
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
)
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
raise ValueError(
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
add_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
return add_time_ids
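# Illustrative sketch (not part of the original file): for a 1024x1024 generation with no crop
# offset, the six micro-conditioning values are original_size + crops_coords_top_left + target_size:
#   self._get_add_time_ids((1024, 1024), (0, 0), (1024, 1024), dtype=torch.float16)
#   # -> tensor([[1024., 1024., 0., 0., 1024., 1024.]], dtype=torch.float16)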
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(dtype)
self.vae.decoder.conv_in.to(dtype)
self.vae.decoder.mid_block.to(dtype)
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: str = None,
prompt_2: Optional[str] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[str] = None,
negative_prompt_2: Optional[str] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str`):
The prompt to guide the image generation. If not defined, one has to pass `prompt_embeds`
instead.
prompt_2 (`str`):
The prompt to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` in equation 2 of the [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str`):
The prompt not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str`):
The prompt not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
of a plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
For most cases, `target_size` should be set to the desired height and width of the generated image. If
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
Examples:
Returns:
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is a list with the generated images.
"""
# 0. Default height and width to unet
height = height or self.default_sample_size * self.vae_scale_factor
width = width or self.default_sample_size * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
prompt_2,
height,
width,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
# NOTE: the lora scale from `cross_attention_kwargs` is not applied in the weighted-embedding path below
negative_prompt = negative_prompt if negative_prompt is not None else ""
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = get_weighted_text_embeddings_sdxl(
pipe=self, prompt=prompt, neg_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings
add_text_embeds = pooled_prompt_embeds
add_time_ids = self._get_add_time_ids(
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
)
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
# 8. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
# 8.1 Apply denoising_end
if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
# make sure the VAE is in float32 mode, as it overflows in float16
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
self.upcast_vae()
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
# cast back to fp16 if needed
if needs_upcasting:
self.vae.to(dtype=torch.float16)
else:
image = latents
return StableDiffusionXLPipelineOutput(images=image)
# apply watermark if available
if self.watermark is not None:
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
# Override to properly handle the loading and unloading of the additional text encoder.
def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
# We could have accessed the unet config from `lora_state_dict()` too. We pass
# it here explicitly to be able to tell that it's coming from an SDXL
# pipeline.
state_dict, network_alphas = self.lora_state_dict(
pretrained_model_name_or_path_or_dict,
unet_config=self.unet.config,
**kwargs,
)
self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
if len(text_encoder_state_dict) > 0:
self.load_lora_into_text_encoder(
text_encoder_state_dict,
network_alphas=network_alphas,
text_encoder=self.text_encoder,
prefix="text_encoder",
lora_scale=self.lora_scale,
)
text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
if len(text_encoder_2_state_dict) > 0:
self.load_lora_into_text_encoder(
text_encoder_2_state_dict,
network_alphas=network_alphas,
text_encoder=self.text_encoder_2,
prefix="text_encoder_2",
lora_scale=self.lora_scale,
)
@classmethod
def save_lora_weights(
cls,
save_directory: Union[str, os.PathLike],
unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
is_main_process: bool = True,
weight_name: str = None,
save_function: Callable = None,
safe_serialization: bool = False,
):
state_dict = {}
def pack_weights(layers, prefix):
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
return layers_state_dict
state_dict.update(pack_weights(unet_lora_layers, "unet"))
if text_encoder_lora_layers and text_encoder_2_lora_layers:
state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
cls.write_lora_layers(
state_dict=state_dict,
save_directory=save_directory,
is_main_process=is_main_process,
weight_name=weight_name,
save_function=save_function,
safe_serialization=safe_serialization,
)
def _remove_text_encoder_monkey_patch(self):
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
| [
"1",
"PLACEHOLDER PLACEHOLDER",
"[PLACEHOLDER, PLACEHOLDER]",
"False",
"[]"
] |
2024-01-10 | sw00d/PropConnect | src~backend~tests~integration~test_gpt.py | import openai
from commands.management.commands.generate_data import generate_vendors
from conversations.models import Vendor, Tenant, Conversation, Message
from conversations.utils import create_chat_completion_with_functions, create_chat_completion
from factories import CompanyFactory
from tests.utils import CkcAPITestCase
from unittest.mock import patch
class TestGPTFunctionAutoCalling(CkcAPITestCase):
def setUp(self):
# seed the db
company = CompanyFactory()
generate_vendors(company)
self.tenant = Tenant.objects.create(number="1") # Add necessary parameters
vendor = Vendor.objects.first() # Add necessary parameters
self.conversation = Conversation.objects.create(tenant=self.tenant, vendor=vendor, company=company)
def test_tenant_name_and_address_assign(self):
assert self.conversation.tenant.name is None
Message.objects.create(message_content='My name is Sam Wood.', role="user",
conversation=self.conversation)
self.conversation.refresh_from_db()
create_chat_completion_with_functions(self.conversation)
self.conversation.refresh_from_db()
Message.objects.create(message_content='My address is 2093 E. Greenleaf ave san diego CA, 92117', role="user",
conversation=self.conversation)
self.conversation.refresh_from_db()
create_chat_completion_with_functions(self.conversation)
self.conversation.refresh_from_db()
assert self.conversation.tenant.name == 'Sam Wood'
assert 'Greenleaf' in self.conversation.tenant.address
class TestGPTErrorCases(CkcAPITestCase):
@patch('openai.ChatCompletion.create')
def test_rate_limit_error_is_recursive(self, mock_create):
# Arrange
# first call raises a rate limit error; the retried call succeeds
mock_create.side_effect = [openai.error.RateLimitError('Rate limit error'),
{'choices': [{'message': {'content': 'Test content'}}]}]
conversation = [] # add some test conversation messages here
# Act
res = create_chat_completion(conversation)
assert res == 'Test content'
#
# class TestVendorDetection(CkcAPITestCase):
# def setUp(self):
# # seed the db
# company = CompanyFactory()
# generate_vendors(company)
# tenant = Tenant.objects.create(number="1") # Add necessary parameters
# vendor = Vendor.objects.first() # Add necessary parameters
# self.conversation = Conversation.objects.create(tenant=tenant, vendor=vendor, company=company)
#
# def test_vendor_detection_returns_none(self):
# Message.objects.create(
# message_content='Hi there, my name is sam wood. 4861 conrad ave. I have a maintenance request.',
# role="user", conversation=self.conversation)
#
# self.conversation.refresh_from_db()
# response = get_vendor_from_conversation(self.conversation)
#
# assert response is None
#
# def test_vendor_detection_too_vague(self):
# Message.objects.create(message_content='Something broke in my house.', role="user",
# conversation=self.conversation)
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response is None
#
# def test_vendor_detection_plumber(self):
# Message.objects.create(message_content='My toilet is broken.', role="user", conversation=self.conversation)
# Message.objects.create(message_content='Its leaking everywhere.', role="user", conversation=self.conversation)
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='plumber')
#
# def test_vendor_detection_electrician(self):
# # Electrician
# Message.objects.create(message_content='My lights are flickering.', role="user", conversation=self.conversation)
# Message.objects.create(
# message_content='The light switch stays on and its a new bulb but it keeps flicking randomly', role="user",
# conversation=self.conversation)
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='electrician')
#
# def test_vendor_detection_handyman(self):
# # Handyman
# Message.objects.create(message_content='Theres air going under my door and I think it needs something under '
# 'there.', role="user", conversation=self.conversation)
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='handyman')
#
# def test_vendor_detection_landscaper(self):
# # Landscaper
# Message.objects.create(message_content='Can you send somebody to trim my bushes in the front yard?',
# role="user", conversation=self.conversation)
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='landscaper')
#
# def test_vendor_detection_appliance_specialist(self):
# # Appliance Specialist
# Message.objects.create(message_content='My fridge is not cooling properly.', role="user",
# conversation=self.conversation)
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='appliance specialist')
#
# def test_vendor_detection_hvac(self):
# # HVAC Professional
# Message.objects.create(message_content='My AC isnt working', role="user",
# conversation=self.conversation)
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='air-condition specialist')
#
# def test_vendor_detection_locksmith(self):
# # Locksmith
# Message.objects.create(message_content='I am locked out of my house.', role="user",
# conversation=self.conversation)
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='locksmith')
#
# def test_vendor_detection_flooring_specialist(self):
# # Flooring Specialist
# Message.objects.create(
# message_content='Theres a huge crack in the tile in my kitchen and dining room.',
# role="user",
# conversation=self.conversation
# )
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='flooring specialist')
#
# def test_vendor_detection_painter(self):
# # Painter
# Message.objects.create(
# message_content='I need to repaint my room.',
# role="user",
# conversation=self.conversation
# )
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='painter')
#
# def test_vendor_detection_drywall_specialist(self):
# # Drywall Specialist
# Message.objects.create(
# message_content='There is a hole in my wall cause I ran into it with a baseball bat. I need it patched up.',
# role="user",
# conversation=self.conversation
# )
#
# self.conversation.refresh_from_db()
#
# response = get_vendor_from_conversation(self.conversation)
# # Doing this None check here because sometimes gpt doesn't return it correctly
# assert response == Vendor.objects.get(vocation='drywall specialist') or response is None
#
# def test_complex_plumber_selection(self):
# # delete all vendors
# Vendor.objects.all().delete()
#
# Vendor.objects.create(name="Plumber Sam", vocation="plumber", company=self.conversation.company, active=True,
# has_opted_in=True)
#
# conversation_data = [
# {'role': 'user', 'content': 'Hello'},
# {'role': 'user', 'content': 'My toilet is messed up.'},
# {'role': 'user',
# 'content': 'Sam Wood, and the toilet is leaking everywhere cause somebody pooped too big in it'},
# {'role': 'user', 'content': "It's causing my bathroom to flood and there is crack in the toilet"},
# ]
#
# for i, data in enumerate(conversation_data):
# Message.objects.create(
# message_content=data['content'],
# role=data['role'],
# conversation=self.conversation
# )
#
# self.conversation.refresh_from_db()
# response = get_vendor_from_conversation(self.conversation)
# assert response == Vendor.objects.get(vocation='plumber')
| [
"Test content"
] |
2024-01-10 | sw00d/PropConnect | src~backend~apps~companies~serializers.py | # serializers.py
from openai.error import InvalidRequestError
from rest_framework import serializers
from settings.base import STRIPE_SECRET_KEY
from .models import Company
from djstripe.models import Customer, PaymentMethod, Subscription
import stripe
stripe.api_key = STRIPE_SECRET_KEY
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = ['billing_details']
class CompanyCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Company
fields = (
'id',
'name',
'website',
'number_of_doors',
'zip_code',
)
def create(self, validated_data):
# pop payment_method_id since it is not needed for company creation
validated_data.pop('payment_method_id', None)
company = super().create(validated_data)
# get user from request
user = self.context.get("request").user
# assign company to user
user.company = company
user.save()
# assign user to be company's point_of_contact
company.point_of_contact = user
company.save()
return company
class SubscriptionSerializer(serializers.ModelSerializer):
price = serializers.SerializerMethodField()
class Meta:
model = Subscription
fields = (
'id',
'status',
'price',
)
def get_price(self, obj):
if obj.plan:
return obj.plan.amount_decimal / 100
return None
class CompanyUpdateSerializer(serializers.ModelSerializer):
payment_method_id = serializers.CharField(write_only=True)
current_subscription = SubscriptionSerializer(read_only=True)
class Meta:
model = Company
fields = (
'id',
'current_subscription',
'name',
'number_of_doors',
'street_1',
'street_2',
'city',
'state',
'zip_code',
'country',
'assistant_phone_number',
'payment_method_id', # stripe payment method id
'assistant_phone_is_verified',
)
def update(self, instance, validated_data):
payment_method_id = validated_data.pop('payment_method_id', None)
# call super update
if validated_data:
instance = super().update(instance, validated_data)
if payment_method_id:
# TODO Test this
try:
if not instance.customer_stripe_id:
# If no Stripe customer exists yet, create one
stripe_customer = stripe.Customer.create(
payment_method=payment_method_id,
email=instance.users.first().email,
name=instance.name,
invoice_settings={
'default_payment_method': payment_method_id,
},
)
# Update the customer_stripe_id field with the Stripe customer's id
instance.customer_stripe_id = stripe_customer.id
except stripe.error.StripeError as e:
# Handle Stripe errors here
raise serializers.ValidationError(f"Stripe error: {e}")
instance.save()
# TODO Ask somebody if I need to do this
# try:
# # Sync customer with dj-stripe
# customer, created = Customer.get_or_create(subscriber=instance.users.first())
# customer.api_retrieve()
#
# # Avoid an unnecessary API call by using the data from stripe_customer
# customer.default_payment_method = PaymentMethod.sync_from_stripe_data(
# // set the default payment method here
# )
# customer.save()
# except Exception as e: # Replace with a more specific exception if possible
# # Handle other errors here
# raise serializers.ValidationError(f"Error updating customer: {e}")
#
# # Save Stripe customer ID in Company model
# instance.save()
return instance
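# Illustrative usage sketch (assumption: a DRF view with an authenticated request and an existing
# Company instance; "pm_123" is a hypothetical Stripe PaymentMethod id):
#   serializer = CompanyUpdateSerializer(
#       company,
#       data={"payment_method_id": "pm_123"},
#       partial=True,
#       context={"request": request},
#   )
#   serializer.is_valid(raise_exception=True)
#   serializer.save()  # creates a Stripe customer if the company has no customer_stripe_id yet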
| [] |
2024-01-10 | sw00d/PropConnect | src~backend~tests~integration~test_full_conversation_flow.py | from datetime import timedelta
import stripe
from django.core import mail
from django.utils.timezone import now
from django.http import HttpRequest
from unittest.mock import patch, Mock, MagicMock, call
from openai import OpenAIError
from commands.management.commands.generate_data import generate_vendors
from conversations.models import Conversation, Vendor, PhoneNumber, Tenant, Message
from conversations.tasks import set_old_conversations_to_not_active, start_vendor_tenant_conversation
from conversations.utils import handle_assistant_conversation, create_chat_completion, create_chat_completion_with_functions
from tests.utils import CkcAPITestCase
from factories import CompanyFactory
class TestFullConversationFlow(CkcAPITestCase):
@patch.object(stripe.Subscription, 'retrieve')
def setUp(self, mock_retrieve):
# Mock subscription
mock_subscription = MagicMock()
mock_retrieve.return_value = mock_subscription
self.company = CompanyFactory.create()
generate_vendors(self.company)
@patch('conversations.utils.send_message')
@patch('conversations.tasks.Client')
@patch('conversations.tasks.purchase_phone_number_util')
def test_start_vendor_tenant_conversation(self, mock_purchase_phone_number, mock_client, mock_send_message):
# Arrange
tenant = Tenant.objects.create(number="1") # Add necessary parameters
vendor = Vendor.objects.first() # Add necessary parameters
conversation = Conversation.objects.create(
tenant=tenant,
vendor=vendor,
company=self.company,
tenant_intro_message="Hi tenant!",
vendor_intro_message="Hi vendor!"
)
# Create a mock Twilio client
mock_client_instance = mock_client.return_value
mock_client_instance.available_phone_numbers.return_value.local.list.return_value = [
Mock(phone_number='+0987654321')]
# Create an available phone number for the test
PhoneNumber.objects.create(number="+1234567890", most_recent_conversation=None, is_base_number=False)
# Use the existing phone number
start_vendor_tenant_conversation(conversation.id, vendor.id)
# Assert
conversation.refresh_from_db() # Fetch the latest state from the database
self.assertIsNotNone(conversation.vendor_id)
self.assertEqual(conversation.vendor_id, vendor.id)
phone_number = PhoneNumber.objects.get(most_recent_conversation=conversation)
self.assertIsNotNone(phone_number)
self.assertEqual(phone_number.number, '+1234567890') # Verify we used the existing number
mock_purchase_phone_number.assert_not_called() # We should not have needed to purchase a number
# NOW WITH PURCHASING A NUMBER
PhoneNumber.objects.all().delete() # Delete the existing phone number
conversation2 = Conversation.objects.create(
tenant=tenant,
vendor=vendor,
company=self.company,
tenant_intro_message="Hi tenant!",
vendor_intro_message="Hi vendor!"
)
# Use the existing phone number
start_vendor_tenant_conversation(conversation2.id, vendor.id)
# Assert
conversation2.refresh_from_db() # Fetch the latest state from the database
self.assertIsNotNone(conversation2.vendor_id)
self.assertEqual(conversation2.vendor_id, vendor.id)
phone_number = PhoneNumber.objects.get(most_recent_conversation=conversation2)
self.assertIsNotNone(phone_number)
self.assertNotEqual(phone_number.number, '+1234567890') # Verify we used a new number
assert PhoneNumber.objects.count() == 1
        mock_purchase_phone_number.assert_called()  # This time a new phone number should have been purchased
@patch.object(stripe.Subscription, 'retrieve')
@patch('conversations.tasks.purchase_phone_number_util')
def test_handle_assistant_conversation_with_simple_situation(
self,
mock_purchase_phone_number_util,
mock_retrieve,
):
self.company.assistant_phone_number = '+0987654321'
test_company = self.company
test_company.save()
request = HttpRequest()
request.POST = {'Body': 'Hi the storm broke my window', 'From': '+12345678901', "To": '+0987654321'}
handle_assistant_conversation(request)
assert Conversation.objects.count() == 1
conversation = Conversation.objects.first()
messages = Message.objects.all()
assert conversation.company == test_company
response_from_gpt = messages.last().message_content
assert type(response_from_gpt) == str
assert Conversation.objects.count() == 1
assert conversation.messages.count() == 3
# Second/follow up message(s)
request.POST = {'Body': "Sam wood. 2323 greenleaf ave. It has a crack going across it but it's not shattered. maybe the hail got it idk",
'From': '+12345678901', "To": self.company.assistant_phone_number}
handle_assistant_conversation(request)
second_response = conversation.messages.last().message_content
assert 'if you feel you have provided enough information' in second_response
assert type(second_response) == str
assert conversation.messages.count() == 5
# Third/follow up message(s)
request.POST = {
'Body': "Nope.",
'From': '+12345678901', "To": self.company.assistant_phone_number}
handle_assistant_conversation(request)
third_response = conversation.messages.last().message_content
assert 'if you feel you have provided enough information' in third_response
assert type(third_response) == str
assert conversation.messages.count() == 7
# Fourth/follow up message(s)
request.POST = {
'Body': "Done.",
'From': '+12345678901', "To": self.company.assistant_phone_number}
handle_assistant_conversation(request)
fourth_response = conversation.messages.last().message_content
assert "You'll be receiving a text from our staff shortly!" in fourth_response
assert type(fourth_response) == str
assert conversation.messages.count() == 9
assert len(mail.outbox) == 1
def populate_conversation_details(conversation, attempts=0):
# Base cases
if attempts >= 10:
return
if conversation.tenant.name is not None and conversation.tenant.address is not None:
print('Assigned tenant name and address in {} attempts'.format(attempts))
return
# Call the chat completion function
create_chat_completion_with_functions(conversation)
# Recursive call
populate_conversation_details(conversation, attempts + 1)
# Usage
if conversation.tenant.name is None or conversation.tenant.address is None:
populate_conversation_details(conversation)
assert conversation.tenant.name is not None
assert conversation.tenant.address is not None
def test_set_old_conversations_to_not_active(self):
tenant = Tenant.objects.create(number="1") # Add necessary parameters
vendor = Vendor.objects.first() # Add necessary parameters
conversation1 = Conversation.objects.create(tenant=tenant, vendor=vendor)
conversation2 = Conversation.objects.create(tenant=tenant, vendor=vendor)
Message.objects.create(sender_number="123", role="user", message_content="Hello",
conversation=conversation1)
Message.objects.create(sender_number="123", role="user", message_content="Hello",
conversation=conversation2)
# Make one message old
old_message = conversation1.messages.first()
old_message.time_sent = now() - timedelta(days=4)
old_message.save()
set_old_conversations_to_not_active()
conversation1.refresh_from_db()
conversation2.refresh_from_db()
assert conversation1.is_active is False
assert conversation2.is_active is True
set_old_conversations_to_not_active(0)
assert conversation1.is_active is False
assert conversation2.is_active is True
@patch('openai.ChatCompletion.create')
def test_create_chat_completion_with_error(self, mock_create):
# Set up the mock object to raise an error
mock_create.side_effect = OpenAIError('OpenAI error')
# Call the function you are testing
conversation = [] # add some test conversation messages here
response = create_chat_completion(conversation)
# Check that the error handling code was run and the expected message is returned
assert response == "Sorry, we're having some issues over here. Please reach out directly to " \
"your property manager."
# Ensure the create method was called
mock_create.assert_called_once_with(model="gpt-3.5-turbo", messages=conversation)
| [] |
2024-01-10 | JulianBvW/TS-BC | LatentSpaceVPT.py | import sys
sys.path.append('openai_vpt')
import cv2
import torch
import pickle
import numpy as np
from gym3.types import DictType
from openai_vpt.lib.policy import MinecraftAgentPolicy
from distance_fns import DISTANCE_FUNCTIONS
from LatentSpaceMineCLIP import SLIDING_WINDOW_SIZE
AGENT_RESOLUTION = (128, 128)
CONTEXT = {'first': torch.tensor([[False]])}
class LatentSpaceVPT:
def __init__(self, distance_fn='euclidean', device='cuda'):
self.latents = [] # Python List while training, Numpy array while inference
self.distance_function = DISTANCE_FUNCTIONS[distance_fn]
self.device = device
@torch.no_grad()
def load(self, episode_actions, latents_folder='weights/ts_bc/latents_vpt/'):
for vid_id, _ in episode_actions.episode_starts:
_, name = vid_id.rsplit('/', 1)
vid_latents = np.load(latents_folder + name + '.npy', allow_pickle=True)
self.latents.append(vid_latents)
self.latents = torch.from_numpy(np.vstack(self.latents)).to(self.device)
print(f'Loaded VPT latent space with {len(self.latents)} latents')
return self
@torch.no_grad()
def load_OLD(self, latents_file='weights/ts_bc/latents_vpt.npy'): # TODO update to new format
self.latents = torch.from_numpy(np.load(latents_file, allow_pickle=True)).to(self.device)
print(f'Loaded VPT latent space with {len(self.latents)} latents')
return self
def save(self, latents_file='weights/ts_bc/latents_vpt'): # TODO remove?
latents = np.array(self.latents)
np.save(latents_file, latents)
@torch.no_grad()
def train_episode(self, vpt_model, frames, vid_id, save_dir='weights/ts_bc/latents_vpt/'):
episode_latents = []
model_state = vpt_model.initial_state(1)
resized_frames = np.empty((frames.shape[0], AGENT_RESOLUTION[1], AGENT_RESOLUTION[0], 3), dtype=np.uint8)
for ts in range(frames.shape[0]):
resized_frame = cv2.resize(frames[ts], AGENT_RESOLUTION)
resized_frames[ts] = resized_frame
frames = torch.tensor(resized_frames).to(self.device)
for ts in range(SLIDING_WINDOW_SIZE-1, len(frames)): # Start at Frame 15 because of MineCLIP needing 16-frame batches
frame = frames[ts].unsqueeze(0).unsqueeze(0) # Add 2 extra dimensions for vpt
(latent, _), model_state = vpt_model.net({'img': frame}, model_state, context=CONTEXT)
latent = latent[0][0].to('cpu').numpy().astype('float16')
episode_latents.append(latent)
        del frames
np.save(save_dir + vid_id.rsplit('/', 1)[-1], np.array(episode_latents))
def get_distances(self, latent):
return self.distance_function(self.latents, latent)
def get_distance(self, idx, latent):
return self.distance_function(self.latents[idx], latent)
def get_nearest(self, latent): # TODO removed episode_starts
# TODO assert latents is numpy array
diffs = self.latents - latent
        diffs = abs(diffs).sum(1)  # Sum the absolute element-wise differences of each stored latent to the current latent
nearest_idx = diffs.argmin()#.to('cpu').item() # TODO remove .to('cpu').item()
return nearest_idx
def load_vpt(model_file='weights/vpt/foundation-model-1x.model', weights_file='weights/vpt/foundation-model-1x.weights', device='cuda'):
agent_parameters = pickle.load(open(model_file, 'rb'))
policy_kwargs = agent_parameters['model']['args']['net']['args']
pi_head_kwargs = agent_parameters['model']['args']['pi_head_opts']
pi_head_kwargs['temperature'] = float(pi_head_kwargs['temperature'])
agent = MinecraftAgentPolicy(policy_kwargs=policy_kwargs, pi_head_kwargs=pi_head_kwargs, action_space=DictType())
agent.load_state_dict(torch.load(weights_file), strict=False)
agent.eval()
return agent.to(device)
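if __name__ == '__main__':
    # Minimal usage sketch: assumes the pretrained VPT model/weights exist at the
    # default paths, CUDA is available, and the default save_dir folder exists.
    # The zero-filled array stands in for real gameplay frames of shape (T, H, W, 3).
    vpt_model = load_vpt(device='cuda')
    dummy_frames = np.zeros((32, 360, 640, 3), dtype=np.uint8)
    latent_space = LatentSpaceVPT(device='cuda')
    latent_space.train_episode(vpt_model, dummy_frames, vid_id='videos/example_episode')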
| [] |
2024-01-10 | ayman-m/gaze | data~scripts~ada-embedding~all_commands.py | import pandas as pd
import openai
import os
from pathlib import Path
from dotenv import load_dotenv
from openai.embeddings_utils import get_embedding
import csv
env_path = Path('../..') / '.env'
if env_path.exists():
load_dotenv()
# Retrieve environment variables
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
df = pd.read_csv('data/output.csv')
# Prepare the CSV writer
csv_file = open('embeddings/openai-ada/all-command_embedding.csv', 'w', newline='')
csv_writer = csv.writer(csv_file)
csv_writer.writerow(df.columns.tolist() + ['embedding'])
# Apply the get_embedding function to each description
for i, row in df.iterrows():
embedding = get_embedding(row['description'], engine='text-embedding-ada-002')
# Write the row with its new 'embedding' column to the CSV file
csv_writer.writerow(row.tolist() + [embedding])
if i % 100 == 0: # Print a message every 100 rows
print(f"Processed row: {i}")
csv_file.close()
| [] |
2024-01-10 | ayman-m/gaze | app~automate.py | import warnings
import json
import re
import pandas as pd
import requests
import ast
import numpy as np
from urllib3.exceptions import InsecureRequestWarning
from numba.core.errors import NumbaDeprecationWarning
from openai.embeddings_utils import get_embedding
from openai.embeddings_utils import cosine_similarity
import demisto_client
import demisto_client.demisto_api
from demisto_client.demisto_api.rest import ApiException
from app.helper import Decorator
warnings.filterwarnings('ignore', category=InsecureRequestWarning)
warnings.filterwarnings('ignore', category=NumbaDeprecationWarning)
class LocalTextEmbedding:
"""
A class to represent a collection of text embeddings.
...
Attributes
----------
file_path : str
a string path for the csv file containing the text embeddings.
embedding_model : str
a string representing the name of the embedding model used.
df : pandas.DataFrame
a pandas DataFrame containing the loaded text embeddings.
Methods
-------
get_embedding_vectors(question)
Generates an embedding vector for the given question.
    get_similarities(question_vector, num_rows=2)
        Calculates the cosine similarity of each stored embedding to the question vector and returns the num_rows most similar rows.
"""
def __init__(self, text_embedding_path, embedding_model="text-embedding-ada-002"):
"""
Constructs all the necessary attributes for the textEmbedding object.
Parameters
----------
text_embedding_path : str
path of the csv file containing the text embeddings.
embedding_model : str
name of the embedding model to use.
"""
self.file_path = text_embedding_path
self.embedding_model = embedding_model
self.df = pd.read_csv(text_embedding_path, usecols=['embedding', 'name'])
def get_embedding_vectors(self, question):
"""
Generates an embedding vector for the given question using the specified embedding model.
Parameters
----------
question : str
The question to generate the embedding vector for.
Returns
-------
numpy.ndarray
The embedding vector for the question.
"""
question_vector = get_embedding(question, engine=self.embedding_model)
return question_vector
def get_similarities(self, question_vector, num_rows=2):
"""
        Calculates the cosine similarity of each stored embedding to the given question vector
        and returns the num_rows most similar rows of the DataFrame.
Parameters
----------
question_vector : numpy.ndarray
The embedding vector of the question to compare with.
"""
self.df["similarities"] = self.df['embedding'].apply(lambda x: cosine_similarity(np.array(ast.literal_eval(x)),
question_vector))
similar_rows = self.df.sort_values(by='similarities', ascending=False).head(num_rows)
return similar_rows
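# Example (sketch): the CSV path and question below are placeholders; this assumes
# openai.api_key has been configured and the CSV holds 'name'/'embedding' columns
# produced with text-embedding-ada-002.
#
#   embedder = LocalTextEmbedding('embeddings/enabled-command_embedding.csv')
#   vector = embedder.get_embedding_vectors('block an IP on the firewall')
#   top_matches = embedder.get_similarities(vector, num_rows=3)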
class PineConeTextEmbedding:
"""
A class to represent a collection of text embeddings.
...
Attributes
----------
    embedding_index : pinecone.Index
        a Pinecone index that stores the text embeddings.
    embedding_model : str
        a string representing the name of the embedding model used.
    Methods
    -------
    get_embedding_vectors(question)
        Generates an embedding vector for the given question.
    get_similarities(question_vector, top_k=2)
        Queries the Pinecone index and returns the top_k vectors most similar to the question vector.
"""
def __init__(self, embedding_index, embedding_model="text-embedding-ada-002"):
"""
Constructs all the necessary attributes for the textEmbedding object.
Parameters
----------
        embedding_index : pinecone.Index
            the Pinecone index that stores the text embeddings.
embedding_model : str
name of the embedding model to use.
"""
self.embedding_index = embedding_index
self.embedding_model = embedding_model
def get_embedding_vectors(self, question):
"""
Generates an embedding vector for the given question using the specified embedding model.
Parameters
----------
question : str
The question to generate the embedding vector for.
Returns
-------
numpy.ndarray
The embedding vector for the question.
"""
question_vector = get_embedding(question, engine=self.embedding_model)
return question_vector
def get_similarities(self, question_vector, top_k=2):
"""
        Queries the Pinecone index for the vectors most similar to the given question vector
        and returns the top_k matches.
Parameters
----------
question_vector : numpy.ndarray
The embedding vector of the question to compare with.
"""
similar_rows = self.embedding_index.query(vector=question_vector, top_k=top_k)
return similar_rows
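# Example (sketch): assumes pinecone has been imported and initialised elsewhere
# (pinecone.init(...)); the index name and question are placeholders.
#
#   index = pinecone.Index(index_name='enabled-commands-index')
#   embedder = PineConeTextEmbedding(index)
#   vector = embedder.get_embedding_vectors('look up the reputation of a domain')
#   matches = embedder.get_similarities(vector, top_k=3)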
class SOARClient:
ERROR_ENTRY_TYPE = 4
DEBUG_FILE_ENTRY_TYPE = 16
SECTIONS_HEADER_REGEX = re.compile(
r"^(Context Outputs|Human Readable section|Raw Response section)"
)
RAW_RESPONSE_HEADER = re.compile(r"^Raw Response section")
CONTEXT_HEADER = re.compile(r"Context Outputs:")
HUMAN_READABLE_HEADER = re.compile(r"Human Readable section")
FULL_LOG_REGEX = re.compile(r".*Full Integration Log")
def __init__(self, url, api_key, verify_ssl):
self.url = url
self.api_key = api_key
self.verify_ssl = verify_ssl
self.headers = {
"Content-Type": "application/json",
"Accept": "application/json",
'Authorization': api_key
}
def _get_playground_id(self):
"""Retrieves Playground ID from the remote XSOAR instance."""
def playground_filter(page: int = 0):
return {"filter": {"type": [9], "page": page}}
client = demisto_client.configure(base_url=self.url, api_key=self.api_key, verify_ssl=self.verify_ssl)
answer = client.search_investigations(filter=playground_filter())
if answer.total == 0:
raise RuntimeError("No playgrounds were detected in the environment.")
elif answer.total == 1:
result = answer.data[0].id
else:
# if found more than one playground, try to filter to results against the current user
user_data, response, _ = client.generic_request(
path="/user",
method="GET",
content_type="application/json",
response_type=object,
)
if response != 200:
raise RuntimeError("Cannot find username")
username = user_data.get("username")
def filter_by_creating_user_id(playground):
return playground.creating_user_id == username
playgrounds = list(filter(filter_by_creating_user_id, answer.data))
for i in range(int((answer.total - 1) / len(answer.data))):
playgrounds.extend(
filter(
filter_by_creating_user_id,
client.search_investigations(
filter=playground_filter(i + 1)
).data,
)
)
if len(playgrounds) != 1:
raise RuntimeError(
f"There is more than one playground to the user. "
f"Number of playgrounds is: {len(playgrounds)}"
)
result = playgrounds[0].id
return result
def _run_query(self, playground_id: str, query):
"""Runs a query on XSOAR instance and prints the output.
Args:
playground_id: The investigation ID of the playground.
Returns:
list. A list of the log IDs if debug mode is on, otherwise an empty list.
"""
update_entry = {"investigationId": playground_id, "data": query}
client = demisto_client.configure(base_url=self.url, api_key=self.api_key, verify_ssl=self.verify_ssl)
answer = client.investigation_add_entries_sync(update_entry=update_entry)
if not answer:
print ("User command did not run, make sure it was written correctly.")
log_ids = []
for entry in answer:
# answer should have entries with `contents` - the readable output of the command
if entry.parent_content:
print("[yellow]### Command:[/yellow]")
if entry.contents:
print("[yellow]## Readable Output[/yellow]")
if entry.type == self.ERROR_ENTRY_TYPE:
print(f"[red]{entry.contents}[/red]\n")
else:
print(f"{entry.contents}\n")
# and entries with `file_id`s defined, that is the fileID of the debug log file
if entry.type == self.DEBUG_FILE_ENTRY_TYPE:
log_ids.append(entry.id)
return log_ids
@property
def up(self):
try:
requests.get(self.url + "/health", headers=self.headers, verify=self.verify_ssl)
except Exception as e:
print("Error Occurred. " + str(e.args))
return False
else:
return True
def create_incident(self, incident_type, incident_owner, incident_name, incident_severity, incident_detail):
data = {
"type": incident_type,
"name": incident_name,
"details": incident_detail,
"severity": incident_severity,
"owner": incident_owner,
"createInvestigation": True
}
try:
response_api = requests.post(self.url + "/incident", headers=self.headers, data=json.dumps(data),
verify=self.verify_ssl)
except Exception as e:
print("Error Occurred. " + str(e.args))
return str(e.args)
else:
return response_api.text
def search_incident(self, data):
try:
response_api = requests.post(self.url + "/incidents/search", headers=self.headers,
data=json.dumps(data), verify=self.verify_ssl)
except Exception as e:
print("Error Occurred. " + str(e.args))
return str(e.args)
else:
if response_api.status_code == 200:
return response_api.text
else:
return response_api.status_code
def execute_command(self, command: str, output_path: list, return_type: str = 'wr'):
"""
        This function executes a specific command on the Demisto client and retrieves the result. It also allows you to
        specify the output paths from which to fetch the execution results and the desired return type.
Parameters
----------
command : str
The command to be executed on the Demisto client.
output_path : list
A list of output paths from where to fetch the command execution results. The function also deletes
the context entry of each output path before the execution.
return_type : str, optional
            The return type of the command execution result. It can be 'both', 'context' or 'wr' (default).
'both' - Both the output context and war room entries.
'context' - Output context only.
'wr' - War room entries only.
Returns
-------
If return_type is 'both', it returns a tuple of (output_context, war_room_entries).
If return_type is 'context', it returns a list of output_context.
If return_type is 'wr', it returns a list of war_room_entries.
Example
-------
>>> execute_command('!ip ip="8.8.8.8"', ["AutoFocus", "IPinfo"])
[{'IndicatorType': 'IP', 'IndicatorValue': '8.8.8.8', 'ASN': 'AS15169', 'Country': 'US', ...}]
"""
client = demisto_client.configure(base_url=self.url, api_key=self.api_key, verify_ssl=self.verify_ssl)
playground_id = self._get_playground_id()
wr_entries = []
output_context = []
if output_path == ['-'] or "WarRoomOutput" in output_path:
output_path = []
try:
if output_path:
for output in output_path:
delete_context_entry = demisto_client.demisto_api.UpdateEntry(data=f"!DeleteContext key={output}",
investigation_id=playground_id)
client.investigation_add_entries_sync(update_entry=delete_context_entry)
update_entry = demisto_client.demisto_api.UpdateEntry(data=command,
investigation_id=playground_id)
wr_entries = client.investigation_add_entries_sync(update_entry=update_entry)
except ApiException as e:
print("Exception when calling DefaultApi->investigation_add_entries_sync: %s\n" % e)
for output in output_path:
context_query = {"query": "${"+output+"}"}
context = client.generic_request(
f"investigation/{playground_id}/context", "POST", context_query
)[0]
output_context.append(context)
if return_type == 'both':
return output_context, wr_entries
elif return_type == 'context':
return output_context
else:
return wr_entries
def enrich_indicator(self, indicator: dict, return_type: str):
"""
This function is used to enrich the input indicator (i.e., domain, IP, URL, File, CVE) using
a specific command that retrieves additional information about the indicator from a predefined
data source like AutoFocus or IPinfo.
Parameters
----------
indicator : dict
A dictionary that represents the indicator. The key should be one of the following types:
'Domain', 'IP', 'URL', 'File', 'CVE'. The value is a list of indicators to be enriched.
        return_type : str
            The return type passed through to execute_command. It can be 'both', 'context' or 'wr'.
            'both' - Both the output context and war room entries.
            'context' - Output context only.
            'wr' - War room entries only.
Returns
-------
results : list
A list of enriched indicators represented as dictionaries. Each enriched indicator includes
the original data from the input indicator plus the additional information retrieved by the
executed command. If the indicator type doesn't match any predefined type ('Domain', 'IP',
'URL', 'File', 'CVE'), no enrichment will be made and the function will return an empty list.
Example
-------
        >>> enrich_indicator({'IP': ['8.8.8.8']}, 'context')
[{'IndicatorType': 'IP', 'IndicatorValue': '8.8.8.8', 'ASN': 'AS15169', 'Country': 'US', ...}]
"""
results = []
if indicator.get('Domain'):
result = {}
for domain in indicator.get('Domain'):
enriched_entity = self.execute_command(command=f'!domain domain="{domain}"', output_path=["AutoFocus"],
return_type=return_type)
for dictionary in enriched_entity:
result.update(ast.literal_eval(dictionary)['Domain'])
results.append(Decorator.clean_dict(result))
if indicator.get('IP'):
result = {}
for ip in indicator.get('IP'):
enriched_entity = self.execute_command(command=f'!ip ip="{ip}"', output_path=["AutoFocus", "IPinfo"],
return_type=return_type)
for dictionary in enriched_entity:
result.update(ast.literal_eval(dictionary)['IP'])
results.append(Decorator.clean_dict(result))
if indicator.get('URL'):
result = {}
for url in indicator.get('URL'):
enriched_entity = self.execute_command(command=f'!url url="{url}"', output_path=["AutoFocus"],
return_type=return_type)
for dictionary in enriched_entity:
result.update(ast.literal_eval(dictionary)['URL'])
results.append(Decorator.clean_dict(result))
if indicator.get('File'):
for file in indicator.get('File'):
result = {}
enriched_entity = self.execute_command(command=f'!file file="{file}"', output_path=["File"],
return_type=return_type)
for dictionary in enriched_entity:
result.update(ast.literal_eval(dictionary)['File'])
results.append(Decorator.clean_dict(result))
if indicator.get('CVE'):
for cve in indicator.get('CVE'):
result = {}
enriched_entity = self.execute_command(command=f'!cve cve_id="{cve}"', output_path=["CVE"],
return_type=return_type)
for dictionary in enriched_entity:
result.update(ast.literal_eval(dictionary)['CVE'])
results.append(Decorator.clean_dict(result))
return results
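if __name__ == '__main__':
    # Minimal sketch: the URL and API key are placeholders, not real credentials,
    # and the command/output path mirror the enrichment calls used above.
    client = SOARClient(url='https://xsoar.example.com', api_key='REPLACE_ME', verify_ssl=False)
    if client.up:
        context = client.execute_command('!ip ip="8.8.8.8"', output_path=['IPinfo'], return_type='context')
        print(context)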
| [] |
2024-01-10 | ayman-m/gaze | data~scripts~predictor_intent.py | import torch
import pickle
import ast
import os
import pandas as pd
import numpy as np
import openai
from pathlib import Path
from dotenv import load_dotenv
from transformers import DistilBertTokenizerFast, DistilBertForSequenceClassification, Trainer
from openai.embeddings_utils import get_embedding
from openai.embeddings_utils import cosine_similarity
# Load environment variables from .env file if it exists
env_path = Path('.') / '.env'
if env_path.exists():
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
# Get user inputs
model_choice = input("Choose a model (BERT or ADA): ")
prompt = input("Enter a prompt to test: ")
# Check if the chosen model is valid
if model_choice not in ["BERT", "ADA"]:
print("Invalid model choice. Please choose a valid model.")
exit()
if model_choice == "BERT":
# Load the trained model
model = DistilBertForSequenceClassification.from_pretrained("models/bert")
model.eval()
# Load the tokenizer
tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# Load the label encoder
with open('models/bert/label_encoder.pkl', 'rb') as file:
label_encoder = pickle.load(file)
# Prepare the text data into the correct format for the model
inputs = tokenizer(prompt, truncation=True, padding=True, max_length=512, return_tensors="pt")
# Perform inference
with torch.no_grad():
logits = model(**inputs).logits
# Get the predicted class indices and their corresponding probabilities
probabilities = torch.softmax(logits, dim=-1)
predicted_class_indices = torch.argsort(probabilities, descending=True)[0]
predicted_class_probabilities = probabilities[0, predicted_class_indices]
# Convert the predicted class indices back into their corresponding labels
predicted_labels = label_encoder.inverse_transform(predicted_class_indices)
# Print the predicted labels and their probabilities
for label, probability in zip(predicted_labels, predicted_class_probabilities):
print("Intent Name:", label)
print("Probability:", probability.item())
print()
if model_choice == "ADA":
df = pd.read_csv("data/processed/embedding/intents/ada-basic-intent-embedding.csv", usecols=['embedding', 'name'])
question_vector = get_embedding(prompt, engine="text-embedding-ada-002")
print (question_vector)
df["similarities"] = df['embedding'].apply(lambda x: cosine_similarity(np.array(ast.literal_eval(x)),
question_vector))
similar_rows = df.sort_values(by='similarities', ascending=False).head(3)
# Print the similar intent names and their similarities
for index, row in similar_rows.iterrows():
print("Intent Name:", row['name'])
print("Similarity:", row['similarities'])
print()
| [
"Enter a prompt to test: "
] |
2024-01-10 | ayman-m/gaze | data~scripts~predictor_command.py | import ast
import pinecone
import os
import pandas as pd
import numpy as np
import openai
import json
import time
from pathlib import Path
from dotenv import load_dotenv
from openai.embeddings_utils import get_embedding
from openai.embeddings_utils import cosine_similarity
# Load environment variables from .env file if it exists
env_path = Path('.') / '.env'
if env_path.exists():
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
PINECONE_KEY = os.environ.get("PINECONE_KEY")
PINECONE_ENV = os.environ.get("PINECONE_ENV")
def parse_embedding(embedding_str):
return np.array(json.loads(embedding_str))
# Get user inputs
embedding_location = input("Choose embedding location (Local or PineCone): ")
model_choice = input("Choose a command list (All or Enabled): ")
prompt = input("Enter a prompt to test: ")
df = None
# Check if the chosen model is valid
if model_choice not in ["All", "Enabled"]:
print("Invalid list choice. Please choose a valid command list.")
exit()
# Check if the chosen model is valid
if embedding_location not in ["Local", "PineCone"]:
print("Invalid location choice. Please choose a valid location.")
exit()
if embedding_location == "Local":
start_time = time.time()
if model_choice == "All":
df = pd.read_csv("data/processed/embedding/commands/ada-all-command-embedding.csv", usecols=['embedding', 'name'])
if model_choice == "Enabled":
df = pd.read_csv("data/processed/embedding/commands/ada-enabled-command-embedding.csv", usecols=['embedding', 'name'])
question_vector = get_embedding(prompt, engine="text-embedding-ada-002")
df["similarities"] = df['embedding'].apply(lambda x: cosine_similarity(np.array(ast.literal_eval(x)),
question_vector))
similar_rows = df.sort_values(by='similarities', ascending=False).head(3)
for index, row in similar_rows.iterrows():
print("Command Name:", row['name'])
print("Similarity:", row['similarities'])
print()
print("Time taken: {} seconds".format(time.time() - start_time))
elif embedding_location == "PineCone":
start_time = time.time()
pinecone.init(api_key=PINECONE_KEY, environment=PINECONE_ENV)
command_index = pinecone.Index(index_name='enabled-commands-index')
# Query an example vector
question_vector = get_embedding(prompt, engine="text-embedding-ada-002")
similar_rows = command_index.query(vector=question_vector, top_k=5)
print (similar_rows)
print("Time taken: {} seconds".format(time.time() - start_time))
# Print the similar intent names and their similarities
| [
"Enter a prompt to test: "
] |
2024-01-10 | ayman-m/gaze | data~scripts~ada-embedding~intents.py | import pandas as pd
import openai
import os
from pathlib import Path
from dotenv import load_dotenv
from openai.embeddings_utils import get_embedding
import csv
env_path = Path('..') / '.env'
if env_path.exists():
load_dotenv()
# Retrieve environment variables
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
csv_file = open('data/embeddings/intents/ada-basic-intent-embedding.csv', 'w', newline='')
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['name', 'embedding'])
intent_folder = 'data/source/intents/basic' # Path to the folder containing intent files
# Loop through the intent files
for file_name in os.listdir(intent_folder):
file_path = os.path.join(intent_folder, file_name)
intent_name = os.path.splitext(file_name)[0] # Use file name without extension as intent name
# Read the intent file
with open(file_path, 'r') as file:
samples = file.read().splitlines()
# Apply the get_embedding function to each sample and write to CSV
for sample in samples:
embedding = get_embedding(sample, engine='text-embedding-ada-002')
csv_writer.writerow([intent_name, embedding])
# Close the CSV file
csv_file.close()
| [] |
2024-01-10 | Moshiii/resumelab_stremlit | pages~1_Refine_Resume_with_JD.py | import streamlit as st
import PyPDF2
import os
import io
import time
from langchain import PromptTemplate, OpenAI, LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.chains import create_extraction_chain
import fitz
test = False
def read_resume(text_content):
    prompt_template = '''please reformat the following text to a resume in the following format:
resume_text:
{text_content}
Desired format:
Summary:
personal summary
Skills:
list of skill limited to 10
Experience:
company, role
details
company, role
details
...
Projects:
project name (skill list)
details
project name (skill list)
details
...
    Education:
university name and major | start time - end time
university name and major | start time - end time
...
'''
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template)
)
result = llm_chain.predict(text_content=st.session_state['page_jd_text_content'])
return result
def read_jd(text_content):
    prompt_template = '''please reformat the following text to a Job description in the following format:
resume_text:
{text_content}
Desired format:
Job Position:
Position name
Education qualification:
Degree and major
Experience requirement:
Experience and number of years
Programming Language:
list of Programming Languages
Hard skill:
list of Hard skill
Soft skill:
list of Soft skill
    Job responsibility:
    summarized bullet points of responsibility
Company Value:
    summarized company value and vision paragraph
'''
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template)
)
result = llm_chain.predict(text_content=st.session_state['page_jd_jd_text_area'])
return result
def generate_refined_resume():
prompt_template = '''please polish the following resume based on the Job description.
    please generate as close as possible
Job description:
{JD}
resume_text:
{resume}
'''
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template)
)
refined_resume = llm_chain.predict(
JD=st.session_state['page_jd_JD'], resume=st.session_state['page_jd_resume'])
return refined_resume
if 'page_jd_if_upload_clicked' not in st.session_state:
st.session_state['page_jd_if_upload_clicked'] = False
if 'page_jd_if_resume_uploaded' not in st.session_state:
st.session_state['page_jd_if_resume_uploaded'] = False
if 'page_jd_if_validate_clicked' not in st.session_state:
st.session_state['page_jd_if_validate_clicked'] = False
if 'page_jd_if_generate_clicked' not in st.session_state:
st.session_state['page_jd_if_generate_clicked'] = False
if 'page_jd_resume' not in st.session_state:
st.session_state['page_jd_resume'] = ""
if 'page_jd_JD' not in st.session_state:
st.session_state['page_jd_JD'] = ""
if 'page_jd_jd_text_area' not in st.session_state:
st.session_state['page_jd_jd_text_area'] = ""
if 'page_jd_generated' not in st.session_state:
st.session_state['page_jd_generated'] = False
if 'page_jd_refined_resume' not in st.session_state:
st.session_state['page_jd_refined_resume'] = ""
if 'page_jd_text_content' not in st.session_state:
st.session_state['page_jd_text_content'] = ""
st.markdown("Step 1. Provide your OpenAI API Key")
st.markdown("Step 2. Upload your Resume and Job Description")
st.markdown("Step 3. Click 'Read Resume and JD.' AI will parse your files to text, and you may edit it before moving to the next step.")
st.markdown("Step 5. Click 'Optimize based on JD.' AI will polish your resume based on the JD.")
st.markdown("Step 6. Click 'Download Resume.' to save your result")
API_O = st.text_input('OPENAI_API_KEY', st.session_state['openAI_key'],type="password")
# API_O = st.secrets["OPENAI_API_KEY"]
MODEL = "gpt-3.5-turbo"
if API_O:
llm = ChatOpenAI(temperature=0, openai_api_key=API_O,
model_name=MODEL, verbose=False)
else:
st.info("please provide API Key")
uploaded_file = st.file_uploader("Choose a file ", type="pdf")
jd_text_area = st.text_area('Upload JD', st.session_state['page_jd_JD'], 1000)
if st.button("Read Resume and JD"):
if uploaded_file is not None and jd_text_area !="":
st.session_state['page_jd_if_upload_clicked'] = True
else:
st.info("please make sure you provide all info")
if st.session_state['page_jd_if_upload_clicked'] == True:
if st.session_state['page_jd_text_content']=="":
pdf_reader = PyPDF2.PdfReader(uploaded_file)
with open(os.path.join("tempDir", uploaded_file.name), "wb") as f:
f.write(uploaded_file.getbuffer())
pdf_path = os.path.join("tempDir", uploaded_file.name)
doc = fitz.open(pdf_path)
text_content = ""
for page_num in range(len(doc)):
page = doc.load_page(page_num)
text_content += page.get_text()
st.session_state['page_jd_text_content'] = text_content
st.session_state['page_jd_jd_text_area'] = jd_text_area
st.session_state['page_jd_if_resume_uploaded'] = True
if st.session_state['page_jd_if_resume_uploaded']:
with st.spinner(text='Reading In progress'):
if test:
st.session_state['page_jd_resume'] = "test resume"
st.session_state['page_jd_JD'] = "test JD"
if st.session_state['page_jd_resume'] == "":
result = read_resume(st.session_state['page_jd_text_content'])
st.session_state['page_jd_resume'] = result
if st.session_state['page_jd_JD'] == "":
jd_result = read_jd(st.session_state['page_jd_jd_text_area'])
st.session_state['page_jd_JD'] = jd_result
st.success('Resume reading Completed')
st.session_state['page_jd_resume'] = st.text_area('Resume', st.session_state['page_jd_resume'], 1000)
st.session_state['page_jd_JD'] = st.text_area('JD', st.session_state['page_jd_JD'], 1000)
if st.button("Optimize based on JD"):
st.session_state['page_jd_if_generate_clicked'] = True
if st.session_state['page_jd_if_generate_clicked']:
with st.spinner(text='Optimize In progress'):
if test:
st.session_state['page_jd_refined_resume'] = "test refined_resume"
if st.session_state['page_jd_refined_resume'] == "":
refined_resume = generate_refined_resume()
st.session_state['page_jd_refined_resume'] = refined_resume
st.success('Resume Refined')
st.session_state['page_jd_refined_resume'] = st.text_area('Resume', st.session_state['page_jd_refined_resume'], 1000)
st.session_state['page_jd_generated'] = True
st.download_button('Download Resume', st.session_state['page_jd_refined_resume'],
file_name="Polished_resume_ResumeLab")
| [
"plrease reformat the following text to a resume in the following format:\n resume_text:\n {text_content}\n Desired format: \n Summary:\n personal summary\n Skills: \n list of skill limited to 10\n Experience: \n company, role\n details\n company, role\n details\n ...\n Projects: \n project name (skill list)\n details\n project name (skill list)\n details\n ...\n Eduation:\n university name and major | start time - end time\n university name and major | start time - end time\n ...\n ",
"please polish the following resume based on the Job description.\n please generate as close as possiable\n Job description:\n {JD}\n resume_text:\n {resume}\n ",
"plrease reformat the following text to a Job description in the following format:\n resume_text:\n {text_content}\n Desired format: \n Job Position:\n Position name\n Education qualification: \n Degree and major\n Experience requirement: \n Experience and number of years\n Programming Language: \n list of Programming Languages\n Hard skill:\n list of Hard skill\n Soft skill:\n list of Soft skill\n Job respobsiability:\n summerized bullet points of responsiability\n Company Value:\n summerized company value and vision paragraph\n "
] |