date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | PacktPublishing/Hands-On-ROS-for-Robotics-Programming | Chapter11_OpenAI_Gym~cart-pole_ROS~cartpole_dqn~scripts~cartpole_dqn_debug.py | #!/usr/bin/env python
import rospy
import os
import random
import gym
import math
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from std_msgs.msg import Int16MultiArray
# Publish the reward of each episode
pub_reward = rospy.Publisher('reward', Int16MultiArray, queue_size=10)
reward_msg = Int16MultiArray()
from std_msgs.msg import Int16MultiArray
# Publish the number of ticks of each episode
pub_ticks = rospy.Publisher('ticks', Int16MultiArray, queue_size=10)
ticks_msg = Int16MultiArray()
# import our training environment
from openai_ros.task_envs.cartpole_stay_up import stay_up
# FUNCTIONS +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# from class DQNCartPoleSolver
# Init model
model = Sequential()
model.add(Dense(24, input_dim=4, activation='tanh'))
model.add(Dense(48, activation='tanh'))
model.add(Dense(2, activation='linear'))
model.compile(loss='mse', optimizer=Adam(lr=alpha, decay=alpha_decay))
def remember(memory, state, action, reward, next_state, done):
memory.append((state, action, reward, next_state, done))
def choose_action(env, model, state, epsilon):
return env.action_space.sample() if (np.random.random() <= epsilon) else np.argmax(model.predict(state))
def get_epsilon(epsilon, epsilon_min, epsilon_decay, t):
return max(epsilon_min, min(epsilon, 1.0 - math.log10((t + 1) * epsilon_decay)))
def preprocess_state(env, state):
env.reset()
return np.reshape(state, [1, 4])
def replay(memory, model, gamma, epsilon, epsilon_min, epsilon_decay, batch_size):
x_batch, y_batch = [], []
minibatch = random.sample(
memory, min(len(memory), batch_size))
for state, action, reward, next_state, done in minibatch:
y_target = model.predict(state)
y_target[0][action] = reward if done else reward + gamma * np.max(model.predict(next_state)[0])
x_batch.append(state[0])
y_batch.append(y_target[0])
model.fit(np.array(x_batch), np.array(y_batch), batch_size=len(x_batch), verbose=0)
if epsilon > epsilon_min:
epsilon *= epsilon_decay
# MAIN FLOW +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
rospy.init_node('cartpole_n1try_algorithm', anonymous=True, log_level=rospy.INFO)
# INITIALIZATION (as of class DQNCartPoleSolver)
# -- Commented out are the default values in the setup of the class
# -- to be applied if the ROS parameter didn't exist
# n_episodes=1000
# n_win_ticks=195
# min_episodes= 100
# max_env_steps=None
# gamma=1.0
# epsilon=1.0, epsilon_min=0.01, epsilon_log_decay=0.995
# alpha=0.01, alpha_decay=0.01
# batch_size=64
# monitor=False
# quiet=False
n_episodes = rospy.get_param('/cartpole_v0/episodes_training')
n_win_ticks = rospy.get_param('/cartpole_v0/n_win_ticks')
min_episodes = rospy.get_param('/cartpole_v0/min_episodes')
max_env_steps = None
gamma = rospy.get_param('/cartpole_v0/gamma')
epsilon = rospy.get_param('/cartpole_v0/epsilon')
epsilon_min = rospy.get_param('/cartpole_v0/epsilon_min')
epsilon_log_decay = rospy.get_param('/cartpole_v0/epsilon_decay')
alpha = rospy.get_param('/cartpole_v0/alpha')
alpha_decay = rospy.get_param('/cartpole_v0/alpha_decay')
batch_size = rospy.get_param('/cartpole_v0/batch_size')
monitor = rospy.get_param('/cartpole_v0/monitor')
quiet = rospy.get_param('/cartpole_v0/quiet')
# Commented out are the self.__init__ assignments of class DQNCartPoleSolver whose values are read directly from the ROS parameter server
# Uncommented (active) lines are those whose variable name does not coincide with the ROS parameter name
memory = deque(maxlen=100000)
env = gym.make('CartPoleStayUp-v0')
if monitor: env = gym.wrappers.Monitor(env, '../data/cartpole-1', force=True)
# gamma = gamma
# epsilon = epsilon
# epsilon_min = epsilon_min
epsilon_decay = epsilon_log_decay
# alpha = alpha
# alpha_decay = alpha_decay
# n_episodes = n_episodes
# n_win_ticks = n_win_ticks
# min_episodes = min_episodes
# batch_size = batch_size
# quiet = quiet
if max_env_steps is not None: env._max_episode_steps = max_env_steps
# FUNCTION run()
# from class DQNCartPoleSolver
print 'I am in' ##############################################################
rate = rospy.Rate(30)
scores = deque(maxlen=min_episodes)
# Exert an action to start
action = 0 #LEFT
#action = 1 #RIGHT
#action = 2 #LEFT BIG
#action = 3 #RIGHT BIG
# Compute the resulting state
env.reset()
state, reward0, done0, _ = env.step(action)
for e in range(n_episodes):
print 'Episode ' + str(e) ################################################
state = preprocess_state(env, state) ### -----!!!!!! state NOT DEFINED in first iteration e=0
print 'Action in Episode ' + str(e) ######################################
done = False
i = 0 # TICKS
cumulated_reward = 0 # REWARD
while not done:
print 'Action in Episode ' + str(e) ##################################
action = choose_action(env, model, state, get_epsilon(epsilon, epsilon_min, epsilon_decay, e))
next_state, reward, done, _ = env.step(action)
next_state = preprocess_state(env, next_state)
remember(memory, state, action, reward, next_state, done)
state = next_state
i += 1 # TICKS
cumulated_reward += reward # REWARD
scores.append(i)
mean_score = np.mean(scores)
if mean_score >= n_win_ticks and e >= min_episodes:
if not quiet: print('Ran {} episodes. Solved after {} trials'.format(e, e - min_episodes))
return e - min_episodes
if e % 1 == 0 and not quiet:
print('[Episode {}] {} ticks - Mean survival time over last {} episodes was {} ticks.'.format(e, i, min_episodes ,mean_score))
reward_msg.data = [e, mean_score] # episode, mean_score
pub_reward.publish(reward_msg) # mean_score
ticks_msg.data = [e, i] # episode, TICKS
pub_ticks.publish(ticks_msg) # TICKS
replay(memory, model, gamma, epsilon, epsilon_min, epsilon_decay, batch_size)
if not quiet: print('Did not solve after {} episodes'.format(e))
return e | [] |
2024-01-10 | viralgupta/portfolio | backend~server.py | from dotenv import load_dotenv
from flask import Flask, Response
from flask_cors import CORS
import time
from langchain.embeddings import GooglePalmEmbeddings
from langchain.llms.google_palm import GooglePalm
from langchain.vectorstores.chroma import Chroma
from langchain.chains import VectorDBQA
import os
app = Flask(__name__)
CORS(app) # Enable CORS for all routes
def read_files_in_folder():
data = []
sources = []
file_count=0
if not os.path.exists("./data/files"):
print(f"The folder ./data does not exist.")
return data, sources, file_count
for filename in os.listdir("./data/files"):
file_path = os.path.join("./data/files/", filename)
if filename == "file_count.txt":
with open(file_path, 'r', encoding='utf-8') as file:
file_content = file.read()
file_count = int(file_content)
continue
if os.path.isfile(file_path) and filename.endswith(".txt"):
sources.append(filename)
with open(file_path, 'r', encoding='utf-8') as file:
file_content = file.read()
data.append(file_content)
return data, sources, file_count
def create_vector_db(text_chunks):
embeddings = GooglePalmEmbeddings(google_api_key=os.environ['API_KEY'])
vector_store = Chroma.from_texts(texts=text_chunks, embedding=embeddings, persist_directory="./data/vector_store")
vector_store.persist()
return vector_store
def get_vector_db():
embeddings = GooglePalmEmbeddings(google_api_key=os.environ['API_KEY'])
vector_store = Chroma(persist_directory="./data/vector_store",embedding_function=embeddings)
return vector_store
@app.route('/api/askme/<question>')
def ask_me(question):
# load_dotenv(dotenv_path="./.envfile")
load_dotenv()
def generate():
data, sources, count = read_files_in_folder()
if count < len(sources):
store = create_vector_db(data)
with open("./data/files/file_count.txt", 'w', encoding='utf-8') as file:
file.write(str(len(sources)))
else:
store = get_vector_db()
QA = VectorDBQA.from_chain_type(llm=GooglePalm(google_api_key=os.environ['API_KEY']), chain_type="stuff", vectorstore=store)
answer = QA({'query': question}, return_only_outputs=True)
words = answer['result'].split()
for word in words:
time.sleep(0.1)
yield f"data: {word}\n\n"
return Response(generate(), content_type='text/event-stream')
if __name__ == "__main__":
port = 5000
app.run(host="0.0.0.0",port=port, debug=True) | [] |
2024-01-10 | Naveen-X/Mr.Stark | help_mod.py | class Script(object):
AI = [
{
"desc": "Generates AI response from OpenAI",
"cmds": ["gpt", "askgpt", "chatgpt"],
"usage": "/gpt Who are you?"
}
]
CARBON = [
{
"desc": "Creates a carbon in doc format",
"cmds": ["carbon"],
"usage": "/carbon reply to a text message or give some text as input"
},
{
"desc": "Creates a carbon in image format",
"cmds": ["icarbon"],
"usage": "/icarbon reply to a text message or give some text as input"
}
]
DEV = [
{
"desc": "Executes python code",
"cmds": ["eval", "e"],
"usage": "/e [python code]"
},
{
"desc": "Run bash/terminal cmd",
"cmds": ["bash", "sh"],
"usage": "/bash [cmd]"
}
]
FUN = [
{
"desc": "Get a cat image",
"cmds": ["cat"],
"usage": "/cat"
},
{
"desc": "Get a dog image",
"cmds": ["dog"],
"usage": "/dog"
},
{
"desc": "Get a panda image",
"cmds": ["panda"],
"usage": "/panda"
},
{
"desc": "Get a bored gif 🥱",
"cmds": ["bored"],
"usage": "/bored"
},
{
"desc": "get a image or gif of pikachu",
"cmds": ["pikachu"],
"usage": "/pikachu"
},
{
"desc": "Get a patting gif",
"cmds": ["pat"],
"usage": "/pat"
},
{
"desc": "Get a hugging gif",
"cmds": ["hug"],
"usage": "/hug"
},
{
"desc": "Get a winking gif",
"cmds": ["wink"],
"usage": "/wink"
}
]
FILETOOLS = [
{
"desc": "Downloads File to Local",
"cmds": ["download"],
"usage": "/download [reply to a doc/vid]"
},
{
"desc": "Upload Files from Local",
"cmds": ["upload"],
"usage": "/upload [filename/path of the file]"
},
]
GOOGLE = [
{
"desc": "Google searcher!",
"cmds": ["gs", "google"],
"usage": "/gs [text to search]"
},
]
GTA5 = [
{
"desc": "Gta-V wasted effect on replied image",
"cmds": ["wasted"],
"usage": "/wasted [reply to a photo]"
},
{
"desc": "Gta-V mission passed effect on replied image",
"cmds": ["passed"],
"usage": "/passed [reply to a photo]"
}
]
IMDB = [
{
"desc": "Get information about a Movie/Series",
"cmds": ["imdb"],
"usage": "/imdb [Movename/Series Name]"
}
]
IMAGETOOLS = [
{
"desc": "Create ur own memes",
"cmds": ["memify"],
"usage": "/memify [Upper text ; Lower text] | reply to a media"
},
{
"desc": "Crop Image Into Round & Cool Sticker",
"cmds": ["circle"],
"usage": "/circle [reply to a photo or sticker]"
},
{
"desc": "Get Fake Certificate With Given Name!",
"cmds": ["genca", "gencertificate"],
"usage": "/genca [Name on the certificate]"
},
{
"desc": "Enhance the replied notes!",
"cmds": ["hwn"],
"usage": "/hwn [Reply to Notes To Enhance It!]"
},
{
"desc": "Add Glitch effect to replied image",
"cmds": ["glitch"],
"usage": "/glitch [Reply to a image]"
},
{
"desc": "ghost the replied image/sticker",
"cmds": ["ghost"],
"usage": "/ghost [reply to a image or sticker]"
},
{
"desc": "Sketch the replied image",
"cmds": ["sketch"],
"usage": "/genca [reply to a image or sticker]"
},
{
"desc": "Colorize the replied photo",
"cmds": ["color"],
"usage": "/color [reply to a image or sticker]"
},
]
INSTADL = [
{
"desc": "Download post/reel from instagram",
"cmds": ["insta", "instadl", "insdl", "instadownload"],
"usage": "/instadl [instagram post/reel link]"
}
]
LOGOS = [
{
"desc": "Makes a logo for you with black bg",
"cmds": ["alogo"],
"usage": "/alogo [text for logo]"
},
{
"desc": "Makes a logo for you, try it out",
"cmds": ["slogo"],
"usage": "/slogo [text for logo]"
}
]
MISC = [
{
"desc": "sends the message u input or reply",
"cmds": ["send"],
"usage": "/send [ message / reply to message ]"
},
{
"desc": "Emojify text",
"cmds": ["emoji"],
"usage": "/emoji Stark"
},
{
"desc": "Emojify text with custom emoji",
"cmds": ["cmoji"],
"usage": "/send 🔥 Stark"
},
{
"desc": "Weebify ur text",
"cmds": ["weeb", "weebify"],
"usage": "/weebify [ input or reply to a message ]"
},
{
"desc": "Sends F",
"cmds": ["f", "ftext"],
"usage": "/f or /f [ custom emoji or a small text ]"
},
{
"desc": "Make a yt comment",
"cmds": ["ytc"],
"usage": "/ytc Hello I'm Stark"
},
{
"desc": "Makes a yt comment with random pfp",
"cmds": ["rytc"],
"usage": "/rytc Hello Im Stark"
}
]
MEDIAINFO = [
{
"desc": "Gets MediaInfo of Replied Video",
"cmds": ["mediainfo", "mediadata"],
"usage": "/mediainfo [Reply to a video]"
}
]
MONGODB = [
{
"desc": "Adds MongoDB to database so that u can access",
"cmds": ["adddb"],
"usage": "/adddb [mongo uri]"
},
{
"desc": "Get access to the MongoDB uri u added using /adddb",
"cmds": ["showdb"],
"usage": "/showdb"
}
]
PASTE = [
{
"desc": "Pastes the given text in spacebin",
"cmds": ["paste"],
"usage": "/paste [reply to message/text file]"
}
]
QR = [
{
"desc": "Generates qr for given text",
"cmds": ["qr"],
"usage": "/qr [text to make qr]"
}
]
QUOTLY = [
{
"desc": "Converts your text into a quote",
"cmds": ["quote", "qt", "qu", "q"],
"usage": "/q [reply to a text message / give text as input]\nNote:\n1. No need to use args like -r for reply | reply set as default\n2.Either give text as reply or input"
}
]
RAYSO = [
{
"desc": "Create cool code snippets [Dark Mode]",
"cmds": ["rayso"],
"usage": "/rayso [ theme / do not specify for ramdom | reply to a message or a text file ]\nUse /rayso -l to list available themes"
},
{
"desc": "Create cool code snippets [Light Mode]",
"cmds": ["lrayso"],
"usage": "/lrayso [ theme / do not specify for ramdom | reply to a message or a text file ]\nUse /rayso -l to list available themes"
}
]
STICKERS = [
{
"desc": "Kang the replied sticker",
"cmds": ["kang"],
"usage": "/kang [ emoji | reply to the sticker ]"
},
{
"desc": "Delete sticker from ur pack",
"cmds": ["delsticker", "del_sticker"],
"usage": "/delsticker [ reply to sticker u want to delete ]"
},
{
"desc": "List all ur pack's",
"cmds": ["mypacks"],
"usage": "/mypacks"
},
{
"desc": "Creates a sticker with given text",
"cmds": ["stcr"],
"usage": "/stcr Mr.Stark"
}
]
SYSTEM = [
{
"desc": "Ping-Pong",
"cmds": ["p", "ping"],
},
{
"desc": " whether the bot is alive or not",
"cmds": ["alive"],
},
{
"desc": "Restarts the bot",
"cmds": ["restart"],
}
]
TELEGRAPH = [
{
"desc": "Creates a telegraph",
"cmds": ["/telegraph", "/tgraph"],
"usage": "/tgraph [title for the telegraph | reply to a text message]"
}
]
TRANSLATE = [
{
"desc": "Translates the replied message",
"cmds": ["tr", "translate"],
"usage": "/tr [language code | reply to a text message]"
}
]
UPDATE = [
{
"desc": "Updates the system",
"cmds": ["up", "update"],
},
{
"desc": "Deletes snippets in gitlab",
"cmds": ["d"],
}
]
URLUPLOADER = [
{
"desc": "download's file from given link",
"cmds": ["urlupload"],
"usage": "/urlupload [direct link of the file]"
}
]
WHOIS = [
{
"desc": "Know who the replied person is",
"cmds": ["info", "whois"],
"usage": "/info [user id/username | reply to a user message]"
}
]
WRITE = [
{
"desc": "Writes given text on a white paper",
"cmds": ["write"],
"usage": "/write hello"
}
]
| [] |
2024-01-10 | ChrisIsKing/Caribbean-Creole-Languages-Translation | backend~api~inference.py | from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE ="""{prompt}\n
CONTEXT: {context}\n
TEXT: {text}
"""
def inference(api_key: str = "",
model_name: str = "gpt-3.5-turbo",
temperature: int = 0,
prompt: str = "",
prompt_variables: dict = {},
max_tokens: int = 2048):
"""Inference function for langchain
Args:
api_key (str, optional): OpenAI API Key. Defaults to "".
model_name (str, optional): Name of OpenAI Model . Defaults to "gpt-3.5-turbo".
temperature (int, optional): Controls randomness. Defaults to 0.
prompt (str, optional): Prompt to be used for inference. Defaults to "".
prompt_variables (dict, optional): Variable to be inserted into template. Defaults to {}.
max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 2048.
"""
prompt_template = PromptTemplate(
input_variables=["prompt","context","text"],
template=PROMPT_TEMPLATE
)
llm = OpenAI(
temperature = temperature,
model_name=model_name,
openai_api_key = api_key
)
message = llm(prompt_template.format(
prompt=prompt,
context=prompt_variables.get("context",""),
# instruction=prompt_variables.get("instruction",""),
text=prompt_variables.get("text","")
), max_tokens=max_tokens)
if(message):
return message
| [
"{prompt}\n\n CONTEXT: {context}\n\n TEXT: {text}\n",
"context"
] |
2024-01-10 | ChrisIsKing/Caribbean-Creole-Languages-Translation | inference.py | from langchain.llms import OpenAI
from langchain import PromptTemplate
PROMPT_TEMPLATE ="""{prompt}
CONTEXT: {context}
INSTRUCTION: {instruction}
TEXT: {text}
"""
def inference(api_key: str = "",
model_name: str = "gpt-3.5-turbo",
temperature: int = 0,
prompt: str = "",
prompt_variables: dict = {},
max_tokens: int = 2048):
"""Inference function for langchain
Args:
api_key (str, optional): OpenAI API Key. Defaults to "".
model_name (str, optional): Name of OpenAI Model . Defaults to "gpt-3.5-turbo".
temperature (int, optional): Controls randomness. Defaults to 0.
prompt (str, optional): Prompt to be used for inference. Defaults to "".
prompt_variables (dict, optional): Variable to be inserted into template. Defaults to {}.
max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 2048.
"""
prompt_template = PromptTemplate(
input_variables=["prompt","context","instruction","text"],
template=PROMPT_TEMPLATE
)
llm = OpenAI(
temperature = temperature,
model_name=model_name,
openai_api_key = api_key
)
message = llm(prompt_template.format(
prompt=prompt,
context=prompt_variables.get("context",""),
instruction=prompt_variables.get("instruction",""),
text=prompt_variables.get("text","")
), max_tokens=max_tokens)
if(message):
return message
| [
"context",
"{prompt}\n CONTEXT: {context}\n INSTRUCTION: {instruction}\n TEXT: {text}\n",
"instruction"
] |
2024-01-10 | opentutor/opentutor-classifier | opentutor_classifier~opentutor_classifier~composite~predict.py | #
# This software is Copyright ©️ 2020 The University of Southern California. All Rights Reserved.
# Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and subject to the full license file found in the root of this software deliverable. Permission to make commercial use of this software may be obtained by contacting: USC Stevens Center for Innovation University of Southern California 1150 S. Olive Street, Suite 2300, Los Angeles, CA 90115, USA Email: [email protected]
#
# The full terms of this copyright and license should always be found in the root directory of this software deliverable as "license.txt" and if these terms are not found with this software, please contact the USC Stevens Center for the full license.
#
from opentutor_classifier import (
AnswerClassifier,
ClassifierConfig,
AnswerClassifierInput,
AnswerClassifierResult,
)
import traceback
import asyncio
from opentutor_classifier.lr2.predict import LRAnswerClassifier
from opentutor_classifier.openai.predict import OpenAIAnswerClassifier
from typing import Dict, Any, Tuple, Union, cast
from opentutor_classifier.logger import get_logger
log = get_logger()
class CompositeAnswerClassifier(AnswerClassifier):
lr_classifier: AnswerClassifier = LRAnswerClassifier()
openai_classifier: AnswerClassifier = OpenAIAnswerClassifier()
async def run_lr_evaluate(
self, answer: AnswerClassifierInput
) -> AnswerClassifierResult:
result = await self.lr_classifier.evaluate(answer)
return result
async def run_openai_evaluate(
self, answer: AnswerClassifierInput
) -> AnswerClassifierResult:
try:
result = await self.openai_classifier.evaluate(answer)
return result
except BaseException as e:
raise e
def configure(self, config: ClassifierConfig) -> "AnswerClassifier":
self.lr_classifier = self.lr_classifier.configure(config)
self.openai_classifier = self.openai_classifier.configure(config)
return self
async def evaluate(self, answer: AnswerClassifierInput) -> AnswerClassifierResult:
# lr_task = asyncio.wait_for(self.run_lr_evaluate(answer), timeout=20)
openai_task = asyncio.wait_for(self.run_openai_evaluate(answer), timeout=10.0)
lr_task = self.run_lr_evaluate(answer)
results: Tuple[
Union[AnswerClassifierResult, BaseException],
Union[AnswerClassifierResult, BaseException],
] = await asyncio.gather(lr_task, openai_task, return_exceptions=True)
if isinstance(results[0], BaseException):
log.info("lr classifier returned exception:")
traceback.print_exception(
BaseException, results[0], results[0].__traceback__
)
if isinstance(results[1], BaseException):
log.info("openai returned exception:")
traceback.print_exception(
BaseException, results[1], results[1].__traceback__
)
if not isinstance(results[1], AnswerClassifierResult):
print("returning LR2 results")
print(str(cast(AnswerClassifierResult, results[0]).to_dict()))
return cast(AnswerClassifierResult, results[0])
else:
print("returning openai results")
print(str(cast(AnswerClassifierResult, results[1]).to_dict()))
return cast(AnswerClassifierResult, results[1])
def get_last_trained_at(self) -> float:
return self.lr_classifier.get_last_trained_at()
def save_config_and_model(self) -> Dict[str, Any]:
return self.lr_classifier.save_config_and_model()
| [] |
2024-01-10 | opentutor/opentutor-classifier | opentutor_classifier~tests~test_train_classifier.py | #
# This software is Copyright ©️ 2020 The University of Southern California. All Rights Reserved.
# Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and subject to the full license file found in the root of this software deliverable. Permission to make commercial use of this software may be obtained by contacting: USC Stevens Center for Innovation University of Southern California 1150 S. Olive Street, Suite 2300, Los Angeles, CA 90115, USA Email: [email protected]
#
# The full terms of this copyright and license should always be found in the root directory of this software deliverable as "license.txt" and if these terms are not found with this software, please contact the USC Stevens Center for the full license.
#
from os import path
from typing import List
import pytest
import responses
import os
from opentutor_classifier import (
ExpectationTrainingResult,
ARCH_LR2_CLASSIFIER,
DEFAULT_LESSON_NAME,
ARCH_COMPOSITE_CLASSIFIER,
ARCH_OPENAI_CLASSIFIER,
)
from opentutor_classifier.dao import (
ModelRef,
find_predicton_config_and_pickle,
)
from opentutor_classifier.config import confidence_threshold_default
from opentutor_classifier.lr2.constants import MODEL_FILE_NAME
from opentutor_classifier.openai.constants import GROUNDTRUTH_FILENAME
from opentutor_classifier.openai.train import OpenAIGroundTruth
from .utils import (
assert_testset_accuracy,
assert_train_expectation_results,
create_and_test_classifier,
fixture_path,
read_example_testset,
test_env_isolated,
train_classifier,
train_default_classifier,
_TestExpectation,
run_classifier_testset,
)
CONFIDENCE_THRESHOLD_DEFAULT = confidence_threshold_default()
@pytest.fixture(scope="module")
def data_root() -> str:
return fixture_path("data")
@pytest.fixture(scope="module")
def shared_root(word2vec) -> str:
return path.dirname(word2vec)
@pytest.mark.parametrize(
"lesson,arch,expected_length", [("candles", ARCH_OPENAI_CLASSIFIER, 85)]
)
def test_train_openai_ground_truth(
tmpdir,
data_root: str,
shared_root: str,
lesson: str,
arch: str,
expected_length: int,
):
os.environ["OPENAI_API_KEY"] = "fake"
with test_env_isolated(
tmpdir, data_root, shared_root, lesson=lesson, arch=arch
) as test_config:
train_classifier(lesson, test_config, False)
dao = test_config.find_data_dao()
config_and_model = find_predicton_config_and_pickle(
ModelRef(arch, lesson, GROUNDTRUTH_FILENAME), dao
)
result: OpenAIGroundTruth = OpenAIGroundTruth.from_dict(config_and_model.model)
assert len(result.training_answers) == expected_length
@pytest.mark.parametrize("lesson", [("question1"), ("question2")])
def test_outputs_models_at_specified_root(
tmpdir, data_root: str, shared_root: str, lesson: str
):
with test_env_isolated(
tmpdir, data_root, shared_root, lesson=lesson
) as test_config:
result = train_classifier(lesson, test_config)
assert path.exists(path.join(result.models, MODEL_FILE_NAME))
assert path.exists(path.join(result.models, "config.yaml"))
@pytest.mark.parametrize(
"arch,expected_model_file_name",
[
(ARCH_LR2_CLASSIFIER, MODEL_FILE_NAME),
],
)
def test_outputs_models_at_specified_model_root_for_default_model(
arch: str, expected_model_file_name: str, tmpdir, data_root: str, shared_root: str
):
with test_env_isolated(
tmpdir, data_root, shared_root, lesson=DEFAULT_LESSON_NAME
) as test_config:
result = train_default_classifier(test_config)
assert path.exists(path.join(result.models, expected_model_file_name))
def _test_train_and_predict(
lesson: str,
arch: str,
# confidence_threshold for now determines whether an answer
# is really classified as GOOD/BAD (confidence >= threshold)
# or whether it is interpreted as NEUTRAL (confidence < threshold)
confidence_threshold: float,
expected_training_result: List[ExpectationTrainingResult],
expected_accuracy: float,
tmpdir,
data_root: str,
shared_root: str,
):
with test_env_isolated(
tmpdir, data_root, shared_root, arch=arch, lesson=lesson
) as test_config:
train_result = train_classifier(lesson, test_config)
assert path.exists(train_result.models)
assert_train_expectation_results(
train_result.expectations, expected_training_result
)
testset = read_example_testset(
lesson, confidence_threshold=confidence_threshold
)
assert_testset_accuracy(
arch,
train_result.models,
shared_root,
testset,
expected_accuracy=expected_accuracy,
)
@pytest.mark.parametrize(
"example,arch,confidence_threshold,expected_training_result,expected_accuracy",
[
(
"missing-expectation-training-data",
ARCH_COMPOSITE_CLASSIFIER,
CONFIDENCE_THRESHOLD_DEFAULT,
[
ExpectationTrainingResult(expectation_id="1", accuracy=0.6875),
ExpectationTrainingResult(expectation_id="2", accuracy=0.6875),
],
0.4,
),
(
"ies-rectangle",
ARCH_LR2_CLASSIFIER,
CONFIDENCE_THRESHOLD_DEFAULT,
[
ExpectationTrainingResult(expectation_id="0", accuracy=0.90),
ExpectationTrainingResult(expectation_id="1", accuracy=0.95),
ExpectationTrainingResult(expectation_id="2", accuracy=0.95),
],
1,
),
(
"candles",
ARCH_LR2_CLASSIFIER,
CONFIDENCE_THRESHOLD_DEFAULT,
[
ExpectationTrainingResult(expectation_id="0", accuracy=0.84),
ExpectationTrainingResult(expectation_id="1", accuracy=0.81),
ExpectationTrainingResult(expectation_id="2", accuracy=0.81),
ExpectationTrainingResult(expectation_id="3", accuracy=0.95),
],
0.85,
),
],
)
@pytest.mark.slow
def test_train_and_predict_slow(
example: str,
arch: str,
# confidence_threshold for now determines whether an answer
# is really classified as GOOD/BAD (confidence >= threshold)
# or whether it is interpreted as NEUTRAL (confidence < threshold)
confidence_threshold: float,
expected_training_result: List[ExpectationTrainingResult],
expected_accuracy: float,
tmpdir,
data_root: str,
shared_root: str,
monkeypatch,
):
monkeypatch.setenv("OPENAI_API_KEY", "dummy")
_test_train_and_predict(
example,
arch,
confidence_threshold,
expected_training_result,
expected_accuracy,
tmpdir,
data_root,
shared_root,
)
@pytest.mark.parametrize(
"lesson,arch",
[
(
"shapes",
ARCH_LR2_CLASSIFIER,
),
],
)
def test_predict_on_model_trained_with_cluster_features_but_cluster_features_later_disabled(
lesson: str,
arch: str,
tmpdir,
data_root: str,
shared_root: str,
monkeypatch,
):
with test_env_isolated(
tmpdir, data_root, shared_root, arch=arch, lesson=lesson
) as test_config:
monkeypatch.setenv("TRAIN_QUALITY_DEFAULT", str(2))
train_result = train_classifier(lesson, test_config)
assert path.exists(train_result.models)
monkeypatch.setenv("TRAIN_QUALITY_DEFAULT", str(0))
testset = read_example_testset(lesson)
run_classifier_testset(arch, train_result.models, shared_root, testset)
@pytest.mark.parametrize(
"lesson,use_default,arch",
[
(
"shapes",
False,
ARCH_LR2_CLASSIFIER,
),
(
"shapes",
True,
ARCH_LR2_CLASSIFIER,
),
],
)
def test_predict_off_model_trained_with_cluster_features_but_cluster_features_later_enabled(
lesson: str,
use_default: bool,
arch: str,
tmpdir,
data_root: str,
shared_root: str,
monkeypatch,
):
with test_env_isolated(
tmpdir,
data_root,
shared_root,
arch=arch,
lesson=lesson,
is_default_model=use_default,
) as test_config:
monkeypatch.setenv("TRAIN_QUALITY_DEFAULT", str(0))
train_result = (
train_default_classifier(test_config)
if use_default
else train_classifier(lesson, test_config)
)
assert path.exists(train_result.models)
import logging
logging.warning(f"models={train_result.models}")
monkeypatch.setenv("TRAIN_QUALITY_DEFAULT", str(2))
testset = read_example_testset(lesson)
run_classifier_testset(
arch,
path.join(path.dirname(train_result.models), DEFAULT_LESSON_NAME)
if use_default
else train_result.models,
shared_root,
testset,
)
def _test_train_and_predict_specific_answers_slow(
lesson: str,
arch: str,
evaluate_input_list: List[str],
expected_training_result: List[ExpectationTrainingResult],
expected_evaluate_result: List[_TestExpectation],
tmpdir,
data_root: str,
shared_root: str,
):
with test_env_isolated(
tmpdir, data_root, shared_root, arch, lesson=lesson
) as test_config:
train_result = train_classifier(lesson, test_config)
assert path.exists(train_result.models)
assert_train_expectation_results(
train_result.expectations, expected_training_result
)
for evaluate_input, ans in zip(evaluate_input_list, expected_evaluate_result):
create_and_test_classifier(
lesson,
path.split(path.abspath(train_result.models))[0],
shared_root,
evaluate_input,
[ans],
arch=arch,
)
@pytest.mark.slow
@pytest.mark.parametrize(
"lesson,arch,evaluate_input_list,expected_training_result,expected_evaluate_result",
[
(
"ies-rectangle",
ARCH_LR2_CLASSIFIER,
[
# "5",
# "It is 3 and 7 and 4 and 0",
# "30 and 74",
"37 x 40",
# "thirty seven by forty",
# "forty by thirty seven",
# "37 by forty",
# "thirty-seven by forty",
# "37.0 by 40.000",
# "thirty seven by fourty",
],
[ExpectationTrainingResult(expectation_id="2", accuracy=0.89)],
[
# _TestExpectation(evaluation="Bad", score=0.80, expectation=2),
# _TestExpectation(evaluation="Bad", score=0.80, expectation=2),
# _TestExpectation(evaluation="Bad", score=0.80, expectation=2),
# _TestExpectation(evaluation="Good", score=0.80, expectation=2),
_TestExpectation(evaluation="Good", score=0.80, expectation="2"),
# _TestExpectation(evaluation="Good", score=0.80, expectation=2),
# _TestExpectation(evaluation="Good", score=0.80, expectation=2),
# _TestExpectation(evaluation="Good", score=0.80, expectation=2),
# _TestExpectation(evaluation="Good", score=0.80, expectation=2),
# _TestExpectation(evaluation="Good", score=0.80, expectation=2),
],
),
],
)
def test_train_and_predict_specific_answers_slow(
lesson: str,
arch: str,
evaluate_input_list: List[str],
expected_training_result: List[ExpectationTrainingResult],
expected_evaluate_result: List[_TestExpectation],
tmpdir,
data_root: str,
shared_root: str,
):
_test_train_and_predict_specific_answers_slow(
lesson,
arch,
evaluate_input_list,
expected_training_result,
expected_evaluate_result,
tmpdir,
data_root,
shared_root,
)
@responses.activate
@pytest.mark.parametrize(
"lesson,arch,evaluate_input_list,expected_evaluate_result",
[
(
# It's important to test what would happen
# if--in the past--we had trained a model for a lesson
# but then subsequently lost the actual trained model.
# This is an important case, because having trained the model
# might have generated features which would live on in the config/db,
# and those generated features would cause shape-errors at prediction time
# when used with the default model
"ies-mixture-with-trained-features-but-model-is-lost",
ARCH_LR2_CLASSIFIER,
["a"],
[
_TestExpectation(evaluation="Bad", score=0.50, expectation="2"),
],
)
],
)
def test_default_classifier_train_and_predict(
lesson: str,
arch: str,
evaluate_input_list: List[str],
expected_evaluate_result: List[_TestExpectation],
data_root: str,
shared_root: str,
tmpdir,
):
with test_env_isolated(
tmpdir,
data_root,
shared_root,
arch=arch,
is_default_model=True,
lesson=lesson,
) as config:
train_result = train_default_classifier(config=config)
assert path.exists(train_result.models)
for evaluate_input, ans in zip(evaluate_input_list, expected_evaluate_result):
create_and_test_classifier(
lesson,
path.split(path.abspath(train_result.models))[0],
shared_root,
evaluate_input,
[ans],
arch=arch,
)
| [] |
2024-01-10 | opentutor/opentutor-classifier | opentutor_classifier~opentutor_classifier~openai~predict.py | #
# This software is Copyright ©️ 2020 The University of Southern California. All Rights Reserved.
# Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and subject to the full license file found in the root of this software deliverable. Permission to make commercial use of this software may be obtained by contacting: USC Stevens Center for Innovation University of Southern California 1150 S. Olive Street, Suite 2300, Los Angeles, CA 90115, USA Email: [email protected]
#
# The full terms of this copyright and license should always be found in the root directory of this software deliverable as "license.txt" and if these terms are not found with this software, please contact the USC Stevens Center for the full license.
#
from opentutor_classifier import (
AnswerClassifier,
ClassifierConfig,
AnswerClassifierInput,
AnswerClassifierResult,
ExpectationClassifierResult,
ExpectationConfig,
ARCH_OPENAI_CLASSIFIER,
)
from opentutor_classifier.dao import ModelRef, find_predicton_config_and_pickle
from opentutor_classifier.openai.shared import OpenAIGroundTruth
from opentutor_classifier.speechact import SpeechActClassifier
from opentutor_classifier.config import EVALUATION_GOOD, EVALUATION_BAD
from opentutor_classifier.openai.openai_api import (
Answer,
OpenAICall,
OpenAIResultContent,
openai_create,
)
from .constants import (
SYSTEM_ASSIGNMENT,
USER_GUARDRAILS,
ANSWER_TEMPLATE,
GROUNDTRUTH_FILENAME,
)
from typing import Dict, List, Any
class OpenAIAnswerClassifier(AnswerClassifier):
speech_act_classifier: SpeechActClassifier = SpeechActClassifier()
config: ClassifierConfig
def configure(self, config: ClassifierConfig) -> "AnswerClassifier":
self.config = config
return self
async def evaluate(self, answer: AnswerClassifierInput) -> AnswerClassifierResult:
if answer.config_data is None:
raise Exception("missing question data in answer")
model_ref = ModelRef(
ARCH_OPENAI_CLASSIFIER, self.config.model_name, GROUNDTRUTH_FILENAME
)
ground_truth_dict = find_predicton_config_and_pickle(
model_ref, self.config.dao
).model
if ground_truth_dict is not None:
ground_truth = OpenAIGroundTruth.from_dict(ground_truth_dict)
else:
ground_truth = None
concepts: List[ExpectationConfig] = answer.config_data.expectations
call = OpenAICall(
system_assignment=SYSTEM_ASSIGNMENT,
user_concepts=concepts,
user_answer=[answer.input_sentence],
user_template=ANSWER_TEMPLATE,
user_guardrails=USER_GUARDRAILS,
user_groundtruth=ground_truth,
)
response: OpenAIResultContent = await openai_create(call_data=call)
expectations: List[ExpectationClassifierResult] = []
print(response.to_json())
open_ai_answer: Answer = response.answers[next(iter(response.answers))]
for concept_key in open_ai_answer.concepts.keys():
concept = open_ai_answer.concepts[concept_key]
evaluation = EVALUATION_GOOD if concept.is_known else EVALUATION_BAD
concept_result = ExpectationClassifierResult(
expectation_id=concept_key,
evaluation=evaluation,
score=concept.confidence,
)
expectations.append(concept_result)
result = AnswerClassifierResult(input=answer, expectation_results=expectations)
result.speech_acts[
"metacognitive"
] = self.speech_act_classifier.check_meta_cognitive(result)
result.speech_acts["profanity"] = self.speech_act_classifier.check_profanity(
result
)
return result
def get_last_trained_at(self) -> float:
return 0.0
def save_config_and_model(self) -> Dict[str, Any]:
raise NotImplementedError()
| [] |
2024-01-10 | ortegaalfredo/aiTrans | aiTrans.py | #!/usr/bin/python
import os
import argparse
import sys
from openaiConnector import *
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--source", help="Source file to be read", required=True)
parser.add_argument("-l", "--language", help="Language of the source file", default="python")
parser.add_argument("-a", "--allfile", help="Whether to generate code for the whole file or just for the code snippets", action="store_true")
args = parser.parse_args()
source = args.source
language = args.language
allfile = args.allfile
# Remove code blocks
def remove_code_blocks(code):
new_code = ""
for line in code.splitlines():
if not line.startswith("```"):
new_code += line + "\n"
return new_code
# Load OpenAI API key
api_key = os.environ.get('OPENAI_API_KEY')
if api_key is not None:
print("Loaded OpenAI API key from Environment", file=sys.stderr)
else:
with open('api-key.txt') as f:
api_key = f.read().strip()
print("Loaded OpenAI API key from file.", file=sys.stderr)
# Check if API key is valid
check_api_key_validity(api_key)
# Open source file
with open(source) as s:
if allfile:
# Read whole file
l = s.read()
# Prompt
prompt = "Write the raw valid {} code for this, ready to be executed, please include comments on each function:\n\n{}".format(language, l)
# Get code from GPT
code = call_AI_chatGPT(prompt)
# Remove code blocks
code = remove_code_blocks(code)
# Print code
print(code)
else:
keywords=['assert', 'async', 'await', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'lambda', 'pass', 'raise', 'return', 'try', 'while', 'with', 'yield']
for line in s:
if line.startswith(tuple(keywords)) or line.startswith("#"):
print(line)
else:
# Prompt
prompt = "Write the raw valid {} code for this, ready to be embedded into another {} code file:\n{}".format(language, language, line)
# Get code from GPT
code = call_AI_chatGPT(prompt)
# Remove code blocks
code = remove_code_blocks(code)
# Print code
print(code)
| [
"Write the raw valid PLACEHOLDER code for this, ready to be embedded into another PLACEHOLDER code file:\nPLACEHOLDER",
"Write the raw valid PLACEHOLDER code for this, ready to be executed, please include comments on each function:\n\nPLACEHOLDER"
] |
2024-01-10 | ortegaalfredo/aiTrans | src~bootTrans.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This is the transpiler 'bootloader'; it was written by hand.
# It uses the AI to translate the transpiler's own pseudocode into the final transpiler.
# After this, the transpiler can transpile itself.
import os,argparse,sys
import openai
def check_api_key_validity(key):
try:
openai.api_key = key
print("OpenAI API key is valid", file=sys.stderr)
except openai.OpenAIError:
print("Invalid OpenAI API key", file=sys.stderr)
exit()
# ---------- OpenAI interface
# The bootloader only supports using chatGPT as transpiler.
def call_AI_chatGPT(prompt):
messages = [
{
"role": "system",
"content": "You are a excellent programmer. Write the code to execute the given task. Always write only the raw code and nothing more, no quotes. Never write english, nor code delimiters.",
},
{"role": "user", "content": prompt},
]
model = "gpt-3.5-turbo"
temperature = 0
max_tokens = 2048
response = openai.ChatCompletion.create(
messages=messages, model=model, temperature=temperature, max_tokens=max_tokens
)
response = response.choices[0]
response = response["message"]
response = response["content"]
return response
# Increase to 1.0 to introduce randomness in answers
temperature = 0
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-s", dest="source")
parser.add_argument("-l", dest="language",default="python")
parser.add_argument("-a", action="store_true", dest="allfile")
args = parser.parse_args()
# import api key
api_key = os.environ.get("OPENAI_API_KEY")
if (api_key is None) or (len(api_key)==0): # try to load apikey from file
try:
api_key=open('api-key.txt','rb').read().strip().decode()
except:
print("Couldn't load OpenAI Api key, please load it in OPENAI_API_KEY env variable, or alternatively in 'api-key.txt' file.", file=sys.stderr)
exit(0)
else: print('Loaded api key from environment variable.',file=sys.stderr)
check_api_key_validity(api_key)
s=open(args.source,"rb")
if args.allfile: # Feed the whole description, without parsing it. This is the equivalent of using the ChatGPT web interface.
l=s.read()
l=l[:-1]
l=l.decode()
l="Write the raw valid %s code for this, ready to be executed, please include comments on each function:\n\n %s" % (args.language,l)
code=call_AI_chatGPT(l)
# Remove ChatGPTs code blocks.
c=""
for i in code.splitlines():
if i.find('```')<0:
c+=i+'\n'
print(c)
else: # Feed the description line by line
#Those are valid only for python
keywords=['#','assert', 'async', 'await', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'lambda', 'pass', 'raise', 'return', 'try', 'while', 'with', 'yield']
line=0
for l in s.readlines():
l=l[:-1]
l=l.decode()
tabs=l[:l.find(l.strip())]
code=""
#keyword line
for k in keywords:
if (l.strip().find(k)==0):
code=l
#convert line
if code=="" and len(l)>2:
l="Write the raw valid %s code for this, ready to be embedded into another %s code:\n %s" % (args.language,args.language,l)
code=call_AI_chatGPT(l)
if len(code.splitlines())>1:
# Remove ChatGPTs '```'
c=""
for i in code.splitlines():
if i.find('```')<0:
c+=i+'\n'
code=c
print(tabs+code)
line+=1
| [
"You are a excellent programmer. Write the code to execute the given task. Always write only the raw code and nothing more, no quotes. Never write english, nor code delimiters."
] |
2024-01-10 | scottjmoore/CrewAITutorial | crewai_example.py | from langchain.llms import Ollama
from langchain.tools import DuckDuckGoSearchRun
from crewai import Agent, Task, Crew, Process
ollama_openhermes=Ollama(model='openhermes:latest')
search_tool=DuckDuckGoSearchRun()
researcher=Agent(
role='Senior Research Analyst',
goal='Uncover cutting-edge developments in AI and data science in',
backstory="""You are a Senior Research Analyst at a leading tech think tank.
Your expertise lies in identifying emerging trends and technologies in AI and
data science. You have a knack for dissecting complex data and presenting
actionable insights.""",
verbose=True,
allow_delegation=False,
tools=[search_tool],
llm=ollama_openhermes
)
writer=Agent(
role='Tech Content Strategist',
goal='Craft compelling content on tech advancements',
backstory="""You are a renowned Tech Content Strategist, known for your insightful
and engaging articles on technology and innovation. With a deep understanding of
the tech industry, you transform complex concepts into compelling narratives.""",
verbose=True,
allow_delegation=False,
llm=ollama_openhermes
)
research_task = Task(
description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
Identify key trends, breakthrough technologies, and potential industry impacts.
Compile your findings in a detailed report. Your final answer MUST be a full analysis report""",
agent=researcher
)
writer_task = Task(
description="""Using the insights from the researcher's report, develop an engaging blog
post that highlights the most significant AI advancements.
Your post should be informative yet accessible, catering to a tech-savvy audience.
Aim for a narrative that captures the essence of these breakthroughs and their
implications for the future. Your final answer MUST be a full blog post of at least 500 words.""",
agent=writer
)
crew = Crew(
agents=[researcher, writer],
tasks=[research_task, writer_task],
verbose=2,
process=Process.sequential
)
result = crew.kickoff()
print("######################")
print(result) | [] |
2024-01-10 | davidlevy247/ChatGPT_SummarizePDF | pdf_summarizer.py | import os
import openai
import re
import base64
import getpass
import sys
import logging
from PyPDF2 import PdfReader
from tkinter import Tk
from tkinter.filedialog import askopenfilename
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from datetime import datetime
from colorama import Fore, Style
# Initialize logging
logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
CONFIG_FILE = 'config.txt'
def get_encryption_key():
logging.info('Getting encryption key.')
password = getpass.getpass("Enter your password: ").encode() # Get the password for encryption
salt = b'\x00'*16 # Just for simplicity we use static salt
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
)
logging.info('Encryption key obtained.')
return base64.urlsafe_b64encode(kdf.derive(password))
def encrypt_data(data, key):
logging.info('Encrypting data.')
f = Fernet(key)
logging.info('Data encrypted.')
return f.encrypt(data)
def decrypt_data(data, key):
logging.info('Decrypting data.')
f = Fernet(key)
logging.info('Data decrypted.')
return f.decrypt(data)
def load_config(config_file, encryption_key=None):
logging.info('Loading configuration file.')
if os.path.exists(config_file):
with open(config_file, 'rb') as f:
data = f.read()
if encryption_key is not None:
try:
data = decrypt_data(data, encryption_key)
except:
logging.error('Error occurred while decrypting data.')
return None, None
logging.info('Configuration file loaded.')
return data.decode().split('\n')
logging.error('Configuration file not found.')
return None, None
def save_config(config_file, api_key, prompt, encryption_key=None):
data = f"{api_key}\n{prompt}"
if encryption_key is not None:
data = encrypt_data(data.encode(), encryption_key)
else:
data = data.encode()
with open(config_file, 'wb') as f:
f.write(data)
def is_encrypted(config_file):
with open(config_file, 'rb') as f:
first_line = f.readline().strip()
return not re.match(r'sk-\w+', first_line.decode(errors='ignore'))
if os.path.exists(CONFIG_FILE):
attempts = 3
while attempts > 0:
# Only ask for a password if the file is encrypted.
if is_encrypted(CONFIG_FILE):
encryption_key = get_encryption_key()
else:
encryption_key = None
api_key, prompt = load_config(CONFIG_FILE, encryption_key)
if api_key is not None and prompt is not None:
break # Successful decryption
print(f"{Fore.RED}Incorrect password.{Style.RESET_ALL} {Fore.GREEN}Remaining Attempts:{Style.RESET_ALL} {Fore.RED}{attempts-1}{Style.RESET_ALL}")
attempts -= 1
if attempts == 0:
if input(f"{Fore.RED}Unable to decrypt the configuration file.{Style.RESET_ALL} {Fore.GREEN}Would you like to create a new one?{Style.RESET_ALL} [y/N]: ").strip().lower() in ['y', 'yes']:
api_key = input(f"{Fore.GREEN}Enter your OpenAI API key: {Style.RESET_ALL}")
sys.stdout.write(Fore.RED + "Enter your prompt to ask ChatGPT for your desired results per page. " + Style.RESET_ALL)
sys.stdout.write("For example, 'Please summarize the following single page from a PDF book into coherent easy to understand paragraphs without indentations or early line breaks; sometimes a single page may be impossible to summarize into one to three paragraphs, so when that happens report what the problem is with the page:'\n")
prompt = input(f"{Fore.GREEN}Enter your prompt: {Style.RESET_ALL}")
use_encryption = input(f"{Fore.GREEN}Would you like to use encryption for the config file?{Style.RESET_ALL} [y/N]: ").strip().lower() in ['y', 'yes']
encryption_key = get_encryption_key() if use_encryption else None
else:
print("Exiting program.")
exit(1)
else: # Proceed with the existing decrypted config
if input(f"{Fore.GREEN}Would you like to change the API key?{Style.RESET_ALL} [y/N]: ").strip().lower() in ['y', 'yes']:
api_key = input("{Fore.GREEN}Enter your new OpenAI API key:{Style.RESET_ALL} ")
if input(f"{Fore.GREEN}Would you like to change the prompt? Current prompt:{Style.RESET_ALL} '{prompt}' [y/N]: ").strip().lower() in ['y', 'yes']:
sys.stdout.write(Fore.RED + "Your prompt to ask ChatGPT for your desired results per page should be carefully written. " + Style.RESET_ALL)
sys.stdout.write("For example, 'Please summarize the following single page from a PDF book into coherent easy to understand paragraphs without indentations or early line breaks; sometimes a single page may be impossible to summarize into one to three paragraphs, so when that happens report what the problem is with the page:'\n")
prompt = input(f"{Fore.GREEN}Enter your prompt: {Style.RESET_ALL}")
if input(f"{Fore.GREEN}Would you like to change the encryption status of the config file?{Style.RESET_ALL} [y/N]: ").strip().lower() in ['y', 'yes']:
use_encryption = input(f"{Fore.GREEN}Would you like to use encryption for the config file?{Style.RESET_ALL} [y/N]: ").strip().lower() in ['y', 'yes']
encryption_key = get_encryption_key() if use_encryption else None
os.rename(CONFIG_FILE, f"{CONFIG_FILE}.{datetime.now().strftime('%Y%m%d%H%M%S')}.bak")
else: # config.txt does not exist
sys.stdout.write(Fore.GREEN + "No configuration file found. Let's create a new one." + Style.RESET_ALL + "\n")
api_key = input("Enter your OpenAI API key: ")
sys.stdout.write(Fore.RED + "Enter your prompt to ask ChatGPT for your desired results per page. " + Style.RESET_ALL)
sys.stdout.write("For example, 'Please summarize the following single page from a PDF book into coherent easy to understand paragraphs without indentations or early line breaks; sometimes a single page may be impossible to summarize into one to three paragraphs, so when that happens report what the problem is with the page:'\n")
prompt = input(f"{Fore.GREEN}Enter your prompt: {Style.RESET_ALL}")
use_encryption = input(f"{Fore.GREEN}Would you like to use encryption for the config file?{Style.RESET_ALL} [y/N]: ").strip().lower() in ['y', 'yes']
encryption_key = get_encryption_key() if use_encryption else None
# Save new configuration
save_config(CONFIG_FILE, api_key, prompt, encryption_key)
# Set up OpenAI API key
openai.api_key = api_key
#how many pages back to include
ROLLING_WINDOW_SIZE = 2
# max characters the API will accept
API_CHARACTER_LIMIT = 4096
#calculate prompt length and have that available to account for truncation
PROMPT_LENGTH = len(prompt)
def get_page_text(pages, start_page, end_page):
text = ''
for page in pages[start_page:end_page+1]:
page_text = page.extract_text()
if page_text:
text += page_text + "\n"
return text
def truncate_text(text):
if len(text) > API_CHARACTER_LIMIT - PROMPT_LENGTH:
text = text[-(API_CHARACTER_LIMIT - PROMPT_LENGTH):] # Ensure text + prompt doesn't exceed API character limit
return text
def summarize_text(text):
# Check if the text is empty or consists only of whitespace
if not text or text.isspace():
return None
# Use OpenAI API to summarize text
text = truncate_text(text) # Truncate text if it's too long
response = openai.Completion.create(
engine="text-davinci-002",
prompt=f"{prompt}\n\n{text}",
max_tokens=100,
n=1,
stop=None,
temperature=0.5,
)
summary = response.choices[0].text.strip()
return summary
def is_text_suitable_for_summarization(text):
# Check if the text contains at least some alphabetical characters
return bool(re.search(r'[A-Za-z]+', text))
def main():
# Open file selection dialog
Tk().withdraw()
pdf_file_path = askopenfilename(filetypes=[("PDF files", "*.pdf")])
if not pdf_file_path:
print("No file selected. Exiting.")
return
# Read PDF file
pdf_reader = PdfReader(pdf_file_path)
# Get total number of pages
total_pages = len(pdf_reader.pages)
# Create output text file with the same name as the source PDF
input_file_name = os.path.splitext(os.path.basename(pdf_file_path))[0]
output_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), f"{input_file_name}_summary.txt")
with open(output_file_path, 'w', encoding='utf-8') as output_file:
print("Starting summarization process...")
for page_num in range(total_pages):
# Show the page currently being processed and the total number of pages
print(f"{Fore.GREEN}Processing page {page_num+1} of {total_pages}...{Style.RESET_ALL}")
start_page = max(0, page_num - ROLLING_WINDOW_SIZE) # take the current page and the ROLLING_WINDOW_SIZE previous pages
page_text = get_page_text(pdf_reader.pages, start_page, page_num)
page_text = truncate_text(page_text)
if not page_text or not is_text_suitable_for_summarization(page_text):
print(f"{Fore.RED}Page {page_num+1}: Unable to extract suitable text for summarization. Skipping.{Style.RESET_ALL}")
output_file.write(f"Page {page_num+1}: Unable to extract suitable text for summarization. Skipping.\n\n")
continue
summary = summarize_text(page_text)
if summary and is_text_suitable_for_summarization(summary):
print(f"Page {page_num+1} summary: {summary}")
output_file.write(f"Page {page_num+1} Summary:\n{summary}\n\n")
else:
print(f"Page {page_num+1}: Failed to generate a suitable summary.")
output_file.write(f"Page {page_num+1}: Failed to generate a suitable summary.\n\n")
output_file.flush()
os.fsync(output_file.fileno())
print("Summarization process completed.")
print(f"Summarization complete. Results saved to: {output_file_path}")
if __name__ == "__main__":
main()
| [
"1",
"PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | prairiewolf11/PaddleSpeech | paddlespeech~s2t~models~whisper~whipser.py | # MIT License, Copyright (c) 2022 OpenAI.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Modified from OpenAI Whisper 2022 (https://github.com/openai/whisper/whisper)
import os
from dataclasses import dataclass
from dataclasses import field
from functools import lru_cache
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import numpy as np
import paddle
import paddle.nn.functional as F
import paddlespeech.s2t.modules.align as paddlespeech_nn
import soundfile
import tqdm
from paddle import nn
from paddle.distribution import Categorical
from paddlespeech.s2t.models.whisper import utils
from paddlespeech.s2t.models.whisper.tokenizer import get_tokenizer
from paddlespeech.s2t.models.whisper.tokenizer import LANGUAGES
from paddlespeech.s2t.models.whisper.tokenizer import Tokenizer
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
_MODELS = ["large"]
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = utils.exact_div(
N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
@dataclass
class ModelDimensions:
n_mels: int
n_audio_ctx: int
n_audio_state: int
n_audio_head: int
n_audio_layer: int
n_vocab: int
n_text_ctx: int
n_text_state: int
n_text_head: int
n_text_layer: int
class LayerNorm(paddlespeech_nn.LayerNorm):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
return super().forward(x)
class Linear(paddlespeech_nn.Linear):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
return F.linear(x, self.weight, None
if self.bias is None else self.bias)
class Conv1d(paddlespeech_nn.Conv1D):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
return super().forward(x)
class MultiHeadAttention(nn.Layer):
def __init__(self, n_state: int, n_head: int):
super().__init__()
self.n_head = n_head
self.query = Linear(n_state, n_state, bias_attr=True)
self.key = Linear(n_state, n_state, bias_attr=False)
self.value = Linear(n_state, n_state, bias_attr=True)
self.out = Linear(n_state, n_state, bias_attr=True)
def forward(
self,
x: paddle.Tensor,
xa: Optional[paddle.Tensor]=None,
mask: Optional[paddle.Tensor]=None,
kv_cache: Optional[dict]=None, ):
q = self.query(x)
if kv_cache is None or xa is None or self.key not in kv_cache:
# hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors;
# otherwise, perform key/value projections for self- or cross-attention as usual.
k = self.key(x if xa is None else xa)
v = self.value(x if xa is None else xa)
else:
# for cross-attention, calculate keys and values once and reuse in subsequent calls.
k = kv_cache[self.key]
v = kv_cache[self.value]
wv = self.qkv_attention(q, k, v, mask)
return self.out(wv)
def qkv_attention(self,
q: paddle.Tensor,
k: paddle.Tensor,
v: paddle.Tensor,
mask: Optional[paddle.Tensor]=None):
n_batch, n_ctx, n_state = q.shape
scale = (n_state // self.n_head)**-0.25
q = paddle.transpose(
q.view(*q.shape[:2], self.n_head, -1), (0, 2, 1, 3)) * scale
k = paddle.transpose(
k.view(*k.shape[:2], self.n_head, -1), (0, 2, 3, 1)) * scale
v = paddle.transpose(
v.view(*v.shape[:2], self.n_head, -1), (0, 2, 1, 3))
qk = q @ k
if mask is not None:
qk = qk + mask[:n_ctx, :n_ctx]
w = F.softmax(qk.float(), axis=-1).to(q.dtype)
return paddle.transpose((w @ v), (0, 2, 1, 3)).flatten(start_axis=2)
class ResidualAttentionBlock(nn.Layer):
def __init__(self, n_state: int, n_head: int, cross_attention: bool=False):
super().__init__()
self.attn = MultiHeadAttention(n_state, n_head)
self.attn_ln = LayerNorm(n_state)
self.cross_attn = MultiHeadAttention(
n_state, n_head) if cross_attention else None
self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
n_mlp = n_state * 4
self.mlp = nn.Sequential(
Linear(n_state, n_mlp, bias_attr=True),
nn.GELU(), Linear(n_mlp, n_state, bias_attr=True))
self.mlp_ln = LayerNorm(n_state)
def forward(
self,
x: paddle.Tensor,
xa: Optional[paddle.Tensor]=None,
mask: Optional[paddle.Tensor]=None,
kv_cache: Optional[dict]=None, ):
x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)
if self.cross_attn:
x = x + self.cross_attn(
self.cross_attn_ln(x), xa, kv_cache=kv_cache)
x = x + self.mlp(self.mlp_ln(x))
return x
def sinusoids(length, channels, max_timescale=10000):
"""Returns sinusoids for positional embedding"""
assert channels % 2 == 0
log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
inv_timescales = paddle.exp(-log_timescale_increment * paddle.arange(
channels // 2, dtype=paddle.float32))
scaled_time = paddle.arange(
length,
dtype=paddle.float32)[:, np.newaxis] * inv_timescales[np.newaxis, :]
return paddle.to_tensor(
paddle.concat(
[paddle.sin(scaled_time), paddle.cos(scaled_time)], axis=1))
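# Example (sketch, not part of the original module): the table returned here is used as the
# fixed audio positional embedding; e.g. sinusoids(1500, 1280) has shape [1500, 1280], with
# the first half of each row holding sines and the second half cosines.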
class AudioEncoder(nn.Layer):
def __init__(self,
n_mels: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int):
super().__init__()
self.conv1 = Conv1d(
n_mels, n_state, kernel_size=3, stride=1, padding=1, bias_attr=True)
self.conv2 = Conv1d(
n_state,
n_state,
kernel_size=3,
stride=2,
padding=1,
bias_attr=True)
self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state))
self.blocks: Iterable[ResidualAttentionBlock] = nn.LayerList(
[ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)])
self.ln_post = LayerNorm(n_state)
def forward(self, x: paddle.Tensor):
"""
x : paddle.Tensor, shape = (batch_size, n_mels, n_ctx)
the mel spectrogram of the audio
"""
x = F.gelu(self.conv1(x))
x = F.gelu(self.conv2(x))
x = paddle.transpose(x, (0, 2, 1))
assert x.shape[
1:] == self.positional_embedding.shape, "incorrect audio shape"
x = (x + self.positional_embedding)
for block in self.blocks:
x = block(x)
x = self.ln_post(x)
return x
class TextDecoder(nn.Layer):
def __init__(self,
n_vocab: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int):
super().__init__()
self.token_embedding = nn.Embedding(n_vocab, n_state)
self.positional_embedding = paddle.create_parameter(
shape=[n_ctx, n_state], dtype='float32')
self.blocks: Iterable[ResidualAttentionBlock] = nn.LayerList([
ResidualAttentionBlock(n_state, n_head, cross_attention=True)
for _ in range(n_layer)
])
self.ln = LayerNorm(n_state)
        # causal mask over the text context; it must be square in the context dimension
        mask = paddle.full(
            shape=[n_ctx, n_ctx], fill_value=-np.inf, dtype='float32')
mask = paddle.triu(mask, diagonal=1)
self.register_buffer("mask", mask, persistable=False)
def forward(self,
x: paddle.Tensor,
xa: paddle.Tensor,
kv_cache: Optional[dict]=None):
"""
x : paddle.LongTensor, shape = (batch_size, <= n_ctx)
the text tokens
xa : paddle.Tensor, shape = (batch_size, n_mels, n_audio_ctx)
the encoded audio features to be attended on
"""
offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0
x = self.token_embedding(x) + self.positional_embedding[offset:offset +
x.shape[-1]]
x = x.to(xa.dtype)
for block in self.blocks:
x = block(x, xa, mask=self.mask, kv_cache=kv_cache)
x = self.ln(x)
logits = (x @ paddle.transpose(self.token_embedding.weight, (1, 0)))
return logits
@dataclass(frozen=True)
class DecodingOptions:
task: str = "transcribe" # whether to perform X->X "transcribe" or X->English "translate"
language: Optional[
str] = None # language that the audio is in; uses detected language if None
# sampling-related options
temperature: float = 0.0
sample_len: Optional[int] = None # maximum number of tokens to sample
best_of: Optional[
int] = None # number of independent samples to collect, when t > 0
beam_size: Optional[
int] = None # number of beams in beam search, when t == 0
patience: Optional[
float] = None # patience in beam search (https://arxiv.org/abs/2204.05424)
# options for ranking generations (either beams or best-of-N samples)
length_penalty: Optional[
float] = None # "alpha" in Google NMT, None defaults to length norm
# prompt, prefix, and token suppression
prompt: Optional[Union[str, List[
int]]] = None # text or tokens for the previous context
prefix: Optional[Union[str, List[
int]]] = None # text or tokens to prefix the current context
suppress_blank: bool = True # this will suppress blank outputs
# list of tokens ids (or comma-separated token ids) to suppress
# "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()`
suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1"
# timestamp sampling options
without_timestamps: bool = False # use <|notimestamps|> to sample text tokens only
max_initial_timestamp: Optional[
float] = 1.0 # the initial timestamp cannot be later than this
# implementation details
fp16: bool = False # use fp16 for most of the calculation
@dataclass(frozen=True)
class DecodingResult:
audio_features: paddle.Tensor
language: str
language_probs: Optional[Dict[str, float]] = None
tokens: List[int] = field(default_factory=list)
text: str = ""
avg_logprob: float = np.nan
no_speech_prob: float = np.nan
temperature: float = np.nan
compression_ratio: float = np.nan
class Inference:
def logits(self, tokens: paddle.Tensor,
audio_features: paddle.Tensor) -> paddle.Tensor:
"""Perform a forward pass on the decoder and return per-token logits"""
raise NotImplementedError
def rearrange_kv_cache(self, source_indices) -> None:
"""Update the key-value cache according to the updated beams"""
raise NotImplementedError
def cleanup_caching(self) -> None:
"""Clean up any resources or hooks after decoding is finished"""
pass
class WhisperInference(Inference):
def __init__(self, model: "Whisper", initial_token_length: int):
self.model: "Whisper" = model
self.initial_token_length = initial_token_length
self.kv_cache = {}
self.hooks = []
def logits(self, tokens: paddle.Tensor,
audio_features: paddle.Tensor) -> paddle.Tensor:
if not self.kv_cache:
self.kv_cache, self.hooks = self.model.install_kv_cache_hooks()
if tokens.shape[-1] > self.initial_token_length:
# only need to use the last token except in the first forward pass
tokens = tokens[:, -1:]
return self.model.decoder(
tokens, audio_features, kv_cache=self.kv_cache)
def cleanup_caching(self):
for hook in self.hooks:
hook.remove()
self.kv_cache = {}
self.hooks = []
def rearrange_kv_cache(self, source_indices):
for module, tensor in self.kv_cache.items():
# update the key/value cache to contain the selected sequences
self.kv_cache[module] = tensor[source_indices].detach()
@paddle.no_grad()
def detect_language(
model: "Whisper",
mel: paddle.Tensor,
resource_path: str,
tokenizer: Tokenizer=None) -> Tuple[paddle.Tensor, List[dict]]:
"""
    Detect the spoken language in the audio, and return the ids of the most probable language
    tokens along with the probability distribution over all language tokens.
This is performed outside the main decode loop in order to not interfere with kv-caching.
Returns
-------
language_tokens : Tensor, shape = (batch_size,)
ids of the most probable language tokens, which appears after the startoftranscript token.
language_probs : List[Dict[str, float]], length = batch_size
list of dictionaries containing the probability distribution over all languages.
"""
if tokenizer is None:
tokenizer = get_tokenizer(
model.is_multilingual, resource_path=resource_path)
if tokenizer.language is None or tokenizer.language_token not in tokenizer.sot_sequence:
raise ValueError(
"This model doesn't have language tokens so it can't perform lang id"
)
single = mel.ndim == 2
if single:
mel = mel.unsqueeze(0)
# skip encoder forward pass if already-encoded audio features were given
if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):
mel = model.encoder(mel)
# forward pass using a single token, startoftranscript
batch_size = mel.shape[0]
x = paddle.to_tensor([[tokenizer.sot]] * batch_size) # [batch_size, 1]
logits = model.logits(x, mel)[:, 0]
# collect detected languages; suppress all non-language tokens
mask = paddle.ones(paddle.to_tensor(logits.shape[-1]), dtype=bool)
mask[list(tokenizer.all_language_tokens)] = False
logits[:, mask] = -np.inf
language_tokens = paddle.argmax(logits, axis=-1)
language_token_probs = F.softmax(logits, axis=-1)
language_probs = [{
c: language_token_probs[i, j].tolist()
for j, c in zip(tokenizer.all_language_tokens,
tokenizer.all_language_codes)
} for i in range(batch_size)]
if single:
language_tokens = language_tokens[0]
language_probs = language_probs[0]
return language_tokens, language_probs
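# Usage sketch (hypothetical; assumes a loaded multilingual Whisper `model`, a mel tensor
# `mel` from log_mel_spectrogram below, and a valid `resource_path` for the tokenizer assets):
#
#     segment = pad_or_trim(mel, N_FRAMES)
#     _, probs = model.detect_language(segment, resource_path)
#     print(max(probs, key=probs.get))  # most probable language code, e.g. "en"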
def transcribe(
model: "Whisper",
mel: paddle.Tensor,
resource_path: str,
*,
verbose: Optional[bool]=None,
temperature: Union[float, Tuple[float, ...]]=(0.0, 0.2, 0.4, 0.6, 0.8,
1.0),
compression_ratio_threshold: Optional[float]=2.4,
logprob_threshold: Optional[float]=-1.0,
no_speech_threshold: Optional[float]=0.6,
condition_on_previous_text: bool=True,
**decode_options, ):
"""
Transcribe an audio file using Whisper
Parameters
----------
model: Whisper
The Whisper model instance
mel: paddle.Tensor
The audio feature
verbose: bool
        Whether to display the text being decoded to the console. If True, displays all the details;
        if False, displays minimal details. If None, does not display anything.
temperature: Union[float, Tuple[float, ...]]
        Temperature for sampling. It can be a tuple of temperatures, which will be successively used
upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
compression_ratio_threshold: float
If the gzip compression ratio is above this value, treat as failed
logprob_threshold: float
If the average log probability over sampled tokens is below this value, treat as failed
no_speech_threshold: float
If the no_speech probability is higher than this value AND the average log probability
over sampled tokens is below `logprob_threshold`, consider the segment as silent
condition_on_previous_text: bool
if True, the previous output of the model is provided as a prompt for the next window;
disabling may make the text inconsistent across windows, but the model becomes less prone to
getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
decode_options: dict
Keyword arguments to construct `DecodingOptions` instances
Returns
-------
A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
the spoken language ("language"), which is detected when `decode_options["language"]` is None.
"""
dtype = np.float32 #paddle only support float32
if dtype == np.float32:
decode_options["fp16"] = False
if decode_options.get(
"language") == 'None' or decode_options.get("language", None) is None:
if not model.is_multilingual:
decode_options["language"] = "en"
else:
if verbose:
print(
"Detecting language using up to the first 30 seconds. Use `--language` to specify the language"
)
segment = pad_or_trim(mel, N_FRAMES)
_, probs = model.detect_language(segment, resource_path)
decode_options["language"] = max(probs, key=probs.get)
if verbose is not None:
print(
f"Detected language: {LANGUAGES[decode_options['language']].title()}"
)
language = decode_options["language"]
task = decode_options.get("task", "transcribe")
tokenizer = get_tokenizer(
model.is_multilingual,
resource_path=resource_path,
language=language,
task=task)
def decode_with_fallback(segment: paddle.Tensor) -> DecodingResult:
temperatures = [temperature] if isinstance(temperature, (
int, float)) else temperature
decode_result = None
for t in temperatures:
kwargs = {**decode_options}
if t > 0:
# disable beam_size and patience when t > 0
kwargs.pop("beam_size", None)
kwargs.pop("patience", None)
else:
# disable best_of when t == 0
kwargs.pop("best_of", None)
options = DecodingOptions(**kwargs, temperature=t)
decode_result = model.decode(segment, options, resource_path)
needs_fallback = False
if compression_ratio_threshold is not None and decode_result.compression_ratio > compression_ratio_threshold:
needs_fallback = True # too repetitive
if logprob_threshold is not None and decode_result.avg_logprob < logprob_threshold:
needs_fallback = True # average log probability is too low
if not needs_fallback:
break
return decode_result
seek = 0
input_stride = utils.exact_div(
N_FRAMES, model.dims.n_audio_ctx) # mel frames per output token: 2
time_precision = (input_stride * HOP_LENGTH /
SAMPLE_RATE) # time per output token: 0.02 (seconds)
all_tokens = []
all_segments = []
prompt_reset_since = 0
initial_prompt = decode_options.pop("initial_prompt", None) or []
if initial_prompt:
initial_prompt = tokenizer.encode(" " +
initial_prompt.strip()).input_ids
all_tokens.extend(initial_prompt)
def add_segment(*,
start: float,
end: float,
text_tokens: paddle.Tensor,
result: DecodingResult):
text = tokenizer.decode(
[token for token in text_tokens if token < tokenizer.eot])
if len(text.strip()) == 0: # skip empty text output
return
all_segments.append({
"id": len(all_segments),
"seek": seek,
"start": start,
"end": end,
"text": text,
"tokens": result.tokens,
"temperature": result.temperature,
"avg_logprob": result.avg_logprob,
"compression_ratio": result.compression_ratio,
"no_speech_prob": result.no_speech_prob,
})
if verbose:
print(
f"[{utils.format_timestamp(start)} --> {utils.format_timestamp(end)}] {text}"
)
# show the progress bar when verbose is False (otherwise the transcribed text will be printed)
num_frames = mel.shape[-1]
previous_seek_value = seek
with tqdm.tqdm(
total=num_frames, unit='frames',
disable=verbose is not False) as pbar:
while seek < num_frames:
timestamp_offset = float(seek * HOP_LENGTH / SAMPLE_RATE)
segment = pad_or_trim(mel[:, seek:], N_FRAMES)
segment_duration = segment.shape[-1] * HOP_LENGTH / SAMPLE_RATE
decode_options["prompt"] = all_tokens[prompt_reset_since:]
result: DecodingResult = decode_with_fallback(segment)
tokens = paddle.to_tensor(result.tokens)
if no_speech_threshold is not None:
# no voice activity check
should_skip = result.no_speech_prob > no_speech_threshold
if logprob_threshold is not None and result.avg_logprob > logprob_threshold:
# don't skip if the logprob is high enough, despite the no_speech_prob
should_skip = False
if should_skip:
seek += segment.shape[
-1] # fast-forward to the next segment boundary
continue
timestamp_tokens: paddle.Tensor = tokens.greater_equal(
paddle.to_tensor(tokenizer.timestamp_begin))
consecutive = paddle.where(timestamp_tokens[:-1] & timestamp_tokens[
1:])[0]
if len(
consecutive
) > 0: # if the output contains two consecutive timestamp tokens
consecutive = paddle.add(consecutive, paddle.to_tensor(1))
last_slice = 0
for current_slice in consecutive:
sliced_tokens = tokens[last_slice:current_slice]
start_timestamp_position = (
sliced_tokens[0].item() - tokenizer.timestamp_begin)
end_timestamp_position = (
sliced_tokens[-1].item() - tokenizer.timestamp_begin)
add_segment(
start=timestamp_offset + start_timestamp_position *
time_precision,
end=timestamp_offset + end_timestamp_position *
time_precision,
text_tokens=sliced_tokens[1:-1],
result=result, )
last_slice = current_slice
last_timestamp_position = (
tokens[last_slice - 1].item() - tokenizer.timestamp_begin)
seek += last_timestamp_position * input_stride
all_tokens.extend(tokens[:last_slice + 1].tolist())
else:
duration = segment_duration
timestamps = tokens[timestamp_tokens.nonzero().flatten()]
if len(timestamps) > 0 and timestamps[
-1].item() != tokenizer.timestamp_begin:
# no consecutive timestamps but it has a timestamp; use the last one.
# single timestamp at the end means no speech after the last timestamp.
last_timestamp_position = timestamps[
-1].item() - tokenizer.timestamp_begin
duration = last_timestamp_position * time_precision
add_segment(
start=timestamp_offset,
end=timestamp_offset + duration,
text_tokens=tokens,
result=result, )
seek += segment.shape[-1]
all_tokens.extend(tokens.tolist())
if not condition_on_previous_text or result.temperature > 0.5:
# do not feed the prompt tokens if a high temperature was used
prompt_reset_since = len(all_tokens)
# update progress bar
pbar.update(min(num_frames, seek) - previous_seek_value)
previous_seek_value = seek
return dict(
text=tokenizer.decode(all_tokens[len(initial_prompt):]),
segments=all_segments,
language=language)
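# Usage sketch (hypothetical file and resource paths; `model` is a loaded Whisper instance):
#
#     mel = log_mel_spectrogram("speech_16k.wav", resource_path=resource_path)
#     result = model.transcribe(mel, resource_path, verbose=True)
#     print(result["language"])
#     for seg in result["segments"]:
#         print(seg["start"], seg["end"], seg["text"])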
class SequenceRanker:
def rank(self,
tokens: List[List[paddle.Tensor]],
sum_logprobs: List[List[float]]) -> List[int]:
"""
Given a list of groups of samples and their cumulative log probabilities,
return the indices of the samples in each group to select as the final result
"""
raise NotImplementedError
class MaximumLikelihoodRanker(SequenceRanker):
"""
Select the sample with the highest log probabilities, penalized using either
a simple length normalization or Google NMT paper's length penalty
"""
def __init__(self, length_penalty: Optional[float]):
self.length_penalty = length_penalty
def rank(self,
tokens: List[List[paddle.Tensor]],
sum_logprobs: List[List[float]]):
def scores(logprobs, lengths):
result = []
for logprob, length in zip(logprobs, lengths):
if self.length_penalty is None:
penalty = length
else:
# from the Google NMT paper
penalty = ((5 + length) / 6)**self.length_penalty
result.append(logprob / penalty)
return result
# get the sequence with the highest score
lengths = [[len(t) for t in s] for s in tokens]
return [np.argmax(scores(p, l)) for p, l in zip(sum_logprobs, lengths)]
class TokenDecoder:
def reset(self):
"""Initialize any stateful variables for decoding a new sequence"""
def update(self,
tokens: paddle.Tensor,
logits: paddle.Tensor,
sum_logprobs: paddle.Tensor) -> Tuple[paddle.Tensor, bool]:
"""Specify how to select the next token, based on the current trace and logits
Parameters
----------
tokens : Tensor, shape = (n_batch, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence tokens
logits : Tensor, shape = (n_batch, vocab_size)
per-token logits of the probability distribution at the current step
sum_logprobs : Tensor, shape = (n_batch)
cumulative log probabilities for each sequence
Returns
-------
tokens : Tensor, shape = (n_batch, current_sequence_length + 1)
the tokens, appended with the selected next token
completed : bool
            True if all sequences have reached the end of text
"""
raise NotImplementedError
def finalize(
self, tokens: paddle.Tensor, sum_logprobs: paddle.Tensor
) -> Tuple[Sequence[Sequence[paddle.Tensor]], List[List[float]]]:
"""Finalize search and return the final candidate sequences
Parameters
----------
tokens : Tensor, shape = (batch_size, beam_size, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence
sum_logprobs : Tensor, shape = (batch_size, beam_size)
cumulative log probabilities for each sequence
Returns
-------
tokens : Sequence[Sequence[Tensor]], length = batch_size
sequence of Tensors containing candidate token sequences, for each audio input
sum_logprobs : List[List[float]], length = batch_size
sequence of cumulative log probabilities corresponding to the above
"""
raise NotImplementedError
class GreedyDecoder(TokenDecoder):
def __init__(self, temperature: float, eot: int):
self.temperature = temperature
self.eot = eot
def update(self,
tokens: paddle.Tensor,
logits: paddle.Tensor,
sum_logprobs: paddle.Tensor) -> Tuple[paddle.Tensor, bool]:
temperature = self.temperature
if temperature == 0:
next_tokens = paddle.argmax(logits, axis=-1)
else:
next_tokens = Categorical(logits=logits / temperature).sample([1])
next_tokens = paddle.reshape(next_tokens, [
next_tokens.shape[0] * next_tokens.shape[1],
])
logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32)
current_logprobs = logprobs[paddle.arange(logprobs.shape[0]),
next_tokens]
sum_logprobs += current_logprobs * paddle.to_tensor(
(tokens[:, -1] != self.eot), dtype=paddle.float32)
next_tokens[tokens[:, -1] == self.eot] = self.eot
tokens = paddle.concat([tokens, next_tokens[:, None]], axis=-1)
completed = paddle.all((tokens[:, -1] == self.eot))
return tokens, completed
def finalize(self, tokens: paddle.Tensor, sum_logprobs: paddle.Tensor):
# make sure each sequence has at least one EOT token at the end
tokens = F.pad(tokens, (0, 1), value=self.eot, data_format="NCL")
return tokens, sum_logprobs.tolist()
class BeamSearchDecoder(TokenDecoder):
def __init__(self,
beam_size: int,
eot: int,
inference: Inference,
patience: Optional[float]=None):
self.beam_size = beam_size
self.eot = eot
self.inference = inference
self.patience = patience or 1.0
self.max_candidates: int = round(beam_size * self.patience)
self.finished_sequences = None
assert self.max_candidates > 0, f"Invalid beam size ({beam_size}) or patience ({patience})"
def reset(self):
self.finished_sequences = None
def update(self,
tokens: paddle.Tensor,
logits: paddle.Tensor,
sum_logprobs: paddle.Tensor) -> Tuple[paddle.Tensor, bool]:
if tokens.shape[0] % self.beam_size != 0:
raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0")
batch_size = tokens.shape[0] // self.beam_size
if self.finished_sequences is None: # for the first update
self.finished_sequences = [{} for _ in range(batch_size)]
logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32)
next_tokens, source_indices, finished_sequences = [], [], []
for i in range(batch_size):
scores, sources, finished = {}, {}, {}
# STEP 1: calculate the cumulative log probabilities for possible candidates
for j in range(self.beam_size):
idx = i * self.beam_size + j
prefix = tokens[idx].tolist()
logprob, token = paddle.topk(
logprobs[idx], k=self.beam_size + 1)
for logprob, token in zip(logprob, token):
new_logprob = (sum_logprobs[idx] + logprob).tolist()[0]
sequence = tuple(prefix + [token.tolist()[0]])
scores[sequence] = new_logprob
sources[sequence] = idx
# STEP 2: rank the candidates and keep the top beam_size sequences for each audio
saved = 0
for sequence in sorted(scores, key=scores.get, reverse=True):
if sequence[-1] == self.eot:
finished[sequence] = scores[sequence]
else:
sum_logprobs[len(next_tokens)] = scores[sequence]
next_tokens.append(sequence)
source_indices.append(sources[sequence])
saved += 1
if saved == self.beam_size:
break
finished_sequences.append(finished)
tokens = paddle.to_tensor(next_tokens)
self.inference.rearrange_kv_cache(source_indices)
# add newly finished sequences to self.finished_sequences
assert len(self.finished_sequences) == len(finished_sequences)
for previously_finished, newly_finished in zip(self.finished_sequences,
finished_sequences):
for seq in sorted(
newly_finished, key=newly_finished.get, reverse=True):
if len(previously_finished) >= self.max_candidates:
break # the candidate list is full
previously_finished[seq] = newly_finished[seq]
# mark as completed if all audio has enough number of samples
completed = all(
len(sequences) >= self.max_candidates
for sequences in self.finished_sequences)
return tokens, completed
def finalize(self,
preceding_tokens: paddle.Tensor,
sum_logprobs: paddle.Tensor):
# collect all finished sequences, including patience, and add unfinished ones if not enough
sum_logprobs = sum_logprobs.cpu()
for i, sequences in enumerate(self.finished_sequences):
if len(sequences
) < self.beam_size: # when not enough sequences are finished
for j in list(np.argsort(sum_logprobs[i]))[::-1]:
sequence = preceding_tokens[i, j].tolist() + [self.eot]
sequences[tuple(sequence)] = sum_logprobs[i][j].item()
if len(sequences) >= self.beam_size:
break
tokens: List[List[paddle.Tensor]] = [
[paddle.to_tensor(seq) for seq in sequences.keys()]
for sequences in self.finished_sequences
]
sum_logprobs: List[List[float]] = [
list(sequences.values()) for sequences in self.finished_sequences
]
return tokens, sum_logprobs
class LogitFilter:
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor) -> None:
"""Apply any filtering or masking to logits in-place
Parameters
----------
logits : Tensor, shape = (n_batch, vocab_size)
per-token logits of the probability distribution at the current step
tokens : Tensor, shape = (n_batch, current_sequence_length)
all tokens in the context so far, including the prefix and sot_sequence tokens
"""
raise NotImplementedError
class SuppressBlank(LogitFilter):
def __init__(self, tokenizer: Tokenizer, sample_begin: int):
self.tokenizer = tokenizer
self.sample_begin = sample_begin
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor):
if tokens.shape[1] == self.sample_begin:
logits[:, self.tokenizer.encode(" ").input_ids +
[self.tokenizer.eot]] = -np.inf
class SuppressTokens(LogitFilter):
def __init__(self, suppress_tokens: Sequence[int]):
self.suppress_tokens = list(suppress_tokens)
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor):
logits[:, self.suppress_tokens] = -np.inf
class ApplyTimestampRules(LogitFilter):
def __init__(self,
tokenizer: Tokenizer,
sample_begin: int,
max_initial_timestamp_index: Optional[int]):
self.tokenizer = tokenizer
self.sample_begin = sample_begin
self.max_initial_timestamp_index = max_initial_timestamp_index
def apply(self, logits: paddle.Tensor, tokens: paddle.Tensor):
# suppress <|notimestamps|> which is handled by without_timestamps
if self.tokenizer.no_timestamps is not None:
logits[:, self.tokenizer.no_timestamps] = -np.inf
# timestamps have to appear in pairs, except directly before EOT; mask logits accordingly
for k in range(tokens.shape[0]):
seq = [t for t in tokens[k, self.sample_begin:].tolist()]
last_was_timestamp = len(seq) >= 1 and seq[
-1] >= self.tokenizer.timestamp_begin
penultimate_was_timestamp = len(seq) < 2 or seq[
-2] >= self.tokenizer.timestamp_begin
if last_was_timestamp:
if penultimate_was_timestamp: # has to be non-timestamp
logits[k, self.tokenizer.timestamp_begin:] = -np.inf
else: # cannot be normal text tokens
logits[k, :self.tokenizer.eot] = -np.inf
# apply the `max_initial_timestamp` option
if tokens.shape[
1] == self.sample_begin and self.max_initial_timestamp_index is not None:
last_allowed = self.tokenizer.timestamp_begin + self.max_initial_timestamp_index
logits[:, last_allowed + 1:] = -np.inf
# if sum of probability over timestamps is above any other token, sample timestamp
logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32)
for k in range(tokens.shape[0]):
timestamp_logprob = paddle.logsumexp(
logprobs[k, self.tokenizer.timestamp_begin:], axis=-1)
max_text_token_logprob = paddle.max(
logprobs[k, :self.tokenizer.timestamp_begin])
if timestamp_logprob > max_text_token_logprob:
logits[k, :self.tokenizer.timestamp_begin] = -np.inf
class DecodingTask:
inference: Inference
sequence_ranker: SequenceRanker
decoder: TokenDecoder
logit_filters: List[LogitFilter]
def __init__(self,
model: "Whisper",
options: DecodingOptions,
resource_path: str):
self.model = model
language = options.language or "en"
tokenizer = get_tokenizer(
model.is_multilingual,
resource_path=resource_path,
language=language,
task=options.task)
self.tokenizer: Tokenizer = tokenizer
self.options: DecodingOptions = self._verify_options(options)
self.resource_path: str = resource_path
self.beam_size: int = options.beam_size or options.best_of or 1
self.n_ctx: int = model.dims.n_text_ctx
self.sample_len: int = options.sample_len or model.dims.n_text_ctx // 2
self.sot_sequence: Tuple[int] = tokenizer.sot_sequence
if self.options.without_timestamps:
self.sot_sequence = tokenizer.sot_sequence_including_notimestamps
self.initial_tokens: Tuple[int] = self._get_initial_tokens()
self.sample_begin: int = len(self.initial_tokens)
self.sot_index: int = self.initial_tokens.index(tokenizer.sot)
# inference: implements the forward pass through the decoder, including kv caching
self.inference = WhisperInference(model, len(self.initial_tokens))
# sequence ranker: implements how to rank a group of sampled sequences
self.sequence_ranker = MaximumLikelihoodRanker(options.length_penalty)
# decoder: implements how to select the next tokens, given the autoregressive distribution
if options.beam_size is not None:
self.decoder = BeamSearchDecoder(options.beam_size, tokenizer.eot,
self.inference, options.patience)
else:
self.decoder = GreedyDecoder(options.temperature, tokenizer.eot)
# logit filters: applies various rules to suppress or penalize certain tokens
self.logit_filters = []
if self.options.suppress_blank:
self.logit_filters.append(
SuppressBlank(self.tokenizer, self.sample_begin))
if self.options.suppress_tokens:
self.logit_filters.append(
SuppressTokens(self._get_suppress_tokens()))
if not options.without_timestamps:
precision = CHUNK_LENGTH / model.dims.n_audio_ctx # usually 0.02 seconds
max_initial_timestamp_index = None
if options.max_initial_timestamp:
max_initial_timestamp_index = round(
self.options.max_initial_timestamp / precision)
self.logit_filters.append(
ApplyTimestampRules(tokenizer, self.sample_begin,
max_initial_timestamp_index))
def _verify_options(self, options: DecodingOptions) -> DecodingOptions:
if options.beam_size is not None and options.best_of is not None:
raise ValueError("beam_size and best_of can't be given together")
if options.temperature == 0:
if options.best_of is not None:
raise ValueError(
"best_of with greedy sampling (T=0) is not compatible")
if options.patience is not None and options.beam_size is None:
raise ValueError("patience requires beam_size to be given")
if options.length_penalty is not None and not (
0 <= options.length_penalty <= 1):
raise ValueError(
"length_penalty (alpha) should be a value between 0 and 1")
return options
def _get_initial_tokens(self) -> Tuple[int]:
tokens = list(self.sot_sequence)
prefix = self.options.prefix
prompt = self.options.prompt
if prefix:
            prefix_tokens = (
                self.tokenizer.encode(" " + prefix.strip()).input_ids
                if isinstance(prefix, str) else prefix)
if self.sample_len is not None:
max_prefix_len = self.n_ctx // 2 - self.sample_len
prefix_tokens = prefix_tokens[-max_prefix_len:]
tokens = tokens + prefix_tokens
if prompt:
            prompt_tokens = (
                self.tokenizer.encode(" " + prompt.strip()).input_ids
                if isinstance(prompt, str) else prompt)
tokens = [self.tokenizer.sot_prev] + prompt_tokens[-(self.n_ctx // 2
- 1):] + tokens
return tuple(tokens)
def _get_suppress_tokens(self) -> Tuple[int]:
suppress_tokens = self.options.suppress_tokens
if isinstance(suppress_tokens, str):
suppress_tokens = [int(t) for t in suppress_tokens.split(",")]
if -1 in suppress_tokens:
suppress_tokens = [t for t in suppress_tokens if t >= 0]
suppress_tokens.extend(self.tokenizer.non_speech_tokens)
elif suppress_tokens is None or len(suppress_tokens) == 0:
suppress_tokens = [] # interpret empty string as an empty list
else:
assert isinstance(suppress_tokens,
list), "suppress_tokens must be a list"
suppress_tokens.extend([
self.tokenizer.sot, self.tokenizer.sot_prev, self.tokenizer.sot_lm
])
if self.tokenizer.no_speech is not None:
# no-speech probability is collected separately
suppress_tokens.append(self.tokenizer.no_speech)
return tuple(sorted(set(suppress_tokens)))
def _get_audio_features(self, mel: paddle.Tensor):
#if self.options.fp16:
# mel = mel.half()
if mel.shape[-2:] == (self.model.dims.n_audio_ctx,
self.model.dims.n_audio_state):
# encoded audio features are given; skip audio encoding
audio_features = mel
else:
audio_features = self.model.encoder(mel)
#if audio_features.dtype != (np.float16 if self.options.fp16 else np.float32):
# return TypeError(f"audio_features has an incorrect dtype: {audio_features.dtype}")
return audio_features
def _detect_language(self,
audio_features: paddle.Tensor,
tokens: paddle.Tensor,
resource_path: str):
languages = [self.options.language] * audio_features.shape[0]
lang_probs = None
if self.options.language is None or self.options.task == "lang_id":
lang_tokens, lang_probs = self.model.detect_language(
audio_features, self.tokenizer, self.resource_path)
languages = [max(probs, key=probs.get) for probs in lang_probs]
if self.options.language is None:
tokens[:, self.sot_index +
1] = lang_tokens # write language tokens
return languages, lang_probs
def _main_loop(self, audio_features: paddle.Tensor, tokens: paddle.Tensor):
assert audio_features.shape[0] == tokens.shape[0]
n_batch = tokens.shape[0]
sum_logprobs: paddle.Tensor = paddle.zeros(
paddle.to_tensor(n_batch), dtype=paddle.float32)
no_speech_probs = [np.nan] * n_batch
try:
for i in range(self.sample_len):
logits = self.inference.logits(tokens, audio_features)
if i == 0 and self.tokenizer.no_speech is not None: # save no_speech_probs
probs_at_sot = F.softmax(
logits[:, self.sot_index],
axis=-1,
dtype=paddle.float32)
no_speech_probs = probs_at_sot[:, self.tokenizer.
no_speech].tolist()
# now we need to consider the logits at the last token only
logits = logits[:, -1]
                # apply the logit filters, e.g. for suppressing or penalizing certain tokens
for logit_filter in self.logit_filters:
logit_filter.apply(logits, tokens)
# expand the tokens tensor with the selected next tokens
tokens, completed = self.decoder.update(tokens, logits,
sum_logprobs)
if completed or tokens.shape[-1] > self.n_ctx:
break
finally:
self.inference.cleanup_caching()
return tokens, sum_logprobs, no_speech_probs
@paddle.no_grad()
def run(self, mel: paddle.Tensor) -> List[DecodingResult]:
self.decoder.reset()
tokenizer: Tokenizer = self.tokenizer
batch_size: int = mel.shape[0]
audio_features: paddle.Tensor = self._get_audio_features(
mel) # encoder forward pass
tokens: paddle.Tensor
        tokens = paddle.to_tensor([self.initial_tokens])
        if batch_size > 1:
            # repeat the initial token sequence once per audio segment in the batch
            tokens = paddle.tile(tokens, repeat_times=[batch_size, 1])
# detect language if requested, overwriting the language token
languages, language_probs = self._detect_language(
paddle.to_tensor(audio_features),
paddle.to_tensor(tokens), self.resource_path)
if self.options.task == "lang_id":
return [
DecodingResult(
audio_features=features,
language=language,
language_probs=probs) for features, language, probs in
zip(audio_features, languages, language_probs)
]
# repeat the audio & text tensors by the group size, for beam search or best-of-n sampling
audio_features = paddle.repeat_interleave(
audio_features, self.beam_size, axis=0)
tokens = paddle.repeat_interleave(tokens, self.beam_size, axis=0)
# call the main sampling loop
tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features,
tokens)
# reshape the tensors to have (batch_size, beam_size) as the first two dimensions
audio_features = audio_features[::self.beam_size]
no_speech_probs = no_speech_probs[::self.beam_size]
assert audio_features.shape[0] == len(no_speech_probs) == batch_size
tokens = tokens.reshape([batch_size, self.beam_size, -1])
sum_logprobs = sum_logprobs.reshape([batch_size, self.beam_size])
# get the final candidates for each group, and slice between the first sampled token and EOT
tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
tokens: List[List[paddle.Tensor]] = [[
t[self.sample_begin:(t == tokenizer.eot).nonzero()[0, 0]] for t in s
] for s in tokens]
# select the top-ranked sample in each group
selected = self.sequence_ranker.rank(tokens, sum_logprobs)
tokens: List[List[
int]] = [t[i].tolist() for i, t in zip(selected, tokens)]
texts: List[str] = [tokenizer.decode(t).strip() for t in tokens]
sum_logprobs: List[
float] = [lp[i] for i, lp in zip(selected, sum_logprobs)]
avg_logprobs: List[
float] = [lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs)]
fields = (texts, languages, tokens, audio_features, avg_logprobs,
no_speech_probs)
if len(set(map(len, fields))) != 1:
raise RuntimeError(
f"inconsistent result lengths: {list(map(len, fields))}")
return [
DecodingResult(
audio_features=features,
language=language,
tokens=tokens,
text=text,
avg_logprob=avg_logprob,
no_speech_prob=no_speech_prob,
temperature=self.options.temperature,
compression_ratio=utils.compression_ratio(text), )
for text, language, tokens, features, avg_logprob, no_speech_prob in
zip(*fields)
]
@paddle.no_grad()
def decode(
model: "Whisper",
mel: paddle.Tensor,
options: DecodingOptions=DecodingOptions(),
resource_path=str, ) -> Union[DecodingResult, List[DecodingResult]]:
"""
Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
Parameters
----------
model: Whisper
the Whisper model instance
mel: paddle.Tensor, shape = (80, 3000) or (*, 80, 3000)
A tensor containing the Mel spectrogram(s)
options: DecodingOptions
A dataclass that contains all necessary options for decoding 30-second segments
Returns
-------
result: Union[DecodingResult, List[DecodingResult]]
The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
"""
single = mel.ndim == 2
if single:
mel = mel.unsqueeze(0)
result = DecodingTask(model, options, resource_path).run(mel)
if single:
result = result[0]
return result
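# Usage sketch (hypothetical; decodes a single padded 30-second mel segment greedily):
#
#     options = DecodingOptions(task="transcribe", language="en", temperature=0.0,
#                               without_timestamps=True)
#     result = model.decode(pad_or_trim(mel, N_FRAMES), options, resource_path)
#     print(result.text, result.avg_logprob, result.no_speech_prob)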
class Whisper(nn.Layer):
def __init__(self, dims: ModelDimensions):
super().__init__()
self.dims = dims
self.encoder = AudioEncoder(
self.dims.n_mels,
self.dims.n_audio_ctx,
self.dims.n_audio_state,
self.dims.n_audio_head,
self.dims.n_audio_layer, )
self.decoder = TextDecoder(
self.dims.n_vocab,
self.dims.n_text_ctx,
self.dims.n_text_state,
self.dims.n_text_head,
self.dims.n_text_layer, )
def embed_audio(self, mel: paddle.Tensor):
return self.encoder.forward(mel)
def logits(self, tokens: paddle.Tensor, audio_features: paddle.Tensor):
return self.decoder.forward(tokens, audio_features)
def forward(self, mel: paddle.Tensor,
tokens: paddle.Tensor) -> Dict[str, paddle.Tensor]:
return self.decoder(tokens, self.encoder(mel))
@property
def device(self):
return paddle.device.get_device()
@property
def is_multilingual(self):
return self.dims.n_vocab == 51865
def install_kv_cache_hooks(self, cache: Optional[dict]=None):
"""
The `MultiHeadAttention` module optionally accepts `kv_cache` which stores the key and value
tensors calculated for the previous positions. This method returns a dictionary that stores
all caches, and the necessary hooks for the key and value projection modules that save the
intermediate tensors to be reused during later calculations.
Returns
-------
cache : Dict[nn.Layer, paddle.Tensor]
            A dictionary object mapping each key/value projection module to its cached tensor
        hooks : List[RemovableHandle]
            List of removable forward-hook handles used to stop the hooks from being called
"""
cache = {**cache} if cache is not None else {}
hooks = []
def save_to_cache(module, _, output):
if module not in cache or output.shape[
1] > self.decoder.positional_embedding.shape[0]:
cache[
module] = output # save as-is, for the first token or cross attention
else:
cache[module] = paddle.concat(
[cache[module], output], axis=1).detach()
return cache[module]
def install_hooks(layer: nn.Layer):
if isinstance(layer, MultiHeadAttention):
hooks.append(
layer.key.register_forward_post_hook(save_to_cache))
hooks.append(
layer.value.register_forward_post_hook(save_to_cache))
self.decoder.apply(install_hooks)
return cache, hooks
detect_language = detect_language
transcribe = transcribe
decode = decode
def pad_or_trim(array, length: int=N_SAMPLES, *, axis: int=-1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if paddle.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(axis=axis, index=paddle.arange(length))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = paddle.transpose(array, (1, 0))
array = F.pad(
array, [pad for sizes in pad_widths[::-1] for pad in sizes],
data_format='NLC')
array = paddle.transpose(array, (1, 0))
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = paddle.transpose(array, (1, 0))
array = np.pad(array, pad_widths)
array = paddle.transpose(array, (1, 0))
return array
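# Example (sketch): for a mel tensor of shape (80, 1234), pad_or_trim(mel, N_FRAMES) zero-pads
# the time axis up to (80, 3000); a longer input is trimmed to exactly N_FRAMES frames instead.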
def hann_window(n_fft: int=N_FFT):
"""
hanning window
n_fft: The number of frequency components of the discrete Fourier transform.
"""
return paddle.to_tensor(
[0.5 - 0.5 * np.cos(2 * np.pi * n / n_fft) for n in range(n_fft)],
dtype=paddle.float32)
@lru_cache(maxsize=None)
def mel_filters(resource_path: str, n_mels: int=N_MELS) -> paddle.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(resource_path, "assets", "mel_filters.npz")) as f:
return paddle.to_tensor(f[f"mel_{n_mels}"])
def log_mel_spectrogram(audio: Union[str, np.ndarray, paddle.Tensor],
n_mels: int=N_MELS,
resource_path: str=None):
"""
    Compute the log-Mel spectrogram of the input audio.
Parameters
----------
audio: Union[str, np.ndarray, paddle.Tensor], shape = (*)
The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
paddle.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not paddle.is_tensor(audio):
if isinstance(audio, str):
audio, _ = soundfile.read(audio, dtype="float32", always_2d=True)
audio = audio[:, 0]
logger.info(f"audio shape: {audio.shape}")
audio = paddle.to_tensor(audio)
window = hann_window(N_FFT)
stft = paddle.signal.stft(audio, N_FFT, HOP_LENGTH, window=window)
magnitudes = stft[:, :-1].abs()**2
filters = mel_filters(resource_path, n_mels)
mel_spec = filters @ magnitudes
mel_spec = paddle.to_tensor(mel_spec.numpy().tolist())
log_spec = paddle.clip(mel_spec, min=1e-10).log10()
log_spec = paddle.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
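# Usage sketch (hypothetical wav path; `resource_path` must contain assets/mel_filters.npz):
#
#     mel = log_mel_spectrogram("speech_16k.wav", n_mels=80, resource_path=resource_path)
#     print(mel.shape)  # (80, n_frames) of normalised log-Mel features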
| [
"0",
"1",
"None",
" ",
"initial_prompt"
] |
2024-01-10 | OllieBullGB/DNLML | backend~DAML.py | import os
import pandas as pd
import numpy as np
import openai
import pickle
import uuid
import ast
from dotenv import load_dotenv
from sklearn.preprocessing import PowerTransformer, LabelEncoder
from sklearn.model_selection import train_test_split as tt_split
from sklearn.metrics import accuracy_score, mean_squared_error as mse
from sklearn.svm import LinearSVC, SVC, SVR
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import SGDClassifier, Ridge
from sklearn.kernel_approximation import RBFSampler
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
class DAML:
def __init__(self, df, task, verbose = 0, forceTask=None):
self.df = df
if forceTask is None:
self.task, self.target_column = self.process_NLP_task(task)
if verbose == 1:
print("TASK:", self.task)
print("TARGET COLUMN:", self.target_column)
else:
self.task, self.target_column = forceTask
self.models = []
self.verbose = verbose
# find target column based off the task.targetColumn after NLP processing
self.clean()
# Split data into 60% training, 20% validation, 20% test.
# Ensure that data is the same across all models for accurate comparison
X = self.df.drop(self.target_column, axis=1)
y = self.df[self.target_column]
X_temp, self.X_test, y_temp, self.y_test = tt_split(X, y,test_size=0.2, random_state=42)
self.X_train, self.X_val, self.y_train, self.y_val = tt_split(X_temp, y_temp, test_size = 0.25, random_state=42)
self.selected_model = None
self.model()
# sort the models by accuracy desc
reverse_order = self.task != 'regression'
self.models.sort(key=lambda x: x[1], reverse=reverse_order)
# pick the type of the best model to be selected_model
self.selected_model = self.models[0][0].__class__.__name__
self.selected_model_url = None
# dump best model
if len(self.models) > 0:
domain_name = "https://python-nlai-service.onrender.com/"
model_name = str(uuid.uuid4()) + "model.sav"
pickle.dump(self.models[0][0], open(f'./models/{model_name}', 'wb'))
self.selected_model_url = domain_name + "models/" + model_name
def process_NLP_task(self, task, processor="open-ai"):
column_string = ', '.join(list(self.df.columns))
query = f"{task}. The columns for my dataset are {column_string}"
message = "You are a natural language processor that can talk exclusively in JSON. You are tasked with analysing a query for a dataset. For example 'I want to classify more results based on an income of >=50k usd. The columns for my dataset are 'age','profession','nationality','income'' would return {'task': 'classification (labelled)', 'targetColumn':'income}'. Target column must exist within the given set of columns, task must be one of 'classification (unlabelled) or 'regression'. Your task is: " + query
if processor == "open-ai":
messages = [{"role":"user", "content": message}]
chat = openai.ChatCompletion.create(model='gpt-3.5-turbo', messages = messages)
reply = chat.choices[0].message.content
response = ast.literal_eval(reply)
return (response['task'], response['targetColumn'])
else:
return ("classification (labelled)", "price_range")
def clean(self):
# eliminate NaN values
# ->> drop records/columns
# ->> impute values
# normalise values
# encode categorical data
# deal with missing values
self.df.dropna(axis=0, inplace=True)
# encode categorical data
cols = self.df.columns
numerical_cols = self.df._get_numeric_data().columns
categorical_cols = list(set(cols) - set(numerical_cols))
for col_name in categorical_cols:
le = LabelEncoder()
self.df[col_name] = le.fit_transform(self.df[col_name])
# normalise value
transformer = PowerTransformer(method='yeo-johnson')
X_cols = list(self.df.columns)
X_cols.remove(self.target_column)
for col in X_cols:
self.df[col] = transformer.fit_transform(self.df[col].to_numpy().reshape(-1, 1))
def model(self, big_dataset_size=100000):
if self.task == 'classification (labelled)':
print('classification (labelled)')
# <100k instances
# yes:
# --> LinearSVC, KNeighbors, SVC, Ensemble Classifiers
# no:
# --> SGD Classifier, Kernel Approximation
num_records = self.df.shape[0]
if num_records < big_dataset_size:
print("< 100k")
self.init_linearSVC()
self.init_knn()
self.init_SVC()
self.init_RandomForestClassifier()
# Start a LinearSVC, KNeighbors, SVC, Ensemble Classifier
else:
print("> 100k")
self.init_SGDClassifier()
# Start an SGD Classifier, Kernel Approximation.
elif self.task == 'regression':
print('regression')
num_records = self.df.shape[0]
if num_records < big_dataset_size:
print("< 100k")
self.init_SVR(kernel='linear')
self.init_SVR(kernel='rbf')
self.init_RandomForestRegressor()
# Ridge, linearSVR, rbfSVR, Ensemble Regressors
else:
print("> 100k")
def init_linearSVC(self):
model = LinearSVC(random_state=0, max_iter=2000)
model.fit(self.X_train, self.y_train)
y_pred = model.predict(self.X_test )
accuracy = accuracy_score(self.y_test, y_pred)
self.models.append((model, accuracy))
def init_knn(self, improvement_threshold=0.03, not_improved_number=2):
best_model = None
best_accuracy = 0
not_improved = 0
i = 1
while not_improved < not_improved_number:
not_improved += 1
if self.verbose == 1:
print(f"KNN: trying {i} neighbors, currently at acc {best_accuracy}")
model = KNeighborsClassifier(n_neighbors=i)
model.fit(self.X_train, self.y_train)
y_pred = model.predict(self.X_val)
accuracy = accuracy_score(self.y_val, y_pred)
if accuracy > best_accuracy + improvement_threshold:
best_model = model
best_accuracy = accuracy
not_improved = 0
i+=2
y_pred = best_model.predict(self.X_test)
accuracy = accuracy_score(self.y_test, y_pred)
self.models.append((best_model, accuracy))
def init_SVC(self):
model = SVC(gamma='auto', max_iter=2000)
model.fit(self.X_train, self.y_train)
y_pred = model.predict(self.X_test)
accuracy = accuracy_score(self.y_test, y_pred)
self.models.append((model, accuracy))
def init_RandomForestClassifier(self, improvement_threshold=0.03, not_improved_number=2):
best_model = None
best_accuracy = 0
not_improved = 0
i = 1
while not_improved < not_improved_number:
not_improved += 1
if self.verbose:
print(f"RFC: trying depth {i}, currently at acc {best_accuracy}")
model = RandomForestClassifier(max_depth=i, random_state=0)
model.fit(self.X_train, self.y_train)
y_pred = model.predict(self.X_val)
accuracy = accuracy_score(self.y_val, y_pred)
if accuracy > best_accuracy + improvement_threshold:
best_model = model
best_accuracy = accuracy
not_improved = 0
i += 2
y_pred = best_model.predict(self.X_test)
accuracy = accuracy_score(self.y_test, y_pred)
        self.models.append((best_model, accuracy))
def init_SGDClassifier(self):
model = SGDClassifier(max_iter=2000)
model.fit(self.X_train, self.y_train)
y_pred = model.predict(self.X_test)
accuracy = accuracy_score(self.y_test, y_pred)
self.models.append((model, accuracy))
def init_SVR(self, kernel='linear'):
model = SVR(kernel=kernel)
model.fit(self.X_train, self.y_train)
y_pred = model.predict(self.X_test)
cost = mse(self.y_test, y_pred)
self.models.append((model, cost))
def init_RandomForestRegressor(self, improvement_threshold=0.5, not_improved_number=2):
best_model = None
best_cost = float('inf')
not_improved = 0
i = 1
while not_improved < not_improved_number:
not_improved += 1
if self.verbose:
print(f"RFR: trying depth {i}, currently at acc {best_cost}")
model = RandomForestRegressor(max_depth=i, criterion="squared_error")
model.fit(self.X_train, self.y_train)
y_pred = model.predict(self.X_val)
cost = mse(self.y_val, y_pred)
if cost + improvement_threshold < best_cost:
best_model = model
best_cost = cost
not_improved = 0
i += 2
y_pred = best_model.predict(self.X_test)
cost = mse(self.y_test, y_pred)
self.models.append((best_model, cost))
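# Usage sketch (hypothetical CSV and column names; passing forceTask skips the OpenAI call,
# otherwise OPENAI_API_KEY must be set in the environment):
#
#     df = pd.read_csv("mobile_prices.csv")
#     daml = DAML(df, "classify phones by price range", verbose=1,
#                 forceTask=("classification (labelled)", "price_range"))
#     print(daml.selected_model, daml.models[0][1], daml.selected_model_url)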
| [] |
2024-01-10 | DARPA-ASKEM/mit-service | src~connect.py | import locale
import copy
from collections import OrderedDict
import json
import os
import re
import time
from typing import Any, List
import pandas as pd
import openai
from openai import OpenAIError
from tqdm import tqdm
import ast
import sys
from util import *
from gpt_interaction import *
from mira_dkg_interface import *
import os
import gpt_key
# Check if the environment variable exists and use it if it does
GPT_KEY = os.environ.get("GPT_KEY", getattr(gpt_key, 'GPT_KEY', None))
# from automates.program_analysis.JSON2GroMEt.json2gromet import json_to_gromet
# from automates.gromet.query import query
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
def index_text_path(text_path: str) -> str:
fw = open(text_path + "_idx", "w")
with open(text_path) as fp:
for i, line in enumerate(fp):
fw.write('%d\t%s' % (i, line))
fw.close()
return text_path + "_idx"
def index_text(text: str) -> str:
idx_text = ""
tlist = text.splitlines()
# print(tlist)
for i, line in enumerate(tlist):
if i==len(tlist)-1 and line== "":
break
idx_text = idx_text + ('%d\t%s\n' % (i, line))
return idx_text
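# Example (sketch): index_text("S = 990\nI = 10") returns "0\tS = 990\n1\tI = 10\n",
# i.e. each line prefixed with its zero-based index so GPT answers can cite line ranges.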
def get_gpt_match(prompt, key, model="text-davinci-002"):
# mykey = b'Z1QFxceGL_s6karbgfNFyuOdQ__m5TfHR7kuLPJChgs='
# enc = b'gAAAAABjRh0iNbsVb6_DKSHPmlg3jc4svMDEmKuYd-DcoTxEbESYI9F8tm8anjbsTsZYHz_avZudJDBdOXSHYZqKmhdoBcJd919hCffSMg6WFYP12hpvI7EeNppGFNoZsLGnDM5d6AOUeRVeIc2FbmB_j0vvcIwuEQ=='
# fernet = Fernet(mykey)
# openai.api_key = fernet.decrypt(enc).decode()
openai.api_key = key
response = openai.Completion.create(model=model, prompt=prompt, temperature=0.0, max_tokens=256)
result = response.choices[0].text.strip()
# print(result)
return result
def get_gpt4_match(prompt, key, model="gpt-4"):
openai.api_key = key
completion = openai.ChatCompletion.create(model=model, messages=[{"role": "user", "content": prompt}], temperature=0.0)
result = completion.choices[0].message.content
# print(result)
return result
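# Usage sketch (hypothetical prompt text; GPT_KEY comes from the environment or gpt_key.py):
#
#     reply = get_gpt_match(prompt, GPT_KEY, model="text-davinci-002")
#     reply4 = get_gpt4_match(prompt, GPT_KEY, model="gpt-4")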
def read_text_from_file(text_path):
text_file = open(text_path, "r")
prompt = text_file.read()
return prompt
# Get gpt-3 prompt with arizona-extraction, ontology terms and match targets
def get_prompt(vars, terms, target):
text_file = open("prompts/prompt.txt", "r")
prompt = text_file.read()
text_file.close()
vstr = ''
vlen = len(vars)
    for i, v in enumerate(vars, start=1):
        vstr += str(i) + " (" + str(v[1]) + ", " + str(v[2]) + ")\n"
# print(vstr)
tstr = '[' + ', '.join(terms) + ']'
tlen = len(terms)
# print(tstr)
prompt = prompt.replace("[VAR]", vstr)
prompt = prompt.replace("[VLEN]", str(vlen))
prompt = prompt.replace("[TERMS]", tstr)
prompt = prompt.replace("[TLEN]", str(tlen))
prompt = prompt.replace("[TARGET]", target)
# print(prompt)
return prompt
# Get gpt-3 prompt with formula, code terms and match formula targets
def get_code_formula_prompt(code, formula, target):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/code_formula_prompt.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[CODE]", code)
prompt = prompt.replace("[FORMULA]", formula)
prompt = prompt.replace("[TARGET]", target)
# print(prompt)
return prompt
# Get gpt-3 prompt with variable and datasets, and match variable target with the best data columns
def get_var_dataset_prompt(vars, dataset, target):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/var_dataset_prompt.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[DESC]", vars)
prompt = prompt.replace("[DATASET]", dataset)
prompt = prompt.replace("[TARGET]", target)
# print(prompt)
return prompt
def get_var_dataset_prompt_simplified(vars, dataset):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/var_dataset_prompt_simplified.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[DESC]", vars)
prompt = prompt.replace("[DATASET]", dataset)
# print(prompt)
return prompt
def get_rank_dkg_prompt(target, set):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/rank_dkg_terms.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[TARGET]", target)
prompt = prompt.replace("[SET]", set)
# print(prompt)
return prompt
#
def get_mit_arizona_var_prompt(mit, arizona):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/mit_arizona_var_prompt.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[MIT]", mit)
prompt = prompt.replace("[ARIZONA]", arizona)
# print(prompt)
return prompt
# Get gpt-3 prompt with formula, and match variable targets
def get_var_formula_prompt(desc, var):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/var_formula_prompt.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[DESC]", desc)
prompt = prompt.replace("[TARGET]", var)
# print(prompt)
return prompt
# Get gpt-3 prompt with formula, and match variable targets
def get_formula_var_prompt(formula):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/formula_var_prompt.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[FORMULA]", formula)
# print(prompt)
return prompt
def get_all_desc_formula_prompt(all_dsec, latex_var):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/all_desc_formula_prompt.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[DESC]", all_dsec)
prompt = prompt.replace("[TARGET]", latex_var)
return prompt
# Get gpt-3 prompt with formula, code terms and match formula targets
def get_code_text_prompt(code, text, target):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/code_text_prompt.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[CODE]", code)
prompt = prompt.replace("[TEXT]", text)
prompt = prompt.replace("[TARGET]", target)
# print(prompt)
return prompt
def get_text_param_prompt(text):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/text_param_prompt.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[TEXT]", text)
return prompt
def get_text_var_prompt(text):
text_file = open(os.path.join(os.path.dirname(__file__), 'prompts/text_var_prompt.txt'), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[TEXT]", text)
return prompt
# Get gpt-3 prompt with code, dataset and match function targets
def get_code_dataset_prompt(code, dataset, target):
text_file = open(os.path.join(os.path.dirname(__file__), "prompts/code_dataset_prompt.txt"), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[CODE]", code)
prompt = prompt.replace("[DATASET]", dataset)
prompt = prompt.replace("[TARGET]", target)
# print(prompt)
return prompt
def get_csv_doc_prompt(schema, stats, doc, dataset_name, doc_name):
text_file = open(os.path.join(os.path.dirname(__file__), "prompts/dataset_profiling_prompt.txt"), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[SCHEMA]", schema)
prompt = prompt.replace("[STATS]", json.dumps(stats))
prompt = prompt.replace("[DOC]", doc)
prompt = prompt.replace("[DATASET_NAME]", dataset_name)
prompt = prompt.replace("[DOC_NAME]", doc_name)
# print(prompt)
return prompt
def get_data_card_prompt(fields, doc, dataset_name, doc_name):
with open(os.path.join(os.path.dirname(__file__), "prompts/data_card_prompt.txt"), "r") as text_file:
prompt = text_file.read()
fields_str = '\n'.join([f"{f[0]}: {f[1]}" for f in fields])
prompt = prompt.replace("[FIELDS]", fields_str)
prompt = prompt.replace("[DOC]", doc)
prompt = prompt.replace("[DATASET_NAME]", dataset_name)
prompt = prompt.replace("[DOC_NAME]", doc_name)
return prompt
def get_model_card_prompt(fields, text, code):
with open(os.path.join(os.path.dirname(__file__), "prompts/model_card_prompt.txt"), "r") as text_file:
prompt = text_file.read()
fields_str = '\n'.join([f"{f[0]}: {f[1]}" for f in fields])
prompt = prompt.replace("[FIELDS]", fields_str)
prompt = prompt.replace("[TEXT]", text)
prompt = prompt.replace("[CODE]", code)
return prompt
def get_text_column_prompt(text, column):
text_file = open(os.path.join(os.path.dirname(__file__), "prompts/text_column_prompt.txt"), "r")
prompt = text_file.read()
text_file.close()
prompt = prompt.replace("[TEXT]", text)
prompt = prompt.replace("[COLUMN]", column)
return prompt
def get_variables(path):
    extracted = []
    with open(path) as myFile:
        for num, line in enumerate(myFile, 1):
            match = re.match(r'\s*(\S+)\s*=\s*([-+]?(?:\d*\.\d+|\d+))\s*', line)
            if match:
                para = match.group(1)
                val = match.group(2)
                # print(num, ",", para, ",", val)
                extracted.append((num, para, val))
    print("Extracted arizona-extraction: ", extracted)
    return extracted
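# Illustrative usage sketch (not part of the original module): exercise get_variables on a small
# temporary file. The parameter names and values below are made up for the example.
def _demo_get_variables():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write("beta = 0.05\ngamma = 1.4\nlabel = 'SIR'\n")
        path = tmp.name
    try:
        # Expected: [(1, 'beta', '0.05'), (2, 'gamma', '1.4')] -- 'label' has no numeric value, so it is skipped.
        return get_variables(path)
    finally:
        os.remove(path)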
def match_code_targets(targets, code_path, terms):
vars = get_variables(code_path)
vdict = {}
connection = []
for idx, v in enumerate(vars):
vdict[v[1]] = idx
for t in targets:
val = get_match(vars, terms, t)
connection.append((t, {val: "grometSubObject"}, float(vars[vdict[val]][2]), vars[vdict[val]][0]))
return connection
def ontology_code_connection():
terms = ['population', 'doubling time', 'recovery time', 'infectious time']
code = "model/SIR/CHIME_SIR_while_loop.py"
targets = ['population', 'infectious time']
val = []
try:
val = match_code_targets(targets, code, terms)
except OpenAIError as err:
print("OpenAI connection error:", err)
print("Using hard-coded connections")
val = [("infectious time", {"name": "grometSubObject"}, 14.0, 67),
("population", {"name": "grometSubObject"}, 1000, 80)]
print(val)
def code_text_connection(code, text, gpt_key, interactive = False):
code_str = code
idx_text = index_text(text)
tlist = text.splitlines()
targets = extract_func_names(code_str)
print(f"TARGETS: {targets}")
tups = []
try:
for t in targets:
prompt = get_code_text_prompt(code_str, idx_text, t)
match = get_gpt_match(prompt, gpt_key)
ilist = extract_ints(match)
ret_s = select_text(tlist, int(ilist[0]), int(ilist[-1]), 1, interactive)
if interactive:
print("Best description for python function {} is in lines {}-{}:".format(t, ilist[0], ilist[-1]))
print(ret_s)
print("---------------------------------------")
else:
tups.append((t, int(ilist[0]), int(ilist[-1]), ret_s))
return tups, True
except OpenAIError as err:
if interactive:
print("OpenAI connection error:", err)
else:
return f"OpenAI connection error: {err}", False
def code_dataset_connection(code, schema, gpt_key, interactive=False):
targets = extract_func_names(code)
tups = []
try:
for t in targets:
prompt = get_code_dataset_prompt(code, schema, t)
match = get_gpt_match(prompt, gpt_key)
returnable = match.split("function are the ")[1].split(" columns.")[0].split(" and ")
if interactive:
print(returnable)
print("---------------------------------------")
else:
tups.append((t, returnable))
return tups, True
except OpenAIError as err:
if interactive:
print("OpenAI connection error:", err)
else:
return f"OpenAI connection error: {err}",False
def text_column_connection(text, column, gpt_key):
try:
prompt = get_text_column_prompt(text, column)
match = get_gpt_match(prompt, gpt_key, model="text-davinci-003")
return match, True
except OpenAIError as err:
return f"OpenAI connection error: {err}",False
def rank_dkg_terms(target, concepts, gpt_key):
"""
Rank the concepts by their similarity to the target
:param target: Target concept json
:param concepts: List of candidate concepts in json
:return: List of ranked concepts
"""
prompt = get_rank_dkg_prompt(json.dumps(target), json.dumps(concepts))
match = get_gpt4_match(prompt, gpt_key, model="gpt-4")
rank = match.splitlines()
sorted = sort_dkg(rank, concepts)
return sorted, True
def sort_dkg(ranking, json_obj):
# Create a dictionary with the ranking as keys and the corresponding JSON objects as values
ranking_dict = {item[0]: item for item in json_obj if item[0] in ranking}
# Sort the dictionary based on the ranking list
sorted_dict = {k: ranking_dict[k] for k in ranking if k in ranking_dict}
# Convert the sorted dictionary back to a list of lists
sorted_json = list(sorted_dict.values())
return sorted_json
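# Illustrative usage sketch (not part of the original module): sort_dkg keeps only the candidates whose
# id appears in the GPT ranking, ordered by that ranking. The ids/names below are made-up placeholders.
def _demo_sort_dkg():
    candidates = [["id:001", "term A", "class"],
                  ["id:002", "term B", "class"],
                  ["id:003", "term C", "class"]]
    ranking = ["id:002", "id:001"]
    # Expected: [["id:002", "term B", "class"], ["id:001", "term A", "class"]] -- "id:003" is dropped.
    return sort_dkg(ranking, candidates)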
async def profile_matrix(data: List[List[Any]], doc, dataset_name, doc_name, gpt_key='', smart=False):
"""
    Profile a numeric matrix: compute summary statistics over all entries (no DKG grounding is attempted; the doc/name arguments are accepted for interface compatibility).
"""
if not data:
return f"Empty dataset input", False
if not all(all(isinstance(x, (int, float)) for x in row) for row in data):
return f"Matrix data must be all-numeric. Data was: {data}", False
# for matrices, we compute statistics across the entire matrix
df = pd.DataFrame(data)
df = df.stack()
stats = {
"mean": df.mean(),
"std": df.std(),
"min": df.min(),
"max": df.max(),
"quantile_25": df.quantile(0.25),
"quantile_50": df.quantile(0.5),
"quantile_75": df.quantile(0.75),
"num_null_entries": int(df.isnull().sum()),
"type": "numeric",
}
return json.dumps({'matrix_stats': stats}), True
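# Illustrative usage sketch (not part of the original module): profile_matrix is a coroutine, so it is
# driven here with asyncio.run. The 2x3 matrix and file names are made-up example inputs.
def _demo_profile_matrix():
    import asyncio
    data = [[1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0]]
    stats_json, ok = asyncio.run(profile_matrix(data, doc="example doc text",
                                                dataset_name="example.csv", doc_name="example.txt"))
    # stats_json is a JSON string holding a single 'matrix_stats' object (mean, std, min, max, quantiles, ...).
    return json.loads(stats_json), ok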
def dataset_header_dkg(cols, gpt_key=''):
"""
Grounding the column header to DKG terms
:param cols: List of column names
:return: Matches column name to DKG
"""
col_ant = {}
for col in cols:
print(f"looking up {col}")
results = []
results.extend(get_mira_dkg_term(col, ['id', 'name'],True))
# print(results)
seen = set()
for res in results:
if not res:
break
seen.add(res[0])
ans = get_gpt_match(f"What's the top 2 similar terms of \"{col}\" in epidemiology? Please list these terms separated by comma.", gpt_key, model="text-davinci-003")
print(f"relevant items found from GPT: {ans}")
for e in ans.split(","):
# print(e)
e = e.strip()
for res in get_mira_dkg_term(e, ['id', 'name', 'type'],True):
# print(res)
if not res:
break
if not res[0] in seen:
results.append(res)
seen.add(res[0])
col_ant[col] = results
return json.dumps(col_ant), True
from kgmatching import local_batch_get_mira_dkg_term
async def dataset_header_document_dkg(data, doc, dataset_name, doc_name, gpt_key='', smart=False):
"""
Grounding a dataset to a DKG
:param data: Dataset as a list of lists, including header and optionally a few rows
:param doc: Document string
:param dataset_name: Name of dataset
:param doc_name: Name of document
:param gpt_key: OpenAI API key
:return: Matches column name to DKG
"""
if not data:
return f"Empty dataset input", False
header = data[0]
data = data[1:]
schema = ','.join(header)
print("Getting stats")
stats = await _compute_tabular_statistics(data, header=header)
col_ant = OrderedDict()
prompt = get_csv_doc_prompt(schema, stats, doc, dataset_name, doc_name)
match = get_gpt4_match(prompt, gpt_key, model="gpt-4")
print("Got match")
print(match)
match = match.split('```')
if len(match) == 1:
match = match[0]
else:
match = match[1]
results = [s.strip() for s in match.splitlines()]
results = [s for s in results if s]
results = [[x.strip() for x in s.split("|")] for s in results]
results = [x for x in results if len(x) == 4]
if len(results) != len(header):
return f"Got different number of results ({len(results)}) than columns ({len(header)})", False
for res, col in zip(results, header):
col_ant[col] = {}
col_ant[col]["col_name"] = res[0]
col_ant[col]["concept"] = res[1]
col_ant[col]["unit"] = res[2]
col_ant[col]["description"] = res[3]
col_names = list(col_ant.keys())
col_concepts = [col_ant[col]["concept"] for col in col_ant]
col_descriptions = [col_ant[col]["description"] for col in col_ant]
terms = [ f'{col_name}: {col_concept} {col_description}' for (col_name, col_concept, col_description) in zip(col_names, col_concepts, col_descriptions) ]
matches0 = local_batch_get_mira_dkg_term(terms)
matches = [[[res['id'], res['name'], res['type']] for res in batch] for batch in matches0]
# # line up coroutines
# ops = [abatch_get_mira_dkg_term(col_names, ['id', 'name', 'type'], True),
# abatch_get_mira_dkg_term(col_concepts, ['id', 'name', 'type'], True),
# ]
# # let them all finish
# name_results, concept_results = await asyncio.gather(*ops)
for col, match in zip(col_names, matches):
col_ant[col]["dkg_groundings"] = match
if smart:
target = copy.deepcopy(col_ant[col])
del target["dkg_groundings"]
res=rank_dkg_terms(target, match, gpt_key)[0]
col_ant[col]["dkg_groundings"] = res
print(f"Smart grounding for {col}: {res}")
for col in col_ant:
col_ant[col]["column_stats"] = stats.get(col, {})
return json.dumps(col_ant), True
async def _compute_tabular_statistics(data: List[List[Any]], header):
"""
Compute summary statistics for a given tabular dataset.
:param data: Dataset as a list of lists
:return: Summary statistics as a dictionary
"""
csv_df = pd.DataFrame(data, columns=header)
null_counts = csv_df.isnull().sum(axis=0)
# first handle numeric columns
df = csv_df.describe()
df.drop('count', inplace=True) # we don't want the count row
# NaN and inf are not json serialiazable, so we replace them with strings
df.fillna('NaN', inplace=True)
df.replace(float('nan'), 'NaN', inplace=True) # better safe than sorry
df.replace(float('inf'), 'inf', inplace=True)
df.replace(float('-inf'), '-inf', inplace=True)
res = df.to_dict()
key_translations = {f"{x}%": f"quantile_{x}" for x in (25, 50, 75)}
for col in res.keys():
res[col]['type'] = 'numeric'
for k in list(res[col].keys()):
if k in key_translations:
res[col][key_translations[k]] = res[col].pop(k)
# try to infer date columns and convert them to datetime objects
date_cols = set()
df = csv_df.select_dtypes(include=['object'])
for col in df.columns:
try:
df[col] = pd.to_datetime(df[col])
date_cols.add(col)
except Exception:
continue
# then handle categorical columns, saving the top 10 most common values along with their counts
# (also do this for dates)
for col in df.columns:
res[col] = {'type': 'categorical'}
res[col]['most_common_entries'] = {}
# get top <=10 most common values along with their counts
counts = df[col].value_counts()
for i in range(min(10, len(counts))):
val = counts.index[i]
if col in date_cols:
val = val.isoformat()
            res[col]['most_common_entries'][val] = int(counts.iloc[i])
# get number of unique entries
res[col]['num_unique_entries'] = len(df[col].unique())
if col in date_cols:
res[col]['type'] = 'date'
res[col]['earliest'] = df[col].min().isoformat()
res[col]['latest'] = df[col].max().isoformat()
for col in res:
res[col]['num_null_entries'] = int(null_counts[col])
# make sure all column indices are strings
res = {str(k): v for k, v in res.items()}
return res
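# Illustrative usage sketch (not part of the original module): a tiny mixed-type table run through
# _compute_tabular_statistics. The column names and rows are made-up example data.
def _demo_compute_tabular_statistics():
    import asyncio
    header = ["date", "cases"]
    rows = [["2021-01-01", 10.0],
            ["2021-01-02", 12.0],
            ["2021-01-03", float("nan")]]
    # 'cases' is summarized as a numeric column, 'date' is detected as a date column,
    # and every column reports its number of null entries.
    return asyncio.run(_compute_tabular_statistics(rows, header=header))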
async def construct_data_card(data_doc, dataset_name, doc_name, dataset_type, gpt_key='', model="gpt-3.5-turbo-16k"):
"""
Constructing a data card for a given dataset and its description.
    :param data_doc: Documentation/description text accompanying the dataset
    :param dataset_name: Name of the dataset file
    :param doc_name: Name of the documentation file
    :param dataset_type: Dataset layout: 'header-0', 'no-header', or 'matrix'
:param gpt_key: OpenAI API key
:param model: OpenAI model to use
:return: Data card
"""
fields = [("DESCRIPTION", "Short description of the dataset (1-3 sentences)."),
("AUTHOR_NAME", "Name of publishing institution or author."),
("AUTHOR_EMAIL", "Email address for the author of this dataset."),
("DATE", "Date of publication of this dataset."),
("PROVENANCE", "Short (1 sentence) description of how the data was collected."),
("SENSITIVITY", "Is there any human-identifying information in the dataset?"),
("LICENSE", "License for this dataset."),
]
if dataset_type == 'no-header':
# also want GPT to fill in the schema
fields.append(("SCHEMA", "The dataset schema, as a comma-separated list of column names."))
elif dataset_type == 'matrix':
# instead of a schema, want to ask GPT to explain what a given (row, column) cell means
fields.append(("CELL_INTERPRETATION", "A brief description of what a given cell in the matrix represents (i.e. how to interpret the value at a given a row/column pair)."))
prompt = get_data_card_prompt(fields, data_doc, dataset_name, doc_name)
match = get_gpt4_match(prompt, gpt_key, model=model)
print(match)
results = OrderedDict([(f[0], "UNKNOWN") for f in fields])
for res in match.splitlines():
if res == "":
continue
res = res.split(":", 1)
if len(res) != 2:
continue
field, value = res
field = field.strip()
value = value.strip()
for f in fields:
if f[0] == field:
results[field] = value
break
return json.dumps(results), True
async def construct_model_card(text, code, gpt_key='', model="gpt-3.5-turbo-16k"):
"""
    Constructing a model card for a given model description and its code.
:param text: Model description (either model documentation or related paper)
:param code: Model code
:param gpt_key: OpenAI API key
:param model: OpenAI model to use
:return: Model card
"""
fields = [("DESCRIPTION", "Short description of the model (1 sentence)."),
("AUTHOR_INST", "Name of publishing institution."),
("AUTHOR_AUTHOR", "Name of author(s)."),
("AUTHOR_EMAIL", "Email address for the author of this model."),
("DATE", "Date of publication of this model."),
("SCHEMA", "Short description of the schema of inputs and outputs of the model (1 sentence)."),
("PROVENANCE", "Short description (1 sentence) of how the model was trained."),
("DATASET", "Short description (1 sentence) of what dataset was used to train the model."),
("COMPLEXITY", "The complexity of the model"),
("USAGE", "Short description (1 sentence) of the context in which the model should be used"),
("LICENSE", "License for this model."),
]
prompt = get_model_card_prompt(fields, text, code)
match = get_gpt4_match(prompt, gpt_key, model=model)
print(match)
results = OrderedDict([(f[0], "UNKNOWN") for f in fields])
for res in match.splitlines():
if res == "":
continue
res = res.split(":", 1)
if len(res) != 2:
continue
field, value = res
field = field.strip()
value = value.strip()
for f in fields:
if f[0] == field:
results[field] = value
break
return json.dumps(results), True
def select_text(lines, s, t, buffer, interactive=True):
ret_s = ""
start = s - buffer
end = t + buffer
if start < 0:
start = 0
if end >= len(lines):
end = len(lines) - 1
for i in range(start, end+1):
if i<=t and i>=s:
if interactive:
ret_s += ">>\t{}\t{}".format(i,lines[i])
else:
ret_s += lines[i]
elif interactive:
ret_s += "\t{}\t{}".format(i, lines[i])
return ret_s
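# Illustrative usage sketch (not part of the original module): select_text with interactive=False simply
# concatenates lines s..t, while interactive=True prefixes line numbers, marks the s..t lines with '>>',
# and includes `buffer` extra lines of context on each side. The lines below are made-up example text.
def _demo_select_text():
    lines = ["line zero\n", "line one\n", "line two\n", "line three\n", "line four\n"]
    plain = select_text(lines, 1, 2, buffer=1, interactive=False)     # "line one\nline two\n"
    annotated = select_text(lines, 1, 2, buffer=1, interactive=True)  # lines 0-3, with 1 and 2 marked '>>'
    return plain, annotated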
def code_formula_connection(code, formulas, gpt_key):
flist = formulas.splitlines()
matches = []
if flist[-1]=="":
del flist[-1]
try:
for t in flist:
prompt = get_code_formula_prompt(code, formulas, t)
match = get_gpt_match(prompt, gpt_key)
matches.append([t, match.split(":")[1]])
return matches, True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
def represents_int(s):
try:
int(s)
except ValueError:
return False
else:
return True
def vars_dataset_connection_simplified(json_str, dataset_str, gpt_key):
json_list = ast.literal_eval(json_str)
var_list = list(filter(lambda x: x["type"] == "variable", json_list))
all_desc_ls = [(var['id'] + ", " + var['name'] + ": " + var['text_annotations'][0]) for var in var_list]
all_desc = '\n'.join(all_desc_ls)
vs_data = {}
try:
prompt = get_var_dataset_prompt_simplified(all_desc, dataset_str)
print(prompt)
ans = get_gpt4_match(prompt, gpt_key, model="gpt-3.5-turbo-16k")
ans = ans.splitlines()
print(ans)
for item in ans:
toks = item.split(": ")
if len(toks) < 2:
continue
vid = toks[0]
cols = []
for ds in toks[1].split(", "):
data_col = ds.split("___")
if len(data_col) < 2:
continue
cols.append([data_col[0], data_col[1]])
vs_data[vid] = cols
print(cols)
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
for item in json_list:
if item["type"] != "variable":
continue
id = item["id"]
item["data_annotations"] = vs_data[id]
new_json_str = json.dumps(json_list)
return new_json_str, True
def vars_dataset_connection(json_str, dataset_str, gpt_key):
json_list = ast.literal_eval(json_str)
var_list = list(filter(lambda x: x["type"] == "variable", json_list))
all_desc_ls = [(var['name']+": "+var['text_annotations'][0]) for var in var_list]
all_desc = '\n'.join(all_desc_ls)
vs_data = {}
dataset_s = ""
datasets = dataset_str.splitlines()
dataset_name_dict = {}
i = 0
for d in tqdm(datasets):
toks = d.split(":")
if len(toks) != 2:
continue
name, cols = toks
cols = cols.split(",")
dataset_name_dict[i] = name
for col in cols:
dataset_s += str(i) + "___" + col.strip() + "\n"
i += 1
try:
for i in tqdm(range(len(all_desc_ls))):
prompt = get_var_dataset_prompt(all_desc, dataset_s, all_desc_ls[i])
print(prompt)
ans = get_gpt4_match(prompt, gpt_key, model="gpt-3.5-turbo")
ans = ans.splitlines()
print(ans)
for j in range(len(ans)):
toks = ans[j].split("___")
# print(toks)
if len(toks) < 2 or not represents_int(toks[0]):
ans[j] = ""
else:
d_name = dataset_name_dict[int(toks[0])]
col_name = "___".join(toks[k] for k in range(1, len(toks)))
ans[j] = [d_name, col_name]
vs_data[var_list[i]['id']] = ans
time.sleep(5)
# print(f"assigned value {ans} to key {var_list[i]['id']}")
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
for item in json_list:
if item["type"] != "variable":
continue
id = item["id"]
item["data_annotations"] = vs_data[id]
# matches_str = ",".join(
# [("\"" + var + "\":[\"" + "\",\"".join([str(item) for item in vs_data[var]]) + "\"]") for var in
# vs_data])
# s = ", {\"type\":\"datasetmap\""+ \
# ", \"id\":\"d" + str(hash(matches_str) % ((sys.maxsize + 1) * 2)) + \
# "\", \"matches\": " + json.dumps(vs_data) + " }]"
# new_json_str = json_str[:-1] + s
new_json_str = json.dumps(json_list)
return new_json_str, True
def vars_formula_connection(json_str, formula, gpt_key):
json_list = ast.literal_eval(json_str)
var_list = list(filter(lambda x: x["type"] == "variable", json_list))
prompt = get_formula_var_prompt(formula)
latex_vars = get_gpt_match(prompt, gpt_key, model="text-davinci-003")
latex_vars = latex_vars.split(':')[1].split(',')
latex_var_set = {}
all_desc_ls = [var['name'] for var in var_list]
all_desc = '\n'.join(all_desc_ls)
try:
for latex_var in tqdm(latex_vars):
prompt = get_all_desc_formula_prompt(all_desc, latex_var)
ans = get_gpt_match(prompt, gpt_key, model="text-davinci-003")
ans = ans.splitlines()
matches = []
for a in ans:
if a in all_desc_ls:
a_idx = all_desc_ls.index(a)
matches.append(var_list[a_idx]['id'])
latex_var_set[latex_var] = matches
# for desc in tqdm(var_list):
# prompt = get_var_formula_prompt(desc["name"], latex_var)
# ans = get_gpt_match(prompt, gpt_key, model="text-davinci-003")
# ans = ans.split(':')[1]
# if ans == 'YES':
# current_matches = latex_var_set.get(latex_var, [])
# current_matches.append(desc["id"])
# latex_var_set[latex_var] = current_matches
matches_str = ",".join([("\"" + var + "\" : [\"" + "\",\"".join([str(item) for item in latex_var_set[var]]) + "\"]") for var in latex_var_set])
s = ", {\"type\":\"equation\", \"latex\":" + formula + \
", \"id\":\"e" + str(hash(formula)%((sys.maxsize + 1) * 2))+\
"\", \"matches\": {" + matches_str + "} }]"
new_json_str = json_str[:-1] + s
return new_json_str, True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
DEFAULT_TERMS = ['population', 'doubling time', 'recovery time', 'infectious time']
DEFAULT_ATTRIBS = ['description', 'synonyms', 'xrefs', 'suggested_unit', 'suggested_data_type',
'physical_min', 'physical_max', 'typical_min', 'typical_max']
def code_dkg_connection(dkg_targets, gpt_key, ontology_terms=DEFAULT_TERMS, ontology_attribs=DEFAULT_ATTRIBS):
gromet_fn_module = json_to_gromet("gromet/CHIME_SIR_while_loop--Gromet-FN-auto.json")
nops = query.collect_named_output_ports(gromet_fn_module)
terms = list(build_local_ontology(ontology_terms, ontology_attribs).keys())
variables = set()
var_dict = {}
for nop in nops:
if nop[1] is not None:
variables.add(nop[0])
var_dict[nop[0]] = nop
vlist = []
for v in list(variables):
vlist.append((var_dict[v][2].to_dict()['line_begin'], v, var_dict[v][1].to_dict()['value']))
connection = []
for t in dkg_targets:
prompt = get_code_dkg_prompt(vlist, terms, t)
match = get_gpt_match(prompt, gpt_key)
val = match.split("(")[1].split(",")[0]
connection.append((t, {val: "grometSubObject"}, float(var_dict[val][1].to_dict()['value']),
var_dict[val][2].to_dict()['line_begin']))
print(connection)
return connection
def _is_numeric(s):
try:
locale.atof(s)
return True
except ValueError:
return False
def process_data(data: List[List[Any]]) -> List[List[Any]]:
"""
Convert all numeric values in a dataset to floats, casting empty strings to NaNs.
:param data: Dataset as a list of lists
:return: Dataset with all numeric values converted to floats
"""
def f(x):
if x == '':
return float('nan')
elif _is_numeric(x):
return locale.atof(x)
else:
return x
return [[f(x) for x in row] for row in data]
def get_dataset_type(first_row: List[Any]) -> str:
if all([_is_numeric(s) for s in first_row]):
return 'matrix'
elif any([_is_numeric(s) for s in first_row]):
return 'no-header'
else:
return 'header-0'
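# Illustrative usage sketch (not part of the original module): how the first CSV row decides the dataset type.
# The example rows are made up.
def _demo_get_dataset_type():
    assert get_dataset_type(["1.0", "2.5", "3"]) == "matrix"       # all numeric -> raw matrix
    assert get_dataset_type(["2021-01-01", "10"]) == "no-header"   # mixed -> data row without a header
    assert get_dataset_type(["date", "cases"]) == "header-0"       # nothing numeric -> header row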
if __name__ == "__main__":
dkg_str = """{
"col_name": "dates",
"concept": "Time",
"unit": "YYYY-MM-DD",
"description": "The date when the data was recorded",
"dkg_groundings": [
[
"hp:0001147",
"Retinal exudate"
],
[
"hp:0030496",
"Macular exudate"
],
[
"ncit:C114947",
"Postterm Infant"
],
[
"oae:0006126",
"retinal exudates AE"
],
[
"oae:0008293",
"large for dates baby AE"
],
[
"pato:0000165",
"time",
"class"
],
[
"gfo:Time",
"time",
"class"
],
[
"geonames:2365173",
"Maritime",
"individual"
],
[
"wikidata:Q174728",
"centimeter",
"class"
],
[
"probonto:k0000056",
"nondecisionTime",
"class"
]
]
}"""
json_obj = json.loads(dkg_str)
target = copy.deepcopy(json_obj)
del target["dkg_groundings"]
print(rank_dkg_terms(target, json_obj["dkg_groundings"], GPT_KEY))
# code_dkg_connection("population", "") # GPT key
# vars = read_text_from_file('../demos/2023-03-19/mar_demo_intermediate.json')
# dataset = read_text_from_file('../resources/dataset/headers.txt')
# match, _ = vars_dataset_connection(vars, dataset, GPT_KEY)
# print(match)
#
# res, yes = dataset_header_document_dkg("""dates,VAX_count,day,sdm,events,I_1,I_2,I_3,Y_1,Y_2,Y_3,V_1,V_2,V_3,Infected,Y,V,logV""",
# """Using wastewater surveillance as a continuous pooled sampling technique has been in place in many countries since the early stages of the outbreak of COVID-19. Since the beginning of the outbreak, many research works have emerged, studying different aspects of *viral SARS-CoV-2 DNA concentrations* (viral load) in wastewater and its potential as an early warning method. However, one of the questions that has remained unanswered is the quantitative relation between viral load and clinical indicators such as daily cases, deaths, and hospitalizations. Few studies have tried to couple viral load data with an epidemiological model to relate the number of infections in the community to the viral burden. We propose a **stochastic wastewater-based SEIR model** to showcase the importance of viral load in the early detection and prediction of an outbreak in a community. We built three models based on whether or not they use the case count and viral load data and compared their *simulations* and *forecasting* quality. We consider a stochastic wastewater-based epidemiological model with four compartments (hidden states) of susceptible (S), exposed (E), infectious (I), and recovered/removed (R).dRxiv} } """,GPT_KEY)
# print(res)
# col = "people"
# ans = get_gpt_match(
# f"What's the top 2 similar terms of \"{col}\" in epidemiology? Please list these terms separated by comma.",
# GPT_KEY, model="text-davinci-003")
# print(ans)
# for latex_var in match:
# print(latex_var, match[latex_var])
# print('\n') | [
"[DOC_NAME]",
"[DATASET_NAME]"
] |
2024-01-10 | DARPA-ASKEM/mit-service | src~methods.py | import tiktoken
import asyncio
from typing import List
import openai ## already inited
## assumes openai.api_key has been configured by the caller (fork_join_requests can also set it when api_key is passed)
from openai import OpenAIError
import re
# def clean_spaces(text):
# text1 = re.sub(' +', ' ', text)
# text2 = re.sub('\n+', '\n', text1)
# return text2
def strip_latex_preamble(text):
start = text.find('\\begin{document}')
end_marker = '\\end{document}'
end = text.find(end_marker)
end_len = len(end_marker)
return text[start:end+end_len]
def split_into_chunks(text_tokens, max_chunk_size_tokens : int):
"""
split tokens into chunks of at most max size tokens
"""
token_splits = []
curr_pos = 0
while curr_pos < len(text_tokens):
split = text_tokens[curr_pos:curr_pos + max_chunk_size_tokens]
token_splits.append(split)
curr_pos += max_chunk_size_tokens
assert sum(token_splits, []) == text_tokens
for c in token_splits:
assert len(c) <= max_chunk_size_tokens
return token_splits
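# Illustrative usage sketch (not part of the original module): splitting a token list into fixed-size
# chunks. The integers below stand in for tiktoken token ids.
def _demo_split_into_chunks():
    chunks = split_into_chunks(list(range(10)), max_chunk_size_tokens=4)
    # Expected: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
    return chunks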
def create_prompt_tasks(prompt, document, model_name, answer_token_length=256, chunk_token_length=None):
max_context_length = g_context_lengths[model_name]
if chunk_token_length is None:
chunk_token_length = max_context_length - answer_token_length
tokenizer = tiktoken.encoding_for_model(model_name)
pre_text, post_text = prompt.split('[TEXT]')
pre_tok, post_tok = tokenizer.encode_batch([pre_text, post_text])
available_length = chunk_token_length - len(pre_tok) - len(post_tok) - 2 # space before and after
text_tokens = tokenizer.encode(document)
assert available_length > 0
chunks = split_into_chunks(text_tokens, max_chunk_size_tokens=available_length)
text_tasks = tokenizer.decode_batch([pre_tok + chunk + post_tok for chunk in chunks])
return text_tasks
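# Illustrative usage sketch (not part of the original module): create_prompt_tasks expects a prompt
# template containing the literal '[TEXT]' placeholder; the template and document below are made up.
def _demo_create_prompt_tasks():
    prompt = "List the parameters mentioned in the text.\n[TEXT]\nAnswer:"
    document = "The transmission rate beta is 0.2 and the recovery rate gamma is 0.1. " * 50
    # Each returned string is the template with one document chunk substituted for [TEXT],
    # sized so that prompt + answer fit in the model's context window.
    return create_prompt_tasks(prompt, document, model_name="gpt-3.5-turbo")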
g_context_lengths = {
'text-davinci-002':4097,
'text-davinci-003':4097,
'gpt-3.5-turbo-16k':16000, ## may be slightly larger
'gpt-3.5-turbo':4097,
'gpt-4':8000,
}
g_use_completion_api = set(['gpt-3.5-turbo-16k','gpt-3.5-turbo', 'gpt-4'])
from langchain.text_splitter import LatexTextSplitter
def split_latex_into_chunks(document : str, # latex
prompt_template : str,
model_name : str | None, # knowing which tokenizer guarantees we dont exceed context length
max_total_size: int | None, # if not given, use max possible based on model
max_answer_size: int = 256,
chunk_overlap: int = 0):
if model_name is not None: # if know model, use tokenizer to guarantee lengths
max_context_length = g_context_lengths[model_name]
if max_total_size is None:
max_total_size = max_context_length
assert max_total_size <= max_context_length
tokenizer = tiktoken.encoding_for_model(model_name)
encoded_prompt = tokenizer.encode(prompt_template)
chunk_token_length = min(max_context_length - max_answer_size, max_total_size)
max_document_chunk_size = chunk_token_length - len(encoded_prompt)
document_chunks = LatexTextSplitter.from_tiktoken_encoder(model_name=model_name,
chunk_size=max_document_chunk_size,
chunk_overlap=chunk_overlap).split_text(document)
else: ## tokenizer info not given, then best effort based on character count
assert max_total_size is not None
document_chunks = LatexTextSplitter(chunk_size=max_total_size,
chunk_overlap=chunk_overlap).split_text(document)
return document_chunks
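# Illustrative usage sketch (not part of the original module): character-count-based splitting when no
# model/tokenizer is given. The LaTeX snippet is a made-up example.
def _demo_split_latex_into_chunks():
    latex = "\\section{Model}\nWe use an SIR model with transmission rate beta.\n" * 40
    chunks = split_latex_into_chunks(latex,
                                     prompt_template="Summarize:\n[TEXT]",
                                     model_name=None,
                                     max_total_size=500)
    # Each chunk is at most roughly 500 characters, split along LaTeX structure where possible.
    return chunks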
async def fork_join_requests(prompts, model : str, api_key : str = None):
"""
send one request per prompt
"""
acc = []
if api_key is not None:
openai.api_key = api_key
for prompt in prompts:
if model in g_use_completion_api:
# TODO: chat completions lets one split the prompt.
cor = openai.ChatCompletion.acreate(model=model,
messages=[
#{"role":"system", "content":TODO}
{"role": "user", "content": prompt},
],
temperature=0.0)
else:
cor = openai.Completion.acreate(model=model, prompt=prompt,
temperature=0.0, max_tokens=256)
acc.append(asyncio.create_task(cor))
outputs = []
for cor in acc:
# try: # no point in handling error here, just makes things confusing
response = await cor
# except OpenAIError as err:
# return f"OpenAI connection error: {err}", False
if model in g_use_completion_api:
result = response.choices[0].message.content.strip()
else:
result = response.choices[0].text.strip()
print('openai result:\t', result)
outputs.append(result)
return outputs | [] |
2024-01-10 | DARPA-ASKEM/mit-service | api~routers~cards.py | import csv
from math import isnan
import ast, io, random, sys, os
import asyncio
from typing import Optional, Union
from openai import OpenAIError
from fastapi import APIRouter, status, UploadFile, File
from fastapi.responses import JSONResponse
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)
from src.connect import construct_data_card, dataset_header_document_dkg, construct_model_card, profile_matrix, get_dataset_type, process_data
from src.response_types import MatrixDataCard, TabularDataCard, ModelCard
router = APIRouter()
@router.post("/get_data_card", tags=["Data-and-model-cards"], response_model=Union[MatrixDataCard, TabularDataCard])
async def get_data_card(gpt_key: str, csv_file: UploadFile = File(...), doc_file: UploadFile = File(...), smart: Optional[bool] = False) -> JSONResponse:
"""
Smart run provides better results but may result in slow response times as a consequence of extra GPT calls.
"""
files = [csv_file.read(), doc_file.read()]
_csv, doc = await asyncio.gather(*files)
_csv = _csv.decode().strip()
doc = doc.decode().strip()
# TODO handle inputs that are too long to fit in the context window
if len(_csv) == 0:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Empty CSV file")
if len(doc) == 0:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Empty document file")
csv_reader = csv.reader(io.StringIO(_csv), dialect=csv.Sniffer().sniff(_csv.splitlines()[-1]))
header = next(csv_reader) # can determine type from the header row
data_type = get_dataset_type(header)
if data_type == 'header-0':
schema = header
profiler = dataset_header_document_dkg
elif data_type == 'no-header':
# Probably best not to support this; the code path is poorly tested, and it's not clear what the expected behavior is.
# Either way, this should never come up in the Evaluation.
#schema = None
#profiler = dataset_header_dkg
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Invalid CSV file; no header found.")
elif data_type == 'matrix':
schema = None
profiler = profile_matrix
else:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Invalid CSV file; could not determine data type")
data = [header]
data.extend(csv_reader) # make sure header is included in data
data = process_data(data)
calls = [
construct_data_card(data_doc=doc, dataset_name=csv_file.filename, doc_name=doc_file.filename, dataset_type=data_type, gpt_key=gpt_key),
profiler(data=data, doc=doc, dataset_name=csv_file.filename, doc_name=doc_file.filename, gpt_key=gpt_key, smart=smart)
]
try:
results = await asyncio.gather(*calls)
except OpenAIError as err:
if "maximum context" in str(err):
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Input too long. Please reduce the size of your input.")
else:
return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content=f"OpenAI connection error: {err}")
for s, success in results:
if not success:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content=s)
data_card = ast.literal_eval(results[0][0])
data_profiling = ast.literal_eval(results[1][0])
if 'DATA_PROFILING_RESULT' in data_card:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content='DATA_PROFILING_RESULT cannot be a requested field in the data card.')
if data_type == 'header-0':
data_card['SCHEMA'] = schema
# get a random sample of a row from the csv
data_card['EXAMPLES'] = {k.strip(): v for k, v in zip(schema, random.sample(list(data[1:]), 1)[0])}
data_card['DATA_PROFILING_RESULT'] = data_profiling
elif data_type == 'no-header':
if 'SCHEMA' not in data_card:
return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content='SCHEMA not found in data card')
schema = [s.strip() for s in data_card['SCHEMA'].split(',')]
schema = [s[1:] if s.startswith('[') else s for s in schema]
schema = [s[:-1] if s.endswith(']') else s for s in schema]
aligned_data_profiling = {}
for k, v in data_profiling.items():
k = int(k)
k = schema[k]
aligned_data_profiling[k] = v
data_card['DATA_PROFILING_RESULT'] = aligned_data_profiling
data_card['EXAMPLES'] = {k.strip(): v for k, v in zip(schema, random.sample(list(data), 1)[0])}
elif data_type == 'matrix':
data_card['DATA_PROFILING_RESULT'] = data_profiling
data_card['EXAMPLES'] = random.sample(data, 1)[0]
else:
raise Exception('Invalid data type')
def _fill_nan(ex):
if isinstance(ex, dict):
for k, v in ex.items():
ex[k] = _fill_nan(v)
elif isinstance(ex, list):
for i in range(len(ex)):
ex[i] = _fill_nan(ex[i])
elif isinstance(ex, float) and isnan(ex):
ex = None
return ex
data_card['EXAMPLES'] = _fill_nan(data_card['EXAMPLES'])
data_card['DATASET_TYPE'] = "matrix" if data_type == 'matrix' else "tabular"
print(data_card)
return data_card
@router.post("/get_model_card", tags=["Data-and-model-cards"], response_model=ModelCard)
async def get_model_card(gpt_key: str, text_file: UploadFile = File(...), code_file: UploadFile = File(...)) -> JSONResponse:
files = [text_file.read(), code_file.read()]
text, code = await asyncio.gather(*files)
# process model text
text_string = text.decode()
# process code
code_string = code.decode()
try:
res, success = await construct_model_card(text=text_string, code=code_string, gpt_key=gpt_key)
except OpenAIError as err:
if "maximum context" in str(err):
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content="Input too long. Please reduce the size of your input.")
else:
return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content=f"OpenAI connection error: {err}")
if not success:
return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content=res)
model_card = ast.literal_eval(res)
return model_card
| [] |
2024-01-10 | DARPA-ASKEM/mit-service | src~gen_petri.py | from gpt_interaction import *
from openai import OpenAIError
import re
def get_places(text, gpt_key):
try:
prompt = get_petri_places_prompt(text)
match = get_gpt_match(prompt, gpt_key)
#print(match)
places = match.split(":")[-1].split(",")
return places, True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
def get_transitions(text, gpt_key):
try:
prompt = get_petri_transitions_prompt(text)
match = get_gpt_match(prompt, gpt_key)
#print(match)
places = match.split(":")[-1].split(",")
return places, True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
def get_arcs(text, gpt_key):
try:
prompt = get_petri_arcs_prompt(text)
match = get_gpt_match(prompt, gpt_key, "text-davinci-003")
#print(match)
lines = match.splitlines()
transitions = []
for line in lines:
words = [w.rstrip() for w in line.split("->")]
if (len(words) == 0):
continue
transitions.append(words)
return transitions, True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
def match_place_to_text(text, place, gpt_key):
try:
prompt = get_petri_match_place_prompt(text, place)
match = get_gpt_match(prompt, gpt_key)
#print(match)
#places = match.split(":")[-1].split(",")
return match, True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
def init_param_from_text(text, param, gpt_key):
try:
prompt = get_petri_init_param_prompt(text, param)
match = get_gpt_match(prompt, gpt_key)
return match.replace(")", ") ").split(" "), True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
def match_place_and_text_to_columns(place, text, columns, gpt_key):
try:
prompt = get_petri_match_dataset_prompt(place, text, columns)
match = get_gpt_match(prompt, gpt_key)
#print(match)
#places = match.split(":")[-1].split(",")
return match, True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
if __name__ == "__main__":
gpt_key = ""
with open("../resources/jan_hackathon_scenario_1/SEIRD/seird.py", "r") as f:
code = f.read()
with open("../resources/jan_hackathon_scenario_1/SEIRD/section2.txt", "r") as f:
text = f.read()
with open("../resources/jan_hackathon_scenario_1/SEIRD/sections34.txt", "r") as f:
text2 = f.read()
with open("../resources/dataset/headers.txt", "r") as f:
columns = f.read()[:3000]
places, s = get_places(code, gpt_key)
parameters, s = get_parameters(code, gpt_key)
transitions, s = get_transitions(code, gpt_key)
print(f"places:\t\t{places}\n------\n")
print(f"parameters:\t\t{parameters}\n------\n")
print(f"transitions:\t\t{transitions}\n------\n")
for place in places:
desc, s = match_place_to_text(text, place, gpt_key)
print(f"description of {place}: {desc}\n------\n")
cols, s = match_place_and_text_to_columns(place, text, columns, gpt_key)
print(f"Columns for {place}: {cols}\n------\n")
for param in parameters:
val, s = init_param_from_text(text2, param, gpt_key)
print(f"Initial value of {param}: {val}\n------\n") | [] |
2024-01-10 | DARPA-ASKEM/mit-service | src~ensemble~ensemble.py | import argparse
import json
from langchain.document_loaders import PyPDFLoader
from text_search import text_param_search, text_var_search
import os
import gpt_key
# Check if the environment variable exists and use it if it does
GPT_KEY = os.environ.get("GPT_KEY", getattr(gpt_key, 'GPT_KEY', None))
def extract_paper_info(input_json_file, output_json_file):
with open(input_json_file, 'r') as file:
json_data = json.load(file)
paper_info = []
for entry in json_data:
title = entry.get('title', 'N/A')
doi = 'N/A'
for identifier in entry.get('identifier', []):
if identifier.get('type') == 'doi':
doi = identifier.get('id')
break
url = 'N/A'
for link in entry.get('link', []):
if link.get('type') == 'publisher':
url = link.get('url')
break
paper_info.append({'title': title, 'doi': doi, 'url': url})
with open(output_json_file, 'w') as outfile:
json.dump(paper_info, outfile, indent=4)
def load_paper_info(json_file):
with open(json_file, 'r') as file:
json_data = json.load(file)
return json_data
def load_pdf(pdf_file, output_file):
loader = PyPDFLoader(pdf_file)
content = loader.load()
content_str = '\n'.join(str(item) for item in content) # Convert each element to a string and join them
with open(output_file, 'w', encoding='utf-8') as outfile:
outfile.write(content_str)
from methods import split_latex_into_chunks
# def async_extract_vars(in_path, out_dir):
# out_filename_params = out_dir + "/" + in_path.split("/")[-1].split(".txt")[0] + "_params.txt"
# out_filename_vars = out_dir + "/" + in_path.split("/")[-1].split(".txt")[0] + "_vars.txt"
# with open(in_path, "r") as fi, open(out_filename_params, "w+") as fop, open(out_filename_vars, "w+") as fov:
# text = fi.read()
# chunks = split_latex_into_chunks(document=)
# @profile
def extract_vars(in_path, out_dir):
out_filename_params = out_dir + "/" + in_path.split("/")[-1].split(".txt")[0] + "_params.txt"
out_filename_vars = out_dir + "/" + in_path.split("/")[-1].split(".txt")[0] + "_vars.txt"
with open(in_path, "r") as fi, open(out_filename_params, "w+") as fop, open(out_filename_vars, "w+") as fov:
text = fi.read()
length = len(text)
segments = int(length / 3500 + 1)
for i in range(segments):
snippet = text[i * 3500: (i + 1) * 3500]
# output, success = text_param_search(snippet, GPT_KEY)
# if success:
# print("OUTPUT (params): " + output + "\n------\n")
# if output != "None":
# fop.write(output + "\n")
output, success = text_var_search(snippet, GPT_KEY)
if success:
print("OUTPUT (vars): " + output + "\n------\n")
if output != "None":
fov.write(output + "\n")
def extract_variables(text_file, output_dir):
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--in_path", type=str, default=text_file)
parser.add_argument("-o", "--out_dir", type=str, default=output_dir)
args = parser.parse_args()
out_filename_params = args.out_dir + "/" + args.in_path.split("/")[-1].split(".txt")[0] + "_params.txt"
out_filename_vars = args.out_dir + "/" + args.in_path.split("/")[-1].split(".txt")[0] + "_vars.txt"
with open(args.in_path, "r") as fi, open(out_filename_params, "w+") as fop, open(out_filename_vars, "w+") as fov:
text = fi.read()
length = len(text)
segments = int(length / 3500 + 1)
for i in range(segments):
snippet = text[i * 3500: (i + 1) * 3500]
output, success = text_param_search(snippet, GPT_KEY)
if success:
print("OUTPUT (params): " + output + "\n------\n")
if output != "None":
fop.write(output + "\n")
output, success = text_var_search(snippet, GPT_KEY)
if success:
print("OUTPUT (vars): " + output + "\n------\n")
if output != "None":
fov.write(output + "\n")
def emsemble(json_file, data_list):
json_data = load_paper_info(json_file)
if __name__=="__main__":
print("run main")
# load_pdf("../../resources/xDD/paper/Time-Varying COVID-19 Reproduction Number in the United States.pdf",
# "../../resources/xDD/paper/Time-Varying COVID-19 Reproduction Number in the United States.txt")
extract_variables("../../resources/xDD/paper/COVID-19 Vaccine Effectiveness by Product and Timing in New York State.txt", "/Users/chunwei/research/mitaskem/resources/xDD/params")
# input_json_file = '../../resources/xDD/documents_mentioning_starterkit_data.bibjson'
# output_json_file = '../../resources/xDD/xdd_paper.json'
# extract_paper_info(input_json_file, output_json_file)
| [] |
2024-01-10 | DARPA-ASKEM/mit-service | src~text_search.py | from gpt_interaction import *
from openai import OpenAIError
from connect import *
import argparse
from mira_dkg_interface import *
import os
import gpt_key
# Check if the environment variable exists and use it if it does
GPT_KEY = os.environ.get("GPT_KEY", getattr(gpt_key, 'GPT_KEY', None))
MAX_TEXT_MATCHES = 2
MAX_DKG_MATCHES = 2
def text_param_search(text, gpt_key):
try:
prompt = get_text_param_prompt(text)
match = get_gpt_match(prompt, gpt_key, "text-davinci-003")
return match, True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
def text_var_search(text, gpt_key):
try:
prompt = get_text_var_prompt(text)
match = get_gpt_match(prompt, gpt_key, "text-davinci-003")
return match, True
except OpenAIError as err:
return f"OpenAI connection error: {err}", False
def vars_dedup(text:str) -> dict:
var_dict = {}
lines = text.splitlines()
# Build dictionary, deduplicating along the way
for line in lines:
line = line.strip()
toks = [t.strip() for t in line.split("|")]
if len(toks) == 1 or line == "name | description | numerical value":
continue
var_name = toks[0]
var_desc = toks[1]
if var_name not in var_dict:
var_dict[var_name] = {'description':[], 'value':None}
desc_list = var_dict[var_name]['description']
desc_list.append(var_desc)
var_dict[var_name]['description'] = desc_list
if len(toks) > 2:
var_val = toks[2]
print(var_name, 'found value', var_val)
if var_val != "None":
var_dict[var_name]['value'] = var_val
return var_dict
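# Illustrative usage sketch (not part of the original module): vars_dedup merges duplicate variable rows
# from the GPT table output. The two 'beta' rows below are a made-up example of that format.
def _demo_vars_dedup():
    gpt_table = (
        "name | description | numerical value\n"
        "beta | transmission rate | 0.2\n"
        "beta | infection rate per contact | None\n"
    )
    # Expected: {'beta': {'description': ['transmission rate', 'infection rate per contact'], 'value': '0.2'}}
    return vars_dedup(gpt_table)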
def vars_to_json(var_dict: dict) -> str:
s_out = "["
is_first = True
id = 0
batch_var_ground = batch_get_mira_dkg_term(var_dict.keys(), ['id', 'name'])
for var_name, var_ground in zip(var_dict, batch_var_ground):
var_defs_s = "[\"" + '\",\"'.join(i for i in var_dict[var_name][:MAX_TEXT_MATCHES]) + "\"]"
#var_ground = get_mira_dkg_term(var_name, ['id', 'name'])
var_ground = var_ground[:MAX_DKG_MATCHES]
var_ground_s = "[" + ",".join([("[\"" + "\",\"".join([str(item) for item in sublist]) + "\"]") for sublist in var_ground]) + "]"
if is_first:
is_first = False
else:
s_out += ","
s_out += "{\"type\" : \"variable\", \"name\": \"" + var_name \
+ "\", \"id\" : \"mit" + str(id) + "\", \"text_annotations\": " + var_defs_s \
+ ", \"dkg_annotations\" : " + var_ground_s + "}"
id += 1
s_out += "]"
return s_out
from kgmatching import local_batch_get_mira_dkg_term
import json
async def avars_to_json(var_dict: dict) -> str:
s_out = "["
is_first = True
id = 0
term_list = []
meta_list = []
for (term_name,term_desc) in var_dict.items():
gpt_desc = term_desc['description'][0]
term_list.append(term_name +':' + gpt_desc)
meta_list.append({'llm_output_name':term_name,
'llm_output_desc':gpt_desc})
batch_var_ground0 = local_batch_get_mira_dkg_term(term_list)
pretty_var0 = json.dumps(batch_var_ground0, indent=2)
print(f'batch_var_ground\n{pretty_var0}')
# batch_var_ground = await abatch_get_mira_dkg_term(var_dict.keys(), ['id', 'name'])
# pretty_var = json.dumps(batch_var_ground, indent=2)
# print(f'batch_var_ground\n{pretty_var}')
batch_var_ground = [[[res['id'], res['name']] for res in batch] for batch in batch_var_ground0]
for var_name, var_ground in zip(var_dict, batch_var_ground):
var_defs_s = "[\"" + '\",\"'.join(i for i in var_dict[var_name]['description'][:MAX_TEXT_MATCHES]) + "\"]"
#var_ground = get_mira_dkg_term(var_name, ['id', 'name'])
var_ground = var_ground[:MAX_DKG_MATCHES]
var_ground_s = "[" + ",".join([("[\"" + "\",\"".join([str(item) for item in sublist]) + "\"]") for sublist in var_ground]) + "]"
if is_first:
is_first = False
else:
s_out += ","
s_out += "{\"type\" : \"variable\", \"name\": \"" + var_name \
+ "\", \"id\" : \"mit" + str(id) + "\", \"text_annotations\": " + var_defs_s \
+ ", \"dkg_annotations\" : " + var_ground_s
if var_dict[var_name]['value']:
s_out += ", \"value\" : \"" + var_dict[var_name]['value'] + "\""
s_out += "}"
id += 1
s_out += "]"
return s_out
def main(args):
out_filename_params = args.out_dir + "/" + args.in_path.split("/")[-1].split(".")[0] + "_params.txt"
out_filename_vars = args.out_dir + "/" + args.in_path.split("/")[-1].split(".")[0] + "_vars.txt"
with open(args.in_path, "r") as fi, open(out_filename_params, "w+") as fop, open(out_filename_vars, "w+") as fov:
text = fi.read()
length = len(text)
segments = int(length/3500 + 1)
for i in range(segments):
snippet = text[i * 3500: (i+1) * 3500]
output, success = text_param_search(snippet, GPT_KEY)
if success:
print("OUTPUT (params): " + output + "\n------\n")
if output != "None":
fop.write(output + "\n")
output, success = text_var_search(snippet, GPT_KEY)
if success:
print("OUTPUT (vars): " + output + "\n------\n")
if output != "None":
fov.write(output + "\n")
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--in_path", type=str)
parser.add_argument("-o", "--out_dir", type=str, default="resources/jan_evaluation/cosmos_params")
args = parser.parse_args()
main(args)
| [] |
2024-01-10 | DARPA-ASKEM/mit-service | src~kgmatching.py | import pandas as pd
import numpy as np
from langchain.retrievers import TFIDFRetriever
from langchain.schema import Document
def make_name_doc(tup):
(name_str, synonym_string) = tup
if synonym_string == '':
syns = []
else:
syns = synonym_string.split(';')
if name_str == '':
name = []
else:
name = [name_str]
keywords = name + syns
doc = ';'.join(keywords)
return doc
def make_desc_doc(tup):
(name_doc, name_desc) = tup
if name_doc != '' and name_desc != '':
return f'{name_doc}: {name_desc}'
elif name_doc != '':
return name_doc
elif name_desc != '':
return name_desc
else:
return ''
def build_node_retriever(kg_node_tsv_path, limit):
print(f'building index for epi KG from {kg_node_tsv_path}')
df = pd.read_csv(kg_node_tsv_path, delimiter='\t')
df = df.rename({'name:string':'name', 'synonyms:string[]':'synonyms', 'id:ID':'id', 'description:string':'description', 'type:string':'type'}, axis=1)
df = df.assign(**(df[['name', 'synonyms', 'description']].fillna('')))
df = df.assign(name_doc = df[['name', 'synonyms']].apply(tuple, axis=1).map(make_name_doc))
df = df.assign(desc_doc = df[['name_doc', 'description']].apply(tuple, axis=1).map(make_desc_doc))
cleandf = df[~(df.desc_doc == '')]
docs = cleandf['desc_doc'].values.tolist()
metas = cleandf[['name', 'synonyms', 'id', 'description', 'type']].apply(dict, axis=1).values.tolist()
as_docs = [Document(page_content=doc_search, metadata=meta) for (doc_search, meta) in zip(docs, metas)]
retriever = TFIDFRetriever.from_documents(as_docs, k=limit)
print('done building index')
return retriever
## curl -o epi_2023-07-07_nodes.tsv.gz https://askem-mira.s3.amazonaws.com/dkg/epi/build/2023-07-07/nodes.tsv.gz
## unzip epi_2023-07-07_nodes.tsv
import os
g_kgpath = os.path.dirname(__file__) + '/../epi_2023-07-07_nodes.tsv'
g_retriever = build_node_retriever(g_kgpath, limit=4)
from typing import List
def local_batch_get_mira_dkg_term(term_list : List[str]) -> List[dict]:
batch_ans = []
for term in term_list:
docs = g_retriever.get_relevant_documents(term)
ansdocs = []
for doc in docs:
meta = {}
meta.update(doc.metadata)
ansdocs.append(meta)
batch_ans.append(ansdocs)
return batch_ans | [] |
2024-01-10 | lonngxiang/LLM-RAG-WEB | split.py |
from typing import List
import re
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
def split_text(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", "\n", text)
text = re.sub('\s', ' ', text)
text = text.replace("\n\n", "")
sent_sep_pattern = re.compile(
'([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')
sent_list = []
for ele in sent_sep_pattern.split(text):
if sent_sep_pattern.match(ele) and sent_list:
sent_list[-1] += ele
elif ele:
sent_list.append(ele)
return sent_list
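# Illustrative usage sketch (not part of the original module): sentence-level splitting of a short
# Chinese string. The text is a made-up example.
def _demo_chinese_text_splitter():
    splitter = ChineseTextSplitter(pdf=False)
    # Expected: one element per sentence, each keeping its ending punctuation.
    return splitter.split_text("模型描述如下。感染率很低!详见第二节?")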
def load_file(filepath):
print("filepath:",filepath)
if filepath.endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode="elements")
docs = loader.load()
elif filepath.endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return loader,docs | [] |
2024-01-10 | codebytes/azure-search-openai-demo | scripts~prepdocs.py | import argparse
import base64
import glob
import html
import io
import os
import re
import time
import json
import openai
import tiktoken
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential
from azure.identity import AzureDeveloperCliCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
HnswParameters,
PrioritizedFields,
SearchableField,
SearchField,
SearchFieldDataType,
SearchIndex,
SemanticConfiguration,
SemanticField,
SemanticSettings,
SimpleField,
VectorSearch,
VectorSearchAlgorithmConfiguration,
)
from azure.storage.blob import BlobServiceClient
from pypdf import PdfReader, PdfWriter
from tenacity import (
retry,
retry_if_exception_type,
stop_after_attempt,
wait_random_exponential,
)
args = argparse.Namespace(verbose=False, openaihost="azure")
MAX_SECTION_LENGTH = 1000
SENTENCE_SEARCH_LIMIT = 100
SECTION_OVERLAP = 100
open_ai_token_cache = {}
CACHE_KEY_TOKEN_CRED = "openai_token_cred"
CACHE_KEY_CREATED_TIME = "created_time"
CACHE_KEY_TOKEN_TYPE = "token_type"
# Embedding batch support section
SUPPORTED_BATCH_AOAI_MODEL = {"text-embedding-ada-002": {"token_limit": 8100, "max_batch_size": 16}}
def calculate_tokens_emb_aoai(input: str):
encoding = tiktoken.encoding_for_model(args.openaimodelname)
return len(encoding.encode(input))
def blob_name_from_file_page(filename, page=0):
if os.path.splitext(filename)[1].lower() == ".pdf":
return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf"
else:
return os.path.basename(filename)
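# Illustrative usage sketch (not part of the original script): blob naming for paged PDFs versus other
# files. The file names below are made-up examples.
def _demo_blob_name_from_file_page():
    assert blob_name_from_file_page("data/Benefit_Options.pdf", page=2) == "Benefit_Options-2.pdf"
    assert blob_name_from_file_page("data/employee_handbook.md") == "employee_handbook.md"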
def upload_blobs(filename):
blob_service = BlobServiceClient(
account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds
)
blob_container = blob_service.get_container_client(args.container)
if not blob_container.exists():
blob_container.create_container()
# if file is PDF split into pages and upload each page as a separate blob
if os.path.splitext(filename)[1].lower() == ".pdf":
reader = PdfReader(filename)
pages = reader.pages
for i in range(len(pages)):
blob_name = blob_name_from_file_page(filename, i)
if args.verbose:
print(f"\tUploading blob for page {i} -> {blob_name}")
f = io.BytesIO()
writer = PdfWriter()
writer.add_page(pages[i])
writer.write(f)
f.seek(0)
blob_container.upload_blob(blob_name, f, overwrite=True)
else:
blob_name = blob_name_from_file_page(filename)
with open(filename, "rb") as data:
blob_container.upload_blob(blob_name, data, overwrite=True)
def remove_blobs(filename):
if args.verbose:
print(f"Removing blobs for '{filename or '<all>'}'")
blob_service = BlobServiceClient(
account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds
)
blob_container = blob_service.get_container_client(args.container)
if blob_container.exists():
if filename is None:
blobs = blob_container.list_blob_names()
else:
prefix = os.path.splitext(os.path.basename(filename))[0]
blobs = filter(
lambda b: re.match(f"{prefix}-\d+\.pdf", b),
blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]),
)
for b in blobs:
if args.verbose:
print(f"\tRemoving blob {b}")
blob_container.delete_blob(b)
def table_to_html(table):
table_html = "<table>"
rows = [
sorted([cell for cell in table.cells if cell.row_index == i], key=lambda cell: cell.column_index)
for i in range(table.row_count)
]
for row_cells in rows:
table_html += "<tr>"
for cell in row_cells:
tag = "th" if (cell.kind == "columnHeader" or cell.kind == "rowHeader") else "td"
cell_spans = ""
if cell.column_span > 1:
cell_spans += f" colSpan={cell.column_span}"
if cell.row_span > 1:
cell_spans += f" rowSpan={cell.row_span}"
table_html += f"<{tag}{cell_spans}>{html.escape(cell.content)}</{tag}>"
table_html += "</tr>"
table_html += "</table>"
return table_html
def get_document_text(filename):
offset = 0
page_map = []
if os.path.splitext(filename)[1].lower() == ".pdf":
if args.localpdfparser:
reader = PdfReader(filename)
pages = reader.pages
for page_num, p in enumerate(pages):
page_text = p.extract_text()
page_map.append((page_num, offset, page_text))
offset += len(page_text)
else:
if args.verbose:
print(f"Extracting text from '{filename}' using Azure Form Recognizer")
form_recognizer_client = DocumentAnalysisClient(
endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/",
credential=formrecognizer_creds,
headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"},
)
with open(filename, "rb") as f:
poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document=f)
form_recognizer_results = poller.result()
for page_num, page in enumerate(form_recognizer_results.pages):
tables_on_page = [
table
for table in form_recognizer_results.tables
if table.bounding_regions[0].page_number == page_num + 1
]
# mark all positions of the table spans in the page
page_offset = page.spans[0].offset
page_length = page.spans[0].length
table_chars = [-1] * page_length
for table_id, table in enumerate(tables_on_page):
for span in table.spans:
# replace all table spans with "table_id" in table_chars array
for i in range(span.length):
idx = span.offset - page_offset + i
if idx >= 0 and idx < page_length:
table_chars[idx] = table_id
# build page text by replacing characters in table spans with table html
page_text = ""
added_tables = set()
for idx, table_id in enumerate(table_chars):
if table_id == -1:
page_text += form_recognizer_results.content[page_offset + idx]
elif table_id not in added_tables:
page_text += table_to_html(tables_on_page[table_id])
added_tables.add(table_id)
page_text += " "
page_map.append((page_num, offset, page_text))
offset += len(page_text)
elif os.path.splitext(filename)[1].lower() == ".json":
with open(filename, "r") as f:
data = json.load(f)
if isinstance(data, list):
for i, obj in enumerate(data):
page_text = json.dumps(data[i])
page_map.append((i, offset, page_text))
offset += len(page_text)
elif isinstance(data, dict):
page_map.append((1, 0, json.dumps(data)))
else:
raise ValueError("Invalid JSON format")
return page_map
def split_text(page_map, filename):
SENTENCE_ENDINGS = [".", "!", "?"]
WORDS_BREAKS = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"]
if args.verbose:
print(f"Splitting '{filename}' into sections")
def find_page(offset):
num_pages = len(page_map)
for i in range(num_pages - 1):
if offset >= page_map[i][1] and offset < page_map[i + 1][1]:
return i
return num_pages - 1
all_text = "".join(p[2] for p in page_map)
length = len(all_text)
start = 0
end = length
while start + SECTION_OVERLAP < length:
last_word = -1
end = start + MAX_SECTION_LENGTH
if end > length:
end = length
else:
# Try to find the end of the sentence
while (
end < length
and (end - start - MAX_SECTION_LENGTH) < SENTENCE_SEARCH_LIMIT
and all_text[end] not in SENTENCE_ENDINGS
):
if all_text[end] in WORDS_BREAKS:
last_word = end
end += 1
if end < length and all_text[end] not in SENTENCE_ENDINGS and last_word > 0:
end = last_word # Fall back to at least keeping a whole word
if end < length:
end += 1
# Try to find the start of the sentence or at least a whole word boundary
last_word = -1
while (
start > 0
and start > end - MAX_SECTION_LENGTH - 2 * SENTENCE_SEARCH_LIMIT
and all_text[start] not in SENTENCE_ENDINGS
):
if all_text[start] in WORDS_BREAKS:
last_word = start
start -= 1
if all_text[start] not in SENTENCE_ENDINGS and last_word > 0:
start = last_word
if start > 0:
start += 1
section_text = all_text[start:end]
yield (section_text, find_page(start))
last_table_start = section_text.rfind("<table")
if last_table_start > 2 * SENTENCE_SEARCH_LIMIT and last_table_start > section_text.rfind("</table"):
# If the section ends with an unclosed table, we need to start the next section with the table.
# If table starts inside SENTENCE_SEARCH_LIMIT, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH
# If last table starts inside SECTION_OVERLAP, keep overlapping
if args.verbose:
print(
f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}"
)
start = min(end - SECTION_OVERLAP, start + last_table_start)
else:
start = end - SECTION_OVERLAP
if start + SECTION_OVERLAP < end:
yield (all_text[start:end], find_page(start))
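# Illustrative example of filename_to_id (not taken from a real run): for "Q1 Review.pdf"
# the sanitized part becomes "Q1_Review_pdf" and the suffix is the base16 (uppercase hex)
# encoding of the UTF-8 bytes of the full filename, so the id has the form
#   file-Q1_Review_pdf-<hex>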
def filename_to_id(filename):
filename_ascii = re.sub("[^0-9a-zA-Z_-]", "_", filename)
filename_hash = base64.b16encode(filename.encode("utf-8")).decode("ascii")
return f"file-{filename_ascii}-{filename_hash}"
def create_sections(filename, page_map, use_vectors, embedding_deployment: str = None, embedding_model: str = None):
file_id = filename_to_id(filename)
for i, (content, pagenum) in enumerate(split_text(page_map, filename)):
section = {
"id": f"{file_id}-page-{i}",
"content": content,
"category": args.category,
"sourcepage": blob_name_from_file_page(filename, pagenum),
"sourcefile": filename,
}
if use_vectors:
section["embedding"] = compute_embedding(content, embedding_deployment, embedding_model)
yield section
def before_retry_sleep(retry_state):
if args.verbose:
print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
@retry(
retry=retry_if_exception_type(openai.error.RateLimitError),
wait=wait_random_exponential(min=15, max=60),
stop=stop_after_attempt(15),
before_sleep=before_retry_sleep,
)
def compute_embedding(text, embedding_deployment, embedding_model):
refresh_openai_token()
embedding_args = {"deployment_id": embedding_deployment} if args.openaihost == "azure" else {}
return openai.Embedding.create(**embedding_args, model=embedding_model, input=text)["data"][0]["embedding"]
@retry(
retry=retry_if_exception_type(openai.error.RateLimitError),
wait=wait_random_exponential(min=15, max=60),
stop=stop_after_attempt(15),
before_sleep=before_retry_sleep,
)
def compute_embedding_in_batch(texts):
refresh_openai_token()
embedding_args = {"deployment_id": args.openaideployment} if args.openaihost == "azure" else {}
emb_response = openai.Embedding.create(**embedding_args, model=args.openaimodelname, input=texts)
return [data.embedding for data in emb_response.data]
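# Both embedding helpers above are wrapped in tenacity retries: on an OpenAI RateLimitError
# they wait a randomized exponential backoff bounded between 15 and 60 seconds, give up
# after 15 attempts, and print a notice via before_retry_sleep when verbose output is on.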
def create_search_index():
if args.verbose:
print(f"Ensuring search index {args.index} exists")
index_client = SearchIndexClient(
endpoint=f"https://{args.searchservice}.search.windows.net/", credential=search_creds
)
if args.index not in index_client.list_index_names():
index = SearchIndex(
name=args.index,
fields=[
SimpleField(name="id", type="Edm.String", key=True),
SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"),
SearchField(
name="embedding",
type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
hidden=False,
searchable=True,
filterable=False,
sortable=False,
facetable=False,
vector_search_dimensions=1536,
vector_search_configuration="default",
),
SimpleField(name="category", type="Edm.String", filterable=True, facetable=True),
SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True),
SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True),
],
semantic_settings=SemanticSettings(
configurations=[
SemanticConfiguration(
name="default",
prioritized_fields=PrioritizedFields(
title_field=None, prioritized_content_fields=[SemanticField(field_name="content")]
),
)
]
),
vector_search=VectorSearch(
algorithm_configurations=[
VectorSearchAlgorithmConfiguration(
name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine")
)
]
),
)
if args.verbose:
print(f"Creating {args.index} search index")
index_client.create_index(index)
else:
if args.verbose:
print(f"Search index {args.index} already exists")
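# update_embeddings_in_batch (below) groups sections into batches bounded by the token limit
# and max batch size recorded for the chosen model in SUPPORTED_BATCH_AOAI_MODEL, makes one
# embeddings call per batch, and then re-yields every section with its "embedding" field
# filled in from the batched responses.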
def update_embeddings_in_batch(sections):
batch_queue = []
copy_s = []
batch_response = {}
token_count = 0
for s in sections:
token_count += calculate_tokens_emb_aoai(s["content"])
if (
token_count <= SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["token_limit"]
and len(batch_queue) < SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]["max_batch_size"]
):
batch_queue.append(s)
copy_s.append(s)
else:
emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue])
if args.verbose:
print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}")
for emb, item in zip(emb_responses, batch_queue):
batch_response[item["id"]] = emb
batch_queue = []
batch_queue.append(s)
token_count = calculate_tokens_emb_aoai(s["content"])
if batch_queue:
emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue])
if args.verbose:
print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}")
for emb, item in zip(emb_responses, batch_queue):
batch_response[item["id"]] = emb
for s in copy_s:
s["embedding"] = batch_response[s["id"]]
yield s
def index_sections(filename, sections):
if args.verbose:
print(f"Indexing sections from '{filename}' into search index '{args.index}'")
search_client = SearchClient(
endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds
)
i = 0
batch = []
for s in sections:
batch.append(s)
i += 1
if i % 1000 == 0:
results = search_client.upload_documents(documents=batch)
succeeded = sum([1 for r in results if r.succeeded])
if args.verbose:
print(f"\tIndexed {len(results)} sections, {succeeded} succeeded")
batch = []
if len(batch) > 0:
results = search_client.upload_documents(documents=batch)
succeeded = sum([1 for r in results if r.succeeded])
if args.verbose:
print(f"\tIndexed {len(results)} sections, {succeeded} succeeded")
def remove_from_index(filename):
if args.verbose:
print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'")
search_client = SearchClient(
endpoint=f"https://{args.searchservice}.search.windows.net/", index_name=args.index, credential=search_creds
)
while True:
filter = None if filename is None else f"sourcefile eq '{os.path.basename(filename)}'"
r = search_client.search("", filter=filter, top=1000, include_total_count=True)
if r.get_count() == 0:
break
r = search_client.delete_documents(documents=[{"id": d["id"]} for d in r])
if args.verbose:
print(f"\tRemoved {len(r)} sections from index")
# It can take a few seconds for search results to reflect changes, so wait a bit
time.sleep(2)
def refresh_openai_token():
"""
Refresh OpenAI token every 5 minutes
"""
if (
CACHE_KEY_TOKEN_TYPE in open_ai_token_cache
and open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] == "azure_ad"
and open_ai_token_cache[CACHE_KEY_CREATED_TIME] + 300 < time.time()
):
token_cred = open_ai_token_cache[CACHE_KEY_TOKEN_CRED]
openai.api_key = token_cred.get_token("https://cognitiveservices.azure.com/.default").token
open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time()
def read_files(
path_pattern: str,
use_vectors: bool,
vectors_batch_support: bool,
embedding_deployment: str = None,
embedding_model: str = None,
):
"""
Recursively read directory structure under `path_pattern`
and execute indexing for the individual files
"""
for filename in glob.glob(path_pattern):
if args.verbose:
print(f"Processing '{filename}'")
if args.remove:
remove_blobs(filename)
remove_from_index(filename)
else:
if os.path.isdir(filename):
read_files(filename + "/*", use_vectors, vectors_batch_support, embedding_deployment, embedding_model)
continue
try:
if not args.skipblobs:
upload_blobs(filename)
page_map = get_document_text(filename)
sections = create_sections(
os.path.basename(filename),
page_map,
use_vectors and not vectors_batch_support,
embedding_deployment,
embedding_model,
)
if use_vectors and vectors_batch_support:
sections = update_embeddings_in_batch(sections)
index_sections(os.path.basename(filename), sections)
except Exception as e:
print(f"\tGot an error while reading {filename} -> {e} --> skipping file")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.",
epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v",
)
parser.add_argument("files", help="Files to be processed")
parser.add_argument(
"--category", help="Value for the category field in the search index for all sections indexed in this run"
)
parser.add_argument(
"--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage"
)
parser.add_argument("--storageaccount", help="Azure Blob Storage account name")
parser.add_argument("--container", help="Azure Blob Storage container name")
parser.add_argument(
"--storagekey",
required=False,
help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)",
)
parser.add_argument(
"--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)"
)
parser.add_argument(
"--searchservice",
help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)",
)
parser.add_argument(
"--index",
help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)",
)
parser.add_argument(
"--searchkey",
required=False,
help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)",
)
parser.add_argument("--openaihost", help="Host of the API used to compute embeddings ('azure' or 'openai')")
parser.add_argument("--openaiservice", help="Name of the Azure OpenAI service used to compute embeddings")
parser.add_argument(
"--openaideployment",
help="Name of the Azure OpenAI model deployment for an embedding model ('text-embedding-ada-002' recommended)",
)
parser.add_argument(
"--openaimodelname", help="Name of the Azure OpenAI embedding model ('text-embedding-ada-002' recommended)"
)
parser.add_argument(
"--novectors",
action="store_true",
help="Don't compute embeddings for the sections (e.g. don't call the OpenAI embeddings API during indexing)",
)
parser.add_argument(
"--disablebatchvectors", action="store_true", help="Don't compute embeddings in batch for the sections"
)
parser.add_argument(
"--openaikey",
required=False,
help="Optional. Use this Azure OpenAI account key instead of the current user identity to login (use az login to set current user for Azure). This is required only when using non-Azure endpoints.",
)
parser.add_argument("--openaiorg", required=False, help="This is required only when using non-Azure endpoints.")
parser.add_argument(
"--remove",
action="store_true",
help="Remove references to this document from blob storage and the search index",
)
parser.add_argument(
"--removeall",
action="store_true",
help="Remove all blobs from blob storage and documents from the search index",
)
parser.add_argument(
"--localpdfparser",
action="store_true",
help="Use PyPdf local PDF parser (supports only digital PDFs) instead of Azure Form Recognizer service to extract text, tables and layout from the documents",
)
parser.add_argument(
"--formrecognizerservice",
required=False,
help="Optional. Name of the Azure Form Recognizer service which will be used to extract text, tables and layout from the documents (must exist already)",
)
parser.add_argument(
"--formrecognizerkey",
required=False,
help="Optional. Use this Azure Form Recognizer account key instead of the current user identity to login (use az login to set current user for Azure)",
)
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
args = parser.parse_args()
# Use the current user identity to connect to Azure services unless a key is explicitly set for any of them
azd_credential = (
AzureDeveloperCliCredential()
if args.tenantid is None
else AzureDeveloperCliCredential(tenant_id=args.tenantid, process_timeout=60)
)
default_creds = azd_credential if args.searchkey is None or args.storagekey is None else None
search_creds = default_creds if args.searchkey is None else AzureKeyCredential(args.searchkey)
use_vectors = not args.novectors
compute_vectors_in_batch = not args.disablebatchvectors and args.openaimodelname in SUPPORTED_BATCH_AOAI_MODEL
if not args.skipblobs:
storage_creds = default_creds if args.storagekey is None else args.storagekey
if not args.localpdfparser:
# check if Azure Form Recognizer credentials are provided
if args.formrecognizerservice is None:
print(
"Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser."
)
exit(1)
formrecognizer_creds = (
default_creds if args.formrecognizerkey is None else AzureKeyCredential(args.formrecognizerkey)
)
if use_vectors:
if args.openaihost == "azure":
if not args.openaikey:
openai.api_key = azd_credential.get_token("https://cognitiveservices.azure.com/.default").token
openai.api_type = "azure_ad"
open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time()
open_ai_token_cache[CACHE_KEY_TOKEN_CRED] = azd_credential
open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] = "azure_ad"
else:
openai.api_key = args.openaikey
openai.api_type = "azure"
openai.api_base = f"https://{args.openaiservice}.openai.azure.com"
openai.api_version = "2023-05-15"
else:
print("using normal openai")
openai.api_key = args.openaikey
openai.organization = args.openaiorg
openai.api_type = "openai"
if args.removeall:
remove_blobs(None)
remove_from_index(None)
else:
if not args.remove:
create_search_index()
print("Processing files...")
read_files(args.files, use_vectors, compute_vectors_in_batch, args.openaideployment, args.openaimodelname)
| [] |
2024-01-10 | codebytes/azure-search-openai-demo | app~backend~approaches~readretrieveread.py | from typing import Any
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.callbacks.manager import CallbackManager, Callbacks
from langchain.chains import LLMChain
from langchain.llms.openai import AzureOpenAI, OpenAI
from approaches.approach import AskApproach
from langchainadapters import HtmlCallbackHandler
from lookuptool import CsvLookupTool
from text import nonewlines
class ReadRetrieveReadApproach(AskApproach):
"""
Attempt to answer questions by iteratively evaluating the question to see what information is missing, and once all information
is present then formulate an answer. Each iteration consists of two parts:
1. use GPT to see if we need more information
2. if more data is needed, use the requested "tool" to retrieve it.
The last call to GPT answers the actual question.
This is inspired by the MKRL paper[1] and applied here using the implementation in Langchain.
[1] E. Karpas, et al. arXiv:2205.00445
"""
template_prefix = (
"You are an intelligent assistant helping employees gather insights from feedback and retrospectives. "
"Answer the question using only the data provided in the information sources below. "
"For tabular information return it as an html table. Do not return markdown format. "
"Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. "
"For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" "
"It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). "
"If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". "
"Never quote tool names as sources."
"If you cannot answer using the sources below, say that you don't know. "
"\n\nYou can access to the following tools:"
)
template_suffix = """
Begin!
Question: {input}
Thought: {agent_scratchpad}"""
CognitiveSearchToolDescription = "useful for searching the gathering insights from feedback and retrospectives, etc."
def __init__(
self,
search_client: SearchClient,
openai_host: str,
openai_deployment: str,
openai_model: str,
embedding_deployment: str,
embedding_model: str,
sourcepage_field: str,
content_field: str,
):
self.search_client = search_client
self.openai_deployment = openai_deployment
self.openai_model = openai_model
self.embedding_deployment = embedding_deployment
self.embedding_model = embedding_model
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.openai_host = openai_host
async def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
query_vector = embedding["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = ""
# Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(
query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
else:
r = await self.search_client.search(
query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
if use_semantic_captions:
results = [
doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc["@search.captions"]]))
async for doc in r
]
else:
results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r]
content = "\n".join(results)
return results, content
async def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]:
retrieve_results = None
async def retrieve_and_store(q: str) -> Any:
nonlocal retrieve_results
retrieve_results, content = await self.retrieve(q, overrides)
return content
# Use to capture thought process during iterations
cb_handler = HtmlCallbackHandler()
cb_manager = CallbackManager(handlers=[cb_handler])
acs_tool = Tool(
name="CognitiveSearch",
func=lambda _: "Not implemented",
coroutine=retrieve_and_store,
description=self.CognitiveSearchToolDescription,
callbacks=cb_manager,
)
employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager)
tools = [acs_tool, employee_tool]
prompt = ZeroShotAgent.create_prompt(
tools=tools,
prefix=overrides.get("prompt_template_prefix") or self.template_prefix,
suffix=overrides.get("prompt_template_suffix") or self.template_suffix,
input_variables=["input", "agent_scratchpad"],
)
if self.openai_host == "azure":
llm = AzureOpenAI(
deployment_name=self.openai_deployment,
temperature=overrides.get("temperature", 0.3),
openai_api_key=openai.api_key,
)
else:
llm = OpenAI(
model_name=self.openai_model,
temperature=overrides.get("temperature", 0.3),
openai_api_key=openai.api_key,
)
chain = LLMChain(llm=llm, prompt=prompt)
agent_exec = AgentExecutor.from_agent_and_tools(
agent=ZeroShotAgent(llm_chain=chain), tools=tools, verbose=True, callback_manager=cb_manager
)
result = await agent_exec.arun(q)
# Remove references to tool names that might be confused with a citation
result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "")
return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
class EmployeeInfoTool(CsvLookupTool):
employee_name: str = ""
def __init__(self, employee_name: str, callbacks: Callbacks = None):
super().__init__(
filename="data/employeeinfo.csv",
key_field="name",
name="Employee",
description="useful for answering questions about the employee, their benefits and other personal information",
callbacks=callbacks,
)
self.func = lambda _: "Not implemented"
self.coroutine = self.employee_info
self.employee_name = employee_name
async def employee_info(self, name: str) -> str:
return self.lookup(name)
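# Minimal usage sketch (the constructor values below are placeholders, not settings from
# this repo, and the call assumes an async context):
#   approach = ReadRetrieveReadApproach(search_client, "azure", "my-davinci-deployment",
#                                       "text-davinci-003", "my-embedding-deployment",
#                                       "text-embedding-ada-002", "sourcepage", "content")
#   result = await approach.run("What did the retrospectives say about AKS?", overrides={})
#   print(result["answer"])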
| [
"\nBegin!\n\nQuestion: {input}\n\nThought: {agent_scratchpad}",
"prompt_template_suffix",
"agent_scratchpad",
"You are an intelligent assistant helping employees gather insights from feedback and retrospectives. Answer the question using only the data provided in the information sources below. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". Never quote tool names as sources.If you cannot answer using the sources below, say that you don't know. \n\nYou can access to the following tools:",
"input",
"prompt_template_prefix"
] |
2024-01-10 | codebytes/azure-search-openai-demo | app~backend~approaches~chatreadretrieveread.py | from typing import Any, AsyncGenerator
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from core.messagebuilder import MessageBuilder
from core.modelhelper import get_token_limit
from text import nonewlines
class ChatReadRetrieveReadApproach:
# Chat roles
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
"""
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
(answer) with that prompt.
"""
system_message_chat_conversation = """AI Assistant that helps the company employees gather insights from feedback and retrospectives. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
"""
follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their insights from feedback and retrospectives.
Use double angle brackets to reference the questions, e.g. <<Are there specific documentation updates needed?>>.
Try not to repeat questions that have already been asked.
Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in feedback and retrospectives.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
If the question is not in English, translate the question to English before generating the search query.
If you cannot generate a search query, return just the number 0.
"""
query_prompt_few_shots = [
{"role" : USER, "content" : "What are the challenges with AKS?" },
{"role" : ASSISTANT, "content" : "AKS Feedback requests" },
{"role" : USER, "content" : "What are the documentation challenges?" },
{"role" : ASSISTANT, "content" : "Feedback Data" }
]
def __init__(
self,
search_client: SearchClient,
openai_host: str,
chatgpt_deployment: str,
chatgpt_model: str,
embedding_deployment: str,
embedding_model: str,
sourcepage_field: str,
content_field: str,
):
self.search_client = search_client
self.openai_host = openai_host
self.chatgpt_deployment = chatgpt_deployment
self.chatgpt_model = chatgpt_model
self.embedding_deployment = embedding_deployment
self.embedding_model = embedding_model
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.chatgpt_token_limit = get_token_limit(chatgpt_model)
async def run_until_final_call(
self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False
) -> tuple:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
user_q = "Generate search query for: " + history[-1]["user"]
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
messages = self.get_messages_from_history(
self.query_prompt_template,
self.chatgpt_model,
history,
user_q,
self.query_prompt_few_shots,
self.chatgpt_token_limit - len(user_q),
)
chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {}
chat_completion = await openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=0.0,
max_tokens=32,
n=1,
)
query_text = chat_completion.choices[0].message.content
if query_text.strip() == "0":
query_text = history[-1]["user"] # Use the last user input if we failed to generate a better query
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
query_vector = embedding["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = None
# Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(
query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
else:
r = await self.search_client.search(
query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
if use_semantic_captions:
results = [
doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]]))
async for doc in r
]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r]
content = "\n".join(results)
follow_up_questions_prompt = (
self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
)
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
# Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
prompt_override = overrides.get("prompt_template")
if prompt_override is None:
system_message = self.system_message_chat_conversation.format(
injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt
)
elif prompt_override.startswith(">>>"):
system_message = self.system_message_chat_conversation.format(
injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt
)
else:
system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt)
messages = self.get_messages_from_history(
system_message,
self.chatgpt_model,
history,
# Model does not handle lengthy system messages well.
# Moved sources to latest user conversation to solve follow up questions prompt.
history[-1]["user"] + "\n\nSources:\n" + content,
max_tokens=self.chatgpt_token_limit,
)
msg_to_display = "\n\n".join([str(message) for message in messages])
extra_info = {
"data_points": results,
"thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>"
+ msg_to_display.replace("\n", "<br>"),
}
chat_coroutine = openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=overrides.get("temperature") or 0.7,
max_tokens=1024,
n=1,
stream=should_stream,
)
return (extra_info, chat_coroutine)
async def run_without_streaming(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> dict[str, Any]:
extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=False)
chat_content = (await chat_coroutine).choices[0].message.content
extra_info["answer"] = chat_content
return extra_info
async def run_with_streaming(
self, history: list[dict[str, str]], overrides: dict[str, Any]
) -> AsyncGenerator[dict, None]:
extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=True)
yield extra_info
async for event in await chat_coroutine:
yield event
def get_messages_from_history(
self,
system_prompt: str,
model_id: str,
history: list[dict[str, str]],
user_conv: str,
few_shots=[],
max_tokens: int = 4096,
) -> list:
message_builder = MessageBuilder(system_prompt, model_id)
# Add examples to show the chat what responses we want.
# It will try to mimic any responses and make sure they match the rules laid out in the system message.
for shot in few_shots:
message_builder.append_message(shot.get("role"), shot.get("content"))
user_content = user_conv
append_index = len(few_shots) + 1
message_builder.append_message(self.USER, user_content, index=append_index)
for h in reversed(history[:-1]):
if bot_msg := h.get("bot"):
message_builder.append_message(self.ASSISTANT, bot_msg, index=append_index)
if user_msg := h.get("user"):
message_builder.append_message(self.USER, user_msg, index=append_index)
if message_builder.token_length > max_tokens:
break
messages = message_builder.messages
return messages
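# Note on the expected history format: each turn is a dict with a "user" entry and, once
# answered, a "bot" entry, newest turn last. run_until_final_call reads the latest question
# from history[-1]["user"], and get_messages_from_history walks the older turns in reverse
# until the model's token budget is exhausted.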
| [
"Feedback Data",
"Next Questions",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in feedback and retrospectives.\nGenerate a search query based on the conversation and the new question.\nDo not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nIf the question is not in English, translate the question to English before generating the search query.\nIf you cannot generate a search query, return just the number 0.\n",
"Generate three very brief follow-up questions that the user would likely ask next about their insights from feedback and retrospectives.\nUse double angle brackets to reference the questions, e.g. <<Are there specific documentation updates needed?>>.\nTry not to repeat questions that have already been asked.\nOnly generate questions and do not generate any text before or after the questions, such as 'Next Questions'",
"What are the documentation challenges?",
"suggest_followup_questions",
"AKS Feedback requests",
"What are the challenges with AKS?",
"prompt_template"
] |
2024-01-10 | bravelab/ratemate | ratemate~ratemate.py | import logging
import config
from openai import OpenAI
logger = logging.getLogger("ratemate")
class RateMate:
def __init__(self):
self.client = OpenAI(api_key=config.API_KEY)
def rate_audio(self, audio_file_path: str) -> str:
lyrics = self.transcribe_audio(audio_file_path)
pegi_score = self.get_pegi_score(lyrics)
return pegi_score
def transcribe_audio(self, audio_file_path: str) -> str:
logger.info(f"Processing audio file: {audio_file_path}")
with open(audio_file_path, "rb") as audio_file:
transcript = self.client.audio.transcriptions.create(
model="whisper-1", file=audio_file
)
return transcript.text
def get_pegi_score(self, lyrics: str) -> str:
messages = [
{
"role": "system",
"content": "You are a bot which takes a song lyrics as input and make a pegi score for it.",
}
]
logger.info(f"Song lyrics: {lyrics}")
messages.append(
{"role": "user", "content": lyrics},
)
chat = self.client.chat.completions.create(
model="gpt-3.5-turbo", messages=messages
)
reply = chat.choices[0].message.content
messages.append({"role": "assistant", "content": reply})
return reply
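# Illustrative usage (the audio path is a placeholder and config.API_KEY must be set):
#   rater = RateMate()
#   print(rater.rate_audio("songs/example.mp3"))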
| [
"You are a bot which takes a song lyrics as input and make a pegi score for it."
] |
2024-01-10 | automata/generative.monster | generative_monster~core.py | import os
import json
import random
import numpy as np
from dotenv import load_dotenv
load_dotenv()
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.schema import messages_from_dict, messages_to_dict, HumanMessage
from generative_monster.interface.twitter import TwitterInterface
from generative_monster.generator.openjourney import OpenJourneyGenerator
from generative_monster.generator.leap import LeapGenerator
from generative_monster.prompts import PROMPT_SUFFIXES
from generative_monster.utils.image import open_image, resize_image, save_image
from generative_monster.settings import (
AGENT_DESCRIPTION,
HASHTAGS,
TEMPERATURE,
GENERATORS_TWITTER_ACCOUNTS
)
class Monster:
def __init__(self):
pass
def create(self, publish=True):
# Inspiration
print("-- Memory and inspiration")
text = self.find_inspiration()
print("Generated description:", text)
if len(text) > 200:
text = text[:190] + "..."
print("Warning: It was too long! Shortening:", text)
# Appending hashtags
# tweet_content = text + "\n\n" + HASHTAGS
# tweet_content = HASHTAGS
# print("Tweet content:", tweet_content)
# Deciding on style
print("--- Style")
available_styles = list(PROMPT_SUFFIXES.keys())
selected_style = random.choice(available_styles)
print("Selected style:", selected_style)
# Prompt creation
print("--- Prompt creation")
prompt = self.create_prompt(text, style=selected_style)
print("Final prompt:", prompt)
# Image generation
print("-- Image generation")
available_generators = ["openjourney", "leap"]
selected_generator = random.choice(available_generators)
print("Selected generator:", selected_generator)
image_path = self.generate(prompt, generator=selected_generator)
if not image_path:
print("Failed to generate image. Please try again later... aborting.")
return
print("Generated image:", image_path)
# Validate image
print("-- Validating image")
if not self.is_valid(image_path):
print("Not a valid image. Please try again later... aborting.")
return
print("Valid image...")
# Scale up
print("-- Scaling image up")
scale_factor = 2
image_path = self.scale_image(image_path, scale_factor)
print(f"Scaled image by x{scale_factor}")
# Communication
if publish:
# generator_twitter = GENERATORS_TWITTER_ACCOUNTS[selected_generator]
# tweet_content = f"Generated using {generator_twitter} API"
tweet_content = ""
print("-- Communication")
response = self.publish(tweet_content, prompt, [image_path])
print("Tweet:", response)
return image_path
def create_from_prompt(self, initial_prompt, style, generator="openjourney"):
# Generate image from prompt straight
prompt = self.create_prompt(initial_prompt, style)
print("\tPrompt:", prompt)
image_path = self.generate(prompt, generator)
print("\tImage:", image_path)
return image_path
def find_inspiration(self):
# TODO Search twitter for daily headlines? Movies? TVSeries?
# Recover memory
if os.path.exists("memory.json"):
# Use existing memory
with open("memory.json", "r") as f:
memory_dict = json.load(f)
messages = messages_from_dict(memory_dict)
memory = ConversationBufferMemory(return_messages=True)
# Constraint
max_messages = 50
for message in messages[-max_messages:]:
if isinstance(message, HumanMessage):
memory.chat_memory.add_user_message(message.content)
else:
memory.chat_memory.add_ai_message(message.content)
else:
# Or create new one
memory = ConversationBufferMemory(return_messages=True)
memory.load_memory_variables({})
# Create a prompt
prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(AGENT_DESCRIPTION),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
llm = ChatOpenAI(temperature=TEMPERATURE)
conversation = ConversationChain(
memory=memory,
prompt=prompt,
llm=llm,
verbose=False
)
gen_prompt = conversation.predict(
input="Describe a painting in a short phrase, maximum of 10 words, about a topic of your choice. Limit the your answer to 100 characters. Do not quote.")
# gen_text = conversation.predict(
# input="Write a tweet about your latest painting to share with your followers. Limit the answer to maximum 100 characters."
# )
# Save to memory
with open("memory.json", "w") as f:
memory_dict = messages_to_dict(memory.chat_memory.messages)
json.dump(memory_dict, f)
return gen_prompt.strip()
def create_prompt(self, text, style="acrylic"):
suffix = PROMPT_SUFFIXES[style]["suffix"]
prompt = text + " " + suffix
return prompt
def generate(self, prompt, generator="openjourney"):
if generator == "openjourney":
gen = OpenJourneyGenerator()
elif generator == "leap":
gen = LeapGenerator()
image_path = gen.generate(prompt)
return image_path
def publish(self, text, prompt, image_paths):
ti = TwitterInterface()
res = ti.tweet_with_images(text, prompt, image_paths)
return res
def scale_image(self, image_path, scale_factor=2):
original_image = open_image(image_path)
resized_image = resize_image(original_image, scale_factor)
# Overwrite original path for now
save_image(resized_image, image_path)
return image_path
def is_valid(self, image_path):
# Pitch black images are not valid
image = open_image(image_path)
image_array = np.array(image)
image_mean = image_array.mean()
return image_mean > 0.0 | [
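# Illustrative usage (publish=False skips the Twitter step, so only the OpenAI and
# image-generation backends need to be configured):
#   monster = Monster()
#   monster.create(publish=False)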
"text6e165beb-da80-4b0d-bb6e-ebd184f8b44a... PLACEHOLDER",
"{input}",
"Describe a painting in a short phrase, maximum of 10 words, about a topic of your choice. Limit the your answer to 100 characters. Do not quote."
] |
2024-01-10 | fuyunxiansen/nlp | PolyUGPT~PolyUGPT~run_PolyUGPT.py | import os
import logging
import click
import torch
import utils
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler # for streaming response
from langchain.callbacks.manager import CallbackManager
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
from prompt_template_utils import get_prompt_template
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from transformers import (
GenerationConfig,
pipeline,
)
from load_models import (
load_quantized_model_gguf_ggml,
load_quantized_model_qptq,
load_full_model,
)
from constants import (
EMBEDDING_MODEL_NAME,
PERSIST_DIRECTORY,
MODEL_ID,
MODEL_BASENAME,
MAX_NEW_TOKENS,
MODELS_PATH,
CHROMA_SETTINGS
)
def load_model(device_type, model_id, model_basename=None, LOGGING=logging):
"""
Select a model for text generation using the HuggingFace library.
If you are running this for the first time, it will download a model for you.
subsequent runs will use the model from the disk.
Args:
device_type (str): Type of device to use, e.g., "cuda" for GPU or "cpu" for CPU.
model_id (str): Identifier of the model to load from HuggingFace's model hub.
model_basename (str, optional): Basename of the model if using quantized models.
Defaults to None.
Returns:
HuggingFacePipeline: A pipeline object for text generation using the loaded model.
Raises:
ValueError: If an unsupported model or device type is provided.
"""
logging.info(f"Loading Model: {model_id}, on: {device_type}")
logging.info("This action can take a few minutes!")
if model_basename is not None:
if ".gguf" in model_basename.lower():
llm = load_quantized_model_gguf_ggml(model_id, model_basename, device_type, LOGGING)
return llm
elif ".ggml" in model_basename.lower():
model, tokenizer = load_quantized_model_gguf_ggml(model_id, model_basename, device_type, LOGGING)
else:
model, tokenizer = load_quantized_model_qptq(model_id, model_basename, device_type, LOGGING)
else:
model, tokenizer = load_full_model(model_id, model_basename, device_type, LOGGING)
# Load configuration from the model to avoid warnings
generation_config = GenerationConfig.from_pretrained(model_id)
# see here for details:
# https://huggingface.co/docs/transformers/
# main_classes/text_generation#transformers.GenerationConfig.from_pretrained.returns
# Create a pipeline for text generation
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_length=MAX_NEW_TOKENS,
temperature=0.2,
# top_p=0.95,
repetition_penalty=1.15,
generation_config=generation_config,
)
local_llm = HuggingFacePipeline(pipeline=pipe)
logging.info("Local LLM Loaded")
return local_llm
def retrieval_qa_pipline(device_type, use_history, promptTemplate_type="llama"):
"""
Initializes and returns a retrieval-based Question Answering (QA) pipeline.
This function sets up a QA system that retrieves relevant information using embeddings
from the HuggingFace library. It then answers questions based on the retrieved information.
Parameters:
- device_type (str): Specifies the type of device where the model will run, e.g., 'cpu', 'cuda', etc.
- use_history (bool): Flag to determine whether to use chat history or not.
Returns:
- RetrievalQA: An initialized retrieval-based QA system.
Notes:
- The function uses embeddings from the HuggingFace library, either instruction-based or regular.
- The Chroma class is used to load a vector store containing pre-computed embeddings.
- The retriever fetches relevant documents or data based on a query.
- The prompt and memory, obtained from the `get_prompt_template` function, might be used in the QA system.
- The model is loaded onto the specified device using its ID and basename.
- The QA system retrieves relevant documents using the retriever and then answers questions based on those documents.
"""
embeddings = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": device_type})
# uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py
# embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
# load the vectorstore
db = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=embeddings,
client_settings=CHROMA_SETTINGS
)
retriever = db.as_retriever()
# get the prompt template and memory if set by the user.
prompt, memory = get_prompt_template(promptTemplate_type=promptTemplate_type, history=use_history)
# load the llm pipeline
llm = load_model(device_type, model_id=MODEL_ID, model_basename=MODEL_BASENAME, LOGGING=logging)
if use_history:
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff", # try other chain types as well: refine, map_reduce, map_rerank
retriever=retriever,
return_source_documents=True, # verbose=True,
callbacks=callback_manager,
chain_type_kwargs={"prompt": prompt, "memory": memory},
)
else:
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff", # try other chain types as well: refine, map_reduce, map_rerank
retriever=retriever,
return_source_documents=True, # verbose=True,
callbacks=callback_manager,
chain_type_kwargs={
"prompt": prompt,
},
)
return qa
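# Illustrative programmatic use outside the click CLI below (the device string is only an
# example; pick whatever your hardware supports):
#   qa = retrieval_qa_pipline("cuda", use_history=False, promptTemplate_type="llama")
#   print(qa("What do my documents say about enrolment?")["result"])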
# choose device type to run on, as well as whether to show source documents.
@click.command()
@click.option(
"--device_type",
default="cuda" if torch.cuda.is_available() else "cpu",
type=click.Choice(
[
"cpu",
"cuda",
"ipu",
"xpu",
"mkldnn",
"opengl",
"opencl",
"ideep",
"hip",
"ve",
"fpga",
"ort",
"xla",
"lazy",
"vulkan",
"mps",
"meta",
"hpu",
"mtia",
],
),
help="Device to run on. (Default is cuda if available, otherwise cpu)",
)
@click.option(
"--show_sources",
"-s",
is_flag=True,
help="Show sources along with answers (Default is False)",
)
@click.option(
"--use_history",
"-h",
is_flag=True,
help="Use history (Default is False)",
)
@click.option(
"--model_type",
default="llama",
type=click.Choice(
["llama", "mistral", "non_llama"],
),
help="model type, llama, mistral or non_llama",
)
@click.option(
"--save_qa",
is_flag=True,
help="whether to save Q&A pairs to a CSV file (Default is False)",
)
def main(device_type, show_sources, use_history, model_type, save_qa):
"""
Implements the main information retrieval task for a localGPT.
This function sets up the QA system by loading the necessary embeddings, vectorstore, and LLM model.
It then enters an interactive loop where the user can input queries and receive answers. Optionally,
the source documents used to derive the answers can also be displayed.
Parameters:
- device_type (str): Specifies the type of device where the model will run, e.g., 'cpu', 'mps', 'cuda', etc.
- show_sources (bool): Flag to determine whether to display the source documents used for answering.
- use_history (bool): Flag to determine whether to use chat history or not.
Notes:
- Logging information includes the device type, whether source documents are displayed, and the use of history.
- If the models directory does not exist, it creates a new one to store models.
- The user can exit the interactive loop by entering "exit".
- The source documents are displayed if the show_sources flag is set to True.
"""
logging.info(f"Running on: {device_type}")
logging.info(f"Display Source Documents set to: {show_sources}")
logging.info(f"Use history set to: {use_history}")
# check if models directory do not exist, create a new one and store models here.
if not os.path.exists(MODELS_PATH):
os.mkdir(MODELS_PATH)
qa = retrieval_qa_pipline(device_type, use_history, promptTemplate_type=model_type)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
# Get the answer from the chain
res = qa(query)
answer, docs = res["result"], res["source_documents"]
# Print the result
print("\n\n> Question:")
print(query)
print("\n> Answer:")
print(answer)
if show_sources: # this is a flag that you can set to disable showing answers.
# # Print the relevant sources used for the answer
print("----------------------------------SOURCE DOCUMENTS---------------------------")
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
print("----------------------------------SOURCE DOCUMENTS---------------------------")
# Log the Q&A to CSV only if save_qa is True
if save_qa:
utils.log_to_csv(query, answer)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
main()
| [] |
2024-01-10 | fuyunxiansen/nlp | PolyUGPT~PolyUGPT~run_PolyUGPT_API.py | import logging
import os
import shutil
import subprocess
import argparse
import torch
from flask import Flask, jsonify, request
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
# from langchain.embeddings import HuggingFaceEmbeddings
from run_PolyUGPT import load_model
from prompt_template_utils import get_prompt_template
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from werkzeug.utils import secure_filename
from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY, MODEL_ID, MODEL_BASENAME
if torch.backends.mps.is_available():
DEVICE_TYPE = "mps"
elif torch.cuda.is_available():
DEVICE_TYPE = "cuda"
else:
DEVICE_TYPE = "cpu"
SHOW_SOURCES = True
logging.info(f"Running on: {DEVICE_TYPE}")
logging.info(f"Display Source Documents set to: {SHOW_SOURCES}")
EMBEDDINGS = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": DEVICE_TYPE})
# uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py
# EMBEDDINGS = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
# if os.path.exists(PERSIST_DIRECTORY):
# try:
# shutil.rmtree(PERSIST_DIRECTORY)
# except OSError as e:
# print(f"Error: {e.filename} - {e.strerror}.")
# else:
# print("The directory does not exist")
# run_langest_commands = ["python", "ingest.py"]
# if DEVICE_TYPE == "cpu":
# run_langest_commands.append("--device_type")
# run_langest_commands.append(DEVICE_TYPE)
# result = subprocess.run(run_langest_commands, capture_output=True)
# if result.returncode != 0:
# raise FileNotFoundError(
# "No files were found inside SOURCE_DOCUMENTS, please put a starter file inside before starting the API!"
# )
# load the vectorstore
DB = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=EMBEDDINGS,
client_settings=CHROMA_SETTINGS,
)
RETRIEVER = DB.as_retriever()
LLM = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME)
prompt, memory = get_prompt_template(promptTemplate_type="llama", history=False)
QA = RetrievalQA.from_chain_type(
llm=LLM,
chain_type="stuff",
retriever=RETRIEVER,
return_source_documents=SHOW_SOURCES,
chain_type_kwargs={
"prompt": prompt,
},
)
app = Flask(__name__)
@app.route("/api/delete_source", methods=["GET"])
def delete_source_route():
folder_name = "SOURCE_DOCUMENTS"
if os.path.exists(folder_name):
shutil.rmtree(folder_name)
os.makedirs(folder_name)
return jsonify({"message": f"Folder '{folder_name}' successfully deleted and recreated."})
@app.route("/api/save_document", methods=["GET", "POST"])
def save_document_route():
if "document" not in request.files:
return "No document part", 400
file = request.files["document"]
if file.filename == "":
return "No selected file", 400
if file:
filename = secure_filename(file.filename)
folder_path = "SOURCE_DOCUMENTS"
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(folder_path, filename)
file.save(file_path)
return "File saved successfully", 200
@app.route("/api/run_ingest", methods=["GET"])
def run_ingest_route():
global DB
global RETRIEVER
global QA
try:
if os.path.exists(PERSIST_DIRECTORY):
try:
shutil.rmtree(PERSIST_DIRECTORY)
except OSError as e:
print(f"Error: {e.filename} - {e.strerror}.")
else:
print("The directory does not exist")
run_langest_commands = ["python", "ingest.py"]
if DEVICE_TYPE == "cpu":
run_langest_commands.append("--device_type")
run_langest_commands.append(DEVICE_TYPE)
result = subprocess.run(run_langest_commands, capture_output=True)
if result.returncode != 0:
return "Script execution failed: {}".format(result.stderr.decode("utf-8")), 500
# load the vectorstore
DB = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=EMBEDDINGS,
client_settings=CHROMA_SETTINGS,
)
RETRIEVER = DB.as_retriever()
prompt, memory = get_prompt_template(promptTemplate_type="llama", history=False)
QA = RetrievalQA.from_chain_type(
llm=LLM,
chain_type="stuff",
retriever=RETRIEVER,
return_source_documents=SHOW_SOURCES,
chain_type_kwargs={
"prompt": prompt,
},
)
return "Script executed successfully: {}".format(result.stdout.decode("utf-8")), 200
except Exception as e:
return f"Error occurred: {str(e)}", 500
@app.route("/api/prompt_route", methods=["GET", "POST"])
def prompt_route():
global QA
user_prompt = request.form.get("user_prompt")
if user_prompt:
# print(f'User Prompt: {user_prompt}')
# Get the answer from the chain
res = QA(user_prompt)
answer, docs = res["result"], res["source_documents"]
prompt_response_dict = {
"Prompt": user_prompt,
"Answer": answer,
}
prompt_response_dict["Sources"] = []
for document in docs:
prompt_response_dict["Sources"].append(
(os.path.basename(str(document.metadata["source"])), str(document.page_content))
)
return jsonify(prompt_response_dict), 200
else:
return "No user prompt received", 400
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=5110, help="Port to run the API on. Defaults to 5110.")
parser.add_argument(
"--host",
type=str,
default="127.0.0.1",
help="Host to run the API on. Defaults to 127.0.0.1. "
"Set to 0.0.0.0 to make the API externally "
"accessible from other devices.",
)
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
app.run(debug=False, host=args.host, port=args.port)
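# Illustrative client calls against the defaults above (host 127.0.0.1, port 5110;
# adjust if you pass --host/--port):
#   curl -X POST -F "document=@./my.pdf" http://127.0.0.1:5110/api/save_document
#   curl http://127.0.0.1:5110/api/run_ingest
#   curl -X POST -d "user_prompt=What is in my documents?" http://127.0.0.1:5110/api/prompt_route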
| [
"user_prompt",
"{'Prompt': PLACEHOLDER, 'Answer': PLACEHOLDER}"
] |
2024-01-10 | rtxd/LangchainExperiments | Basics~OnceUponATime.py | from dotenv import load_dotenv
from langchain.llms import OpenAI
import os
# Warning this is using openAI package version <1.0.0
#Load the .env file
load_dotenv()
#Access the 'SECRET_KEY' variable
openai_api_key = os.getenv('OPENAI_API_KEY')
llm = OpenAI(openai_api_key=openai_api_key)
result = llm("Once upon a time", max_tokens=5)
print(result) | [] |
2024-01-10 | rtxd/LangchainExperiments | MemoryManagement~SavingConversations.py | from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationSummaryMemory #Doesn't work well with FileChatMessageHistory
from dotenv import load_dotenv
load_dotenv()
chat = ChatOpenAI(verbose=True)
memory = ConversationSummaryMemory(
memory_key="messages",
return_messages=True,
llm=chat
)
prompt = ChatPromptTemplate(
input_variables=["content"],
messages=[
MessagesPlaceholder(variable_name="messages"),
HumanMessagePromptTemplate.from_template("{content}")
]
)
chain = LLMChain(
llm=chat,
prompt=prompt,
memory=memory,
verbose=True
)
while True:
content = input("> ")
result = chain({"content": content})
print(result["text"]) | [
"content",
"{content}"
] |
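As the inline comment notes, ConversationSummaryMemory does not pair well with FileChatMessageHistory. A minimal sketch of persisting the raw conversation to disk instead, using ConversationBufferMemory (the file name is illustrative):

from langchain.memory import ConversationBufferMemory, FileChatMessageHistory

memory = ConversationBufferMemory(
    chat_memory=FileChatMessageHistory("messages.json"),  # illustrative file name
    memory_key="messages",
    return_messages=True,
)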
2024-01-10 | rtxd/LangchainExperiments | AddContextWithEmbedding~Facts.py | from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from dotenv import load_dotenv
load_dotenv()
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=200,
chunk_overlap=0
)
loader = TextLoader("AddContextWithEmbedding/facts.txt")
docs = loader.load_and_split(text_splitter=text_splitter)
for doc in docs:
print(doc.page_content)
print('\n') | [] |
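A natural next step for the split documents above is to embed them into a vector store. A minimal sketch using OpenAI embeddings and Chroma; the persist directory and the query string are illustrative.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

embeddings = OpenAIEmbeddings()
db = Chroma.from_documents(docs, embeddings, persist_directory="emb")  # "emb" is illustrative
results = db.similarity_search("What is an interesting fact about the English language?")
print(results[0].page_content)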
2024-01-10 | rtxd/LangchainExperiments | MemoryManagement~MemoryManagement.py | from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationBufferMemory
from dotenv import load_dotenv
load_dotenv()
chat = ChatOpenAI()
memory = ConversationBufferMemory(memory_key="messages", return_messages=True)
prompt = ChatPromptTemplate(
input_variables=["content"],
messages=[
MessagesPlaceholder(variable_name="messages"),
HumanMessagePromptTemplate.from_template("{content}")
]
)
chain = LLMChain(
llm=chat,
prompt=prompt,
memory=memory
)
while True:
content = input("> ")
result = chain({"content": content})
print(result["text"]) | [
"content",
"{content}"
] |
2024-01-10 | rtxd/LangchainExperiments | Basics~MyFirstChain.py | from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain
import argparse
import os
load_dotenv()
openai_api_key = os.getenv('OPENAI_API_KEY')
parser = argparse.ArgumentParser()
parser.add_argument("--task", default="return a list of numbers")
parser.add_argument("--language", default="python")
args = parser.parse_args()
llm = OpenAI(openai_api_key=openai_api_key)
code_prompt = PromptTemplate(
template="Write a very short {language} function that will {task}",
input_variables=["language", "task"]
)
test_prompt = PromptTemplate(
input_variables=["language", "code"],
template="Write a test for the following {language} code:\n{code}"
)
code_chain = LLMChain(
llm=llm,
prompt=code_prompt,
output_key="code"
)
test_chain = LLMChain(
llm=llm,
prompt=test_prompt,
output_key="test"
)
chain = SequentialChain(
chains=[code_chain, test_chain],
input_variables=["language", "task"],
output_variables=["test", "code"]
)
result = chain({
"language": args.language,
"task": args.task
})
print(result["code"])
print(result["test"]) | [
"language",
"Write a very short {language} function that will {task}",
"Write a test for the following {language} code:\n{code}"
] |
2024-01-10 | Dataherald/dataherald | dataherald~sql_generator~adaptive_agent_executor.py | from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from langchain.agents.agent import (
AgentExecutor,
BaseMultiActionAgent,
BaseSingleActionAgent,
ExceptionTool,
)
from langchain.agents.tools import InvalidTool
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.schema import AgentAction, AgentFinish, OutputParserException
from langchain.tools.base import BaseTool
from overrides import override
from tiktoken.core import Encoding
class AdaptiveAgentExecutor(AgentExecutor):
agent: Union[BaseSingleActionAgent, BaseMultiActionAgent] # noqa: UP007
tools: Sequence[BaseTool]
return_intermediate_steps: bool = False
max_iterations: Optional[int] = 15 # noqa: UP007
max_execution_time: Optional[float] = None # noqa: UP007
early_stopping_method: str = "force"
handle_parsing_errors: Union[ # noqa: UP007
bool, str, Callable[[OutputParserException], str]
] = False
trim_intermediate_steps: Union[ # noqa: UP007
int, Callable[[List[Tuple[AgentAction, str]]], List[Tuple[AgentAction, str]]]
] = -1
llm_list: dict
switch_to_larger_model_threshold: int
enc: Encoding
tokens: int
@classmethod
def from_agent_and_tools(
cls,
agent: Union[BaseSingleActionAgent, BaseMultiActionAgent], # noqa: UP007
tools: Sequence[BaseTool],
llm_list: dict,
switch_to_larger_model_threshold: int,
encoding: Encoding,
callbacks: Callbacks = None,
**kwargs: Any,
) -> AgentExecutor:
"""Create from agent and tools."""
return cls(
agent=agent,
tools=tools,
llm_list=llm_list,
switch_to_larger_model_threshold=switch_to_larger_model_threshold,
enc=encoding,
callbacks=callbacks,
tokens=len(encoding.encode(str(agent.llm_chain.prompt.template))),
**kwargs,
)
def token_counter(self, intermediate_steps: List[Tuple[AgentAction, str]]) -> int:
if len(intermediate_steps) == 0:
return self.tokens
new_item_text = (
str(intermediate_steps[-1][0].log)
+ str(intermediate_steps[-1][0].tool)
+ str(intermediate_steps[-1][0].tool_input)
+ str(intermediate_steps[-1][1])
)
self.tokens += len(self.enc.encode(new_item_text))
return self.tokens
@override
def _take_next_step( # noqa: PLR0912 C901 PLR0915
self,
name_to_tool_map: Dict[str, BaseTool],
color_mapping: Dict[str, str],
inputs: Dict[str, str],
intermediate_steps: List[Tuple[AgentAction, str]],
run_manager: Optional[CallbackManagerForChainRun] = None, # noqa: UP007
) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]: # noqa: UP007
try:
intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
if self.agent.llm_chain.llm == self.llm_list["short_context_llm"]:
if (
self.token_counter(intermediate_steps)
> self.switch_to_larger_model_threshold
):
self.agent.llm_chain.llm = self.llm_list["long_context_llm"]
# Call the LLM to see what to do.
output = self.agent.plan(
intermediate_steps,
callbacks=run_manager.get_child() if run_manager else None,
**inputs,
)
except OutputParserException as e:
if isinstance(self.handle_parsing_errors, bool):
raise_error = not self.handle_parsing_errors
else:
raise_error = False
if raise_error:
raise ValueError( # noqa: B904
"An output parsing error occurred. "
"In order to pass this error back to the agent and have it try "
"again, pass `handle_parsing_errors=True` to the AgentExecutor. "
f"This is the error: {str(e)}"
)
text = str(e)
if isinstance(self.handle_parsing_errors, bool):
if e.send_to_llm:
observation = str(e.observation)
text = str(e.llm_output)
else:
observation = "Invalid or incomplete response"
elif isinstance(self.handle_parsing_errors, str):
observation = self.handle_parsing_errors
elif callable(self.handle_parsing_errors):
observation = self.handle_parsing_errors(e)
else:
raise ValueError(
"Got unexpected type of `handle_parsing_errors`"
) from e
output = AgentAction("_Exception", observation, text)
if run_manager:
run_manager.on_agent_action(output, color="green")
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = ExceptionTool().run(
output.tool_input,
verbose=self.verbose,
color=None,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
return [(output, observation)]
# If the tool chosen is the finishing tool, then we end and return.
if isinstance(output, AgentFinish):
return output
actions: List[AgentAction]
if isinstance(output, AgentAction):
actions = [output]
else:
actions = output
result = []
for agent_action in actions:
if run_manager:
run_manager.on_agent_action(agent_action, color="green")
# Otherwise we lookup the tool
if agent_action.tool in name_to_tool_map:
tool = name_to_tool_map[agent_action.tool]
return_direct = tool.return_direct
color = color_mapping[agent_action.tool]
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
if return_direct:
tool_run_kwargs["llm_prefix"] = ""
# We then call the tool on the tool input to get an observation
observation = tool.run(
agent_action.tool_input,
verbose=self.verbose,
color=color,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
else:
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = InvalidTool().run(
{
"requested_tool_name": agent_action.tool,
"available_tool_names": list(name_to_tool_map.keys()),
},
verbose=self.verbose,
color=None,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
result.append((agent_action, observation))
return result
| [] |
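A hedged sketch of how AdaptiveAgentExecutor might be wired up; the agent, tools, model names, and token threshold below are placeholders rather than values taken from this repository.

import tiktoken
from langchain.chat_models import ChatOpenAI

llm_list = {
    "short_context_llm": ChatOpenAI(model_name="gpt-4", temperature=0),     # placeholder model
    "long_context_llm": ChatOpenAI(model_name="gpt-4-32k", temperature=0),  # placeholder model
}
executor = AdaptiveAgentExecutor.from_agent_and_tools(
    agent=agent,    # a ZeroShotAgent (with an llm_chain) built elsewhere
    tools=tools,    # the tools that agent is allowed to call
    llm_list=llm_list,
    switch_to_larger_model_threshold=6000,  # illustrative token budget
    encoding=tiktoken.get_encoding("cl100k_base"),
)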
2024-01-10 | Dataherald/dataherald | dataherald~model~base_model.py | import os
from typing import Any
from langchain.llms import AlephAlpha, Anthropic, Cohere, OpenAI
from overrides import override
from dataherald.model import LLMModel
from dataherald.sql_database.models.types import DatabaseConnection
from dataherald.utils.encrypt import FernetEncrypt
class BaseModel(LLMModel):
def __init__(self, system):
super().__init__(system)
self.openai_api_key = os.environ.get("OPENAI_API_KEY")
self.aleph_alpha_api_key = os.environ.get("ALEPH_ALPHA_API_KEY")
self.anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
self.cohere_api_key = os.environ.get("COHERE_API_KEY")
@override
def get_model(
self,
database_connection: DatabaseConnection,
model_family="openai",
model_name="davinci-003",
**kwargs: Any
) -> Any:
if database_connection.llm_api_key is not None:
fernet_encrypt = FernetEncrypt()
api_key = fernet_encrypt.decrypt(database_connection.llm_api_key)
if model_family == "openai":
self.openai_api_key = api_key
elif model_family == "anthropic":
self.anthropic_api_key = api_key
elif model_family == "google":
self.google_api_key = api_key
if self.openai_api_key:
self.model = OpenAI(model_name=model_name, **kwargs)
elif self.aleph_alpha_api_key:
self.model = AlephAlpha(model=model_name, **kwargs)
elif self.anthropic_api_key:
self.model = Anthropic(model=model_name, **kwargs)
elif self.cohere_api_key:
self.model = Cohere(model=model_name, **kwargs)
else:
raise ValueError("No valid API key environment variable found")
return self.model
| [] |
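A hedged sketch of calling this factory; system, db_connection, and the model name are placeholders and must come from the application's own configuration.

# Illustrative only: `system` is a configured dataherald System and `db_connection`
# a DatabaseConnection; the model name is an example, not a recommendation.
llm = BaseModel(system).get_model(
    database_connection=db_connection,
    model_family="openai",
    model_name="gpt-3.5-turbo-instruct",
    temperature=0,
)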
2024-01-10 | Dataherald/dataherald | dataherald~sql_generator~dataherald_finetuning_agent.py | import datetime
import logging
import os
from functools import wraps
from typing import Any, Callable, Dict, List, Type
import openai
from bson.objectid import ObjectId
from google.api_core.exceptions import GoogleAPIError
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.callbacks import get_openai_callback
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.chains.llm import LLMChain
from langchain.schema import AgentAction
from langchain.tools.base import BaseTool
from overrides import override
from pydantic import BaseModel, Field
from sqlalchemy.exc import SQLAlchemyError
from dataherald.context_store import ContextStore
from dataherald.db import DB
from dataherald.db_scanner.models.types import TableDescription, TableDescriptionStatus
from dataherald.db_scanner.repository.base import TableDescriptionRepository
from dataherald.sql_database.base import SQLDatabase, SQLInjectionError
from dataherald.sql_database.models.types import (
DatabaseConnection,
)
from dataherald.sql_generator import EngineTimeOutORItemLimitError, SQLGenerator
from dataherald.types import Question, Response
from dataherald.utils.agent_prompts import (
FINETUNING_AGENT_PREFIX,
FINETUNING_AGENT_SUFFIX,
FINETUNING_SYSTEM_INFORMATION,
FORMAT_INSTRUCTIONS,
)
logger = logging.getLogger(__name__)
TOP_K = int(os.getenv("UPPER_LIMIT_QUERY_RETURN_ROWS", "50"))
def catch_exceptions(): # noqa: C901
def decorator(fn: Callable[[str], str]) -> Callable[[str], str]: # noqa: C901
@wraps(fn)
def wrapper(*args: Any, **kwargs: Any) -> Any: # noqa: PLR0911
try:
return fn(*args, **kwargs)
            except openai.AuthenticationError as e:
                # Invalid or missing API key
                return f"OpenAI API authentication error: {e}"
            except openai.RateLimitError as e:
                # Rate limit exceeded; retrying with exponential backoff is reasonable here
                return f"OpenAI API request exceeded rate limit: {e}"
            except openai.BadRequestError as e:
                # Malformed or otherwise rejected request
                return f"OpenAI API request was invalid: {e}"
            except openai.APIResponseValidationError as e:
                # Response did not pass validation
                return f"OpenAI API response is invalid: {e}"
            except openai.OpenAIError as e:
                # Any other OpenAI API error
                return f"OpenAI API returned an error: {e}"
except GoogleAPIError as e:
return f"Google API returned an error: {e}"
except SQLAlchemyError as e:
return f"Error: {e}"
return wrapper
return decorator
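# Usage note: every tool's _run method below is decorated with @catch_exceptions(), so
# provider and database failures are handed back to the agent as plain error strings
# instead of raising and aborting the whole run.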
class SQLInput(BaseModel):
sql_query: str = Field()
class QuestionInput(BaseModel):
question: str = Field()
class BaseSQLDatabaseTool(BaseModel):
"""Base tool for interacting with the SQL database and the context information."""
db: SQLDatabase = Field(exclude=True)
class Config(BaseTool.Config):
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
extra = "allow"
class SystemTime(BaseSQLDatabaseTool, BaseTool):
"""Tool for finding the current data and time."""
name = "system_time"
description = """
Use this tool to replace current_time and current_date in SQL queries with the actual current time and date.
"""
@catch_exceptions()
def _run(
self,
tool_input: str = "", # noqa: ARG002
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Execute the query, return the results or an error message."""
current_datetime = datetime.datetime.now()
return f"Current Date and Time: {str(current_datetime)}"
async def _arun(
self,
tool_input: str = "",
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("SystemTime tool does not support async")
class TablesSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):
"""Tool which takes in the given question and returns a list of tables with their relevance score to the question"""
name = "get_db_table_names"
description = """
Use this tool to get the list of tables in the database.
"""
db_scan: List[TableDescription]
@catch_exceptions()
def _run(
self,
input: str, # noqa: ARG002
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Use the concatenation of table name, columns names, and the description of the table as the table representation"""
tables = []
for table in self.db_scan:
tables.append(table.table_name)
return f"Tables in the database: {','.join(tables)}"
async def _arun(
self,
input: str = "",
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("TablesSQLDatabaseTool does not support async")
class QuerySQLDataBaseTool(BaseSQLDatabaseTool, BaseTool):
"""Tool for querying a SQL database."""
name = "sql_db_query"
description = """
Use this tool to execute the SQL query on the database, and return the results.
"""
args_schema: Type[BaseModel] = SQLInput
@catch_exceptions()
def _run(
self,
query: str,
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Execute the query, return the results or an error message."""
if "```sql" in query:
logger.info("**** Removing markdown formatting from the query\n")
query = query.replace("```sql", "").replace("```", "")
logger.info(f"**** Query after removing markdown formatting: {query}\n")
return self.db.run_sql(query, top_k=TOP_K)[0]
async def _arun(
self,
query: str,
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("QuerySQLDataBaseTool does not support async")
class GenerateSQL(BaseSQLDatabaseTool, BaseTool):
"""Tool for generating SQL queries."""
name = "generate_sql"
description = """
Use this tool to generate SQL queries.
Pass the user question as the input to the tool.
"""
args_schema: Type[BaseModel] = QuestionInput
db_scan: List[TableDescription]
instructions: List[dict] | None = Field(exclude=True, default=None)
api_key: str = Field(exclude=True)
def format_columns(self, table: TableDescription, top_k: int = 100) -> str:
"""
format_columns formats the columns.
Args:
table: The table to format.
top_k: The number of categories to show.
Returns:
The formatted columns in string format.
"""
columns_information = ""
for column in table.columns:
name = column.name
is_primary_key = column.is_primary_key
if is_primary_key:
primary_key_text = (
f"this column is a primary key of the table {table.table_name},"
)
else:
primary_key_text = ""
foreign_key = column.foreign_key
if foreign_key:
foreign_key_text = (
f"this column has a foreign key to the table {foreign_key},"
)
else:
foreign_key_text = ""
categories = column.categories
if categories:
if len(categories) <= top_k:
categories_text = f"Categories: {categories},"
else:
categories_text = ""
else:
categories_text = ""
if primary_key_text or foreign_key_text or categories_text:
columns_information += (
f"{name}: {primary_key_text}{foreign_key_text}{categories_text}\n"
)
return columns_information
def format_database_schema(
self, db_scan: List[TableDescription], top_k: int = 100
) -> str:
"""
format_database_schema formats the database schema.
Args:
db_scan: The database schema.
Returns:
The formatted database schema in string format.
"""
schema_of_database = ""
for table in db_scan:
tables_schema = table.table_schema
schema_of_database += f"{tables_schema}\n"
schema_of_database += "# Categorical Columns:\n"
columns_information = self.format_columns(table, top_k)
schema_of_database += columns_information
sample_rows = table.examples
schema_of_database += "# Sample rows:\n"
for item in sample_rows:
for key, value in item.items():
schema_of_database += f"{key}: {value}, "
schema_of_database += "\n"
schema_of_database += "\n\n"
return schema_of_database
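    # Illustrative shape of the string built by format_database_schema above
    # (the table, columns, and values are made up):
    #   CREATE TABLE users (id INTEGER, status TEXT)
    #   # Categorical Columns:
    #   status: Categories: ['active', 'inactive'],
    #   # Sample rows:
    #   id: 1, status: active,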
@catch_exceptions()
def _run(
self,
question: str = "",
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Execute the query, return the results or an error message."""
system_prompt = FINETUNING_SYSTEM_INFORMATION + self.format_database_schema(
self.db_scan
)
if self.instructions:
user_prompt = "Database administrator rules that should be followed:\n"
for index, instruction in enumerate(self.instructions):
user_prompt += f"{index+1}) {instruction['instruction']}\n"
user_prompt += "\n\n"
user_prompt += "User Question: " + question
else:
user_prompt = "User Question: " + question
response = openai.ChatCompletion.create(
model=os.getenv(
"FINETUNED_MODEL", "gpt-4-1106-preview"
            ),  # gpt-4-1106-preview is only a fallback to avoid an error when FINETUNED_MODEL is unset
api_key=self.api_key,
temperature=0.0,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
)
return response.choices[0]["message"]["content"]
async def _arun(
self,
tool_input: str = "",
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("GenerateSQL tool does not support async")
class SchemaSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):
"""Tool for getting schema of relevant tables."""
name = "db_schema"
description = """
Input: Comma-separated list of tables.
Output: Schema of the specified tables.
Use this tool to find the schema of the specified tables, if you are unsure about the schema of the tables when editing the SQL query.
Example Input: table1, table2, table3
"""
db_scan: List[TableDescription]
@catch_exceptions()
def _run(
self,
table_names: str,
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Get the schema for tables in a comma-separated list."""
table_names_list = table_names.split(", ")
tables_schema = ""
for table in self.db_scan:
if table.table_name in table_names_list:
tables_schema += table.table_schema + "\n"
if table.description is not None:
tables_schema += "Table description: " + table.description + "\n"
if tables_schema == "":
tables_schema += "Tables not found in the database"
return tables_schema
async def _arun(
self,
table_name: str,
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("SchemaSQLDatabaseTool does not support async")
class SQLDatabaseToolkit(BaseToolkit):
"""Dataherald toolkit"""
db: SQLDatabase = Field(exclude=True)
instructions: List[dict] | None = Field(exclude=True, default=None)
db_scan: List[TableDescription] = Field(exclude=True)
api_key: str = Field(exclude=True)
@property
def dialect(self) -> str:
"""Return string representation of SQL dialect to use."""
return self.db.dialect
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools = []
tools.append(SystemTime(db=self.db))
tools.append(QuerySQLDataBaseTool(db=self.db))
tools.append(
GenerateSQL(
db=self.db,
db_scan=self.db_scan,
instructions=self.instructions,
api_key=self.api_key,
)
)
tools.append(SchemaSQLDatabaseTool(db=self.db, db_scan=self.db_scan))
tools.append(TablesSQLDatabaseTool(db=self.db, db_scan=self.db_scan))
return tools
class DataheraldFinetuningAgent(SQLGenerator):
"""
DataheraldFinetuningAgent is a class that uses a Finetuning model to generate SQL queries.
"""
llm: Any = None
def create_sql_agent(
self,
toolkit: SQLDatabaseToolkit,
callback_manager: BaseCallbackManager | None = None,
prefix: str = FINETUNING_AGENT_PREFIX,
suffix: str = FINETUNING_AGENT_SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: List[str] | None = None,
max_iterations: int
| None = int(os.getenv("AGENT_MAX_ITERATIONS", "20")), # noqa: B008
max_execution_time: float | None = None,
early_stopping_method: str = "force",
verbose: bool = False,
agent_executor_kwargs: Dict[str, Any] | None = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
tools = toolkit.get_tools()
admin_instructions = ""
for index, instruction in enumerate(toolkit.instructions):
admin_instructions += f"{index+1}) {instruction['instruction']}\n"
prefix = prefix.format(
dialect=toolkit.dialect, admin_instructions=admin_instructions
)
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=self.llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
)
@override
def generate_response(
self,
user_question: Question,
database_connection: DatabaseConnection,
context: List[dict] = None, # noqa: ARG002
generate_csv: bool = False,
) -> Response:
"""
generate_response generates a response to a user question using a Finetuning model.
Args:
user_question (Question): The user question to generate a response to.
database_connection (DatabaseConnection): The database connection to use.
context (List[dict], optional): The context to use. Defaults to None.
generate_csv (bool, optional): Whether to generate a CSV. Defaults to False.
Returns:
Response: The response to the user question.
"""
context_store = self.system.instance(ContextStore)
storage = self.system.instance(DB)
self.llm = self.model.get_model(
database_connection=database_connection,
temperature=0,
model_name=os.getenv("LLM_MODEL", "gpt-4-1106-preview"),
)
repository = TableDescriptionRepository(storage)
db_scan = repository.get_all_tables_by_db(
{
"db_connection_id": ObjectId(database_connection.id),
"status": TableDescriptionStatus.SYNCHRONIZED.value,
}
)
if not db_scan:
raise ValueError("No scanned tables found for database")
_, instructions = context_store.retrieve_context_for_question(
user_question, number_of_samples=1
)
self.database = SQLDatabase.get_sql_engine(database_connection)
toolkit = SQLDatabaseToolkit(
db=self.database,
instructions=instructions,
db_scan=db_scan,
api_key=database_connection.decrypt_api_key(),
)
agent_executor = self.create_sql_agent(
toolkit=toolkit,
verbose=True,
max_execution_time=os.getenv("DH_ENGINE_TIMEOUT", None),
)
agent_executor.return_intermediate_steps = True
agent_executor.handle_parsing_errors = True
with get_openai_callback() as cb:
try:
result = agent_executor({"input": user_question.question})
result = self.check_for_time_out_or_tool_limit(result)
except SQLInjectionError as e:
raise SQLInjectionError(e) from e
except EngineTimeOutORItemLimitError as e:
raise EngineTimeOutORItemLimitError(e) from e
except Exception as e:
return Response(
question_id=user_question.id,
total_tokens=cb.total_tokens,
total_cost=cb.total_cost,
sql_query="",
sql_generation_status="INVALID",
sql_query_result=None,
error_message=str(e),
)
sql_query_list = []
for step in result["intermediate_steps"]:
action = step[0]
if type(action) == AgentAction and action.tool == "sql_db_query":
query = self.format_sql_query(action.tool_input)
if "```sql" in query:
logger.info("**** Removing markdown formatting from the query\n")
query = query.replace("```sql", "").replace("```", "")
logger.info(
f"**** Query after removing markdown formatting: {query}\n"
)
sql_query_list.append(query)
intermediate_steps = self.format_intermediate_representations(
result["intermediate_steps"]
)
logger.info(f"cost: {str(cb.total_cost)} tokens: {str(cb.total_tokens)}")
response = Response(
question_id=user_question.id,
response=result["output"],
intermediate_steps=intermediate_steps,
total_tokens=cb.total_tokens,
total_cost=cb.total_cost,
sql_query=sql_query_list[-1] if len(sql_query_list) > 0 else "",
)
return self.create_sql_query_status(
self.database,
response.sql_query,
response,
top_k=TOP_K,
generate_csv=generate_csv,
database_connection=database_connection,
)
| [
"\n\n",
"\n Use this tool to execute the SQL query on the database, and return the results.\n ",
"\n Use this tool to get the list of tables in the database.\n ",
"instruction",
"User Question: PLACEHOLDER",
"\n Use this tool to generate SQL queries.\n Pass the user question as the input to the tool.\n ",
"\n Use this tool to replace current_time and current_date in SQL queries with the actual current time and date.\n ",
"Database administrator rules that should be followed:\n",
"\n Input: Comma-separated list of tables.\n Output: Schema of the specified tables.\n Use this tool to find the schema of the specified tables, if you are unsure about the schema of the tables when editing the SQL query.\n Example Input: table1, table2, table3\n "
] |
2024-01-10 | Dataherald/dataherald | dataherald~sql_generator~dataherald_sqlagent.py | import datetime
import difflib
import logging
import os
from functools import wraps
from typing import Any, Callable, Dict, List
import numpy as np
import openai
import pandas as pd
import sqlalchemy
from bson.objectid import ObjectId
from google.api_core.exceptions import GoogleAPIError
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.callbacks import get_openai_callback
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.chains.llm import LLMChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import AgentAction
from langchain.tools.base import BaseTool
from overrides import override
from pydantic import BaseModel, Field
from sqlalchemy import MetaData
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.sql import func
from dataherald.context_store import ContextStore
from dataherald.db import DB
from dataherald.db_scanner.models.types import TableDescription, TableDescriptionStatus
from dataherald.db_scanner.repository.base import TableDescriptionRepository
from dataherald.sql_database.base import SQLDatabase, SQLInjectionError
from dataherald.sql_database.models.types import (
DatabaseConnection,
)
from dataherald.sql_generator import EngineTimeOutORItemLimitError, SQLGenerator
from dataherald.types import Question, Response
from dataherald.utils.agent_prompts import (
AGENT_PREFIX,
FORMAT_INSTRUCTIONS,
PLAN_BASE,
PLAN_WITH_FEWSHOT_EXAMPLES,
PLAN_WITH_FEWSHOT_EXAMPLES_AND_INSTRUCTIONS,
PLAN_WITH_INSTRUCTIONS,
SUFFIX_WITH_FEW_SHOT_SAMPLES,
SUFFIX_WITHOUT_FEW_SHOT_SAMPLES,
)
logger = logging.getLogger(__name__)
TOP_K = int(os.getenv("UPPER_LIMIT_QUERY_RETURN_ROWS", "50"))
EMBEDDING_MODEL = "text-embedding-ada-002"
def catch_exceptions(): # noqa: C901
def decorator(fn: Callable[[str], str]) -> Callable[[str], str]: # noqa: C901
@wraps(fn)
def wrapper(*args: Any, **kwargs: Any) -> Any: # noqa: PLR0911
try:
return fn(*args, **kwargs)
            except openai.AuthenticationError as e:
                # Invalid or missing API key
                return f"OpenAI API authentication error: {e}"
            except openai.RateLimitError as e:
                # Rate limit exceeded; retrying with exponential backoff is reasonable here
                return f"OpenAI API request exceeded rate limit: {e}"
            except openai.BadRequestError as e:
                # Malformed or otherwise rejected request
                return f"OpenAI API request was invalid: {e}"
            except openai.APIResponseValidationError as e:
                # Response did not pass validation
                return f"OpenAI API response is invalid: {e}"
            except openai.OpenAIError as e:
                # Any other OpenAI API error
                return f"OpenAI API returned an error: {e}"
except GoogleAPIError as e:
return f"Google API returned an error: {e}"
except SQLAlchemyError as e:
return f"Error: {e}"
return wrapper
return decorator
# Classes needed for tools
class BaseSQLDatabaseTool(BaseModel):
"""Base tool for interacting with the SQL database and the context information."""
db: SQLDatabase = Field(exclude=True)
context: List[dict] | None = Field(exclude=True, default=None)
class Config(BaseTool.Config):
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
extra = "allow"
class SystemTime(BaseSQLDatabaseTool, BaseTool):
"""Tool for finding the current data and time."""
name = "system_time"
description = """
Input is an empty string, output is the current data and time.
Always use this tool before generating a query if there is any time or date in the given question.
"""
@catch_exceptions()
def _run(
self,
tool_input: str = "", # noqa: ARG002
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Execute the query, return the results or an error message."""
current_datetime = datetime.datetime.now()
return f"Current Date and Time: {str(current_datetime)}"
async def _arun(
self,
tool_input: str = "",
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("GetCurrentTimeTool does not support async")
class QuerySQLDataBaseTool(BaseSQLDatabaseTool, BaseTool):
"""Tool for querying a SQL database."""
name = "sql_db_query"
description = """
Input: SQL query.
Output: Result from the database or an error message if the query is incorrect.
If an error occurs, rewrite the query and retry.
Use this tool to execute SQL queries.
"""
@catch_exceptions()
def _run(
self,
query: str,
top_k: int = TOP_K,
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Execute the query, return the results or an error message."""
if "```sql" in query:
logger.info("**** Removing markdown formatting from the query\n")
query = query.replace("```sql", "").replace("```", "")
logger.info(f"**** Query after removing markdown formatting: {query}\n")
return self.db.run_sql(query, top_k=top_k)[0]
async def _arun(
self,
query: str,
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("QuerySQLDataBaseTool does not support async")
class GetUserInstructions(BaseSQLDatabaseTool, BaseTool):
"""Tool for retrieving the instructions from the user"""
name = "get_admin_instructions"
description = """
Input: is an empty string.
Output: Database admin instructions before generating the SQL query.
The generated SQL query MUST follow the admin instructions even it contradicts with the given question.
"""
instructions: List[dict]
@catch_exceptions()
def _run(
self,
tool_input: str = "", # noqa: ARG002
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
response = "Admin: All of the generated SQL queries must follow the below instructions:\n"
for instruction in self.instructions:
response += f"{instruction['instruction']}\n"
return response
async def _arun(
self,
tool_input: str = "", # noqa: ARG002
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("GetUserInstructions does not support async")
class TablesSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):
"""Tool which takes in the given question and returns a list of tables with their relevance score to the question"""
name = "db_tables_with_relevance_scores"
description = """
Input: Given question.
Output: Comma-separated list of tables with their relevance scores, indicating their relevance to the question.
Use this tool to identify the relevant tables for the given question.
"""
db_scan: List[TableDescription]
embedding: OpenAIEmbeddings
def get_embedding(
self,
text: str,
) -> List[float]:
text = text.replace("\n", " ")
return self.embedding.embed_query(text)
def cosine_similarity(self, a: List[float], b: List[float]) -> float:
return round(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)), 4)
@catch_exceptions()
def _run(
self,
user_question: str,
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Use the concatenation of table name, columns names, and the description of the table as the table representation"""
question_embedding = self.get_embedding(user_question)
table_representations = []
for table in self.db_scan:
col_rep = ""
for column in table.columns:
col_rep += column.name + " "
table_rep = f"Table {table.table_name} contain columns: {col_rep}, this tables has: {table.description}"
table_representations.append([table.table_name, table_rep])
df = pd.DataFrame(
table_representations, columns=["table_name", "table_representation"]
)
df["table_embedding"] = df.table_representation.apply(
lambda x: self.get_embedding(x)
)
df["similarities"] = df.table_embedding.apply(
lambda x: self.cosine_similarity(x, question_embedding)
)
table_relevance = ""
for _, row in df.iterrows():
table_relevance += (
f'Table: {row["table_name"]}, relevance score: {row["similarities"]}\n'
)
return table_relevance
async def _arun(
self,
user_question: str = "",
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("TablesSQLDatabaseTool does not support async")
class ColumnEntityChecker(BaseSQLDatabaseTool, BaseTool):
"""Tool for checking the existance of an entity inside a column."""
name = "db_column_entity_checker"
description = """
Input: Column name and its corresponding table, and an entity.
Output: cell-values found in the column similar to the given entity.
Use this tool to get cell values similar to the given entity in the given column.
Example Input: table1 -> column2, entity
"""
def find_similar_strings(
self, input_list: List[tuple], target_string: str, threshold=0.4
):
similar_strings = []
for item in input_list:
similarity = difflib.SequenceMatcher(
None, str(item[0]).strip().lower(), target_string.lower()
).ratio()
if similarity >= threshold:
similar_strings.append((str(item[0]).strip(), similarity))
similar_strings.sort(key=lambda x: x[1], reverse=True)
return similar_strings[:25]
@catch_exceptions()
def _run(
self,
tool_input: str,
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
try:
schema, entity = tool_input.split(",")
table_name, column_name = schema.split("->")
except ValueError:
return "Invalid input format, use following format: table_name -> column_name, entity (entity should be a string without ',')"
search_pattern = f"%{entity.strip().lower()}%"
meta = MetaData(bind=self.db.engine)
table = sqlalchemy.Table(table_name.strip(), meta, autoload=True)
try:
search_query = sqlalchemy.select(
[func.distinct(table.c[column_name.strip()])]
).where(func.lower(table.c[column_name.strip()]).like(search_pattern))
search_results = self.db.engine.execute(search_query).fetchall()
search_results = search_results[:25]
except SQLAlchemyError:
search_results = []
distinct_query = sqlalchemy.select(
[func.distinct(table.c[column_name.strip()])]
)
results = self.db.engine.execute(distinct_query).fetchall()
results = self.find_similar_strings(results, entity)
similar_items = "Similar items:\n"
already_added = {}
for item in results:
similar_items += f"{item[0]}\n"
already_added[item[0]] = True
if len(search_results) > 0:
for item in search_results:
if item[0] not in already_added:
similar_items += f"{item[0]}\n"
return similar_items
async def _arun(
self,
tool_input: str,
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("ColumnEntityChecker does not support async")
class SchemaSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):
"""Tool for getting schema of relevant tables."""
name = "db_relevant_tables_schema"
description = """
Input: Comma-separated list of tables.
Output: Schema of the specified tables.
Use this tool to discover all columns of the relevant tables and identify potentially relevant columns.
Example Input: table1, table2, table3
"""
db_scan: List[TableDescription]
@catch_exceptions()
def _run(
self,
table_names: str,
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Get the schema for tables in a comma-separated list."""
table_names_list = table_names.split(", ")
tables_schema = ""
for table in self.db_scan:
if table.table_name in table_names_list:
tables_schema += table.table_schema + "\n"
if table.description is not None:
tables_schema += "Table description: " + table.description + "\n"
if tables_schema == "":
tables_schema += "Tables not found in the database"
return tables_schema
async def _arun(
self,
table_name: str,
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("SchemaSQLDatabaseTool does not support async")
class InfoRelevantColumns(BaseSQLDatabaseTool, BaseTool):
"""Tool for getting more information for potentially relevant columns"""
name = "db_relevant_columns_info"
description = """
Input: Comma-separated list of potentially relevant columns with their corresponding table.
Output: Information about the values inside the columns and their descriptions.
Use this tool to gather details about potentially relevant columns. then, filter them, and identify the relevant ones.
Example Input: table1 -> column1, table1 -> column2, table2 -> column1
"""
db_scan: List[TableDescription]
@catch_exceptions()
def _run(
self,
column_names: str,
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Get the column level information."""
items_list = column_names.split(", ")
column_full_info = ""
for item in items_list:
table_name, column_name = item.split(" -> ")
found = False
for table in self.db_scan:
if table_name == table.table_name:
col_info = ""
for column in table.columns:
if column_name == column.name:
found = True
col_info += f"Description: {column.description},"
if column.low_cardinality:
col_info += f" categories = {column.categories},"
col_info += " Sample rows: "
if found:
for row in table.examples:
col_info += row[column_name] + ", "
col_info = col_info[:-2]
column_full_info += f"Table: {table_name}, column: {column_name}, additional info: {col_info}\n"
if not found:
column_full_info += f"Table: {table_name}, column: {column_name} not found in database\n"
return column_full_info
async def _arun(
self,
table_name: str,
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("InfoRelevantColumnsTool does not support async")
class GetFewShotExamples(BaseSQLDatabaseTool, BaseTool):
"""Tool to obtain few-shot examples from the pool of samples"""
name = "fewshot_examples_retriever"
description = """
Input: Number of required Question/SQL pairs.
Output: List of similar Question/SQL pairs related to the given question.
Use this tool to fetch previously asked Question/SQL pairs as examples for improving SQL query generation.
For complex questions, request more examples to gain a better understanding of tables and columns and the SQL keywords to use.
If the given question is very similar to one of the retrieved examples, it is recommended to use the same SQL query and modify it slightly to fit the given question.
Always use this tool first and before any other tool!
""" # noqa: E501
few_shot_examples: List[dict]
@catch_exceptions()
def _run(
self,
number_of_samples: str,
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Get the schema for tables in a comma-separated list."""
if number_of_samples.isdigit():
number_of_samples = int(number_of_samples)
else:
return "Action input for the fewshot_examples_retriever tool should be an integer"
returned_output = ""
for example in self.few_shot_examples[:number_of_samples]:
if "used" not in example:
returned_output += f"Question: {example['nl_question']} -> SQL: {example['sql_query']}\n"
example["used"] = True
if returned_output == "":
returned_output = "No previously asked Question/SQL pairs are available"
return returned_output
async def _arun(
self,
number_of_samples: str,
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("GetFewShotExamplesTool does not support async")
class SQLDatabaseToolkit(BaseToolkit):
"""Dataherald toolkit"""
db: SQLDatabase = Field(exclude=True)
context: List[dict] | None = Field(exclude=True, default=None)
few_shot_examples: List[dict] | None = Field(exclude=True, default=None)
instructions: List[dict] | None = Field(exclude=True, default=None)
db_scan: List[TableDescription] = Field(exclude=True)
embedding: OpenAIEmbeddings = Field(exclude=True)
@property
def dialect(self) -> str:
"""Return string representation of SQL dialect to use."""
return self.db.dialect
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools = []
query_sql_db_tool = QuerySQLDataBaseTool(db=self.db, context=self.context)
tools.append(query_sql_db_tool)
if self.instructions is not None:
tools.append(
GetUserInstructions(
db=self.db, context=self.context, instructions=self.instructions
)
)
get_current_datetime = SystemTime(db=self.db, context=self.context)
tools.append(get_current_datetime)
tables_sql_db_tool = TablesSQLDatabaseTool(
db=self.db,
context=self.context,
db_scan=self.db_scan,
embedding=self.embedding,
)
tools.append(tables_sql_db_tool)
schema_sql_db_tool = SchemaSQLDatabaseTool(
db=self.db, context=self.context, db_scan=self.db_scan
)
tools.append(schema_sql_db_tool)
info_relevant_tool = InfoRelevantColumns(
db=self.db, context=self.context, db_scan=self.db_scan
)
tools.append(info_relevant_tool)
column_sample_tool = ColumnEntityChecker(db=self.db, context=self.context)
tools.append(column_sample_tool)
if self.few_shot_examples is not None:
get_fewshot_examples_tool = GetFewShotExamples(
db=self.db,
context=self.context,
few_shot_examples=self.few_shot_examples,
)
tools.append(get_fewshot_examples_tool)
return tools
class DataheraldSQLAgent(SQLGenerator):
"""Dataherald SQL agent"""
max_number_of_examples: int = 5 # maximum number of question/SQL pairs
llm: Any = None
    def remove_duplicate_examples(self, fewshot_examples: List[dict]) -> List[dict]:
        returned_result = []
        seen_list = []
        for example in fewshot_examples:
if example["nl_question"] not in seen_list:
seen_list.append(example["nl_question"])
returned_result.append(example)
return returned_result
def create_sql_agent(
self,
toolkit: SQLDatabaseToolkit,
callback_manager: BaseCallbackManager | None = None,
prefix: str = AGENT_PREFIX,
suffix: str | None = None,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: List[str] | None = None,
max_examples: int = 20,
number_of_instructions: int = 1,
max_iterations: int
| None = int(os.getenv("AGENT_MAX_ITERATIONS", "20")), # noqa: B008
max_execution_time: float | None = None,
early_stopping_method: str = "force",
verbose: bool = False,
agent_executor_kwargs: Dict[str, Any] | None = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
"""Construct an SQL agent from an LLM and tools."""
tools = toolkit.get_tools()
if max_examples > 0 and number_of_instructions > 0:
plan = PLAN_WITH_FEWSHOT_EXAMPLES_AND_INSTRUCTIONS
suffix = SUFFIX_WITH_FEW_SHOT_SAMPLES
elif max_examples > 0:
plan = PLAN_WITH_FEWSHOT_EXAMPLES
suffix = SUFFIX_WITH_FEW_SHOT_SAMPLES
elif number_of_instructions > 0:
plan = PLAN_WITH_INSTRUCTIONS
suffix = SUFFIX_WITHOUT_FEW_SHOT_SAMPLES
else:
plan = PLAN_BASE
suffix = SUFFIX_WITHOUT_FEW_SHOT_SAMPLES
plan = plan.format(
dialect=toolkit.dialect,
max_examples=max_examples,
)
prefix = prefix.format(
dialect=toolkit.dialect, max_examples=max_examples, agent_plan=plan
)
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=self.llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
)
@override
def generate_response(
self,
user_question: Question,
database_connection: DatabaseConnection,
context: List[dict] = None,
generate_csv: bool = False,
) -> Response:
context_store = self.system.instance(ContextStore)
storage = self.system.instance(DB)
self.llm = self.model.get_model(
database_connection=database_connection,
temperature=0,
model_name=os.getenv("LLM_MODEL", "gpt-4-1106-preview"),
)
repository = TableDescriptionRepository(storage)
db_scan = repository.get_all_tables_by_db(
{
"db_connection_id": ObjectId(database_connection.id),
"status": TableDescriptionStatus.SYNCHRONIZED.value,
}
)
if not db_scan:
raise ValueError("No scanned tables found for database")
few_shot_examples, instructions = context_store.retrieve_context_for_question(
user_question, number_of_samples=self.max_number_of_examples
)
if few_shot_examples is not None:
new_fewshot_examples = self.remove_duplicate_examples(few_shot_examples)
number_of_samples = len(new_fewshot_examples)
else:
new_fewshot_examples = None
number_of_samples = 0
logger.info(f"Generating SQL response to question: {str(user_question.dict())}")
self.database = SQLDatabase.get_sql_engine(database_connection)
toolkit = SQLDatabaseToolkit(
db=self.database,
context=context,
few_shot_examples=new_fewshot_examples,
instructions=instructions,
db_scan=db_scan,
embedding=OpenAIEmbeddings(
openai_api_key=database_connection.decrypt_api_key(),
model=EMBEDDING_MODEL,
),
)
agent_executor = self.create_sql_agent(
toolkit=toolkit,
verbose=True,
max_examples=number_of_samples,
number_of_instructions=len(instructions) if instructions is not None else 0,
max_execution_time=os.getenv("DH_ENGINE_TIMEOUT", None),
)
agent_executor.return_intermediate_steps = True
agent_executor.handle_parsing_errors = True
with get_openai_callback() as cb:
try:
result = agent_executor({"input": user_question.question})
result = self.check_for_time_out_or_tool_limit(result)
except SQLInjectionError as e:
raise SQLInjectionError(e) from e
except EngineTimeOutORItemLimitError as e:
raise EngineTimeOutORItemLimitError(e) from e
except Exception as e:
return Response(
question_id=user_question.id,
total_tokens=cb.total_tokens,
total_cost=cb.total_cost,
sql_query="",
sql_generation_status="INVALID",
sql_query_result=None,
error_message=str(e),
)
sql_query_list = []
for step in result["intermediate_steps"]:
action = step[0]
if type(action) == AgentAction and action.tool == "sql_db_query":
query = self.format_sql_query(action.tool_input)
if "```sql" in query:
logger.info("**** Removing markdown formatting from the query\n")
query = query.replace("```sql", "").replace("```", "")
logger.info(
f"**** Query after removing markdown formatting: {query}\n"
)
sql_query_list.append(query)
intermediate_steps = self.format_intermediate_representations(
result["intermediate_steps"]
)
logger.info(f"cost: {str(cb.total_cost)} tokens: {str(cb.total_tokens)}")
response = Response(
question_id=user_question.id,
response=result["output"],
intermediate_steps=intermediate_steps,
total_tokens=cb.total_tokens,
total_cost=cb.total_cost,
sql_query=sql_query_list[-1] if len(sql_query_list) > 0 else "",
)
return self.create_sql_query_status(
self.database,
response.sql_query,
response,
top_k=TOP_K,
generate_csv=generate_csv,
database_connection=database_connection,
)
| [
"\n Input is an empty string, output is the current data and time.\n Always use this tool before generating a query if there is any time or date in the given question.\n ",
"\n Input: Comma-separated list of tables.\n Output: Schema of the specified tables.\n Use this tool to discover all columns of the relevant tables and identify potentially relevant columns.\n\n Example Input: table1, table2, table3\n ",
"\n Input: is an empty string.\n Output: Database admin instructions before generating the SQL query.\n The generated SQL query MUST follow the admin instructions even it contradicts with the given question.\n ",
"\n Input: Comma-separated list of potentially relevant columns with their corresponding table.\n Output: Information about the values inside the columns and their descriptions.\n Use this tool to gather details about potentially relevant columns. then, filter them, and identify the relevant ones.\n\n Example Input: table1 -> column1, table1 -> column2, table2 -> column1\n ",
"\n Input: Column name and its corresponding table, and an entity.\n Output: cell-values found in the column similar to the given entity.\n Use this tool to get cell values similar to the given entity in the given column.\n\n Example Input: table1 -> column2, entity\n ",
"\n Input: Number of required Question/SQL pairs.\n Output: List of similar Question/SQL pairs related to the given question.\n Use this tool to fetch previously asked Question/SQL pairs as examples for improving SQL query generation.\n For complex questions, request more examples to gain a better understanding of tables and columns and the SQL keywords to use.\n If the given question is very similar to one of the retrieved examples, it is recommended to use the same SQL query and modify it slightly to fit the given question.\n Always use this tool first and before any other tool!\n ",
"\n Input: SQL query.\n Output: Result from the database or an error message if the query is incorrect.\n If an error occurs, rewrite the query and retry.\n Use this tool to execute SQL queries.\n ",
"\n Input: Given question.\n Output: Comma-separated list of tables with their relevance scores, indicating their relevance to the question.\n Use this tool to identify the relevant tables for the given question.\n "
] |
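The table-relevance scoring in TablesSQLDatabaseTool reduces to cosine similarity between embedding vectors; a stripped-down, self-contained sketch of the same idea using numpy only (the vectors and table names are made up):

import numpy as np

def cosine_similarity(a, b):
    return round(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)), 4)

question_vec = np.array([0.1, 0.9, 0.3])  # stand-in for the embedded question
table_vecs = {
    "orders": np.array([0.2, 0.8, 0.1]),  # stand-ins for embedded table representations
    "users": np.array([0.9, 0.1, 0.4]),
}
for name, vec in table_vecs.items():
    print(f"Table: {name}, relevance score: {cosine_similarity(question_vec, vec)}")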
2024-01-10 | Dataherald/dataherald | dataherald~api~fastapi.py | import json
import logging
import os
import threading
import time
from typing import List
import openai
from bson import json_util
from bson.objectid import InvalidId, ObjectId
from fastapi import BackgroundTasks, HTTPException
from fastapi.encoders import jsonable_encoder
from fastapi.responses import FileResponse, JSONResponse
from overrides import override
from dataherald.api import API
from dataherald.api.types import Query
from dataherald.config import Settings, System
from dataherald.context_store import ContextStore
from dataherald.db import DB
from dataherald.db_scanner import Scanner
from dataherald.db_scanner.models.types import (
QueryHistory,
TableDescription,
TableDescriptionStatus,
)
from dataherald.db_scanner.repository.base import (
InvalidColumnNameError,
TableDescriptionRepository,
)
from dataherald.db_scanner.repository.query_history import QueryHistoryRepository
from dataherald.eval import Evaluator
from dataherald.finetuning.openai_finetuning import OpenAIFineTuning
from dataherald.repositories.base import ResponseRepository
from dataherald.repositories.database_connections import DatabaseConnectionRepository
from dataherald.repositories.finetunings import FinetuningsRepository
from dataherald.repositories.golden_records import GoldenRecordRepository
from dataherald.repositories.instructions import InstructionRepository
from dataherald.repositories.question import QuestionRepository
from dataherald.sql_database.base import (
InvalidDBConnectionError,
SQLDatabase,
SQLInjectionError,
)
from dataherald.sql_database.models.types import DatabaseConnection
from dataherald.sql_generator import SQLGenerator
from dataherald.sql_generator.generates_nl_answer import GeneratesNlAnswer
from dataherald.types import (
CancelFineTuningRequest,
CreateResponseRequest,
DatabaseConnectionRequest,
Finetuning,
FineTuningRequest,
GoldenRecord,
GoldenRecordRequest,
Instruction,
InstructionRequest,
Question,
QuestionRequest,
Response,
ScannerRequest,
TableDescriptionRequest,
UpdateInstruction,
)
from dataherald.utils.models_context_window import OPENAI_CONTEXT_WIDNOW_SIZES
from dataherald.utils.s3 import S3
logger = logging.getLogger(__name__)
MAX_ROWS_TO_CREATE_CSV_FILE = 50
def async_scanning(scanner, database, scanner_request, storage):
scanner.scan(
database,
scanner_request.db_connection_id,
scanner_request.table_names,
TableDescriptionRepository(storage),
QueryHistoryRepository(storage),
)
def async_fine_tuning(storage, model):
openai_fine_tuning = OpenAIFineTuning(storage, model)
openai_fine_tuning.create_fintuning_dataset()
openai_fine_tuning.create_fine_tuning_job()
def delete_file(file_location: str):
os.remove(file_location)
class FastAPI(API):
def __init__(self, system: System):
super().__init__(system)
self.system = system
self.storage = self.system.instance(DB)
@override
def heartbeat(self) -> int:
"""Returns the current server time in nanoseconds to check if the server is alive"""
return int(time.time_ns())
@override
def scan_db(
self, scanner_request: ScannerRequest, background_tasks: BackgroundTasks
) -> bool:
"""Takes a db_connection_id and scan all the tables columns"""
db_connection_repository = DatabaseConnectionRepository(self.storage)
db_connection = db_connection_repository.find_by_id(
scanner_request.db_connection_id
)
if not db_connection:
raise HTTPException(status_code=404, detail="Database connection not found")
try:
database = SQLDatabase.get_sql_engine(db_connection)
except Exception as e:
raise HTTPException( # noqa: B904
status_code=400,
detail=f"Unable to connect to db: {scanner_request.db_connection_id}, {e}",
)
scanner = self.system.instance(Scanner)
all_tables = scanner.get_all_tables_and_views(database)
if scanner_request.table_names:
for table in scanner_request.table_names:
if table not in all_tables:
raise HTTPException(
status_code=404, detail=f"Table named: {table} doesn't exist"
) # noqa: B904
else:
scanner_request.table_names = all_tables
scanner.synchronizing(
scanner_request.table_names,
scanner_request.db_connection_id,
TableDescriptionRepository(self.storage),
)
background_tasks.add_task(
async_scanning, scanner, database, scanner_request, self.storage
)
return True
@override
def answer_question(
self,
run_evaluator: bool = True,
generate_csv: bool = False,
question_request: QuestionRequest = None,
user_question: Question | None = None,
) -> Response:
"""Takes in an English question and answers it based on content from the registered databases"""
sql_generation = self.system.instance(SQLGenerator)
context_store = self.system.instance(ContextStore)
if not user_question:
user_question = Question(
question=question_request.question,
db_connection_id=question_request.db_connection_id,
)
question_repository = QuestionRepository(self.storage)
user_question = question_repository.insert(user_question)
logger.info(f"Answer question: {user_question.question}")
db_connection_repository = DatabaseConnectionRepository(self.storage)
database_connection = db_connection_repository.find_by_id(
user_question.db_connection_id
)
response_repository = ResponseRepository(self.storage)
if not database_connection:
response = response_repository.insert(
Response(
question_id=user_question.id,
error_message="Connections doesn't exist",
sql_query="",
)
)
return JSONResponse(status_code=404, content=jsonable_encoder(response))
try:
context = context_store.retrieve_context_for_question(user_question)
start_generated_answer = time.time()
generated_answer = sql_generation.generate_response(
user_question,
database_connection,
context[0],
generate_csv,
)
logger.info("Starts evaluator...")
if run_evaluator:
evaluator = self.system.instance(Evaluator)
confidence_score = evaluator.get_confidence_score(
user_question, generated_answer, database_connection
)
generated_answer.confidence_score = confidence_score
except Exception as e:
response = response_repository.insert(
Response(
question_id=user_question.id, error_message=str(e), sql_query=""
)
)
return JSONResponse(
status_code=400,
content=jsonable_encoder(response),
)
if (
generate_csv
and len(generated_answer.sql_query_result.rows)
> MAX_ROWS_TO_CREATE_CSV_FILE
):
generated_answer.sql_query_result = None
generated_answer.exec_time = time.time() - start_generated_answer
response_repository = ResponseRepository(self.storage)
return response_repository.insert(generated_answer)
@override
def answer_question_with_timeout(
self,
run_evaluator: bool = True,
generate_csv: bool = False,
question_request: QuestionRequest = None,
) -> Response:
result = None
exception = None
user_question = Question(
question=question_request.question,
db_connection_id=question_request.db_connection_id,
)
question_repository = QuestionRepository(self.storage)
user_question = question_repository.insert(user_question)
stop_event = threading.Event()
def run_and_catch_exceptions():
nonlocal result, exception
if not stop_event.is_set():
result = self.answer_question(
run_evaluator, generate_csv, None, user_question
)
thread = threading.Thread(target=run_and_catch_exceptions)
thread.start()
thread.join(timeout=int(os.getenv("DH_ENGINE_TIMEOUT")))
if thread.is_alive():
stop_event.set()
response_repository = ResponseRepository(self.storage)
response = response_repository.insert(
Response(
question_id=user_question.id,
error_message="Timeout Error",
sql_query="",
)
)
return JSONResponse(status_code=400, content=jsonable_encoder(response))
return result
@override
def create_database_connection(
self, database_connection_request: DatabaseConnectionRequest
) -> DatabaseConnection:
try:
db_connection = DatabaseConnection(
alias=database_connection_request.alias,
uri=database_connection_request.connection_uri,
path_to_credentials_file=database_connection_request.path_to_credentials_file,
llm_api_key=database_connection_request.llm_api_key,
use_ssh=database_connection_request.use_ssh,
ssh_settings=database_connection_request.ssh_settings,
file_storage=database_connection_request.file_storage,
)
SQLDatabase.get_sql_engine(db_connection, True)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e)) # noqa: B904
except InvalidDBConnectionError as e:
raise HTTPException( # noqa: B904
status_code=400,
detail=f"{e}",
)
db_connection_repository = DatabaseConnectionRepository(self.storage)
return db_connection_repository.insert(db_connection)
@override
def list_database_connections(self) -> list[DatabaseConnection]:
db_connection_repository = DatabaseConnectionRepository(self.storage)
return db_connection_repository.find_all()
@override
def update_database_connection(
self,
db_connection_id: str,
database_connection_request: DatabaseConnectionRequest,
) -> DatabaseConnection:
try:
db_connection = DatabaseConnection(
id=db_connection_id,
alias=database_connection_request.alias,
uri=database_connection_request.connection_uri,
path_to_credentials_file=database_connection_request.path_to_credentials_file,
llm_api_key=database_connection_request.llm_api_key,
use_ssh=database_connection_request.use_ssh,
ssh_settings=database_connection_request.ssh_settings,
file_storage=database_connection_request.file_storage,
)
SQLDatabase.get_sql_engine(db_connection, True)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e)) # noqa: B904
except InvalidDBConnectionError as e:
raise HTTPException( # noqa: B904
status_code=400,
detail=f"{e}",
)
db_connection_repository = DatabaseConnectionRepository(self.storage)
return db_connection_repository.update(db_connection)
@override
def update_table_description(
self,
table_description_id: str,
table_description_request: TableDescriptionRequest,
) -> TableDescription:
scanner_repository = TableDescriptionRepository(self.storage)
try:
table = scanner_repository.find_by_id(table_description_id)
except InvalidId as e:
raise HTTPException(status_code=400, detail=str(e)) from e
if not table:
raise HTTPException(
status_code=404, detail="Scanned database table not found"
)
try:
return scanner_repository.update_fields(table, table_description_request)
except InvalidColumnNameError as e:
raise HTTPException(status_code=400, detail=str(e)) from e
@override
def list_table_descriptions(
self, db_connection_id: str, table_name: str | None = None
) -> list[TableDescription]:
scanner_repository = TableDescriptionRepository(self.storage)
table_descriptions = scanner_repository.find_by(
{"db_connection_id": ObjectId(db_connection_id), "table_name": table_name}
)
if db_connection_id:
db_connection_repository = DatabaseConnectionRepository(self.storage)
db_connection = db_connection_repository.find_by_id(db_connection_id)
database = SQLDatabase.get_sql_engine(db_connection)
scanner = self.system.instance(Scanner)
all_tables = scanner.get_all_tables_and_views(database)
for table_description in table_descriptions:
if table_description.table_name not in all_tables:
table_description.status = TableDescriptionStatus.DEPRECATED.value
else:
all_tables.remove(table_description.table_name)
for table in all_tables:
table_descriptions.append(
TableDescription(
table_name=table,
status=TableDescriptionStatus.NOT_SYNCHRONIZED.value,
db_connection_id=db_connection_id,
columns=[],
)
)
return table_descriptions
@override
def get_table_description(self, table_description_id: str) -> TableDescription:
scanner_repository = TableDescriptionRepository(self.storage)
try:
result = scanner_repository.find_by_id(table_description_id)
except InvalidId as e:
raise HTTPException(status_code=400, detail=str(e)) from e
if not result:
raise HTTPException(status_code=404, detail="Table description not found")
return result
@override
def get_query_history(self, db_connection_id: str) -> list[QueryHistory]:
query_history_repository = QueryHistoryRepository(self.storage)
return query_history_repository.find_by(
{"db_connection_id": ObjectId(db_connection_id)}
)
@override
def get_responses(self, question_id: str | None = None) -> list[Response]:
response_repository = ResponseRepository(self.storage)
query = {}
if question_id:
query = {"question_id": ObjectId(question_id)}
return response_repository.find_by(query)
@override
def get_response(self, response_id: str) -> Response:
response_repository = ResponseRepository(self.storage)
try:
result = response_repository.find_by_id(response_id)
except InvalidId as e:
raise HTTPException(status_code=400, detail=str(e)) from e
if not result:
raise HTTPException(status_code=404, detail="Question not found")
return result
@override
def get_response_file(
self, response_id: str, background_tasks: BackgroundTasks
) -> FileResponse:
response_repository = ResponseRepository(self.storage)
question_repository = QuestionRepository(self.storage)
db_connection_repository = DatabaseConnectionRepository(self.storage)
try:
result = response_repository.find_by_id(response_id)
question = question_repository.find_by_id(result.question_id)
db_connection = db_connection_repository.find_by_id(
question.db_connection_id
)
except InvalidId as e:
raise HTTPException(status_code=400, detail=str(e)) from e
if not result:
raise HTTPException(
status_code=404, detail="Question, response, or db_connection not found"
)
# Check if the file is to be returned from server (locally) or from S3
if Settings().only_store_csv_files_locally:
file_location = result.csv_file_path
# check if the file exists
if not os.path.exists(file_location):
raise HTTPException(
status_code=404,
detail="CSV file not found. Possibly deleted/removed from server.",
)
else:
s3 = S3()
file_location = s3.download(
result.csv_file_path, db_connection.file_storage
)
background_tasks.add_task(delete_file, file_location)
return FileResponse(
file_location,
media_type="text/csv",
)
@override
def update_response(self, response_id: str) -> Response:
response_repository = ResponseRepository(self.storage)
try:
response = response_repository.find_by_id(response_id)
except InvalidId as e:
raise HTTPException(status_code=400, detail=str(e)) from e
if not response:
raise HTTPException(status_code=404, detail="Question not found")
start_generated_answer = time.time()
try:
generates_nl_answer = GeneratesNlAnswer(self.system, self.storage)
response = generates_nl_answer.execute(response)
response.exec_time = time.time() - start_generated_answer
response_repository.update(response)
except openai.AuthenticationError as e:
raise HTTPException(status_code=400, detail=str(e)) from e
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e)) from e
except SQLInjectionError as e:
raise HTTPException(status_code=404, detail=str(e)) from e
return response
@override
def get_questions(self, db_connection_id: str | None = None) -> list[Question]:
question_repository = QuestionRepository(self.storage)
query = {}
if db_connection_id:
query = {"db_connection_id": ObjectId(db_connection_id)}
return question_repository.find_by(query)
@override
def get_question(self, question_id: str) -> Question:
question_repository = QuestionRepository(self.storage)
try:
result = question_repository.find_by_id(question_id)
except InvalidId as e:
raise HTTPException(status_code=400, detail=str(e)) from e
if not result:
raise HTTPException(status_code=404, detail="Question not found")
return result
@override
def add_golden_records(
self, golden_records: List[GoldenRecordRequest]
) -> List[GoldenRecord]:
"""Takes in a list of NL <> SQL pairs and stores them to be used in prompts to the LLM"""
context_store = self.system.instance(ContextStore)
return context_store.add_golden_records(golden_records)
@override
def execute_sql_query(self, query: Query) -> tuple[str, dict]:
"""Executes a SQL query against the database and returns the results"""
db_connection_repository = DatabaseConnectionRepository(self.storage)
database_connection = db_connection_repository.find_by_id(
query.db_connection_id
)
if not database_connection:
raise HTTPException(status_code=404, detail="Database connection not found")
database = SQLDatabase.get_sql_engine(database_connection)
try:
result = database.run_sql(query.sql_query)
except SQLInjectionError as e:
raise HTTPException(status_code=404, detail=str(e)) from e
return result
@override
def create_response(
self,
run_evaluator: bool = True,
sql_response_only: bool = False,
generate_csv: bool = False,
query_request: CreateResponseRequest = None, # noqa: ARG002
) -> Response:
question_repository = QuestionRepository(self.storage)
response_repository = ResponseRepository(self.storage)
user_question = question_repository.find_by_id(query_request.question_id)
if not user_question:
raise HTTPException(status_code=404, detail="Question not found")
db_connection_repository = DatabaseConnectionRepository(self.storage)
database_connection = db_connection_repository.find_by_id(
user_question.db_connection_id
)
if not database_connection:
raise HTTPException(status_code=404, detail="Database connection not found")
try:
if not query_request.sql_query:
sql_generation = self.system.instance(SQLGenerator)
context_store = self.system.instance(ContextStore)
context = context_store.retrieve_context_for_question(user_question)
start_generated_answer = time.time()
response = sql_generation.generate_response(
user_question,
database_connection,
context[0],
generate_csv,
)
else:
response = Response(
question_id=query_request.question_id,
sql_query=query_request.sql_query,
)
start_generated_answer = time.time()
generates_nl_answer = GeneratesNlAnswer(self.system, self.storage)
response = generates_nl_answer.execute(
response, sql_response_only, generate_csv
)
except openai.AuthenticationError as e:
raise HTTPException(status_code=400, detail=str(e)) from e
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e)) from e
except SQLInjectionError as e:
raise HTTPException(status_code=404, detail=str(e)) from e
if run_evaluator:
evaluator = self.system.instance(Evaluator)
confidence_score = evaluator.get_confidence_score(
user_question, response, database_connection
)
response.confidence_score = confidence_score
if (
generate_csv
and len(response.sql_query_result.rows) > MAX_ROWS_TO_CREATE_CSV_FILE
):
response.sql_query_result = None
response.exec_time = time.time() - start_generated_answer
response_repository.insert(response)
return response
@override
def delete_golden_record(self, golden_record_id: str) -> dict:
context_store = self.system.instance(ContextStore)
status = context_store.remove_golden_records([golden_record_id])
return {"status": status}
@override
def get_golden_records(
self, db_connection_id: str = None, page: int = 1, limit: int = 10
) -> List[GoldenRecord]:
golden_records_repository = GoldenRecordRepository(self.storage)
if db_connection_id:
return golden_records_repository.find_by(
{"db_connection_id": ObjectId(db_connection_id)},
page=page,
limit=limit,
)
return golden_records_repository.find_all(page=page, limit=limit)
@override
def add_instruction(self, instruction_request: InstructionRequest) -> Instruction:
instruction_repository = InstructionRepository(self.storage)
instruction = Instruction(
instruction=instruction_request.instruction,
db_connection_id=instruction_request.db_connection_id,
)
return instruction_repository.insert(instruction)
@override
def get_instructions(
self, db_connection_id: str = None, page: int = 1, limit: int = 10
) -> List[Instruction]:
instruction_repository = InstructionRepository(self.storage)
if db_connection_id:
return instruction_repository.find_by(
{"db_connection_id": ObjectId(db_connection_id)},
page=page,
limit=limit,
)
return instruction_repository.find_all(page=page, limit=limit)
@override
def delete_instruction(self, instruction_id: str) -> dict:
instruction_repository = InstructionRepository(self.storage)
deleted = instruction_repository.delete_by_id(instruction_id)
if deleted == 0:
raise HTTPException(status_code=404, detail="Instruction not found")
return {"status": "success"}
@override
def update_instruction(
self,
instruction_id: str,
instruction_request: UpdateInstruction,
) -> Instruction:
instruction_repository = InstructionRepository(self.storage)
instruction = instruction_repository.find_by_id(instruction_id)
if not instruction:
raise HTTPException(status_code=404, detail="Instruction not found")
updated_instruction = Instruction(
id=instruction_id,
instruction=instruction_request.instruction,
db_connection_id=instruction.db_connection_id,
)
instruction_repository.update(updated_instruction)
return json.loads(json_util.dumps(updated_instruction))
@override
def create_finetuning_job(
self, fine_tuning_request: FineTuningRequest, background_tasks: BackgroundTasks
) -> Finetuning:
db_connection_repository = DatabaseConnectionRepository(self.storage)
db_connection = db_connection_repository.find_by_id(
fine_tuning_request.db_connection_id
)
if not db_connection:
raise HTTPException(status_code=404, detail="Database connection not found")
golden_records_repository = GoldenRecordRepository(self.storage)
golden_records = []
if fine_tuning_request.golden_records:
for golden_record_id in fine_tuning_request.golden_records:
golden_record = golden_records_repository.find_by_id(golden_record_id)
if not golden_record:
raise HTTPException(
status_code=404, detail="Golden record not found"
)
golden_records.append(golden_record)
else:
golden_records = golden_records_repository.find_by(
{"db_connection_id": ObjectId(fine_tuning_request.db_connection_id)},
page=0,
limit=0,
)
if not golden_records:
raise HTTPException(status_code=404, detail="No golden records found")
if fine_tuning_request.base_llm.model_name not in OPENAI_CONTEXT_WIDNOW_SIZES:
raise HTTPException(
status_code=400,
detail=f"Model {fine_tuning_request.base_llm.model_name} not supported",
)
model_repository = FinetuningsRepository(self.storage)
model = model_repository.insert(
Finetuning(
db_connection_id=fine_tuning_request.db_connection_id,
alias=fine_tuning_request.alias,
base_llm=fine_tuning_request.base_llm,
golden_records=[
str(golden_record.id) for golden_record in golden_records
],
)
)
background_tasks.add_task(async_fine_tuning, self.storage, model)
return model
@override
def cancel_finetuning_job(
self, cancel_fine_tuning_request: CancelFineTuningRequest
) -> Finetuning:
model_repository = FinetuningsRepository(self.storage)
model = model_repository.find_by_id(cancel_fine_tuning_request.finetuning_id)
if not model:
raise HTTPException(status_code=404, detail="Model not found")
if model.status == "succeeded":
raise HTTPException(
status_code=400, detail="Model has already succeeded. Cannot cancel."
)
if model.status == "failed":
raise HTTPException(
status_code=400, detail="Model has already failed. Cannot cancel."
)
if model.status == "cancelled":
raise HTTPException(
status_code=400, detail="Model has already been cancelled."
)
openai_fine_tuning = OpenAIFineTuning(self.storage, model)
return openai_fine_tuning.cancel_finetuning_job()
@override
def get_finetuning_job(self, finetuning_job_id: str) -> Finetuning:
model_repository = FinetuningsRepository(self.storage)
model = model_repository.find_by_id(finetuning_job_id)
if not model:
raise HTTPException(status_code=404, detail="Model not found")
openai_fine_tuning = OpenAIFineTuning(self.storage, model)
return openai_fine_tuning.retrieve_finetuning_job()
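# --- Illustrative sketch, not part of the original module ---
# Minimal standalone version of the thread-join-with-timeout pattern that
# answer_question_with_timeout uses above. The names run_with_timeout/slow_task and the
# 2-second limit are hypothetical; only the standard library is needed.
import threading
import time


def run_with_timeout(fn, timeout_seconds: float):
    holder = {}

    def target():
        holder["value"] = fn()

    worker = threading.Thread(target=target)
    worker.start()
    worker.join(timeout=timeout_seconds)
    if worker.is_alive():
        # Stop waiting and report a timeout; the worker keeps running in the background,
        # much as the endpoint above returns a "Timeout Error" response while its thread finishes.
        return None, "Timeout Error"
    return holder.get("value"), None


def slow_task():
    time.sleep(1)
    return 42


print(run_with_timeout(slow_task, timeout_seconds=2))  # (42, None)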
| [] |
2024-01-10 | Dataherald/dataherald | dataherald~vector_store~pinecone.py | import os
from typing import Any, List
import pinecone
from langchain.embeddings import OpenAIEmbeddings
from overrides import override
from dataherald.config import System
from dataherald.db import DB
from dataherald.repositories.database_connections import DatabaseConnectionRepository
from dataherald.vector_store import VectorStore
EMBEDDING_MODEL = "text-embedding-ada-002"
class Pinecone(VectorStore):
def __init__(self, system: System):
super().__init__(system)
api_key = os.environ.get("PINECONE_API_KEY")
environment = os.environ.get("PINECONE_ENVIRONMENT")
if api_key is None:
raise ValueError("PINECONE_API_KEY environment variable not set")
if environment is None:
raise ValueError("PINECONE_ENVIRONMENT environment variable not set")
pinecone.init(api_key=api_key, environment=environment)
@override
def query(
self,
query_texts: List[str],
db_connection_id: str,
collection: str,
num_results: int,
) -> list:
index = pinecone.Index(collection)
db_connection_repository = DatabaseConnectionRepository(
self.system.instance(DB)
)
database_connection = db_connection_repository.find_by_id(db_connection_id)
embedding = OpenAIEmbeddings(
openai_api_key=database_connection.decrypt_api_key(), model=EMBEDDING_MODEL
)
xq = embedding.embed_query(query_texts[0])
query_response = index.query(
queries=[xq],
filter={
"db_connection_id": {"$eq": db_connection_id},
},
top_k=num_results,
include_metadata=True,
)
return query_response.to_dict()["results"][0]["matches"]
@override
def add_record(
self,
documents: str,
db_connection_id: str,
collection: str,
metadata: Any,
ids: List,
):
if collection not in pinecone.list_indexes():
self.create_collection(collection)
db_connection_repository = DatabaseConnectionRepository(
self.system.instance(DB)
)
database_connection = db_connection_repository.find_by_id(db_connection_id)
embedding = OpenAIEmbeddings(
openai_api_key=database_connection.decrypt_api_key(), model=EMBEDDING_MODEL
)
index = pinecone.Index(collection)
embeds = embedding.embed_documents([documents])
record = [(ids[0], embeds, metadata[0])]
index.upsert(vectors=record)
@override
def delete_record(self, collection: str, id: str):
if collection not in pinecone.list_indexes():
self.create_collection(collection)
index = pinecone.Index(collection)
index.delete(ids=[id])
@override
def delete_collection(self, collection: str):
return pinecone.delete_index(collection)
@override
def create_collection(self, collection: str):
pinecone.create_index(name=collection, dimension=1536, metric="cosine")
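# --- Illustrative sketch, not part of the original module ---
# End-to-end use of the same legacy pinecone-client API the class above relies on: create an
# index, upsert one vector whose metadata carries a db_connection_id, then query with the
# metadata filter used in Pinecone.query. The index name, record id, connection id, and the
# toy 1536-dim vector are hypothetical; PINECONE_API_KEY and PINECONE_ENVIRONMENT must be set.
import os
import pinecone

pinecone.init(
    api_key=os.environ["PINECONE_API_KEY"],
    environment=os.environ["PINECONE_ENVIRONMENT"],
)
demo_index_name = "golden-records-demo"
if demo_index_name not in pinecone.list_indexes():
    pinecone.create_index(name=demo_index_name, dimension=1536, metric="cosine")
demo_index = pinecone.Index(demo_index_name)

toy_vector = [0.01] * 1536  # stands in for a text-embedding-ada-002 embedding
demo_index.upsert(vectors=[("record-1", toy_vector, {"db_connection_id": "demo-connection"})])

# Scope the search to a single database connection, exactly as query() does above.
matches = demo_index.query(
    queries=[toy_vector],
    filter={"db_connection_id": {"$eq": "demo-connection"}},
    top_k=3,
    include_metadata=True,
).to_dict()["results"][0]["matches"]
print(matches)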
| [] |
2024-01-10 | Dataherald/dataherald | dataherald~sql_generator~generates_nl_answer.py | import os
from langchain.chains import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from dataherald.model.chat_model import ChatModel
from dataherald.repositories.database_connections import DatabaseConnectionRepository
from dataherald.repositories.question import QuestionRepository
from dataherald.sql_database.base import SQLDatabase
from dataherald.sql_generator.create_sql_query_status import create_sql_query_status
from dataherald.types import Response
SYSTEM_TEMPLATE = """ Given a Question, a Sql query and the sql query result try to answer the question
If the sql query result doesn't answer the question just say 'I don't know'
"""
HUMAN_TEMPLATE = """ Answer the question given the sql query and the sql query result.
Question: {question}
SQL query: {sql_query}
SQL query result: {sql_query_result}
"""
class GeneratesNlAnswer:
def __init__(self, system, storage):
self.system = system
self.storage = storage
self.model = ChatModel(self.system)
def execute(
self,
query_response: Response,
sql_response_only: bool = False,
generate_csv: bool = False,
) -> Response:
question_repository = QuestionRepository(self.storage)
question = question_repository.find_by_id(query_response.question_id)
db_connection_repository = DatabaseConnectionRepository(self.storage)
database_connection = db_connection_repository.find_by_id(
question.db_connection_id
)
self.llm = self.model.get_model(
database_connection=database_connection,
temperature=0,
model_name=os.getenv("LLM_MODEL", "gpt-4"),
)
database = SQLDatabase.get_sql_engine(database_connection)
if not query_response.sql_query_result:
query_response = create_sql_query_status(
database,
query_response.sql_query,
query_response,
top_k=int(os.getenv("UPPER_LIMIT_QUERY_RETURN_ROWS", "50")),
generate_csv=generate_csv,
database_connection=database_connection,
)
if query_response.csv_file_path:
query_response.response = None
return query_response
if not sql_response_only:
system_message_prompt = SystemMessagePromptTemplate.from_template(
SYSTEM_TEMPLATE
)
human_message_prompt = HumanMessagePromptTemplate.from_template(
HUMAN_TEMPLATE
)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
chain = LLMChain(llm=self.llm, prompt=chat_prompt)
nl_resp = chain.run(
question=question.question,
sql_query=query_response.sql_query,
sql_query_result=str(query_response.sql_query_result),
)
query_response.response = nl_resp
return query_response
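# --- Illustrative sketch, not part of the original module ---
# Offline look at the chat payload GeneratesNlAnswer builds from SYSTEM_TEMPLATE and
# HUMAN_TEMPLATE before the LLMChain call. The question, SQL, and result values are
# hypothetical; no API key is needed because only prompt formatting is exercised.
# (Reuses the prompt classes and templates already imported/defined above in this module.)
demo_prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template(SYSTEM_TEMPLATE),
        HumanMessagePromptTemplate.from_template(HUMAN_TEMPLATE),
    ]
)
demo_messages = demo_prompt.format_messages(
    question="How many singers are there?",
    sql_query="SELECT COUNT(*) FROM singer",
    sql_query_result="[(42,)]",
)
for demo_message in demo_messages:
    print(type(demo_message).__name__, demo_message.content)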
| [
" Answer the question given the sql query and the sql query result.\nQuestion: {question}\nSQL query: {sql_query}\nSQL query result: {sql_query_result}\n",
"t know",
" Given a Question, a Sql query and the sql query result try to answer the question\nIf the sql query result doesn't answer the question just say 'I don't know'\n",
"[PLACEHOLDER, PLACEHOLDER]",
"t answer the question just say "
] |
2024-01-10 | Dataherald/dataherald | dataherald~sql_generator~llamaindex.py | """A wrapper for the SQL generation functions in langchain"""
import logging
import os
import time
from typing import Any, List
import tiktoken
from langchain.callbacks.openai_info import MODEL_COST_PER_1K_TOKENS
from llama_index import (
LLMPredictor,
ServiceContext,
VectorStoreIndex,
)
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.indices.struct_store import SQLTableRetrieverQueryEngine
from llama_index.objects import ObjectIndex, SQLTableNodeMapping, SQLTableSchema
from overrides import override
from sqlalchemy import MetaData
from dataherald.sql_database.base import SQLDatabase
from dataherald.sql_database.models.types import DatabaseConnection
from dataherald.sql_generator import SQLGenerator
from dataherald.types import Question, Response
logger = logging.getLogger(__name__)
class LlamaIndexSQLGenerator(SQLGenerator):
llm: Any | None = None
@override
def generate_response(
self,
user_question: Question,
database_connection: DatabaseConnection,
context: List[dict] = None,
generate_csv: bool = False,
) -> Response:
start_time = time.time()
logger.info(f"Generating SQL response to question: {str(user_question.dict())}")
self.llm = self.model.get_model(
database_connection=database_connection,
temperature=0,
model_name=os.getenv("LLM_MODEL", "gpt-4-1106-preview"),
)
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(self.llm.model_name).encode,
verbose=False, # set to true to see usage printed to the console
)
callback_manager = CallbackManager([token_counter])
self.database = SQLDatabase.get_sql_engine(database_connection)
db_engine = self.database.engine
# load all table definitions
metadata_obj = MetaData()
metadata_obj.reflect(db_engine)
table_schema_objs = []
table_node_mapping = SQLTableNodeMapping(self.database)
if context is not None:
samples_prompt_string = "The following are some similar previous questions and their correct SQL queries from these databases: \
\n"
for sample in context:
samples_prompt_string += (
f"Question: {sample['nl_question']} \nSQL: {sample['sql_query']} \n"
)
question_with_context = (
f"{user_question.question} An example of a similar question and the query that was generated to answer it \
is the following {samples_prompt_string}"
if context is not None
else user_question.question
)
for table_name in metadata_obj.tables.keys():
table_schema_objs.append(SQLTableSchema(table_name=table_name))
llm_predictor = LLMPredictor(llm=self.llm)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, callback_manager=callback_manager
)
obj_index = ObjectIndex.from_objects(
table_schema_objs,
table_node_mapping,
VectorStoreIndex,
)
logger.info(question_with_context)
# We construct a SQLTableRetrieverQueryEngine.
# Note that we pass in the ObjectRetriever so that we can dynamically retrieve the table during query-time.
# ObjectRetriever: A retriever that retrieves a set of query engine tools.
query_engine = SQLTableRetrieverQueryEngine(
self.database,
obj_index.as_retriever(similarity_top_k=1),
service_context=service_context,
)
result = query_engine.query(question_with_context)
total_cost = (
token_counter.total_llm_token_count
/ 1000
* MODEL_COST_PER_1K_TOKENS[self.llm.model_name]
)
logger.info(
f"total cost: {str(total_cost)} {str(token_counter.total_llm_token_count)}"
)
exec_time = time.time() - start_time
response = Response(
question_id=user_question.id,
response=result.response,
exec_time=exec_time,
total_tokens=token_counter.total_llm_token_count,
total_cost=total_cost,
intermediate_steps=[str(result.metadata)],
sql_query=self.format_sql_query(result.metadata["sql_query"]),
)
return self.create_sql_query_status(
self.database,
response.sql_query,
response,
generate_csv=generate_csv,
database_connection=database_connection,
)
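# --- Illustrative sketch, not part of the original module ---
# MODEL_COST_PER_1K_TOKENS maps a model name to a price per 1,000 tokens, so a call's dollar
# cost is tokens / 1000 * rate. The token count and rate below are hypothetical.
demo_total_tokens = 1_850
demo_rate_per_1k = 0.03  # e.g. a $0.03 per 1K tokens price point
demo_cost = demo_total_tokens / 1000 * demo_rate_per_1k
print(f"${demo_cost:.4f}")  # $0.0555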
| [
"The following are some similar previous questions and their correct SQL queries from these databases: \n",
"Question: PLACEHOLDER \nSQL: PLACEHOLDER \n"
] |
2024-01-10 | Dataherald/dataherald | dataherald~sql_generator~langchain_sqlchain.py | """A wrapper for the SQL generation functions in langchain"""
import logging
import os
import time
from typing import Any, List
from langchain import SQLDatabaseChain
from langchain.callbacks import get_openai_callback
from overrides import override
from dataherald.sql_database.base import SQLDatabase
from dataherald.sql_database.models.types import DatabaseConnection
from dataherald.sql_generator import SQLGenerator
from dataherald.types import Question, Response
logger = logging.getLogger(__name__)
PROMPT_WITHOUT_CONTEXT = """
Given an input question,
first create a syntactically correct postgresql query to run,
then look at the results of the query and return the answer.
The question:
{user_question}
"""
PROMPT_WITH_CONTEXT = """
Given an input question,
first create a syntactically correct postgresql query to run,
then look at the results of the query and return the answer.
An example of a similar question and the query that was generated to answer it is the following
{context}
The question:
{user_question}
"""
class LangChainSQLChainSQLGenerator(SQLGenerator):
llm: Any | None = None
@override
def generate_response(
self,
user_question: Question,
database_connection: DatabaseConnection,
context: List[dict] = None,
generate_csv: bool = False,
) -> Response:
start_time = time.time()
self.llm = self.model.get_model(
database_connection=database_connection,
temperature=0,
model_name=os.getenv("LLM_MODEL", "gpt-4-1106-preview"),
)
self.database = SQLDatabase.get_sql_engine(database_connection)
logger.info(
f"Generating SQL response to question: {str(user_question.dict())} with passed context {context}"
)
if context is not None:
samples_prompt_string = "The following are some similar previous questions and their correct SQL queries from these databases: \
\n"
for sample in context:
samples_prompt_string += (
f"Question: {sample['nl_question']} \nSQL: {sample['sql_query']} \n"
)
prompt = PROMPT_WITH_CONTEXT.format(
user_question=user_question.question, context=samples_prompt_string
)
else:
prompt = PROMPT_WITHOUT_CONTEXT.format(user_question=user_question.question)
# should top_k be an argument?
db_chain = SQLDatabaseChain.from_llm(
self.llm, self.database, top_k=3, return_intermediate_steps=True
)
with get_openai_callback() as cb:
result = db_chain(prompt)
intermediate_steps = self.format_intermediate_representations(
result["intermediate_steps"]
)
exec_time = time.time() - start_time
logger.info(
f"cost: {str(cb.total_cost)} tokens: {str(cb.total_tokens)} time: {str(exec_time)}"
)
response = Response(
question_id=user_question.id,
response=result["result"],
intermediate_steps=intermediate_steps,
exec_time=exec_time,
total_cost=cb.total_cost,
total_tokens=cb.total_tokens,
sql_query=self.format_sql_query(result["intermediate_steps"][1]),
)
return self.create_sql_query_status(
self.database,
response.sql_query,
response,
generate_csv=generate_csv,
database_connection=database_connection,
)
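# --- Illustrative sketch, not part of the original module ---
# Offline demonstration of how generate_response assembles the few-shot prompt from retrieved
# context before handing it to SQLDatabaseChain. The two sample rows and the user question are
# hypothetical; PROMPT_WITH_CONTEXT is the template defined above.
demo_context = [
    {"nl_question": "How many singers are there?", "sql_query": "SELECT COUNT(*) FROM singer"},
    {"nl_question": "List all concerts in 2014", "sql_query": "SELECT * FROM concert WHERE year = 2014"},
]
demo_samples = (
    "The following are some similar previous questions and their correct SQL queries from these databases: \n"
)
for demo_sample in demo_context:
    demo_samples += f"Question: {demo_sample['nl_question']} \nSQL: {demo_sample['sql_query']} \n"
print(PROMPT_WITH_CONTEXT.format(user_question="How many stadiums are there?", context=demo_samples))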
| [
"The following are some similar previous questions and their correct SQL queries from these databases: \n",
"\nGiven an input question,\nfirst create a syntactically correct postgresql query to run,\nthen look at the results of the query and return the answer.\n\nThe question:\n{user_question}\n",
"Question: PLACEHOLDER \nSQL: PLACEHOLDER \n",
"\nGiven an input question,\nfirst create a syntactically correct postgresql query to run,\nthen look at the results of the query and return the answer.\n\nAn example of a similar question and the query that was generated to answer it is the following\n{context}\n\nThe question:\n{user_question}\n"
] |
2024-01-10 | Dataherald/dataherald | dataherald~model~chat_model.py | from typing import Any
from langchain.chat_models import ChatAnthropic, ChatCohere, ChatGooglePalm, ChatOpenAI
from overrides import override
from dataherald.model import LLMModel
from dataherald.sql_database.models.types import DatabaseConnection
class ChatModel(LLMModel):
def __init__(self, system):
super().__init__(system)
@override
def get_model(
self,
database_connection: DatabaseConnection,
model_family="openai",
model_name="gpt-4-32k",
**kwargs: Any
) -> Any:
api_key = database_connection.decrypt_api_key()
if model_family == "openai":
return ChatOpenAI(model_name=model_name, openai_api_key=api_key, **kwargs)
if model_family == "anthropic":
return ChatAnthropic(
model_name=model_name, anthropic_api_key=api_key, **kwargs
)
if model_family == "google":
return ChatGooglePalm(
model_name=model_name, google_api_key=api_key, **kwargs
)
if model_family == "cohere":
return ChatCohere(model_name=model_name, cohere_api_key=api_key, **kwargs)
raise ValueError("No valid API key environment variable found")
| [] |
2024-01-10 | Dataherald/dataherald | dataherald~eval~eval_agent.py | import logging
import os
import re
import time
from difflib import SequenceMatcher
from typing import Any, Dict, List
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.chains.llm import LLMChain
from langchain.tools import BaseTool
from langchain.tools.sql_database.tool import (
BaseSQLDatabaseTool,
InfoSQLDatabaseTool,
QuerySQLDataBaseTool,
)
from overrides import override
from pydantic import Field, confloat
from sqlalchemy import MetaData, Table, select
from dataherald.config import System
from dataherald.eval import Evaluation, Evaluator
from dataherald.sql_database.base import SQLDatabase
from dataherald.sql_database.models.types import DatabaseConnection
from dataherald.types import Question, Response
logger = logging.getLogger(__name__)
AGENT_PREFIX: str = """You are a {dialect} expert.
Given a question and a SQL query, analyze the correctness of the SQL query and provide a score as the final answer.
Score indicates how correctly and accurately SQL query answers the question.
Note that the score should be between 0 and 100. Higher scores means the SQL Query is more accurate.
Think step by step to provide the score.
Perform all of the below checks by using the tools:
1) columns used in the SELECT clause should correspond exactly to what user wants.
2) for each of the conditions in the WHERE clause:
2.1) correct columns should be used to filter the rows (always use entity_finder tool to confirm the correctness)
2.2) database value used in the condition should handle different scenarios or edge cases
3) all of the calculations should be double checked
4) nested queries and sub-queries should be broken down to simpler parts and all of those part should be checked.
5) the columns used for joining tables must have matching values in both tables
6) execute the given SQL query to check its results and compare it to the expectations
Always predict the score equal to zero if the query returns an empty result.
"""
FORMAT_INSTRUCTIONS = """Use the following format:
Thought: you should always think about what to do
Action: One of the [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the score between 0 and 100 indicating the correctness of the SQL query. score should always be after 'Score:'."""
AGENT_SUFFIX: str = """How accurately the SQL query can answer the question?
Give me a score between 0 and 100 by performing a step by step evaluation.
Question: {question}
SQL: {SQL}
"""
class EntityFinder(BaseSQLDatabaseTool, BaseTool):
"""Tool finding all syntactically similar entites from a database"""
name = "entity_finder"
description = """
Input to this tool is an enitity, a column, and the table containing the column.
All the rows that have similar values to the given entity are returned.
If the entity is not found, a not found message will be returned.
Use this tool to check the correctness of conditions used in the WHERE clause.
Input format: entity, column_name, table_name
Example Input: David, name, singer
"""
similarity_threshold: confloat(ge=0, le=1) = 0.7
number_similar_items: int = 20
def similarity(self, first_string: str, second_string: str) -> float:
return SequenceMatcher(None, first_string, second_string).ratio()
def _run(
self,
input: str,
run_manager: CallbackManagerForToolRun | None = None, # noqa: ARG002
) -> str:
"""Execute the query, return the results or an error message."""
try:
response = ""
entity, column_name, table_name = input.split(", ")
engine = self.db._engine
metadata = MetaData(bind=engine)
table = Table(table_name, metadata, autoload=True)
column = table.c[column_name]
query = select(column.distinct()).select_from(table)
# Execute the query and fetch all rows
with engine.connect() as conn:
result = conn.execute(query)
rows = result.fetchall()
# Process the retrieved rows as needed
similar_items = []
for row in rows:
pair_similarity = self.similarity(entity, str(row[0]))
if pair_similarity > self.similarity_threshold:
similar_items.append({"row": str(row[0]), "score": pair_similarity})
similar_items = sorted(
similar_items, key=lambda x: x["score"], reverse=True
)[: self.number_similar_items]
for item in similar_items:
response += f"Column {column_name}, contains -> {item['row']}.\n"
if not response:
response = f"Column {column_name} doesn't contain any value similar to {entity}"
return response
except Exception as e:
return str(e)
async def _arun(
self,
query: str,
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
raise NotImplementedError("QuerySqlDbTool does not support async")
class SQLEvaluationToolkit(BaseToolkit):
"""Toolkit for interacting with SQL databases for the evaluation of the SQL Query"""
db: SQLDatabase = Field(exclude=True)
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
info_sql_database_tool_description = (
"Input to this tool is a comma-separated list of tables, output is the schema and sample first rows for those tables."
"Use this tool to find the columns inside each table and their sample rows."
"Example Input: table1, table2, table3"
)
info_sql_database_tool = InfoSQLDatabaseTool(
db=self.db, description=info_sql_database_tool_description
)
query_sql_database_tool_description = (
"Input to this tool is a SQL query, output is a result from the database. If the query is not correct, an error message "
"will be returned. If an error is returned, rewrite the query and try again. If you encounter an issue with Unknown column "
f"'xxxx' in 'field list', using {info_sql_database_tool.name} "
"to query the correct table fields."
"Use this tool to search for a specific value or to check a specific condition in the database."
)
query_sql_database_tool = QuerySQLDataBaseTool(
db=self.db, description=query_sql_database_tool_description
)
entity_finder = EntityFinder(db=self.db)
return [query_sql_database_tool, info_sql_database_tool, entity_finder]
class EvaluationAgent(Evaluator):
sample_rows: int = 10
llm: Any = None
def __init__(self, system: System):
super().__init__(system)
self.system = system
def answer_parser(self, answer: str) -> int:
"""
Extract the number after the Score:
If not found extract the last number between 0 and 100
If not found return 0
"""
pattern = r".*Score:\s*(\d+)"
match = re.search(pattern, answer)
output = 0
if match:
output = int(match.group(1))
else:
pattern = r"\b([0-9]{1,2}|100)\b"
numbers = re.findall(pattern, answer)
if numbers:
output = int(numbers[-1])
return output
def create_evaluation_agent(
self,
toolkit: SQLEvaluationToolkit,
database_connection: DatabaseConnection,
prefix: str = AGENT_PREFIX,
suffix: str = AGENT_SUFFIX,
callback_manager: BaseCallbackManager | None = None,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: List[str] | None = None,
max_iterations: int | None = 15,
max_execution_time: float | None = None,
early_stopping_method: str = "force",
verbose: bool = False,
agent_executor_kwargs: Dict[str, Any] | None = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
database = SQLDatabase.get_sql_engine(database_connection)
tools = toolkit.get_tools()
prefix = prefix.format(dialect=database.dialect)
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=self.llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
)
@override
def evaluate(
self,
question: Question,
generated_answer: Response,
database_connection: DatabaseConnection,
) -> Evaluation:
start_time = time.time()
logger.info(
f"Generating score for the question/sql pair: {str(question.question)}/ {str(generated_answer.sql_query)}"
)
self.llm = self.model.get_model(
database_connection=database_connection,
temperature=0,
model_name=os.getenv("LLM_MODEL", "gpt-4"),
)
database = SQLDatabase.get_sql_engine(database_connection)
user_question = question.question
sql = generated_answer.sql_query
database._sample_rows_in_table_info = self.sample_rows
toolkit = SQLEvaluationToolkit(db=database)
agent_executor = self.create_evaluation_agent(
toolkit=toolkit,
database_connection=database_connection,
verbose=True,
input_variables=["question", "SQL"],
)
answer = agent_executor({"question": user_question, "SQL": sql})["output"]
score = self.answer_parser(answer=answer) / 100
end_time = time.time()
logger.info(f"Evaluation time elapsed: {str(end_time - start_time)}")
return Evaluation(
question_id=question.id, answer_id=generated_answer.id, score=score
)
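# --- Illustrative sketch, not part of the original module ---
# Standalone demo of the fuzzy matching EntityFinder relies on: difflib.SequenceMatcher ratios
# compared against the 0.7 similarity_threshold. The entity and candidate values are hypothetical.
from difflib import SequenceMatcher


def demo_similarity(first_string: str, second_string: str) -> float:
    return SequenceMatcher(None, first_string, second_string).ratio()


demo_entity = "David"
demo_candidates = ["David Bowie", "Dave", "Davide", "Madonna"]
demo_threshold = 0.7
for candidate in sorted(demo_candidates, key=lambda c: demo_similarity(demo_entity, c), reverse=True):
    score = demo_similarity(demo_entity, candidate)
    print(f"{candidate}: {score:.2f} ({'keep' if score > demo_threshold else 'drop'})")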
| [
"\n Input to this tool is an enitity, a column, and the table containing the column.\n All the rows that have similar values to the given entity are returned.\n If the entity is not found, a not found message will be returned.\n Use this tool to check the correctness of conditions used in the WHERE clause.\n\n Input format: entity, column_name, table_name\n\n Example Input: David, name, singer\n "
] |
2024-01-10 | Dataherald/dataherald | dataherald~eval~simple_evaluator.py | import logging
import os
import re
import time
from typing import Any
from bson.objectid import ObjectId
from langchain.chains import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from overrides import override
from sql_metadata import Parser
from dataherald.config import System
from dataherald.db import DB
from dataherald.db_scanner.models.types import TableDescriptionStatus
from dataherald.db_scanner.repository.base import TableDescriptionRepository
from dataherald.eval import Evaluation, Evaluator
from dataherald.sql_database.base import SQLDatabase
from dataherald.sql_database.models.types import DatabaseConnection
from dataherald.types import Question, Response
logger = logging.getLogger(__name__)
SYSTEM_TEMPLATE = """You are a {dialect} expert.
Given a question, a SQL query, and the database schema, analyze the correctness of the SQL query and provide a score.
Score indicates how correctly and accurately SQL query answers the question.
Note that the score should be between 0 and 100. Higher scores means the SQL Query is more accurate.
Double check the SQL query for the common mistakes, including:
- For columns that can contain NULL values, NULL values should be filtered out by using the IS NOT NULL operator in the WHERE condition
- when intention of the question is to include all rows from both sets, including duplicates, using UNION ALL is better than UNION
- BETWEEN is inclusive, if the intention is to exclude the endpoints of the range, use comparison operators (< and >)
- Conditions in the WHERE clause should not have any DATA TYPE mismatch problem
- columns names which contain Spaces, non-alphanumeric character, Reserved keywords or special characters should be inside backticks (``)
- Using the correct number of arguments for functions
- Casting to the correct data type
- Using the proper columns for joins
- using the correct set operators for nested queries
- columns in the SELECT clause should correspond to what exactly asked by user in the question
- check for the improper use of the aggergation functions (SUM, AVG, MIN, MAX, ...)
- robustness of the SQL query in handling cases where data values can be in different format (WHERE lower(column) = lower(entity))
"""
HUMAN_TEMPLATE = """
Give me a score for the SQL query.
Schema of the tables:
{schema}
Here is the question:
Question: {question}
Evaluate the following SQL query:
SQL Query: {SQL}
SQL Query Result: {SQL_result}
give me a one or two lines explanation and the score after 'Score: '.
"""
class SimpleEvaluator(Evaluator):
llm: Any = None
def __init__(self, system: System):
super().__init__(system)
self.system = system
def answer_parser(self, answer: str) -> int:
"""
Extract the number after the Score:
If not found extract the last number between 0 and 100
If not found return 0
"""
pattern = r".*Score:\s*(\d+)"
match = re.search(pattern, answer)
output = 0
if match:
output = int(match.group(1))
else:
pattern = r"\b([0-9]{1,2}|100)\b"
numbers = re.findall(pattern, answer)
if numbers:
output = int(numbers[-1])
return output
@override
def evaluate(
self,
question: Question,
generated_answer: Response,
database_connection: DatabaseConnection,
) -> Evaluation:
database = SQLDatabase.get_sql_engine(database_connection)
logger.info(
f"(Simple evaluator) Generating score for the question/sql pair: {str(question.question)}/ {str(generated_answer.sql_query)}"
)
storage = self.system.instance(DB)
repository = TableDescriptionRepository(storage)
db_scan = repository.get_all_tables_by_db(
{
"db_connection_id": ObjectId(database_connection.id),
"status": TableDescriptionStatus.SYNCHRONIZED.value,
}
)
self.llm = self.model.get_model(
database_connection=database_connection,
temperature=0,
model_name=os.getenv("LLM_MODEL", "gpt-4"),
)
start_time = time.time()
system_message_prompt = SystemMessagePromptTemplate.from_template(
SYSTEM_TEMPLATE
)
human_message_prompt = HumanMessagePromptTemplate.from_template(HUMAN_TEMPLATE)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
user_question = question.question
sql = generated_answer.sql_query
dialect = database.dialect
tables = Parser(sql).tables
schema = ""
for scanned_table in db_scan:
if scanned_table.table_name in tables:
schema += f"Table: {scanned_table.table_schema}\n"
if generated_answer.sql_query_result is None:
logger.info(
f"(Simple evaluator) SQL query: {sql} is not valid. Returning score 0"
)
return Evaluation(
question_id=question.id, answer_id=generated_answer.id, score=0
)
chain = LLMChain(llm=self.llm, prompt=chat_prompt)
answer = chain.run(
{
"dialect": dialect,
"question": user_question,
"SQL": sql,
"SQL_result": str(generated_answer.sql_query_result.json()),
"schema": schema,
}
)
logger.info(f"(Simple evaluator) answer of the evaluator: {answer}")
score = self.answer_parser(answer=answer) / 100
logger.info(f"(Simple evaluator) score of the evaluator: {str(score)}")
end_time = time.time()
logger.info(f"Evaluation time elapsed: {str(end_time - start_time)}")
return Evaluation(
question_id=question.id, answer_id=generated_answer.id, score=score
)
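# --- Illustrative sketch, not part of the original module ---
# Standalone check of the score-extraction logic both evaluators share: take the number after
# "Score:", otherwise fall back to the last 0-100 number in the text. The sample answers are
# hypothetical.
import re


def demo_parse_score(answer: str) -> int:
    match = re.search(r".*Score:\s*(\d+)", answer)
    if match:
        return int(match.group(1))
    numbers = re.findall(r"\b([0-9]{1,2}|100)\b", answer)
    return int(numbers[-1]) if numbers else 0


print(demo_parse_score("The query looks correct. Score: 85"))      # 85
print(demo_parse_score("I would rate this around 70 out of 100"))  # 100 (last 0-100 number wins)
print(demo_parse_score("No score given"))                          # 0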
| [
"[PLACEHOLDER, PLACEHOLDER]",
"You are a {dialect} expert.\nGiven a question, a SQL query, and the database schema, analyze the correctness of the SQL query and provide a score.\nScore indicates how correctly and accurately SQL query answers the question.\nNote that the score should be between 0 and 100. Higher scores means the SQL Query is more accurate.\nDouble check the SQL query for the common mistakes, including:\n- For columns that can contain NULL values, NULL values should be filtered out by using the IS NOT NULL operator in the WHERE condition\n- when intention of the question is to include all rows from both sets, including duplicates, using UNION ALL is better than UNION\n- BETWEEN is inclusive, if the intention is to exclude the endpoints of the range, use comparison operators (< and >)\n- Conditions in the WHERE clause should not have any DATA TYPE mismatch problem\n- columns names which contain Spaces, non-alphanumeric character, Reserved keywords or special characters should be inside backticks (``)\n- Using the correct number of arguments for functions\n- Casting to the correct data type\n- Using the proper columns for joins\n- using the correct set operators for nested queries\n- columns in the SELECT clause should correspond to what exactly asked by user in the question\n- check for the improper use of the aggergation functions (SUM, AVG, MIN, MAX, ...)\n- robustness of the SQL query in handling cases where data values can be in different format (WHERE lower(column) = lower(entity))\n",
"\nGive me a score for the SQL query.\nSchema of the tables:\n{schema}\nHere is the question:\nQuestion: {question}\nEvaluate the following SQL query:\nSQL Query: {SQL}\nSQL Query Result: {SQL_result}\ngive me a one or two lines explanation and the score after 'Score: '.\n"
] |
2024-01-10 | chriscarrollsmith/knowledge-gpt | knowledgegpt~utils~utils_completion.py | # https://github.com/openai/openai-cookbook/blob/main/examples/Question_answering_using_embeddings.ipynb sourced from here
from knowledgegpt.utils.utils_prompt import construct_prompt
import openai
import pandas as pd
import numpy as np
import tiktoken
model_types = {
"gpt-3.5-turbo": {
"temperature": 0.0,
"model": "gpt-3.5-turbo",
"max_tokens": 1000,
},
"gpt-4": {
"temperature": 0.0,
"model": "gpt-4",
"max_tokens": 4096,
},
"davinci": {
"temperature": 0.0,
"model": "text-davinci-003",
"max_tokens": 1000,
}
}
def answer_query_with_context(
query: str,
df: pd.DataFrame,
document_embeddings: dict[(str, str), np.array],
verbose: bool = False,
embedding_type: str = "hf",
model_lang: str = "en",
is_turbo: bool = False,
is_gpt4: bool = False,
messages: list = None,
index_type: str = "basic",
max_tokens=1000,
prompt_template=None
) -> str:
"""
Answer a query using the provided context.
:param query: The query to answer.
:param df: The dataframe containing the document sections.
:param document_embeddings: The embeddings of the document sections.
:param verbose: Whether to print the constructed prompt.
:param embedding_type: The type of embedding used. Can be "hf" or "tf".
:param model_lang: The language of the model. Can be "en" or "tr".
:param is_turbo: Whether to use the gpt-3.5-turbo chat model instead of davinci.
:param is_gpt4: Whether to use the gpt-4 chat model (only applies when is_turbo is True).
:param messages: The running chat history used by the chat models.
:param index_type: The type of index used to select context sections. Defaults to "basic".
:param max_tokens: The maximum number of context tokens to include in the prompt.
:param prompt_template: Optional custom template used to construct the prompt.
:return: A tuple of (answer, prompt, messages).
"""
if len(messages) < 3 or not is_turbo:
prompt = construct_prompt(
verbose=verbose,
question=query,
context_embeddings=document_embeddings,
df=df,
embedding_type=embedding_type,
model_lang=model_lang,
max_tokens=max_tokens,
index_type=index_type,
prompt_template=prompt_template
)
if is_turbo:
messages.append({"role": "user", "content": prompt})
else:
prompt = query
if is_turbo:
messages.append({"role": "user", "content": prompt})
encoding = tiktoken.get_encoding("gpt2")
if is_turbo:
messages_token_length = encoding.encode(str(messages))
if len(messages_token_length) > 4096:
del messages[2:4]
if verbose:
print(prompt)
if not is_turbo:
prompt_len = len(encoding.encode(prompt))
model_types["davinci"]["max_tokens"] = 2000 - prompt_len
response = openai.Completion.create(
prompt=prompt,
** model_types["davinci"]
)
else:
if is_gpt4:
messages_token_length = encoding.encode(str(messages))
model_types["gpt-4"]["max_tokens"] = 8192 - len(messages_token_length)
response = openai.ChatCompletion.create(
messages=messages,
**model_types["gpt-4"],
)
else:
messages_token_length = encoding.encode(str(messages))
model_types["gpt-3.5-turbo"]["max_tokens"] = 4096 - len(messages_token_length)
response = openai.ChatCompletion.create(
messages=messages,
**model_types["gpt-3.5-turbo"],
)
if is_turbo:
messages.append({"role": "assistant", "content": response["choices"][0]["message"]["content"].strip(" \n")})
return response["choices"][0]["message"]["content"].strip(" \n"), prompt, messages
else:
return response["choices"][0]["text"].strip(" \n"), prompt, messages
| [
"content",
" \n"
] |
2024-01-10 | Oleg-Pashchenko/avatarex_widget | misc.py | import json
from dataclasses import dataclass
import os
import pandas as pd
import gdown
from openai import OpenAI
from sqlalchemy import create_engine, Column, Integer, String, MetaData, Table
from sqlalchemy.orm import sessionmaker
# Load environment variables
from dotenv import load_dotenv
descr = "Ищет соотвтествующий вопрос если не нашел соотвтествия - возвращает пустоту"
load_dotenv()
# Set up database connection
DB_HOST = os.getenv("DB_HOST")
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_PORT = os.getenv("DB_PORT")
DATABASE_URL = f"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
engine = create_engine(DATABASE_URL)
metadata = MetaData()
avatarexsettings = Table(
'home_avatarexsettings', metadata,
Column('id', Integer, primary_key=True),
Column('knowledge_link', String),
Column('context', String),
Column('api_token', String),
Column('error_message', String),
)
# Create a database session
Session = sessionmaker(bind=engine)
session = Session()
@dataclass()
class AvatarexSettings:
knowledge_link: str
context: str
api_token: str
error_message: str
def get_execution_function(filename):
df = pd.read_excel(filename)
first_row = list(df.iloc[:, 0])
properties = {}
rq = []
for r in first_row:
if r in rq:
continue
rq.append(r)
properties[r] = {'type': 'boolean', 'description': 'Does the question fully match the given one?'}
return [{
"name": "get_question_by_context",
"description": descr,
"parameters": {
"type": "object",
"properties": properties,
'required': rq
}
}]
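# --- Illustrative sketch, not part of the original module ---
# Offline look at the function-calling schema get_execution_function builds from the first
# spreadsheet column: each distinct question becomes a boolean property and a required field.
# The questions below are hypothetical; no spreadsheet or API call is involved, and json/descr
# come from the imports and constants at the top of this module.
demo_questions = [
    "What are your opening hours?",
    "How much does delivery cost?",
    "What are your opening hours?",  # duplicate rows in the sheet are collapsed
]
demo_properties, demo_required = {}, []
for demo_question in demo_questions:
    if demo_question in demo_required:
        continue
    demo_required.append(demo_question)
    demo_properties[demo_question] = {
        "type": "boolean",
        "description": "Does the question fully match the given one?",
    }
demo_schema = [{
    "name": "get_question_by_context",
    "description": descr,
    "parameters": {"type": "object", "properties": demo_properties, "required": demo_required},
}]
print(json.dumps(demo_schema, indent=2, ensure_ascii=False))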
def read_avatarex_settings() -> AvatarexSettings:
# Read data from the table where id = 2
result = session.query(avatarexsettings).filter(avatarexsettings.c.id == 2).first()
return AvatarexSettings(
knowledge_link=result.knowledge_link,
context=result.context,
api_token=result.api_token,
error_message=result.error_message
)
def download_file(db_name):
file_id = db_name.replace('https://docs.google.com/spreadsheets/d/', '')
file_id = file_id.split('/')[0]
try:
os.remove(f"uploads/{file_id}.xlsx")
except OSError:
pass
output_path = f"uploads/{file_id}.xlsx"
try:
    download_url = f"https://drive.google.com/uc?id={file_id}"
    gdown.download(download_url, output_path, quiet=True)
except Exception:
    pass
return output_path
def get_keywords_values(message, func):
client = OpenAI()
try:
messages = [
{'role': 'system', 'content': descr},
{"role": "user",
"content": message}]
response = client.chat.completions.create(model="gpt-3.5-turbo",
messages=messages,
functions=func,
function_call={"name": "get_question_by_context"})
response_message = response.choices[0].message
except Exception as e:
print("ERROR", e)
return {'is_ok': False, 'args': {}}
if response_message.function_call:
function_args = json.loads(response_message.function_call.arguments)
try:
return {'is_ok': True, 'args': list(function_args.keys())}
except:
return {'is_ok': False, 'args': []}
else:
return {'is_ok': False, 'args': []}
def get_answer_by_question(questions, filename):
answer = ''
try:
df = pd.read_excel(filename)
list_of_arrays = list(df.iloc)
for i in list_of_arrays:
if questions.strip().lower() in i[0].strip().lower():
answer += str(i[1]) + '\n'
break
except Exception as e:
print(e)
return answer
| [] |
2024-01-10 | ai-ld/Open-Assistant | model~model_training~custom_datasets~rank_datasets.py | import random
from collections import defaultdict
from typing import List
import numpy as np
from datasets import load_dataset
from torch.utils.data import Dataset
SEED = 2020
class SHPDataset(Dataset):
"""
Dataset class to load stanfordnlp/SHP for Reward Modeling
"""
name = "SHP"
def __init__(self, split: str | list[str] | None, max_answers: int = 5):
super().__init__()
self.questions = []
self.answers = []
if not isinstance(split, list):
split = [split]
dataset_splits = load_dataset("stanfordnlp/SHP", split=split)
answers_by_id = defaultdict(dict)
history_by_id = dict()
for split in dataset_splits:
for row in split:
post_id = row["post_id"]
history_by_id[post_id] = row["history"]
answers_by_id[post_id][row["human_ref_A"]] = row["score_A"]
answers_by_id[post_id][row["human_ref_B"]] = row["score_B"]
for post_id, history in history_by_id.items():
self.questions.append(history)
answers = answers_by_id[post_id]
# Sort answer dict with the highest score first (hence the prefactor -1).
# Then take only the first `max_answers` elements (usually there are just
# 2, but there are examples where we have more)
answers_sorted = [x[0] for x in sorted(answers.items(), key=lambda x: -1 * x[1])]
self.answers.append(answers_sorted[:max_answers])
def __len__(self):
return len(self.questions)
def __getitem__(self, index):
return [self.questions[index]], self.answers[index]
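# --- illustrative usage sketch, not part of the original file ---------------
# Each item is ([question], answers) with the answers ordered best-first, as
# built in __init__ above.
def _peek_shp_example():
    ds = SHPDataset(split="validation")
    questions, answers = ds[0]
    print(questions[0][:80])                      # the reddit post / history
    print(len(answers), "ranked answers, best first")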
class HellaSwagDataset(Dataset):
"""
Dataset class to use data from https://arxiv.org/pdf/1905.07830.pdf
for Reward modeling
Note: In order to disable dialog-formatting None is returned as context.
"""
name = "hellaswag"
def __init__(self, split: str | list[str] | None, seed: int = SEED) -> None:
super().__init__()
np.random.seed(seed)
self.dataset_list = []
if not isinstance(split, List):
split = [split]
dataset = load_dataset("AlekseyKorshuk/hellaswag", split=split)
for data in dataset:
for item in data:
context = item.get("ctx")
endings = item.get("endings")
selected = endings.pop(item.get("label"))
ordered_ends = [selected, np.random.choice(endings)]
self.dataset_list.append({"context": context, "completions": ordered_ends})
def __len__(self) -> int:
return len(self.dataset_list)
def __getitem__(self, idx) -> tuple[str | None, list[list]]:
context, completions = self.dataset_list[idx].values()
return None, [context + c for c in completions]
class HFDataset(Dataset):
    """
    Dataset class to use data from openai/summarize_from_feedback for Reward modeling.
    Summaries ranked by overall score.
    """

    name = "open_ai_summarize_from_feedback"

    def __init__(self, split: str | list[str] | None = None, subset: str = "axis") -> None:
        super().__init__()
        # axis subset contains splits 'test' and 'validation'
        # comparisons subset contains splits 'train' and 'validation'
        if not isinstance(split, List):
            split = [split]
        dataset = load_dataset("openai/summarize_from_feedback", subset, split=split)
        self.subset = subset

        # in axis subset the summaries are ranked
        self.axis_post_ids = []
        self.axis_post_dict = defaultdict(dict)

        # in comparison subset we have each time a pair
        # of summarizations and then the chosen out of 2
        self.comparisons = []

        if subset == "axis":
            self._handle_axis(dataset)
        else:
            self._handle_comparisons(dataset)

    def _handle_comparisons(self, dataset):
        # iterate the rows of each split, not the list of splits itself
        for data in dataset:
            for item in data:
                choice = item["choice"]  # indicates the preferred summary
                full_post = item["info"]["post"]
                summaries = [item["summaries"][choice]["text"], item["summaries"][1 - choice]["text"]]
                self.comparisons.append([[full_post], summaries])

    def _handle_axis(self, dataset):
        # iterate the rows of each split, not the list of splits itself
        for data in dataset:
            for item in data:
                if item["summary"].get("axes").get("overall") is not None:
                    post_id = item.get("info")["id"]
                    if post_id not in self.axis_post_ids:
                        self.axis_post_ids.append(post_id)
                        item_content = item["info"]["post"] or item["info"]["article"]
                        self.axis_post_dict[post_id].update({"post": item_content, "summaries": [item["summary"]]})
                    else:
                        self.axis_post_dict[post_id]["summaries"].append(item["summary"])

    def __len__(self):
        if self.subset == "axis":
            return len(self.axis_post_ids)
        return len(self.comparisons)

    def __getitem__(self, idx):
        if self.subset != "axis":
            return self.comparisons[idx]
        post, summaries = self.axis_post_dict[self.axis_post_ids[idx]].values()
        summaries = sorted(summaries, key=lambda x: x["axes"]["overall"], reverse=True)
        summaries = [summary["text"] for summary in summaries]
        return [post], summaries
class AugmentedOA(Dataset):
def __init__(self, json_filename: str, split: str = "train") -> None:
super().__init__()
import json
assert split in ("train", "val")
pairs = []
with open(json_filename, "r", encoding="utf-8") as f:
for line in f:
data = json.loads(line)
if data["split"] == split:
augmented = data["augmented"]
if split == "val": # disable augmentation during validation
augmented = []
pairs.append((data["prefixes"], data["responses"], augmented))
self.pairs = pairs
def __len__(self):
return len(self.pairs)
def __getitem__(self, idx):
prefixes, user_answer_ranks, bad_samples = self.pairs[idx]
# we want to prevent modifying user_answer_ranks
rank = user_answer_ranks
if len(bad_samples) > 0:
additional = random.choice(bad_samples)
rank = user_answer_ranks + [additional]
return prefixes, rank
class AnthropicRLHF(Dataset):
name = "anthropic_rlhf"
@staticmethod
def _split_dialogue(text: str) -> list[tuple[str, str]]:
lines = text.split("\n\n")
dialogue: list[tuple[str, str]] = []
# go over messages and combine consecutive messages from the
# same speaker (OA v1 expects alternating roles)
role = None
messages = []
for line in lines:
if line.startswith("Human:"):
speaker = "Human"
message = line[7:]
elif line.startswith("Assistant:"):
speaker = "Assistant"
message = line[11:]
else:
continue
if role != speaker:
if role is not None:
dialogue.append((role, "\n".join(messages)))
messages = []
role = speaker
messages.append(message.strip())
if role is not None and len(messages) > 0:
dialogue.append((role, "\n".join(messages)))
return dialogue
def __init__(self, split: str = "train") -> None:
super().__init__()
assert split in ("train", "test")
self.split = split
self.data = []
dataset = load_dataset("Anthropic/hh-rlhf")[split]
for entry in dataset:
chosen = entry["chosen"]
if "Assistant" not in chosen:
continue
rejected = entry["rejected"]
chosen = self._split_dialogue(chosen)
rejected = self._split_dialogue(rejected)
assert rejected[0][0] == "Human" and chosen[0][0] == "Human"
# only very few items have non matching lengths
if len(rejected) == len(chosen):
prefix = [line for (speaker, line) in chosen[:-1]]
good_reply = chosen[-1][1] # last part of dialog, the text
bad_reply = rejected[-1][1] # last part of dialog, the text
self.data.append((prefix, [good_reply, bad_reply]))
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index: int) -> tuple[str, list[str]]:
return self.data[index]
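# --- illustrative sketch, not part of the original file ---------------------
# What _split_dialogue produces for a minimal hh-rlhf style string (the sample
# text is made up): consecutive turns by the same speaker are merged.
def _demo_split_dialogue():
    raw = "\n\nHuman: Hi there\n\nHuman: are you awake?\n\nAssistant: Yes, how can I help?"
    print(AnthropicRLHF._split_dialogue(raw))
    # -> [('Human', 'Hi there\nare you awake?'), ('Assistant', 'Yes, how can I help?')]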
| [] |
2024-01-10 | vaz1306011/OA_Bot | cogs~event.py | import os
import random
import re
import sqlite3
import discord
import openai
from discord import app_commands
from discord.app_commands import MissingPermissions
from discord.ext import commands
from core.check import is_exception_content
from core.classes import Cog_Extension
from core.data import PRESENCE
class Event(Cog_Extension):
def __init__(self, bot: commands.Bot):
super().__init__(bot)
self.conn_dom = sqlite3.connect("./data/on_message_ignore.db")
@commands.Cog.listener()
async def on_ready(self):
from datetime import datetime
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{now}] - OA_Bot上線")
activity = discord.Activity(
type=PRESENCE["type"], name=PRESENCE["name"], url=PRESENCE["url"]
)
await self.bot.change_presence(status=PRESENCE["status"], activity=activity)
openai.api_key = os.environ.get("OPENAI_API_KEY")
omi_group = app_commands.Group(name="omi", description="關鍵字檢測指令群組")
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
content = re.sub(r"https?://\S{2,}\b", "", message.content)
if is_exception_content(message):
return
disabled = self.check_in_omi(message)
if disabled:
return
# Random-chance "jackpot" checks
if random.randint(1, 10_000) == 1:
await message.channel.send("10000分之1的機率,被雷劈", reference=message)
if random.randint(1, 22_000_000) == 1:
await message.channel.send("2200萬分之一的機率,威力彩頭獎", reference=message)
# Keyword checks
if any(word in content for word in ("笑", "草", "ww")):
word = random.choice(("笑死", "草", ""))
await message.channel.send(word + "w" * random.randint(2, 5))
if "好" in content:
await message.channel.send("好耶")
if re.search(r"[確雀][實石食]", content):
word = random.choice(("確實", "雀石", "雀食"))
await message.channel.send(word)
def check_in_omi(self, message: discord.Message) -> bool:
"""檢查是否在忽略名單
Args:
message (discord.Message): message
Returns:
bool: 是否在忽略名單
"""
try:
cursor = self.conn_dom.cursor()
disabled = any(
(
cursor.execute(
"SELECT 1 FROM guilds WHERE id = ?",
(message.guild.id,),
).fetchone(),
cursor.execute(
"SELECT 1 FROM channels WHERE id = ?",
(message.channel.id,),
).fetchone(),
cursor.execute(
"SELECT 1 FROM users WHERE id = ?",
(message.author.id,),
).fetchone(),
)
)
finally:
cursor.close()
return disabled
def omi_insert(self, table: str, id: int, name: str):
"""新增忽略
Args:
table (str): 資料表
id (int): id
name (str): 名稱
"""
try:
cursor = self.conn_dom.cursor()
cursor.execute(
f"INSERT OR IGNORE INTO {table} VALUES (?, ?)",
(id, name),
)
self.conn_dom.commit()
finally:
cursor.close()
def omi_delete(self, table: str, id: int):
"""刪除忽略
Args:
table (str): 資料表
id (int): id
"""
try:
cursor = self.conn_dom.cursor()
cursor.execute(
f"DELETE FROM {table} WHERE id = ?",
(id,),
)
self.conn_dom.commit()
finally:
cursor.close()
@omi_group.command()
@app_commands.checks.has_permissions(manage_guild=True)
async def guild(self, interaction: discord.Interaction, status: bool):
"""忽略伺服器的關鍵字檢測
Args:
interaction (discord.Interaction): interaction
status (bool): 開關
"""
if status:
self.omi_insert(
"guilds",
interaction.guild_id,
interaction.guild.name,
)
else:
self.omi_delete("guilds", interaction.guild_id)
await interaction.response.send_message(
f"已**{'忽略' if status else '啟用'}**此伺服器的關鍵字檢測", ephemeral=True
)
@omi_group.command()
@app_commands.checks.has_permissions(manage_channels=True)
async def channel(self, interaction: discord.Interaction, status: bool):
"""忽略頻道的關鍵字檢測
Args:
interaction (discord.Interaction): interaction
status (bool): 開關
"""
if status:
self.omi_insert(
"channels",
interaction.channel_id,
interaction.channel.name,
)
else:
self.omi_delete("channels", interaction.channel_id)
await interaction.response.send_message(
f"已**{'忽略' if status else '啟用'}**此頻道的關鍵字檢測", ephemeral=True
)
@guild.error
@channel.error
async def guild_and_channel_error(self, interaction: discord.Interaction, error):
if isinstance(error, MissingPermissions):
await interaction.response.send_message("你沒有權限這麼做", ephemeral=True)
@omi_group.command()
async def me(self, interaction: discord.Interaction, status: bool):
"""忽略你的關鍵字檢測
Args:
interaction (discord.Interaction): interaction
status (bool): 開關
"""
if status:
self.omi_insert(
"users",
interaction.user.id,
interaction.user.name,
)
else:
self.omi_delete("users", interaction.user.id)
await interaction.response.send_message(
f"已**{'忽略' if status else '啟用'}**你的關鍵字檢測", ephemeral=True
)
@omi_group.command()
async def status(self, interaction: discord.Interaction):
"""查看忽略狀態
Args:
interaction (discord.Interaction): interaction
"""
try:
cursor = self.conn_dom.cursor()
guild_status = not not cursor.execute(
"SELECT 1 FROM guilds WHERE id = ?", (interaction.guild_id,)
).fetchone()
channel_status = not not cursor.execute(
"SELECT 1 FROM channels WHERE id = ?", (interaction.channel_id,)
).fetchone()
user_status = not not cursor.execute(
"SELECT 1 FROM users WHERE id = ?", (interaction.user.id,)
).fetchone()
finally:
cursor.close()
def format_status(status: bool, name: str) -> str:
return f"{'+' if status else '-'} {name}: {'忽略' if status else '偵測'}"
await interaction.response.send_message(
f"**忽略狀態:**\n```diff\n{format_status(guild_status, '伺服器')}\n{format_status(channel_status, '頻道')}\n{format_status(user_status, '你')}```",
ephemeral=True,
)
async def setup(bot: commands.Bot):
print("已讀取Event")
await bot.add_cog(Event(bot))
async def teardown(bot: commands.Bot):
print("已移除Event")
await bot.remove_cog("Event")
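# --- illustrative sketch, not part of the original file ---------------------
# The cog assumes ./data/on_message_ignore.db already holds three tables keyed
# by a Discord snowflake id plus a display name; the queries above only show an
# `id` column and a second value, so the column names here are an assumption.
def _init_ignore_db(path: str = "./data/on_message_ignore.db") -> None:
    import sqlite3
    with sqlite3.connect(path) as conn:
        for table in ("guilds", "channels", "users"):
            conn.execute(f"CREATE TABLE IF NOT EXISTS {table} (id INTEGER PRIMARY KEY, name TEXT)")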
| [] |
2024-01-10 | tpnto/RVC-Discord-Bot | RVC_Bot.py | import openai
import discord
import os
import subprocess
from discord.ext import commands
from VoiceList import voicelist
from rvc_infer import rvc_convert
from VoiceList import checarvoz
# --------------------Discord-Token-------------------- #
DISCORD_TOKEN = open("Token.txt", "r").read()
# ---------------------OpenAI-API---------------------- #
API_KEY = open("OA_API_KEY", "r").read()
openai.api_key = API_KEY
# ---------------------Bot Set-up---------------------- #
intents = discord.Intents.default()
intents.message_content = True
# Initialize DEFVOICE and DEFPITCH to None (both get set by the .select command)
DEFVOICE = None
DEFPITCH = None
# Initialize the queue
queue = []
# if you are looking to change the PREFIX, it's
bot = commands.Bot(command_prefix=".", # <------ This line!
intents=intents)
# this will let us know if the bot is online
@bot.event
async def on_ready():
print(f'{bot.user} esta listo!')
activity = discord.Game(name="Queues & yt-dlp")
await bot.change_presence(activity=activity)
# -----------------VOICE-SELECTION-------------------- #
@bot.command(help="Cambia la voz y el pitch default del bot.")
async def select(ctx, *, args):
global DEFVOICE, DEFPITCH # use the previous declared variables
# argument split using comma: "voice, pitch"
args_list = args.split(',', 1)
# set the values for both variables
DEFVOICE, DEFPITCH = map(str.strip, args_list)
DEFVOICE = DEFVOICE.lower()
# send "voice" to function in order to see if the voice is available.
rvc_voice = checarvoz(voz=DEFVOICE)
if rvc_voice is None:
# if the function returns None, then it's not available.
await ctx.send("Esa voz no esta disponible!")
return
# if you see this in discord, it means it works :)
await ctx.send(f"voz cambiada a: {DEFVOICE}, pitch cambiado a: {DEFPITCH}")
# ---------------------TTS/GPT------------------------ #
@bot.command(help="Has que el bot responda a tus preguntas.")
async def chat(ctx, *, args):
# argument split using commas: "voice, pitch, user_response".
voz, pitch, user_response = map(str.strip, args.split(',', 2))
# this line of code is very important!! if you are looking to use TTS in English,
# you will have to replace "es-ES-AlvaroNeural" with an english speaker.
# In order to see English voices, go ahead and run "edge-tts --list-voices".
TTSVoice = "es-AR-TomasNeural" # <---------- This line here!!!!!
# the function below will help us check if the voice we chose is available.
rvc_voice = checarvoz(voz=voz)
if rvc_voice is None:
# if the function returns None, then it's not available.
await ctx.send("Esa voz no esta disponible")
return
# Insert the user into the queue
username = ctx.author.display_name
queue.append(username)
posicion = len(queue)
# temp message
tempmsg = await ctx.send("Generando respuesta...")
# send "user_response" to ChatGPT.
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": user_response}
]
)
# this will help us clean up ChatGPT's messy response.
assistant_response = response['choices'][0]['message']['content']
# assistant_response is going to be the new variable we use know
# since it's ChatGPT's generated response.
# So, we are going to send it to Edge-tts.
# Note: Edge-tts will run locally within your computer.
outputedgetts = f"chat_{posicion}.mp3"
command = ["edge-tts", "--voice", TTSVoice, "--text", assistant_response, "--write-media", outputedgetts]
subprocess.run(command)
# sends "edgetts.mp3" to RVC.
rvc_convert(model_path=rvc_voice,
f0_up_key=pitch,
input_path=outputedgetts,
output_file_name=f"out{posicion}.wav"
)
# this will delete our temp message.
await tempmsg.delete()
# Let's assign the location of our RVC'd audio so then we can send it to discord.
outputpath = f"output/out{posicion}.wav"
audio_file = discord.File(outputpath)
# send the file to discord
await ctx.send(file=audio_file)
# POP!
queue.pop(0)
# clean up!
os.remove(outputedgetts)
os.remove(outputpath)
# ---------------------TTS----------------------------- #
@bot.command(help="Has que cualquier voz diga cualquier cosa!")
async def say(ctx, *, args):
# argument split using commas: "voice, pitch, user_response".
voice, pitch, user_response = map(str.strip, args.split(',', 2))
# this line of code is very important!! if you are looking to use TTS in English,
# you will have to replace "es-ES-AlvaroNeural" with an english speaker.
# In order to see English voices, go ahead and run "edge-tts --list-voices".
TTSVoice = "es-AR-TomasNeural" # <----------- This line here!!!!!
# send "voice" to function in order to see if the voice is available.
rvc_voice = checarvoz(voz=voice)
if rvc_voice is None:
# if the function returns None, then it's not available.
await ctx.send("Esa voz no esta disponible")
return
# TempMSG
tempmsg = await ctx.send("Generando respuesta...")
# Insert the user into the queue
username = ctx.author.display_name
queue.append(username)
posicion = len(queue)
# Send it to edge-tts.
# Note: Edge-tts will run locally within your computer.
outputedgetts = f"say_{posicion}.mp3"
command = ["edge-tts", "--voice", TTSVoice, "--text", user_response, "--write-media", outputedgetts]
subprocess.run(command)
# Send it the generated audio to RCV.
rvc_convert(model_path=rvc_voice,
f0_up_key=pitch,
input_path=outputedgetts,
output_file_name=f"out{posicion}.wav"
)
# Delete TempMSG
await tempmsg.delete()
# Let's assign the location of our RVC'd audio so then we can send it to discord.
outputpath = f"output/out{posicion}.wav"
audio_file = discord.File(outputpath)
# Send the audio file to discord
await ctx.send(file=audio_file)
# POP!
queue.pop(0)
# clean up!
os.remove(outputedgetts)
os.remove(outputpath)
# ---------------------Copy audio---------------------- #
@bot.command(help="Has que cualquier voz cante o hable, funciona tambien con videos!")
async def audio(ctx):
# These lines will check if the user input has any audio/video attachments.
if len(ctx.message.attachments) == 0:
# if none, end the process
await ctx.send("Ocupo un archivo de audio/video!!")
return
# lets make sure the user has selected a voice, if not, lets instruct him how to do it.
rvc_voice = checarvoz(voz=DEFVOICE)
if rvc_voice is None:
await ctx.send("Esa voz no esta disponible!")
await ctx.send("Si quieres escoger una voz usa el siguiente comando: ```.select voz, pitch``` ")
await ctx.send("Para ver la lista de voces usa: ```.voces```")
return
# Insert the user into the queue
username = ctx.author.display_name
queue.append(username)
posicion = len(queue)
# receives the attachment and saves it as "archivo"
archivo = ctx.message.attachments[0]
# this if statement will check if your input is a compatible audio file.
if archivo.filename.endswith((".mp3", ".wav", ".flac")):
# it will then name the input as "input.mp3"
outputname = f"input{posicion}.mp3"
tempmsg = await ctx.send(f"{posicion}, Generando audio...")
# this if statement will check if your input is a compatible video file.
elif archivo.filename.endswith((".mp4", ".mov", ".mkv", ".webm")):
# it will then name the input as "inputvideo.mp3".
outputname = f"inputvideo{posicion}.mp3"
tempmsg = await ctx.send(f"{posicion}, Generando video...")
# using FFMPEG, it will then process the video so it gets converted as an mp3 file.
command2 = ["ffmpeg", "-i", archivo.url, "-c:a", "aac", "-fs", "20M", outputname, "-y"]
# Note: this will run locally in your computer.
subprocess.run(command2)
# if your file is none of the above, then it's just not compatible, and ends the process.
else:
await ctx.send("Tu archivo no es compatible!")
queue.pop(0)
return
# this will download the audio file that was provided by the user
with open(outputname, "wb") as outputfile:
outputfile.write(await archivo.read())
# if, your input name is "inputvideo" it will get sent to
# FFMPEG in order to get the video without any audio.
if outputname == f"inputvideo{posicion}.mp3":
command3 = ["ffmpeg", "-i", outputname, "-an", f"input{posicion}.mp4", "-y"]
subprocess.run(command3)
# If you want to check the queue
# print(queue) #<--- uncomment this
# send the audio input to RVC
rvc_convert(model_path=rvc_voice,
f0_up_key=DEFPITCH,
input_path=outputname,
output_file_name=f"out{posicion}.wav"
)
# delete tempMSG
await tempmsg.delete()
# Let's assign the location of our RVC'd audio so then we can use it on FFMPEG
# or send it to discord.
outputpath = f"output/out{posicion}.wav"
audio_file = discord.File(outputpath)
# if your outputname was: "inputvideo.mp3", it will get send yet again to FFMPEG
# this time it will combine both the video with no audio and the RVC processed audio.
if(outputname==f"inputvideo{posicion}.mp3"):
# we need to close the file in order to delete it :-)
audio_file.close()
# FFMPEG command
command4 = ["ffmpeg","-i", f"input{posicion}.mp4","-i", outputpath,
"-c:v", "copy", "-map", "0:v:0", "-map", "1:a:0", "-fs", "20M", f"out{posicion}.mp4", "-y"]
subprocess.run(command4)
outputvid = f"out{posicion}.mp4"
video_file = discord.File(outputvid)
# send the file to discord
await ctx.send(file=video_file)
video_file.close()
# Queue pop front
queue.pop(0)
# clean up!
os.remove(outputvid)
os.remove(f"input{posicion}.mp4")
os.remove(f"inputvideo{posicion}.mp3")
os.remove(outputpath)
else:
# if your audio was named "input{queue_position}.mp3", it will get sent over here!
await ctx.send(file=audio_file)
audio_file.close()
# Queue pop front
queue.pop(0)
# clean up!
os.remove(f"input{posicion}.mp3")
os.remove(outputpath)
# ----------------------YT-DLP audio---------------------- #
@bot.command(help="Lo mismo que audio, pero este descarga videos de youtube y otros sitios!")
async def url(ctx, user_response):
# argument split using commas: "voice, pitch, user_response".
# lets make sure the user has selected a voice, if not, lets instruct him how to do it.
rvc_voice = checarvoz(voz=DEFVOICE)
if rvc_voice is None:
await ctx.send("Esa voz no esta disponible!")
await ctx.send("Si quieres escoger una voz usa el siguiente comando: ```.select voz, pitch``` ")
await ctx.send("Para ver la lista de voces usa: ```.voces```")
return
# Insert the user into the queue
username = ctx.author.display_name
queue.append(username)
posicion = len(queue)
# temp message
tempmsg = await ctx.send(f"{posicion}, Descargando video...")
# Setting up variables
ytvid = f"output{posicion}.mp4"
outputname = f"inputvideo{posicion}.mp3"
# Youtube-dlp download
commandyt = ["yt-dlp", "-f", "mp4",
user_response, "-o", ytvid]
subprocess.run(commandyt)
await tempmsg.delete()
tempmsg = await ctx.send(f"{posicion}, Generando video...")
# using FFMPEG, it will then process the video so it gets converted as an mp3 file.
commandff1 = ["ffmpeg", "-i", ytvid, "-map", "0:a:0", "-c:a", "mp3", "-fs", "20M", outputname, "-y"]
# Note: this will run locally in your computer.
subprocess.run(commandff1)
# If you want to check the queue
# print(queue) #<--- uncomment this
# send the audio input to RVC
rvc_convert(model_path=rvc_voice,
f0_up_key=DEFPITCH,
input_path=outputname,
output_file_name=f"out{posicion}.wav"
)
# delete tempMSG
await tempmsg.delete()
# Let's assign the location of our RVC'd audio so then we can use it on FFMPEG
# or send it to discord.
outputpath = f"output/out{posicion}.wav"
audio_file = discord.File(outputpath)
# if your outputname was: "inputvideo.mp3", it will get send yet again to FFMPEG
# this time it will combine both the video with no audio and the RVC processed audio.
audio_file.close()
# FFMPEG command
command4 = [
"ffmpeg", "-i", ytvid, "-i", outputpath,
"-c:v", "libx264", "-crf", "32", # CRF/Custom Rate Factor
"-vf", "scale=480:-2", # Resolution
"-map", "0:v:0", "-map", "1:a:0", "-fs", "20M", f"out{posicion}.mp4", # File size
"-y"]
subprocess.run(command4)
outputvid = f"out{posicion}.mp4"
video_file = discord.File(outputvid)
# send the file to discord
await ctx.send(file=video_file)
video_file.close()
# Remove from queue
queue.pop(0)
# clean up!
os.remove(outputvid)
os.remove(ytvid)
os.remove(outputname)
os.remove(outputpath)
# ----------------------Voice List---------------------- #
@bot.command(help="Lista de voces disponibles!")
async def voces(ctx):
listadevoces = voicelist()
await ctx.send("Esta es la lista de voces disponibles para el TTS! ")
await ctx.send(listadevoces)
await ctx.send("No olvides que el syntax para chat/say es: ```.commando voz, pitch, palabras```")
# ----------------------- End of code ------ Run ------ #
bot.run(DISCORD_TOKEN)
| [] |
2024-01-10 | Tuminha/brave_gpt | brave.py | import openai
from langchain.document_loaders import BraveSearchLoader
from dotenv import load_dotenv
import os
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
BRAVE_API_KEY = os.getenv("BRAVE_API_KEY")
query = "OpenAI" # Or any other query
# Initialize the loader with your query and API key
loader = BraveSearchLoader(query=query, api_key=BRAVE_API_KEY, search_kwargs={"count": 3})
# Load the documents (perform the search)
docs = loader.load()
# Initialize an empty list to store the formatted information
info_list = []
# Iterate over each document
for doc in docs:
    title = doc.metadata['title']
    link = doc.metadata['link']
    snippet = doc.page_content
    # Format the information
    info = f"Title: {title}\nLink: {link}\nSnippet: {snippet}\n"
    # Add the formatted information to the list
    info_list.append(info)
# Combine all the formatted information into a single string
info_str = "\n".join(info_list)
# Initialize the OpenAI API
openai.api_key = OPENAI_API_KEY
# Construct the prompt for the OpenAI model
prompt = f"I found the following information about {query}:\n{info_str}\nBased on this information, can you tell me more about {query}?"
# Generate a response from the OpenAI model
response = openai.Completion.create(prompt=prompt, engine="text-davinci-003", max_tokens=100)
# Print the generated text
print(response['choices'][0]['text'])
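# --- illustrative sketch, not part of the original file ---------------------
# The call above uses the pre-1.0 openai interface. Under openai>=1.0 the same
# request goes through a client object; a chat model is used in this sketch
# because text-davinci-003 has been retired.
def _openai_v1_equivalent(prompt_text: str) -> str:
    from openai import OpenAI
    client = OpenAI(api_key=OPENAI_API_KEY)
    chat = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt_text}],
        max_tokens=100,
    )
    return chat.choices[0].message.content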
| [
"I found the following information about OpenAI:\nPLACEHOLDER\nBased on this information, can you tell me more about OpenAI?"
] |
2024-01-10 | umich-its-ai/langchain-doc-dropbox | dropbox_langchain~dropbox_files.py | """Loads Files from Dropbox."""
import tempfile
from typing import List
import pathlib
import urllib.parse
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import Docx2txtLoader
from langchain.document_loaders import UnstructuredExcelLoader
from langchain.document_loaders import UnstructuredPowerPointLoader
from langchain.document_loaders import UnstructuredMarkdownLoader
from striprtf.striprtf import rtf_to_text
ALLOWED_EXTENSIONS = [
"md",
"htm",
"html",
"docx",
"xls",
"xlsx",
"pptx",
"pdf",
"rtf",
"txt",
"paper"
]
class DropboxLoader(BaseLoader):
"""Loading logic for Dropbox files."""
def __init__(self, auth: str, app_key: str = None, app_secret: str = None, folder_path: str = None, file_paths: List = None, file_path: str = None):
"""Initialize with auth.
Args:
auth: Dropbox auth token dict, contains:
{
"access": "ACCESS_TOKEN_FROM_OAUTH",
"refresh": "REFRESH_TOKEN",
"id_token": "ID_TOKEN_NOT_USED",
"expire": "EXPIRE_TIMESTAMP"
}
To use the refresh token, optionally pass in:
app_key
app_secret
One of the following:
folder_path: Path to a folder in the Dropbox account. If the root folder, an empty string
file_paths: List of paths to files in Dropbox
file_path: A single file path to a file in Dropbox
"""
self.auth = auth
self.app_key = app_key
self.app_secret = app_secret
self.folder_path = None
self.file_paths = None
self.file_path = None
if folder_path is not None:
self.folder_path = folder_path
elif file_paths is not None:
self.file_paths = file_paths
else:
self.file_path = file_path
self.invalid_files = []
self.errors = []
def _get_html_as_string(self, html) -> str:
try:
# Import the html parser class
from bs4 import BeautifulSoup
except ImportError as exp:
raise ImportError(
"Could not import beautifulsoup4 python package. "
"Please install it with `pip install beautifulsoup4`."
) from exp
html_string = BeautifulSoup(html, "lxml").text.strip()
return html_string
def _load_text_file(self, file_path, download_path, source) -> List[Document]:
filename = pathlib.Path(download_path).name
file_contents = pathlib.Path(download_path).read_text()
return [Document(
page_content=file_contents.strip(),
metadata={ "source": source, "filename": filename, "kind": "file" }
)]
def _load_html_file(self, file_path, download_path, source) -> List[Document]:
file_contents = pathlib.Path(download_path).read_text()
return [Document(
page_content=self._get_html_as_string(file_contents),
metadata={ "source": source, "kind": "file" }
)]
def _load_rtf_file(self, file_path, download_path, source) -> List[Document]:
file_contents = pathlib.Path(download_path).read_text()
return[Document(
page_content=rtf_to_text(file_contents).strip(),
metadata={ "source": source, "kind": "file" }
)]
def _load_pdf_file(self, file_path, download_path, source) -> List[Document]:
try:
# Import PDF parser class
from PyPDF2 import PdfReader
from PyPDF2 import errors
except ImportError as exp:
raise ImportError(
"Could not import PyPDF2 python package. "
"Please install it with `pip install PyPDF2`."
) from exp
docs = []
try:
pdf_reader = PdfReader(download_path)
for i, page in enumerate(pdf_reader.pages):
docs.append(Document(
page_content=page.extract_text(),
metadata={ "source": source, "kind": "file", "page": i+1 }
))
except errors.FileNotDecryptedError as err:
self.errors.append({ "message": err, "file_path": file_path })
return docs
def _load_docx_file(self, file_path, download_path, source) -> List[Document]:
loader = Docx2txtLoader(download_path)
docs = loader.load()
return self._normalize_docs(docs, source)
def _load_excel_file(self, file_path, download_path, source) -> List[Document]:
loader = UnstructuredExcelLoader(download_path)
docs = loader.load()
return self._normalize_docs(docs, source)
def _load_pptx_file(self, file_path, download_path, source) -> List[Document]:
loader = UnstructuredPowerPointLoader(download_path)
docs = loader.load()
return self._normalize_docs(docs, source)
def _load_md_file(self, file_path, download_path, source) -> List[Document]:
loader = UnstructuredMarkdownLoader(download_path)
docs = loader.load()
return self._normalize_docs(docs, source)
def _normalize_docs(self, docs, source) -> List[Document]:
for doc in docs:
doc.metadata['source'] = source
doc.metadata['kind'] = "file"
return docs
def _load_file(self, dbx, file_path) -> List[Document]:
import dropbox
file_documents = []
file_extension = pathlib.Path(file_path).suffix.replace('.', '')
file_name = pathlib.Path(file_path).stem
# Calculate source link (use https and preview link format - dropbox:// protocol isn't guaranteed to work)
path_obj = pathlib.Path(file_path[1:])
folders = path_obj.parts
folders = '/'.join(folders[:-1])
source = f"https://www.dropbox.com/home/{folders}?preview={path_obj.name}"
if file_extension in ALLOWED_EXTENSIONS:
if file_extension == "paper":
# Download file
with tempfile.TemporaryDirectory() as temp_dir:
download_path = f"{temp_dir}/{file_name}"
try:
dbx.files_export_to_file(download_path=download_path, path=file_path, export_format="markdown")
file_documents = file_documents + self._load_md_file(file_path, download_path, source)
except dropbox.exceptions.DropboxException as error:
self.errors.append({ "message": error.error, "file": file_path })
else:
# Download file
with tempfile.TemporaryDirectory() as temp_dir:
download_path = f"{temp_dir}/{file_name}"
try:
dbx.files_download_to_file(download_path=download_path, path=file_path)
if file_extension == "txt":
file_documents = file_documents + self._load_text_file(file_path, download_path, source)
if file_extension in [ "htm", "html" ]:
file_documents = file_documents + self._load_html_file(file_path, download_path, source)
elif file_extension == "pdf":
file_documents = file_documents + self._load_pdf_file(file_path, download_path, source)
elif file_extension == "docx":
file_documents = file_documents + self._load_docx_file(file_path, download_path, source)
elif file_extension in [ "xlsx", "xls" ]:
file_documents = file_documents + self._load_excel_file(file_path, download_path, source)
elif file_extension == "pptx":
file_documents = file_documents + self._load_pptx_file(file_path, download_path, source)
elif file_extension == "md":
file_documents = file_documents + self._load_md_file(file_path, download_path, source)
elif file_extension == "rtf":
file_documents = file_documents + self._load_rtf_file(file_path, download_path, source)
except dropbox.exceptions.DropboxException as error:
self.errors.append({ "message": error.error, "file": file_path })
else:
self.invalid_files.append(file_path)
# Replace null character with space
for doc in file_documents:
doc.page_content = doc.page_content.replace('\x00', ' ')
return file_documents
def _load_files_from_folder_path(self, dbx, folder_path) -> List[Document]:
import dropbox
file_documents = []
files = None
found_all_records = False
file_paths = []
try:
while found_all_records is False:
if files is None:
files = dbx.files_list_folder(folder_path,
recursive=True,
include_deleted=False,
)
else:
files = dbx.files_list_folder_continue(files.cursor)
for file in files.entries:
if isinstance(file, dropbox.files.FileMetadata):
file_extension = pathlib.Path(file.name).suffix.replace('.', '')
if file_extension in ALLOWED_EXTENSIONS:
file_paths.append(file.path_display)
else:
self.invalid_files.append(file.path_display)
if files.has_more is False:
found_all_records = True
file_documents = self._load_files_from_paths(
dbx = dbx,
file_paths = file_paths
)
except dropbox.exceptions.DropboxException as error:
self.errors.append({ "message": error.error, "folder": folder_path })
return file_documents
def _load_files_from_paths(self, dbx, file_paths) -> List[Document]:
file_documents = []
for file_path in file_paths:
file_documents = file_documents + self._load_file(
dbx = dbx,
file_path = file_path
)
return file_documents
def load(self) -> List[Document]:
"""Load files."""
try:
# Import the Dropbox SDK
import dropbox
except ImportError as exp:
raise ImportError(
"Could not import dropbox python package. "
"Please install it with `pip install dropbox`."
) from exp
# earlier versions of this library used `access`, but the dropbox api returns `access_token`
if 'access' in self.auth:
args = { "oauth2_access_token": self.auth['access'] }
# preferred
if 'access_token' in self.auth:
args = { "oauth2_access_token": self.auth['access_token'] }
# If an app_key + secret is specified, pass in refresh token, app_key, app_secret
if self.app_key is not None and self.app_secret is not None:
# earlier versions of this library used `refresh`, but the dropbox api returns `refresh_token`
if 'refresh' in self.auth:
args['oauth2_refresh_token'] = self.auth['refresh']
# preferred
if 'refresh_token' in self.auth:
args['oauth2_refresh_token'] = self.auth['refresh_token']
args['app_key'] = self.app_key
args['app_secret'] = self.app_secret
# Initialize a new Dropbox object
try:
with dropbox.Dropbox(
**args
# =self.token[''],
# oauth2_access_token_expiration=self.token['expire'],
) as dbx:
if self.folder_path is not None:
return self._load_files_from_folder_path(
dbx = dbx,
folder_path = self.folder_path
)
if self.file_paths is not None:
return self._load_files_from_paths(
dbx = dbx,
file_paths = self.file_paths
)
return self._load_file(
dbx = dbx,
file_path = self.file_path
)
except dropbox.exceptions.DropboxException as error:
self.errors.append({ "message": error.error })
return []
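# --- illustrative usage sketch, not part of the original file ---------------
# Token fields follow the class docstring above; the key values and the folder
# path are placeholders.
def _example_load_folder():
    loader = DropboxLoader(
        auth={"access_token": "<ACCESS_TOKEN>", "refresh_token": "<REFRESH_TOKEN>"},
        app_key="<APP_KEY>",
        app_secret="<APP_SECRET>",
        folder_path="/Team Docs",
    )
    documents = loader.load()
    print(len(documents), "documents;", len(loader.invalid_files), "unsupported files skipped")
    return documents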
| [] |
2024-01-10 | lufixSch/lufixSch.github.io | bin~update_articles | #!/usr/bin/env python3
"""
Load all .md files in the src directory. If an update link is provided in
the front matter, update the content of the file with the content of the link.
"""
import frontmatter as fr
import regex as re
from typing import Iterable
import os
from os import walk
from argparse import ArgumentParser
from requests import get, post
import openai
API_ENDPOINT = "http://localhost:5000/v1"
DESCRIPTION_PROMPT = """###Instruction Your goal is to create an one sentence description of a project or guide based on its documentation. A good description formulates the main goal and aim of the project. It starts directly with the main goal or use case. A good description is the direct answer to "What is the goal of project <project_name>?"
Good examples of descriptions are:
A python Implementation of the Snake Game
Setup guide for a Sphinx based documentation page
Library for easier access to the GitHub API
Circuits and PCB designs for a smartome LED stripe controller
Remember: The description should be in plain text (no markdown), in the language of the documentation and only one short sentence
Documentation:
{content}
### Response:"""
class LLM:
"""LLM Interface calling the oobabooga api for text generation"""
def __init__(
self,
api_endpoint: str,
temperature: float = 0.5,
max_new_tokens: int = 200,
):
self.api_endpoint = api_endpoint
self.temperature = temperature
self.max_new_tokens = max_new_tokens
self.client = openai.OpenAI(api_key="NONE", base_url=self.api_endpoint)
def completion(
self,
prompt: str,
) -> str:
res = self.client.completions.create(
model="NONE",
prompt=prompt,
temperature=self.temperature,
max_tokens=self.max_new_tokens,
)
return res.choices[0].text
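# --- illustrative usage sketch, not part of the original file ---------------
# The wrapper above targets any OpenAI-compatible completion endpoint; this
# assumes the local server at API_ENDPOINT is running.
def _example_completion() -> str:
    llm = LLM(API_ENDPOINT, temperature=0.2, max_new_tokens=50)
    return llm.completion(DESCRIPTION_PROMPT.format(content="# My project\nA small CLI tool."))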
def find_articles(base_path: str):
"""
Find all .md files in the src directory and subdirectories.
Return a list of paths to the files.
"""
return (
os.path.join(dirpath, filename)
for (dirpath, _, filenames) in walk(base_path)
for filename in filenames
if filename.endswith(".md")
)
def load_articles(paths: Iterable[str]):
"""
Load Articles with the `frontmatter` package
"""
return ((path, fr.load(path)) for path in paths)
def filter_articles(articles: Iterable[tuple[str, fr.Post]]):
"""
Filter the list of articles to only those with an update link.
"""
for path, article in articles:
print(f"Found article: {path}")
if "update" in article.keys():
yield path, article
def pull_updates(articles: Iterable[tuple[str, fr.Post]]):
"""
Pull the content from the update link
"""
return ((path, article, get(article["update"]).text) for path, article in articles)
def fix_urls(articles: Iterable[tuple[str, fr.Post, str]]):
"""
Fix the urls in the content of the article based on the repository link
"""
for path, article, new_content in articles:
print("> Fixing URLs")
base_url = article["repository"].rstrip("/")
if "github" in base_url.lower():
image_url = base_url + (
"/raw/master/" if "master" in article["update"] else "/raw/main/"
)
link_url = base_url + (
"/blob/master/" if "master" in article["update"] else "/blob/main/"
)
elif "gitlab" in base_url.lower():
image_url = base_url + (
"/raw/master/" if "master" in article["update"] else "/raw/main/"
)
link_url = base_url + (
"/raw/master/" if "master" in article["update"] else "/raw/main/"
)
# Find and replace all relative image links with absolute links
new_content = re.sub(
r"!\[([^\]]+)\]\((?!http)([^\)]+)\)",
rf"",
new_content,
)
# Find and replace all relative markdown links with absolute links
new_content = re.sub(
r"\[([^\]]+)\]\((?!http)([^\)]+)\)",
rf"[\1]({link_url}\2)",
new_content,
)
yield path, article, new_content
def replace_content(articles: Iterable[tuple[str, fr.Post, str]]):
"""
Replace the content of the article with the content of the update link
"""
for path, article, new_content in articles:
print(f"> Updating")
article.content = new_content
yield path, article
def generate_description(
articles: Iterable[tuple[str, fr.Post]],
llm: LLM,
update: bool = False,
):
"""
Generate a description for an article using the language model.
"""
for path, article in articles:
if article.get("description", None) and not update:
yield path, article
continue
print(f"> Generating Description")
prompt = DESCRIPTION_PROMPT.format(content=article.content)
try:
description = llm.completion(prompt)
except Exception:
# keep the article unchanged if the completion call fails, instead of
# referencing the unbound `description` below
print(f"Failed to generate description for {article['title']}")
yield path, article
continue
article["description"] = description.strip()
yield path, article
def save_articles(articles: Iterable[tuple[str, fr.Post]]):
for path, article in articles:
print("> saving...")
with open(path, mode="w") as f:
f.write(fr.dumps(article))
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"-p",
"--base-path",
type=str,
default="src",
help="Path in which to search for markdown files",
)
parser.add_argument(
"-u",
"--update-description",
action="store_true",
help="Update the LLM generated descriptions (If false only missing descriptions are generated)",
)
parser.add_argument(
"-a",
"--api-endpoint",
type=str,
default=API_ENDPOINT,
help="API Endpoint for the Oobabooga WebUI",
)
args = parser.parse_args()
llm = LLM(args.api_endpoint, temperature=0.2, max_new_tokens=50)
paths = find_articles(args.base_path)
articles = load_articles(paths)
filtered_articles = filter_articles(articles)
updates = pull_updates(filtered_articles)
fixed_updates = fix_urls(updates)
new_articles = replace_content(fixed_updates)
final_articles = generate_description(new_articles, llm, args.update_description)
save_articles(final_articles)
print("Done!")
| [
"###Instruction Your goal is to create an one sentence description of a project or guide based on its documentation. A good description formulates the main goal and aim of the project. It starts directly with the main goal or use case. A good description is the direct answer to \"What is the goal of project <project_name>?\"\n\nGood examples of descriptions are:\nA python Implementation of the Snake Game\nSetup guide for a Sphinx based documentation page\nLibrary for easier access to the GitHub API\nCircuits and PCB designs for a smartome LED stripe controller\n\nRemember: The description should be in plain text (no markdown), in the language of the documentation and only one short sentence\n\nDocumentation:\n{content}\n\n\n### Response:"
] |
2024-01-10 | yu-jeffy/ink-jet | dataset~contract_gen_helpers.py | import openai
from dotenv import load_dotenv
import os
import subprocess
###############################################
# HELPER FUNCTIONS
###############################################
# gpt call
def generate_smart_contract(prompt):
# Load openai api key
load_dotenv()
# Create the chat completion
response = openai.chat.completions.create(
model="gpt-4-1106-preview",
messages=[
{"role": "system", "content": ""},
{"role": "user", "content": prompt}
],
temperature=0.8,
max_tokens=3000, # larger token size to fit full smart contract
)
content = response.choices[0].message.content
# print(content)
return content
# Removes ```rust from the beginning and ``` from the end of the string (gpt response).
def remove_mardown_markers(text):
# Check if the string starts with ```rust and ends with ```
if text.startswith("```rust") and text.endswith("```"):
# Remove ```rust from the beginning (7 characters) and ``` from the end (3 characters)
return text[7:-3]
else:
# Return the original string if it doesn't have the specific markers
return text
# Create a new cargo contract project
def create_cargo_contract_project(folder_name):
return subprocess.run(["cargo", "contract", "new", folder_name], capture_output=True, text=True)
# Write your Rust code to the lib.rs file in the new project folder
def write_to_lib_rs(folder_name, rust_code):
lib_rs_path = os.path.join(folder_name, "lib.rs")
with open(lib_rs_path, 'w') as file:
file.write(rust_code)
# Build the cargo contract
def build_cargo_contract(folder_name):
orig_dir = os.getcwd()
os.chdir(folder_name)
result = subprocess.run(["cargo", "contract", "build"], capture_output=True, text=True)
os.chdir(orig_dir)
return result
# Run Coinfabrik Scout
def run_coinfabrik_scout(folder_name):
orig_dir = os.getcwd()
os.chdir(folder_name)
result = subprocess.run(["cargo", "scout-audit", "--output-format", "json"], capture_output=True, text=True)
os.chdir(orig_dir)
return result
# Write 'success' or errors to a file in the project folder
def write_build_result_to_file(folder_name, result):
result_file_path = os.path.join(folder_name, "build_result.txt")
with open(result_file_path, 'w') as file:
if result.returncode == 0:
file.write("success\n")
else:
file.write(result.stdout)
file.write(result.stderr)
# Write Coinfabrik Scout run results to a file in the project folder
def write_audit_result_to_file(folder_name, result):
result_file_path = os.path.join(folder_name, "audit_result.txt")
with open(result_file_path, 'w') as file:
if result.returncode == 0:
file.write("success\n")
else:
file.write(result.stdout)
file.write(result.stderr)
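# --- illustrative usage sketch, not part of the original file ---------------
# How the helpers above chain together for one generated contract; the folder
# name and prompt are caller-supplied placeholders.
def _generate_and_check(folder_name: str, prompt: str) -> None:
    raw = generate_smart_contract(prompt)
    code = remove_mardown_markers(raw)
    create_cargo_contract_project(folder_name)
    write_to_lib_rs(folder_name, code)
    build_result = build_cargo_contract(folder_name)
    write_build_result_to_file(folder_name, build_result)
    audit_result = run_coinfabrik_scout(folder_name)
    write_audit_result_to_file(folder_name, audit_result)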
| [] |
2024-01-10 | arthurtang888/dify | api~tasks~enable_segment_to_index_task.py | import datetime
import logging
import time
import click
from celery import shared_task
from langchain.schema import Document
from werkzeug.exceptions import NotFound
from core.index.index import IndexBuilder
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment
@shared_task(queue='dataset')
def enable_segment_to_index_task(segment_id: str):
"""
Async enable segment to index
:param segment_id:
Usage: enable_segment_to_index_task.delay(segment_id)
"""
logging.info(click.style('Start enable segment to index: {}'.format(segment_id), fg='green'))
start_at = time.perf_counter()
segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
if not segment:
raise NotFound('Segment not found')
if segment.status != 'completed':
return
indexing_cache_key = 'segment_{}_indexing'.format(segment.id)
try:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
dataset = segment.dataset
if not dataset:
logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
return
dataset_document = segment.document
if not dataset_document:
logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
return
# save vector index
index = IndexBuilder.get_index(dataset, 'high_quality')
if index:
index.add_texts([document], duplicate_check=True)
# save keyword index
index = IndexBuilder.get_index(dataset, 'economy')
if index:
index.add_texts([document])
end_at = time.perf_counter()
logging.info(click.style('Segment enabled to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
except Exception as e:
logging.exception("enable segment to index failed")
segment.enabled = False
segment.disabled_at = datetime.datetime.utcnow()
segment.status = 'error'
segment.error = str(e)
db.session.commit()
finally:
redis_client.delete(indexing_cache_key)
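# --- illustrative sketch, not part of the original file ---------------------
# Typical dispatch from the API layer: mark the segment as indexing in redis
# (the same key this task clears in its finally block) before queueing the
# task. The 600-second TTL is an assumption, not taken from this file.
def _dispatch_enable(segment_id: str) -> None:
    indexing_cache_key = 'segment_{}_indexing'.format(segment_id)
    redis_client.setex(indexing_cache_key, 600, 1)
    enable_segment_to_index_task.delay(segment_id)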
| [] |
2024-01-10 | hfattahi/PySAR | mintpy~plot_coherence_matrix.py | #!/usr/bin/env python3
############################################################
# Program is part of MintPy #
# Copyright(c) 2018-2019, Zhang Yunjun #
# Author: Zhang Yunjun, Nov 2018 #
############################################################
import os
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
from mintpy.objects import ifgramStack
from mintpy.utils import readfile, plot as pp, utils as ut
from mintpy import view
########################### Sub Function #############################
EXAMPLE = """example:
plot_coherence_matrix.py inputs/ifgramStack.h5
plot_coherence_matrix.py inputs/ifgramStack.h5 --yx 277 1069
plot_coherence_matrix.py inputs/ifgramStack.h5 --lalo -0.8493 -91.1510 -c RdBu
# left: map view
plot_coherence_matrix.py inputs/ifgramStack.h5 --view-cmd "view.py {} --dem inputs/gsi10m.dem.wgs84"
plot_coherence_matrix.py inputs/ifgramStack.h5 --view-cmd 'view.py {} --wrap --wrap-range -3 3'
plot_coherence_matrix.py inputs/ifgramStack.h5 --view-cmd 'view.py {} --sub-x 900 1400 --sub-y 0 500'
# right: matrix view
# show color jump same as the coherence threshold in network inversion with pixel-wised masking
plot_coherence_matrix.py inputs/ifgramStack.h5 --cmap-vlist 0 0.4 1
"""
def create_parser():
parser = argparse.ArgumentParser(description='Plot the coherence matrix of one pixel (interactive)',
formatter_class=argparse.RawTextHelpFormatter,
epilog=EXAMPLE)
parser.add_argument('ifgram_file', help='interferogram stack file')
parser.add_argument('--yx', type=int, metavar=('Y', 'X'), nargs=2,
help='Point of interest in y(row)/x(col)')
parser.add_argument('--lalo', type=float, metavar=('LAT','LON'), nargs=2,
help='Point of interest in lat/lon')
parser.add_argument('--lookup','--lut', dest='lookup_file',
help='Lookup file to convert lat/lon into y/x')
parser.add_argument('-c','--cmap', dest='cmap_name', default='RdBu_truncate',
help='Colormap for coherence matrix.\nDefault: RdBu_truncate')
parser.add_argument('--cmap-vlist', dest='cmap_vlist', type=float, nargs=3, default=[0.0, 0.7, 1.0],
help='start/jump/end fraction for truncated colormap. Default: 0.0 0.7 1.0')
parser.add_argument('--figsize','--fs', dest='fig_size', metavar=('WID', 'LEN'), type=float, nargs=2,
help='figure size in inches. Default: [8, 4]')
parser.add_argument('--img-file', dest='img_file',
help='dataset to show in map to facilitate point selection. Default: velocity.h5')
parser.add_argument('--view-cmd', dest='view_cmd', default='view.py {} --wrap --noverbose ',
help='view.py command to plot the input map file\n'+
'Default: view.py img_file --wrap --noverbose')
# aux files
parser.add_argument('--tcoh', dest='tcoh_file', default='temporalCoherence.h5',
help='temporal coherence file.')
parser.add_argument('-t','--template', dest='template_file',
help='template file.')
parser.add_argument('--save', dest='save_fig',
action='store_true', help='save the figure')
parser.add_argument('--nodisplay', dest='disp_fig',
action='store_false', help='save and do not display the figure')
parser.add_argument('--noverbose', dest='print_msg', action='store_false',
help='Disable the verbose message printing.')
return parser
def cmd_line_parse(iargs=None):
parser = create_parser()
inps = parser.parse_args(args=iargs)
# default aux file:
mintpy_dir = os.path.dirname(os.path.dirname(inps.ifgram_file))
if not inps.img_file:
inps.img_file = os.path.join(mintpy_dir, 'velocity.h5')
if not inps.template_file:
inps.template_file = os.path.join(mintpy_dir, 'smallbaselineApp.cfg')
if not os.path.isfile(inps.img_file):
raise SystemExit('ERROR: input image file not found: {}'.format(inps.img_file))
if not os.path.isfile(inps.tcoh_file):
inps.tcoh_file = None
if not os.path.isfile(inps.template_file):
inps.tcoh_file = None
# verbose print using --noverbose option
global vprint
vprint = print if inps.print_msg else lambda *args, **kwargs: None
if not inps.disp_fig:
inps.save_fig = True
plt.switch_backend('Agg')
return inps
def read_network_info(inps):
k = readfile.read_attribute(inps.ifgram_file)['FILE_TYPE']
if k != 'ifgramStack':
raise ValueError('input file {} is not ifgramStack: {}'.format(inps.ifgram_file, k))
obj = ifgramStack(inps.ifgram_file)
obj.open(print_msg=inps.print_msg)
inps.date12_list = obj.get_date12_list(dropIfgram=False)
date12_kept = obj.get_date12_list(dropIfgram=True)
inps.ex_date12_list = sorted(list(set(inps.date12_list) - set(date12_kept)))
inps.date_list = obj.get_date_list(dropIfgram=False)
vprint('number of all interferograms: {}'.format(len(inps.date12_list)))
vprint('number of dropped interferograms: {}'.format(len(inps.ex_date12_list)))
vprint('number of kept interferograms: {}'.format(len(inps.date12_list) - len(inps.ex_date12_list)))
vprint('number of acquisitions: {}'.format(len(inps.date_list)))
if inps.lalo:
if not inps.lookup_file:
lookup_file = os.path.join(os.path.dirname(inps.ifgram_file), 'geometry*.h5')
inps.lookup_file = ut.get_lookup_file(filePattern=lookup_file)
coord = ut.coordinate(obj.metadata, lookup_file=inps.lookup_file)
inps.yx = coord.geo2radar(inps.lalo[0], inps.lalo[1])[0:2]
if not inps.yx:
inps.yx = (obj.refY, obj.refX)
vprint('plot initial coherence matrix at reference pixel: {}'.format(inps.yx))
return inps
class coherenceMatrixViewer():
"""class for plot_coherence_matrix
Example:
from mintpy.plot_coherence_matrix import coherenceMatrixViewer
cmd = 'plot_coherence_matrix.py ./inputs/ifgramStack.h5 --noverbose --figsize 9 3 --yx 216 310'
obj = coherenceMatrixViewer(cmd)
obj.configure()
obj.plot()
"""
def __init__(self, cmd=None, iargs=None):
if cmd:
iargs = cmd.split()[1:]
self.cmd = cmd
self.iargs = iargs
# figure variables
self.figname = 'Coherence matrix'
self.fig_size = None
self.fig = None
self.ax_img = None
self.ax_mat = None
return
def configure(self):
inps = cmd_line_parse(self.iargs)
# read network info
inps = read_network_info(inps)
# copy inps to self object
for key, value in inps.__dict__.items():
setattr(self, key, value)
# auto figure size
if not self.fig_size:
ds_shape = readfile.read(self.img_file)[0].shape
fig_size = pp.auto_figure_size(ds_shape, disp_cbar=True, ratio=0.7)
self.fig_size = [fig_size[0]+fig_size[1], fig_size[1]]
vprint('create figure in size of {} inches'.format(self.fig_size))
# read aux data
# 1. temporal coherence value
self.tcoh = None
if self.tcoh_file:
self.tcoh = readfile.read(self.tcoh_file)[0]
# 2. minimum used coherence from template file
self.min_coh_used = 0.0
if self.template_file:
template = readfile.read_template(self.template_file)
template = ut.check_template_auto_value(template)
if template['mintpy.networkInversion.maskDataset'] == 'coherence':
self.min_coh_used = float(template['mintpy.networkInversion.maskThreshold'])
vprint('Pixel-wised masking is applied in invert_network step')
return
def plot(self):
# Figure 1
self.fig = plt.figure(self.figname, figsize=self.fig_size)
# Axes 1 - Image
self.ax_img = self.fig.add_axes([0.05, 0.1, 0.4, 0.8])
view_cmd = self.view_cmd.format(self.img_file)
d_img, atr, inps_img = view.prep_slice(view_cmd)
if all(i is not None for i in self.yx):
inps_img.pts_marker = 'r^'
inps_img.pts_yx = np.array(self.yx).reshape(-1, 2)
# point yx --> lalo for geocoded product
if 'Y_FIRST' in atr.keys():
coord = ut.coordinate(atr)
inps_img.pts_lalo = np.array(coord.radar2geo(self.yx[0], self.yx[1])[0:2]).reshape(-1,2)
inps_img.print_msg = self.print_msg
self.ax_img = view.plot_slice(self.ax_img, d_img, atr, inps_img)[0]
# coordinate info
self.coord = ut.coordinate(atr)
self.fig_coord = inps_img.fig_coord
# Axes 2 - coherence matrix
self.ax_mat = self.fig.add_axes([0.55, 0.125, 0.40, 0.75])
self.colormap = pp.ColormapExt(self.cmap_name, vlist=self.cmap_vlist).colormap
if all(i is not None for i in self.yx):
self.plot_coherence_matrix4pixel(self.yx)
# Link the canvas to the plots.
self.cid = self.fig.canvas.mpl_connect('button_press_event', self.update_coherence_matrix)
if self.disp_fig:
plt.show()
return
def plot_coherence_matrix4pixel(self, yx):
"""Plot coherence matrix for one pixel
Parameters: yx : list of 2 int
"""
self.ax_mat.cla()
# read coherence
box = (yx[1], yx[0], yx[1]+1, yx[0]+1)
coh = readfile.read(self.ifgram_file, datasetName='coherence', box=box)[0]
# ex_date for pixel-wise masking during network inversion
ex_date12_list = self.ex_date12_list[:] #local copy
if self.min_coh_used > 0.:
ex_date12_list += np.array(self.date12_list)[coh < self.min_coh_used].tolist()
ex_date12_list = sorted(list(set(ex_date12_list)))
# prep metadata
plotDict = {}
plotDict['fig_title'] = 'Y = {}, X = {}'.format(yx[0], yx[1])
# display temporal coherence value of the pixel
if self.tcoh_file:
tcoh = self.tcoh[yx[0], yx[1]]
plotDict['fig_title'] += ', tcoh = {:.2f}'.format(tcoh)
plotDict['colormap'] = self.colormap
plotDict['cmap_vlist'] = self.cmap_vlist
plotDict['disp_legend'] = False
# plot
coh_mat = pp.plot_coherence_matrix(self.ax_mat,
date12List=self.date12_list,
cohList=coh.tolist(),
date12List_drop=ex_date12_list,
plot_dict=plotDict)[1]
self.ax_mat.annotate('ifgrams\navailable', xy=(0.05, 0.05), xycoords='axes fraction', fontsize=12)
self.ax_mat.annotate('ifgrams\nused', ha='right', xy=(0.95, 0.85), xycoords='axes fraction', fontsize=12)
# status bar
def format_coord(x, y):
row, col = int(y+0.5), int(x+0.5)
date12 = sorted([self.date_list[row], self.date_list[col]])
date12 = ['{}-{}-{}'.format(i[0:4], i[4:6], i[6:8]) for i in date12]
return 'x={}, y={}, v={:.3f}'.format(date12[0], date12[1], coh_mat[row, col])
self.ax_mat.format_coord = format_coord
# info
msg = 'pixel in yx = {}, '.format(tuple(yx))
msg += 'min/max spatial coherence: {:.2f} / {:.2f}, '.format(np.min(coh), np.max(coh))
if self.tcoh_file:
msg += 'temporal coherence: {:.2f}'.format(tcoh)
vprint(msg)
self.fig.canvas.draw()
return
def update_coherence_matrix(self, event):
if event.inaxes == self.ax_img:
if self.fig_coord == 'geo':
yx = [self.coord.lalo2yx(event.ydata, coord_type='lat'),
self.coord.lalo2yx(event.xdata, coord_type='lon')]
else:
yx = [int(event.ydata+0.5),
int(event.xdata+0.5)]
self.plot_coherence_matrix4pixel(yx)
return
########################## Main Function ##############################
def main(iargs=None):
obj = coherenceMatrixViewer(iargs=iargs)
obj.configure()
obj.plot()
obj.fig.canvas.mpl_disconnect(obj.cid)
return
############################################################
if __name__ == '__main__':
main(sys.argv[1:])
| [] |
2024-01-10 | tukru/How-to-build-an-AndrewTate-Chatbot-GPT3.5_turbo | Tate-Bot.py | import openai
# Set up OpenAI API credentials
openai.api_key = "YOUR_API_KEY"
# Define the conversation history
conversation = [
{"role": "system", "content": "You are chatting with Andrew Tate."},
{"role": "user", "content": "Hello, Andrew!"},
]
# Main loop for user interaction
while True:
# Prompt user for input
user_input = input("You: ")
# Add user message to the conversation history
conversation.append({"role": "user", "content": user_input})
# Generate response from the chatbot
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation,
temperature=0.7,
max_tokens=100,
n=1,
stop=None
)
# Extract the chatbot's reply
chatbot_reply = response.choices[0].message["content"].strip()
# Add chatbot's reply to the conversation history
conversation.append({"role": "assistant", "content": chatbot_reply})
# Display chatbot's response
print("Andrew Tate: " + chatbot_reply)
| [
"Hello, Andrew!",
"You are chatting with Andrew Tate."
] |
2024-01-10 | voyager2009/chainlit | src~chainlit~server.py | import mimetypes
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")
import os
import json
import webbrowser
from contextlib import asynccontextmanager
from watchfiles import awatch
from fastapi import FastAPI
from fastapi.responses import (
HTMLResponse,
JSONResponse,
FileResponse,
PlainTextResponse,
)
from fastapi_socketio import SocketManager
from starlette.middleware.cors import CORSMiddleware
import asyncio
from chainlit.config import config, load_module, reload_config, DEFAULT_HOST
from chainlit.session import Session, sessions
from chainlit.user_session import user_sessions
from chainlit.client import CloudClient
from chainlit.emitter import ChainlitEmitter
from chainlit.markdown import get_markdown_str
from chainlit.action import Action
from chainlit.message import Message, ErrorMessage
from chainlit.telemetry import trace_event
from chainlit.logger import logger
from chainlit.types import CompletionRequest
@asynccontextmanager
async def lifespan(app: FastAPI):
host = config.run.host
port = config.run.port
if not config.run.headless:
if host == DEFAULT_HOST:
url = f"http://localhost:{port}"
else:
url = f"http://{host}:{port}"
logger.info(f"Your app is available at {url}")
# Add a delay before opening the browser
await asyncio.sleep(1)
webbrowser.open(url)
watch_task = None
stop_event = asyncio.Event()
if config.run.watch:
async def watch_files_for_changes():
extensions = [".py"]
files = ["chainlit.md", "config.toml"]
async for changes in awatch(config.root, stop_event=stop_event):
for change_type, file_path in changes:
file_name = os.path.basename(file_path)
file_ext = os.path.splitext(file_name)[1]
if file_ext.lower() in extensions or file_name.lower() in files:
logger.info(
f"File {change_type.name}: {file_name}. Reloading app..."
)
try:
reload_config()
except Exception as e:
logger.error(f"Error reloading config: {e}")
break
# Reload the module if the module name is specified in the config
if config.run.module_name:
try:
load_module(config.run.module_name)
except Exception as e:
logger.error(f"Error reloading module: {e}")
break
await socket.emit("reload", {})
break
watch_task = asyncio.create_task(watch_files_for_changes())
try:
yield
finally:
if watch_task:
try:
stop_event.set()
watch_task.cancel()
await watch_task
except asyncio.exceptions.CancelledError:
pass
root_dir = os.path.dirname(os.path.abspath(__file__))
build_dir = os.path.join(root_dir, "frontend/dist")
app = FastAPI(lifespan=lifespan)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Define max HTTP data size to 100 MB
max_http_data_size = 100 * 1024 * 1024
socket = SocketManager(
app,
cors_allowed_origins=[],
async_mode="asgi",
max_http_buffer_size=max_http_data_size,
)
"""
-------------------------------------------------------------------------------
HTTP HANDLERS
-------------------------------------------------------------------------------
"""
def get_html_template():
PLACEHOLDER = "<!-- TAG INJECTION PLACEHOLDER -->"
default_url = "https://github.com/Chainlit/chainlit"
url = config.ui.github or default_url
tags = f"""<title>{config.ui.name}</title>
<meta name="description" content="{config.ui.description}">
<meta property="og:type" content="website">
<meta property="og:title" content="{config.ui.name}">
<meta property="og:description" content="{config.ui.description}">
<meta property="og:image" content="https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png">
<meta property="og:url" content="{url}">"""
index_html_file_path = os.path.join(build_dir, "index.html")
with open(index_html_file_path, "r", encoding="utf-8") as f:
content = f.read()
content = content.replace(PLACEHOLDER, tags)
return content
html_template = get_html_template()
@app.post("/completion")
async def completion(completion: CompletionRequest):
"""Handle a completion request from the prompt playground."""
import openai
trace_event("completion")
api_key = completion.userEnv.get("OPENAI_API_KEY", os.environ.get("OPENAI_API_KEY"))
model_name = completion.settings.model_name
stop = completion.settings.stop
# OpenAI doesn't support an empty stop array, clear it
if isinstance(stop, list) and len(stop) == 0:
stop = None
if model_name in ["gpt-3.5-turbo", "gpt-4"]:
response = await openai.ChatCompletion.acreate(
api_key=api_key,
model=model_name,
messages=[{"role": "user", "content": completion.prompt}],
stop=stop,
**completion.settings.to_settings_dict(),
)
return PlainTextResponse(content=response["choices"][0]["message"]["content"])
else:
response = await openai.Completion.acreate(
api_key=api_key,
model=model_name,
prompt=completion.prompt,
stop=stop,
**completion.settings.to_settings_dict(),
)
return PlainTextResponse(content=response["choices"][0]["text"])
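# Hypothetical example payload for the /completion endpoint above (field names are
# inferred from how CompletionRequest is used in this handler, not from its definition):
# POST /completion
# {
#     "prompt": "Say hello",
#     "userEnv": {"OPENAI_API_KEY": "sk-..."},
#     "settings": {"model_name": "gpt-3.5-turbo", "stop": []}
# }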
@app.get("/project/settings")
async def project_settings():
"""Return project settings. This is called by the UI before the establishing the websocket connection."""
return JSONResponse(
content={
"chainlitServer": config.chainlit_server,
"prod": bool(config.chainlit_prod_url),
"ui": config.ui.to_dict(),
"project": config.project.to_dict(),
"markdown": get_markdown_str(config.root),
}
)
@app.get("/{path:path}")
async def serve(path: str):
"""Serve the UI."""
path_to_file = os.path.join(build_dir, path)
if path != "" and os.path.exists(path_to_file):
return FileResponse(path_to_file)
else:
return HTMLResponse(content=html_template, status_code=200)
"""
-------------------------------------------------------------------------------
WEBSOCKET HANDLERS
-------------------------------------------------------------------------------
"""
def need_session(id: str):
"""Return the session with the given id."""
session = sessions.get(id)
if not session:
raise ValueError("Session not found")
return session
@socket.on("connect")
async def connect(sid, environ):
user_env = environ.get("HTTP_USER_ENV")
authorization = environ.get("HTTP_AUTHORIZATION")
cloud_client = None
# Check authorization
if not config.project.public and not authorization:
# Refuse connection if the app is private and no access token is provided
trace_event("no_access_token")
logger.error("Connection refused: No access token provided")
return False
elif authorization and config.project.id:
# Create the cloud client
cloud_client = CloudClient(
project_id=config.project.id,
session_id=sid,
access_token=authorization,
)
is_project_member = await cloud_client.is_project_member()
if not is_project_member:
logger.error("Connection refused: You are not a member of this project")
return False
# Check user env
if config.project.user_env:
# Check if requested user environment variables are provided
if user_env:
user_env = json.loads(user_env)
for key in config.project.user_env:
if key not in user_env:
trace_event("missing_user_env")
logger.error(
"Connection refused: Missing user environment variable: " + key
)
return False
else:
logger.error("Connection refused: Missing user environment variables")
return False
# Create the session
# Function to send a message to this particular session
def emit_fn(event, data):
if sid in sessions:
if sessions[sid]["should_stop"]:
sessions[sid]["should_stop"] = False
raise InterruptedError("Task stopped by user")
return socket.emit(event, data, to=sid)
# Function to ask the user a question
def ask_user_fn(data, timeout):
if sessions[sid]["should_stop"]:
sessions[sid]["should_stop"] = False
raise InterruptedError("Task stopped by user")
return socket.call("ask", data, timeout=timeout, to=sid)
session = {
"id": sid,
"emit": emit_fn,
"ask_user": ask_user_fn,
"client": cloud_client,
"user_env": user_env,
"running_sync": False,
"should_stop": False,
} # type: Session
sessions[sid] = session
trace_event("connection_successful")
return True
@socket.on("connection_successful")
async def connection_successful(sid):
session = need_session(sid)
__chainlit_emitter__ = ChainlitEmitter(session)
if config.code.lc_factory:
"""Instantiate the langchain agent and store it in the session."""
agent = await config.code.lc_factory(__chainlit_emitter__=__chainlit_emitter__)
session["agent"] = agent
if config.code.on_chat_start:
"""Call the on_chat_start function provided by the developer."""
await config.code.on_chat_start(__chainlit_emitter__=__chainlit_emitter__)
@socket.on("disconnect")
async def disconnect(sid):
if sid in sessions:
# Clean up the session
sessions.pop(sid)
if sid in user_sessions:
# Clean up the user session
user_sessions.pop(sid)
@socket.on("stop")
async def stop(sid):
if sid in sessions:
trace_event("stop_task")
session = sessions[sid]
__chainlit_emitter__ = ChainlitEmitter(session)
await Message(author="System", content="Task stopped by the user.").send()
session["should_stop"] = True
if config.code.on_stop:
await config.code.on_stop()
async def process_message(session: Session, author: str, input_str: str):
"""Process a message from the user."""
try:
__chainlit_emitter__ = ChainlitEmitter(session)
await __chainlit_emitter__.task_start()
if session["client"]:
# If cloud is enabled, persist the message
await session["client"].create_message(
{
"author": author,
"content": input_str,
"authorIsUser": True,
}
)
langchain_agent = session.get("agent")
if langchain_agent:
from chainlit.lc.agent import run_langchain_agent
# If a langchain agent is available, run it
if config.code.lc_run:
# If the developer provided a custom run function, use it
await config.code.lc_run(
langchain_agent,
input_str,
__chainlit_emitter__=__chainlit_emitter__,
)
return
else:
# Otherwise, use the default run function
raw_res, output_key = await run_langchain_agent(
langchain_agent, input_str, use_async=config.code.lc_agent_is_async
)
if config.code.lc_postprocess:
# If the developer provided a custom postprocess function, use it
await config.code.lc_postprocess(
raw_res, __chainlit_emitter__=__chainlit_emitter__
)
return
elif output_key is not None:
# Use the output key if provided
res = raw_res[output_key]
else:
# Otherwise, use the raw response
res = raw_res
# Finally, send the response to the user
await Message(author=config.ui.name, content=res).send()
elif config.code.on_message:
# If no langchain agent is available, call the on_message function provided by the developer
await config.code.on_message(
input_str, __chainlit_emitter__=__chainlit_emitter__
)
except InterruptedError:
pass
except Exception as e:
logger.exception(e)
await ErrorMessage(author="Error", content=str(e)).send()
finally:
await __chainlit_emitter__.task_end()
@socket.on("ui_message")
async def message(sid, data):
"""Handle a message sent by the User."""
session = need_session(sid)
session["should_stop"] = False
input_str = data["content"].strip()
author = data["author"]
await process_message(session, author, input_str)
async def process_action(session: Session, action: Action):
__chainlit_emitter__ = ChainlitEmitter(session)
callback = config.code.action_callbacks.get(action.name)
if callback:
await callback(action, __chainlit_emitter__=__chainlit_emitter__)
else:
logger.warning("No callback found for action %s", action.name)
@socket.on("action_call")
async def call_action(sid, action):
"""Handle an action call from the UI."""
session = need_session(sid)
__chainlit_emitter__ = ChainlitEmitter(session)
action = Action(**action)
await process_action(session, action)
| [
"Task stopped by the user."
] |
2024-01-10 | keskarnitish/transformers | examples~run_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
from tqdm import trange
import torch
import torch.nn.functional as F
import numpy as np
from transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from transformers import XLNetLMHeadModel, XLNetTokenizer
from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from transformers import CTRLLMHeadModel, CTRLTokenizer
from transformers import XLMWithLMHeadModel, XLMTokenizer
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
'ctrl': (CTRLLMHeadModel, CTRLTokenizer),
'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'xlnet': (XLNetLMHeadModel, XLNetTokenizer),
'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),
'xlm': (XLMWithLMHeadModel, XLMTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
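# Illustrative sketch (not called anywhere in this script): how the filtering above is
# typically used for a single generation step, assuming a tiny 5-token vocabulary.
def _top_k_top_p_example():
    logits = torch.tensor([1.0, 3.0, 0.5, 2.0, -1.0])
    # keep only the 2 highest logits; every other entry is set to -inf
    filtered = top_k_top_p_filtering(logits.clone(), top_k=2, top_p=0.0)
    probs = F.softmax(filtered, dim=-1)  # filtered-out tokens get probability 0
    return torch.multinomial(probs, num_samples=1)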
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, repetition_penalty=1.0, is_xlnet=False, xlm_lang=None, device='cpu'):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
for _ in trange(length):
inputs = {'input_ids': generated}
if is_xlnet:
# XLNet is a direct (predict same token, not next token) and bi-directional model by default
# => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
target_mapping[0, 0, -1] = 1.0 # predict last token
inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
if xlm_lang is not None:
inputs["langs"] = torch.tensor([xlm_lang] * inputs["input_ids"].shape[1], device=device).view(1, -1)
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / (temperature if temperature > 0 else 1.)
# repetition penalty from CTRL (https://arxiv.org/abs/1909.05858)
for _ in set(generated):
next_token_logits[_] /= repetition_penalty
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
if temperature == 0: #greedy sampling:
next_token = torch.argmax(filtered_logits).unsqueeze(0)
else:
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--xlm_lang", type=str, default="", help="Optional language when used with the XLM model.")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0,
help="temperature of 0 implies greedy sampling")
parser.add_argument("--repetition_penalty", type=float, default=1.0,
help="primarily useful for CTRL model; in that case, use 1.2")
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--stop_token', type=str, default=None,
help="Token at which text generation is stopped")
args = parser.parse_args()
if args.model_type in ["ctrl"]:
if args.temperature > 0.7 :
print('CTRL typically works better with lower temperatures (and lower top_k).')
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path)
model.to(args.device)
model.eval()
if args.length < 0 and model.config.max_position_embeddings > 0:
args.length = model.config.max_position_embeddings
elif 0 < model.config.max_position_embeddings < args.length:
args.length = model.config.max_position_embeddings # No generation bigger than model size
elif args.length < 0:
args.length = MAX_LENGTH # avoid infinite loop
print(args)
while True:
xlm_lang = None
# XLM language usage is detailed in issue #1414
if args.model_type in ["xlm"] and hasattr(tokenizer, 'lang2id') and hasattr(model.config, 'use_lang_emb') \
and model.config.use_lang_emb:
if args.xlm_lang:
language = args.xlm_lang
else:
language = None
while language not in tokenizer.lang2id.keys():
language = input("Using XLM. Select language in " + str(list(tokenizer.lang2id.keys())) + " >>> ")
xlm_lang = tokenizer.lang2id[language]
raw_text = args.prompt if args.prompt else input("Model prompt >>> ")
if args.model_type in ["transfo-xl", "xlnet"]:
# Models with memory like to have a long prompt for short inputs.
raw_text = (args.padding_text if args.padding_text else PADDING_TEXT) + raw_text
context_tokens = tokenizer.encode(raw_text)
out = sample_sequence(
model=model,
context=context_tokens,
length=args.length,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
repetition_penalty=args.repetition_penalty,
is_xlnet=bool(args.model_type == "xlnet"),
xlm_lang=xlm_lang,
device=args.device,
)
out = out[0, len(context_tokens):].tolist()
text = tokenizer.decode(out, clean_up_tokenization_spaces=True, skip_special_tokens=True)
text = text[: text.find(args.stop_token) if args.stop_token else None]
print(text)
if args.prompt:
break
return text
if __name__ == '__main__':
main()
| [] |
2024-01-10 | thiswillbeyourgithub/DocToolsLLM | DocToolsLLM.py | import tldextract
from joblib import Parallel, delayed
from threading import Lock
from pathlib import Path
import time
from datetime import datetime
import re
import textwrap
import fire
import os
from tqdm import tqdm
import signal
import pdb
from ftlangdetect import detect as language_detect
from langchain.globals import set_verbose, set_debug
from langchain.chains import ConversationalRetrievalChain
from langchain.chains import LLMChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.retrievers.merger_retriever import MergerRetriever
from langchain.docstore.document import Document
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.retrievers.document_compressors import DocumentCompressorPipeline
from langchain.retrievers import ContextualCompressionRetriever, KNNRetriever, SVMRetriever
from langchain.prompts.prompt import PromptTemplate
from utils.llm import load_llm, AnswerConversationBufferMemory
from utils.file_loader import load_doc, load_embeddings, create_hyde_retriever, get_tkn_length, average_word_length, wpm, get_splitter, check_docs_tkn_length, create_parent_retriever, markdownlink_regex
from utils.logger import whi, yel, red
from utils.cli import ask_user
from utils.tasks import do_summarize
os.environ["TOKENIZERS_PARALLELISM"] = "true"
d = datetime.today()
today = f"{d.day:02d}/{d.month:02d}/{d.year:04d}"
class DocToolsLLM:
VERSION = 0.9
def __init__(
self,
model="openai",
task="query",
query=None,
filetype="infer",
local_llm_path=None,
# embed_model="openai",
embed_model = "paraphrase-multilingual-mpnet-base-v2",
# embed_model = "distiluse-base-multilingual-cased-v1",
# embed_model = "msmarco-distilbert-cos-v5",
# embed_model = "all-mpnet-base-v2",
saveas=".cache/latest_docs_and_embeddings",
loadfrom=None,
top_k=10,
n_recursive_summary=0,
n_summaries_target=-1,
debug=False,
llm_verbosity=True,
help=False,
h=False,
**kwargs,
):
"""
Parameters
----------
--task str, default query
possibilities:
* query means to load the input files then wait for user question.
* search means only return the document corresponding to the search
* summarize means the input will be passed through a summarization prompt.
* summarize_then_query
* summarize_link_file takes in --filetype must be link_file
--query str, default None
if str, will be directly used for the first query if task in ["query", "search"]
--filetype str, default infer
the type of input. Depending on the value, different other parameters
are needed. If json_list is used, the line of the input file can contain
any of those parameters as long as they are as json. You can find
an example of json_list file in utils/json_list_example.txt
Supported values => relevant parameters
* youtube => --path must be a link to youtube --language=["fr","en"] to use french transcripts if possible, english otherwise --translation=en to use the transcripts after translation to english
* youtube_playlist => --path must link to a youtube playlist. language and translation are set to their default value of fr,en and en
* pdf => --path is path to pdf
* txt => --path is path to txt
* url => --path must be a valid http(s) link
* anki => --anki_profile is the name of the profile --anki_deck the beginning of the deckname --anki_notetype the beginning of the notetype to keep --anki_fields list of fields to keep
* string => no other parameters needed, will ask to provide a string
* local_audio => needs whisper_prompt and whisper_lang
* json_list => --path is path to a txt file that contains a json for each line containing at least a filetype and a path key/value but can contain any parameters described here
* recursive => --path is the starting path --pattern is the globbing patterns to append --exclude and --include can be a list of regex applying to found paths (include is run first then exclude, if the pattern is only lowercase it will be case insensitive) --recursed_filetype is the filetype to use for each of the found path
* link_file => --path must point to a file where each line is a link that will be summarized. The resulting summary will be added to --out_file. Links that have already been summarized in out_file will be skipped (the out_file is never overwritten). If a line is a markdown link like [this](link) then it will be parsed as a link. Empty lines and lines starting with # are ignored. If argument --out_file_logseq_mode is present, the formatting will be compatible with logseq.
* "infer" => can often be used in the backend to try to guess the proper filetype. Experimental.
--model str, default openai
either gpt4all, llama, openai or fake/test/testing to use a fake answer.
--local_llm_path str
if model is not openai, this needs to point to a compatible model
--embed_model str, default "paraphrase-multilingual-mpnet-base-v2"
Either 'openai' or a sentence_transformers embedding model to use.
If you change this, the embedding cache will usually need to be
recomputed with new elements (the hash used to check for previous
values includes the name of the model)
--saveas str, default .cache/latest_docs_and_embeddings
only used if task is query
save the latest 'inputs' to a file. Can be loaded again with
--loadfrom to speed up loading time. This loads both the
split documents and embeddings but will not update itself if the
original files have changed.
--loadfrom str, default None
path to the file saved using --saveas
--top_k int, default 10
number of chunks to look for when querying
--n_recursive_summary int, default 0
number of times to recursively summarize the summary (0 disables recursive summarization)
--n_summaries_target int, default -1
Only active if query is 'summarize_link_file'. Set a limit to
the number of links that will be summarized. If the number of
TODO in the output is higher, exit. If it's lower, only do the
difference. -1 to disable.
--debug bool, default False
if True, will open a debugger before crashing, use sequential
processing instead of multithreading and enable langchain tracing.
--llm_verbosity, default True
if True, will print the intermediate reasoning steps of LLMs
--help or -h, default False
if True, will return this documentation.
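Example
-------
A hypothetical invocation (adjust the path, filetype and model to your own setup):
python DocToolsLLM.py --task="summarize" --filetype="pdf" --path="./some_paper.pdf" --model="openai"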
"""
if help or h:
print(self.__init__.__doc__)
return
# checking argument validity
assert "loaded_docs" not in kwargs, "'loaded_docs' cannot be an argument as it is used internally"
assert "loaded_embeddings" not in kwargs, "'loaded_embeddings' cannot be an argument as it is used internally"
assert task in ["query", "search", "summarize", "summarize_then_query", "summarize_link_file"], "invalid task value"
assert isinstance(filetype, str), "filetype must be a string"
if task in ["summarize", "summarize_then_query"]:
assert not loadfrom, "can't use loadfrom if task is summary"
assert (task == "summarize_link_file" and filetype == "link_file"
) or (task != "summarize_link_file" and filetype != "link_file"
), "summarize_link_file must be used with filetype link_file"
if task == "summarize_link_file":
assert "path" in kwargs, 'missing path arg for summarize_link_file'
assert "out_file" in kwargs, 'missing "out_file" arg for summarize_link_file'
assert kwargs["out_file"] != kwargs["path"], "can't use same 'path' and 'out_file' arg"
assert "/" not in embed_model, "embed model can't contain slash"
assert isinstance(n_summaries_target, int), "invalid type of n_summaries_target"
for k in kwargs:
if k not in [
"anki_profile", "anki_notetype", "anki_fields", "anki_deck",
"whisper_lang", "whisper_prompt",
"path", "include", "exclude",
"out_file", "out_file_logseq_mode",
"language", "translation",
"out_check_file",
]:
red(f"Found unexpected keyword argument: '{k}'")
if filetype == "string":
top_k = 1
red("Input is 'string' so setting 'top_k' to 1")
# storing as attributes
self.model = model
self.task = task
self.query = query
self.filetype = filetype
self.local_llm_path = local_llm_path
self.embed_model = embed_model
self.saveas = saveas
self.loadfrom = loadfrom
self.top_k = top_k
self.debug = debug
self.kwargs = kwargs
self.llm_verbosity = llm_verbosity
self.n_recursive_summary = n_recursive_summary
self.n_summaries_target = n_summaries_target
if self.debug:
# make the script interruptible
signal.signal(signal.SIGINT, (lambda signal, frame : pdb.set_trace()))
os.environ["LANGCHAIN_TRACING"] = "true"
set_verbose(True)
set_debug(True)
# compile include / exclude regex
if "include" in self.kwargs:
for i, inc in enumerate(self.kwargs["include"]):
if inc == inc.lower():
self.kwargs["include"][i] = re.compile(inc, flags=re.IGNORECASE)
else:
self.kwargs["include"][i] = re.compile(inc)
if "exclude" in self.kwargs:
for i, exc in enumerate(self.kwargs["exclude"]):
if exc == exc.lower():
self.kwargs["exclude"][i] = re.compile(exc, flags=re.IGNORECASE)
else:
self.kwargs["exclude"][i] = re.compile(exc)
# loading llm
self.llm, self.callback = load_llm(model, local_llm_path)
# if task is to summarize lots of links, check first if there are
# links already summarized as it would greatly reduce the number of
# documents to load
if self.task == "summarize_link_file":
if not Path(self.kwargs["out_file"]).exists():
Path(self.kwargs["out_file"]).touch()
with open(self.kwargs["out_file"], "r") as f:
output_content = f.read()
if self.n_summaries_target > 0:
self.n_todos_present = output_content.count("- TODO ")
if "out_check_file" in self.kwargs:
# this is an undocumented feature for the author. It
# allows specifying a second path to check whether
# a document has already been summarized. I use this because
# I made a script that automatically moves my DONE tasks
# from logseq to another nearby file.
assert Path(self.kwargs["out_check_file"]).exists()
with open(self.kwargs["out_check_file"], "r") as f:
output_content += f.read()
# parse just the links already present in the output
doclist = output_content.splitlines()
doclist = [p[1:].strip() if p.startswith("-") else p.strip() for p in doclist]
doclist = [p.strip() for p in doclist if p.strip() and not p.strip().startswith("#") and "http" in p]
links_regex = re.compile(r'(https?://\S+)')
doclist = [re.findall(links_regex, d)[0].strip() if re.search(links_regex, d) else d for d in doclist]
self.done_links = " ".join(doclist)
self.kwargs["done_links"] = doclist
# loading documents
if not loadfrom:
self.loaded_docs = load_doc(
filetype=self.filetype,
debug=self.debug,
task=self.task,
**self.kwargs)
# check that the hash are unique
if len(self.loaded_docs) > 1:
ids = [id(d.metadata) for d in self.loaded_docs]
assert len(ids) == len(set(ids)), (
"Same metadata object is used to store information on "
"multiple documents!")
hashes = [d.metadata["hash"] for d in self.loaded_docs]
removed_paths = []
if len(hashes) != len(set(hashes)):
red("Found duplicate hashes after loading documents:")
for i, doc in enumerate(self.loaded_docs):
n = hashes.count(doc.metadata["hash"])
if n > 1:
if not doc.metadata["path"].startswith("Anki_profile="):
removed_paths.append(self.loaded_docs[i].metadata["path"])
# allow partially removed when it's from anki
self.loaded_docs[i] = None
self.loaded_docs = [d for d in self.loaded_docs if d is not None]
present_path = [d.metadata["path"] for d in self.loaded_docs]
intersect = set(removed_paths).intersection(set(present_path))
if intersect:
red(f"Found {len(intersect)} documents that were only partially removed, this results in incomplete documents.")
for i, inte in enumerate(intersect):
red(f" * #{i + 1}: {inte}")
# raise Exception()
else:
red(f"Removed {len(removed_paths)}/{len(hashes)} documents because they had the same hash")
else:
self.loaded_docs = None # will be loaded when embeddings are loaded
_ = self.process_task()
whi("Done with tasks.")
if self.debug:
breakpoint()
def process_task(self):
red(f"\nProcessing task '{self.task}'")
if self.task in ["summarize_link_file", "summarize", "summarize_then_query"]:
# storing links in dict instead of set to keep the original ordering
links_todo = {}
# failed = []
# get the list of documents from the same source. Also checks if
# it's not part of the output file if task is "summarize_link_file"
if self.task == "summarize_link_file":
for d in self.loaded_docs:
assert "subitem_link" in d.metadata, "missing 'subitem_link' in a doc metadata"
link = d.metadata["subitem_link"]
if link in self.done_links or link in links_todo:
continue
if len(links_todo) < self.n_summaries_target:
links_todo[link] = None
else:
yel("'n_summaries_target' limit reached, will not add more links to summarize for this run.")
break
# comment out the links that are marked as already done
# if self.done_links:
# with open(self.kwargs["path"], "r") as f:
# temp = f.read().split("\n")
# with open(self.kwargs["path"], "w") as f:
# for t in temp:
# for done_link in self.done_links:
# if done_link in t:
# t = f"#already done as of {today}# {t}"
# break
# f.write(t.strip() + "\n")
if self.n_summaries_target > 0:
# allows to run DocTools to summarise from a link file
# only if there are less than 'n_summaries_target' TODOS
# blocks in the target file. This way we can have a
# list of TODOS that will never be larger than this.
# Avoiding both having too many summaries and not enough
# as it allows to run this frequently
n_todos_desired = self.n_summaries_target
assert isinstance(n_todos_desired, int)
if self.n_todos_present >= n_todos_desired:
return red(f"Found {self.n_todos_present} in the output file(s) which is >= {n_todos_desired}. Exiting without summarising.")
else:
self.n_summaries_target = n_todos_desired - self.n_todos_present
red(f"Found {self.n_todos_present} in output file(s) which is under {n_todos_desired}. Will summarize only {self.n_summaries_target}")
assert self.n_summaries_target > 0
while len(links_todo) > self.n_summaries_target:
del links_todo[list(links_todo.keys())[-1]]
# estimate price before summarizing, in case you put the bible in there
docs_tkn_cost = {}
for doc in self.loaded_docs:
meta = doc.metadata["subitem_link"]
if meta in links_todo:
if meta not in docs_tkn_cost:
docs_tkn_cost[meta] = get_tkn_length(doc.page_content)
else:
docs_tkn_cost[meta] += get_tkn_length(doc.page_content)
else:
for d in self.loaded_docs:
links_todo[d.metadata["path"]] = None
assert len(links_todo) == 1, f"Invalid length of links_todo for this task: '{len(links_todo)}'"
docs_tkn_cost = {}
for doc in self.loaded_docs:
meta = doc.metadata["path"]
if meta not in docs_tkn_cost:
docs_tkn_cost[meta] = get_tkn_length(doc.page_content)
else:
docs_tkn_cost[meta] += get_tkn_length(doc.page_content)
full_tkn = sum(list(docs_tkn_cost.values()))
red("Token price of each document:")
for k, v in docs_tkn_cost.items():
red(f"- {v:>6}: {k}")
red(f"Total number of tokens in documents to summarize: '{full_tkn}'")
# a rough, conservative estimate is that summarizing costs about
# 2.4 times the number of tokens of the document
estimate_tkn = 2.4 * full_tkn
if self.n_recursive_summary > 0:
estimate_tkn += sum([full_tkn / ((i + 1) * 4) for i, ii in enumerate(range(self.n_recursive_summary))])
estimate_dol = estimate_tkn / 1000 * 0.0016
red(f"Conservative estimate of the cost to summarize: ${estimate_dol:.4f} for {estimate_tkn} tokens.")
if estimate_dol > 1:
raise Exception(red("Cost estimate > $1 which is absurdly high. Has something gone wrong? Quitting."))
if self.model == "openai":
# increase likelihood that chatgpt will use indentation by
# biasing towards adding space.
logit_val = 4
self.llm.model_kwargs["logit_bias"] = {
12: logit_val, # '-'
# 220: logit_val, # ' '
# 532: logit_val, # ' -'
# 9: logit_val, # '*'
# 1635: logit_val, # ' *'
197: logit_val, # '\t'
334: logit_val * 2, # '**'
25: logit_val, # ':'
551: logit_val, # ' :'
13: -1, # '.'
}
self.llm.model_kwargs["frequency_penalty"] = 0.5
self.llm.model_kwargs["temperature"] = 0.0
def threaded_summary(link, lock):
if self.task == "summarize_link_file":
# get only the docs that match the link
relevant_docs = [d for d in self.loaded_docs if d.metadata["subitem_link"] == link]
else:
relevant_docs = self.loaded_docs
assert relevant_docs, 'Empty relevant_docs!'
# parse metadata from the doc
metadata = []
if "title" in relevant_docs[0].metadata:
# only give to summarizer the top level domain if url
# or the filename if file
if "http" in link:
domain = tldextract.extract(link).registered_domain
item_name = f"{relevant_docs[0].metadata['title'].strip()} - {domain}"
elif "/" in link and Path(link).exists():
filename = Path(link).name
item_name = f"{relevant_docs[0].metadata['title'].strip()} - {filename}"
else:
item_name = f"{relevant_docs[0].metadata['title'].strip()} - {link}"
metadata.append(f"Title: '{item_name.strip()}'")
else:
item_name = link
if "docs_reading_time" in relevant_docs[0].metadata:
doc_reading_length = relevant_docs[0].metadata["docs_reading_time"]
metadata.append(f"Reading length: {doc_reading_length:.1f} minutes")
else:
doc_reading_length = None
if "author" in relevant_docs[0].metadata:
author = relevant_docs[0].metadata["author"].strip()
metadata.append(f"Author: '{author}'")
else:
author = None
# detect language
lang_info = language_detect(relevant_docs[0].page_content.replace("\n", "<br>"))
if lang_info["score"] >= 0.8:
lang = lang_info['lang']
if lang == "fr":
lang = "FRENCH"
else: # prefer english to anything other than french
lang = "ENGLISH"
else:
lang = "ENGLISH"
red(f"Language detection failed: '{lang_info}'")
if metadata:
metadata = "- Text metadata:\n\t- " + "\n\t- ".join(metadata) + "\n"
metadata += "\t- Section number: [PROGRESS]\n"
else:
metadata = ""
# summarize each chunk of the link and return one text
summary, n_chunk, doc_total_tokens, doc_total_cost = do_summarize(
docs=relevant_docs,
metadata=metadata,
language=lang,
model=self.model,
llm=self.llm,
callback=self.callback,
verbose=self.llm_verbosity,
)
# get reading length of the summary
sum_reading_length = len(summary) / average_word_length / wpm
whi(f"{item_name} reading length is {sum_reading_length:.1f}")
n_recursion_done = 0
if self.n_recursive_summary > 0:
splitter = get_splitter(self.task)
summary_text = summary
if metadata:
metadata = metadata.strip() + "\n\t- New task: enhance this summary while respecting the rules\n"
for n_recur in range(self.n_recursive_summary):
red(f"Doing recursive summary #{n_recur} of {item_name}")
# remove any chunk count that is not needed to summarize
sp = summary_text.split("\n")
for i, l in enumerate(sp):
if l.strip() == "- ---":
sp[i] = None
elif re.search(r"- Chunk \d+/\d+", l):
sp[i] = None
summary_text = "\n".join([s.rstrip() for s in sp if s])
summary_docs = [Document(page_content=summary_text)]
summary_docs = splitter.transform_documents(summary_docs)
try:
check_docs_tkn_length(summary_docs, item_name)
except Exception as err:
red(f"Exception when checking if {item_name} could be recursively summarized for the #{n_recur} time: {err}")
break
summary_text, n_chunk, new_doc_total_tokens, new_doc_total_cost = do_summarize(
docs=summary_docs,
metadata=metadata,
language=lang,
model=self.model,
llm=self.llm,
callback=self.callback,
verbose=self.llm_verbosity,
)
doc_total_tokens += new_doc_total_tokens
doc_total_cost += new_doc_total_cost
n_recursion_done += 1
sum_reading_length = len(summary_text) / average_word_length / wpm
whi(f"{item_name} reading length after recursion #{n_recur} is {sum_reading_length:.1f}")
summary = summary_text
with lock:
red(f"\n\nSummary of '{link}':\n{summary}")
red(f"Tokens used for {link}: '{doc_total_tokens}' (${doc_total_cost:.5f})")
if "out_file_logseq_mode" in self.kwargs:
header = f"\n- TODO {item_name}"
header += "\n\tcollapsed:: true"
header += "\n\tblock_type:: DocToolsLLM_summary"
header += f"\n\tDocToolsLLM_version:: {self.VERSION}"
header += f"\n\tDocToolsLLM_model:: {self.model}"
header += f"\n\tDocToolsLLM_parameters:: n_recursion_summary={self.n_recursive_summary};n_recursion_done={n_recursion_done}"
header += f"\n\tsummary_date:: {today}"
header += f"\n\tsummary_timestamp:: {int(time.time())}"
header += f"\n\ttoken_cost:: {doc_total_tokens}"
header += f"\n\tdollar_cost:: {doc_total_cost:.5f}"
header += f"\n\tsummary_reading_time:: {sum_reading_length:.1f}"
header += f"\n\tlink:: {link}"
if doc_reading_length:
header += f"\n\tdoc_reading_time:: {doc_reading_length:.1f}"
header += f"\n\treading_time_prct_speedup:: {int(sum_reading_length/doc_reading_length * 100)}%"
if n_chunk > 1:
header += f"\n\tchunks:: {n_chunk}"
if author:
header += f"\n\tauthor:: {author}"
if lang:
header += f"\n\tlanguage:: {lang}"
else:
header = f"\n- {item_name} cost: {doc_total_tokens} (${doc_total_cost:.5f})"
if doc_reading_length:
header += f" {doc_reading_length:.1f} minutes"
if author:
header += f" by '{author}'"
header += f" original link: '{link}'"
header += f" DocToolsLLM version {self.VERSION} with model {self.model}"
header += f" parameters: n_recursion_summary={self.n_recursive_summary};n_recursion_done={n_recursion_done}"
# save to output file
if "out_file" in self.kwargs:
with lock:
with open(self.kwargs["out_file"], "a") as f:
f.write(header)
for bulletpoint in summary.split("\n"):
f.write("\n")
bulletpoint = bulletpoint.rstrip()
# # make sure the line begins with a bullet point
# if not bulletpoint.lstrip().startswith("- "):
# begin_space = re.search(r"^(\s+)", bulletpoint)
# if not begin_space:
# begin_space = [""]
# bulletpoint = begin_space[0] + "- " + bulletpoint
f.write(f"\t{bulletpoint}")
f.write("\n\n\n")
return {
"link": link,
"sum_reading_length": sum_reading_length,
"doc_reading_length": doc_reading_length,
"doc_total_tokens": doc_total_tokens,
"doc_total_cost": doc_total_cost,
"summary": summary,
}
lock = Lock()
results = Parallel(
n_jobs=3 if not self.debug else 1,
backend="threading",
)(delayed(threaded_summary)(
link=link,
lock=lock,
) for link in tqdm(
links_todo,
desc="Summarizing documents",
# disable=(not len(links_todo) - 1) or self.debug,
colour="magenta",
))
total_tkn_cost = sum([x["doc_total_tokens"] for x in results])
total_dol_cost = sum([x["doc_total_cost"] for x in results])
total_docs_length = sum([x["doc_reading_length"] for x in results])
# total_summary_length = sum([x["sum_reading_length"] for x in results])
red(f"Total cost of this run: '{total_tkn_cost}' (${total_dol_cost:.5f})")
red(f"Total time saved by this run: {total_docs_length:.1f} minutes")
# if "out_file" in self.kwargs:
# # after summarizing all links, append to output file the total cost
# if total_tkn_cost != 0 and total_dol_cost != 0:
# with open(self.kwargs["out_file"], "a") as f:
# f.write(f"- Total cost of this run: '{total_tkn_cost}' (${total_dol_cost:.5f})\n")
# f.write(f"- Total time saved by this run: {total_docs_length - total_summary_length:.1f} minutes\n\n\n")
# and write to input file a summary too
# if "out_file" in self.kwargs:
# try:
# with open(self.kwargs["path"], "a") as f:
# f.write(f"\n\n")
# f.write(f"- Done with summaries of {today}\n")
# f.write(f" - Number of links summarized: {len(links_todo) - len(failed)}/{len(links_todo) + len(self.done_links)}\n")
# if failed:
# f.write(f" - Number of links failed: {len(failed)}:\n")
# for f in failed:
# f.write(f" - {f}\n")
# # f.write(f" - Total cost of this run: '{total_tkn_cost}' (${total_dol_cost:.5f})\n")
# # f.write(f" - Total time saved by this run: plausibly {total_docs_length:.1f} minutes\n")
# except Exception as err:
# red(f"Exception when writing end of run details to input file: '{err}'")
if self.task == "summary_then_query":
whi("Done summarizing. Switching to query mode.")
if self.model == "openai":
del self.llm.model_kwargs["logit_bias"]
else:
whi("Done summarizing. Exiting.")
raise SystemExit()
# load embeddings for querying
self.loaded_embeddings, self.embeddings = load_embeddings(
self.embed_model,
self.loadfrom,
self.saveas,
self.debug,
self.loaded_docs,
self.kwargs)
assert self.task in ["query", "search", "summarize_then_query"]
# set default ask_user argument
multiline = False
# conversational memory
memory = AnswerConversationBufferMemory(
memory_key="chat_history",
return_messages=True)
cli_commands = {
"top_k": self.top_k,
"multiline": multiline,
"retriever": "all",
"task": self.task,
}
while True:
try:
with self.callback() as cb:
if not self.query:
query, cli_commands = ask_user(
"\n\nWhat is your question? (Q to quit)\n",
cli_commands,
)
else:
query = self.query
self.query = None
whi(f"Query: {query}")
retrievers = []
if cli_commands["retriever"] in ["hyde", "all"]:
retrievers.append(
create_hyde_retriever(
query=query,
filetype=self.filetype,
llm=self.llm,
top_k=cli_commands["top_k"],
embed_model=self.embed_model,
embeddings=self.loaded_embeddings,
embeddings_engine=self.embeddings,
loadfrom=self.loadfrom,
kwargs=self.kwargs,
debug=self.debug,
)
)
# retrievers.append(
# KNNRetriever.from_texts(
# [d.page_content for d in self.loaded_docs],
# self.embeddings,
# )
# )
# retrievers.append(
# SVMRetriever.from_texts(
# [d.page_content for d in self.loaded_docs],
# self.embeddings,
# )
# )
# retrievers.append(
# create_parent_retriever(
# task=self.task,
# loaded_embeddings=self.loader_embeddings,
# loaded_docs=self.loaded_docs,
# )
# )
if cli_commands["retriever"] in ["simple", "all"]:
retrievers.append(
self.loaded_embeddings.as_retriever(
search_kwargs={
"k": cli_commands["top_k"],
"distance_metric": "cos",
})
)
if len(retrievers) == 1:
retriever = retrievers[0]
else:
whi("Merging multiple retrievers")
retriever = MergerRetriever(retrievers)
# remove redundant results from the merged retrievers:
filtered = EmbeddingsRedundantFilter(embeddings=self.embeddings)
pipeline = DocumentCompressorPipeline(transformers=[filtered])
retriever = ContextualCompressionRetriever(
base_compressor=pipeline, base_retriever=retriever
)
if self.task == "search":
docs = retriever.get_relevant_documents(query)
else:
_template = textwrap.dedent("""Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:""")
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_with_sources_chain(self.llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=retriever,
question_generator=question_generator,
combine_docs_chain=doc_chain,
return_source_documents=True,
return_generated_question=True,
verbose=self.llm_verbosity,
memory=memory,
)
ans = chain(
inputs={
"question": query,
},
return_only_outputs=False,
include_run_info=True,
)
red(f"Answer:\n{ans['answer']}\n")
docs = ans["source_documents"]
whi("\n\nSources:")
for doc in docs:
whi(" * content:")
content = doc.page_content.strip()
wrapped = "\n".join(textwrap.wrap(content, width=240))
whi(f"{wrapped:>10}")
for k, v in doc.metadata.items():
yel(f" * {k}: {v}")
print("\n")
yel(f"Tokens used: '{cb.total_tokens}' (${cb.total_cost:.5f})")
except Exception as err:
whi(f"Error: '{err}'")
raise
if __name__ == "__main__":
instance = fire.Fire(DocToolsLLM)
| [
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\n Chat History:\n {chat_history}\n\n Follow Up Input: {question}\n\n Standalone question:"
] |
2024-01-10 | thiswillbeyourgithub/DocToolsLLM | utils~file_loader.py | import tldextract
import uuid
import threading
import queue
import copy
import pdb
import time
import tempfile
import requests
import youtube_dl
from youtube_dl.utils import DownloadError, ExtractorError
import random
import shutil
import ankipandas as akp
import ftfy
from bs4 import BeautifulSoup
from goose3 import Goose
from pathlib import Path
import re
from tqdm import tqdm
import json
from prompt_toolkit import prompt
from joblib import Parallel, delayed
import tiktoken
from ftlangdetect import detect as language_detect
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.document_loaders import PyPDFium2Loader
from langchain.document_loaders import PyMuPDFLoader
# from langchain.document_loaders import PDFMinerPDFasHTMLLoader
from langchain.document_loaders import PDFMinerLoader
from langchain.document_loaders import PDFPlumberLoader
from langchain.document_loaders import OnlinePDFLoader
from langchain.document_loaders import YoutubeLoader
from langchain.document_loaders import SeleniumURLLoader
from langchain.document_loaders import PlaywrightURLLoader
from langchain.document_loaders import WebBaseLoader
from langchain.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, HypotheticalDocumentEmbedder
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import LocalFileStore
from langchain.embeddings import CacheBackedEmbeddings
from .misc import loaddoc_cache, html_to_text, hasher
from .logger import whi, yel, red, log
from .llm import RollingWindowEmbeddings, transcribe
# rules used to attribute input to proper filetype. For example
# any link containing youtube will be treated as a youtube link
inference_rules = {
# format:
# key is output filtype, value is list of regex that if match
# will return the key
# the order of the keys is important
"youtube_playlist": ["youtube.*playlist"],
"youtube": ["youtube", "invidi"],
"txt": [".txt$", ".md$"],
"online_pdf": ["^http.*pdf.*"],
"pdf": [".*pdf$"],
"url": ["^http"],
"local_audio": [r".*(mp3|m4a|ogg|flac)$"],
}
# compile the inference rules as regex
for k, v in inference_rules.items():
for i, vv in enumerate(v):
inference_rules[k][i] = re.compile(vv)
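# Illustrative sketch (not used elsewhere in this module) of how the rules above are
# applied by load_doc() below: the first matching pattern, in key order, wins.
def _infer_filetype_example(path):
    "Hypothetical helper mirroring the filetype inference logic of load_doc()."
    for ftype, patterns in inference_rules.items():
        if any(re.search(pattern, path) for pattern in patterns):
            return ftype
    return "infer"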
# for reading length estimation
wpm = 250
average_word_length = 6
clozeregex = re.compile(r"{{c\d+::|}}") # for removing clozes in anki
markdownlink_regex = re.compile(r'\[.*?\]\((.*?)\)') # to parse markdown links
yt_link_regex = re.compile("youtube.*watch") # to check that a youtube link is valid
emptyline_regex = re.compile(r'^\s*$', re.MULTILINE)
emptyline2_regex = re.compile(r'\n\n+', re.MULTILINE)
linebreak_before_letter = re.compile(r'\n([a-záéíóúü])', re.MULTILINE) # match any linebreak that is followed by a lowercase letter
tokenize = tiktoken.encoding_for_model("gpt-3.5-turbo").encode # used to get token length estimation
max_threads = 20
threads = {}
lock = threading.Lock()
n_recursive = 0 # global var to keep track of the number of recursive loading threads. If there are many recursions they can actually get stuck
def get_tkn_length(tosplit):
return len(tokenize(tosplit))
def get_splitter(task):
"we don't use the same text splitter depending on the task"
if task == "query":
text_splitter = RecursiveCharacterTextSplitter(
separators=["\n\n\n\n", "\n\n\n", "\n\n", "\n", " ", ""],
chunk_size=3000, # default 4000
chunk_overlap=386, # default 200
length_function=get_tkn_length,
)
elif task in ["summarize_link_file", "summarize_then_query", "summarize"]:
text_splitter = RecursiveCharacterTextSplitter(
separators=[".\n", ". ", " ", ""],
chunk_size=2048,
chunk_overlap=300,
length_function=get_tkn_length,
)
else:
raise Exception(task)
return text_splitter
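# Illustrative sketch (not called in this module): chunking a single langchain Document
# for the "query" task with the splitter returned above.
def _splitter_example():
    splitter = get_splitter("query")
    doc = Document(page_content="some long text " * 2000)
    return splitter.transform_documents([doc])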
def cloze_stripper(clozed):
clozed = re.sub(clozeregex, " ", clozed)
return clozed
min_token = 200
max_token = 1_000_000
max_lines = 100_000
min_lang_prob = 0.50
def check_docs_tkn_length(docs, name):
"""checks that the number of tokens in the document is high enough,
not too low, and has a high enough language probability,
otherwise something probably went wrong."""
size = sum([get_tkn_length(d.page_content) for d in docs])
nline = len("\n".join([d.page_content for d in docs]).splitlines())
if nline > max_lines:
red(f"Example of page from document with too many lines : {docs[len(docs)//2].page_content}")
raise Exception(f"The number of lines from '{name}' is {nline} > {max_lines}, probably something went wrong?")
if size <= min_token:
red(f"Example of page from document with too many tokens : {docs[len(docs)//2].page_content}")
raise Exception(f"The number of token from '{name}' is {size} <= {min_token}, probably something went wrong?")
if size >= max_token:
red(f"Example of page from document with too many tokens : {docs[len(docs)//2].page_content}")
raise Exception(f"The number of token from '{name}' is {size} >= {max_token}, probably something went wrong?")
# check if language check is above a threshold
prob = language_detect(docs[0].page_content.replace("\n", "<br>"))["score"]
if len(docs) > 1:
prob += language_detect(docs[-1].page_content.replace("\n", "<br>"))["score"]
if len(docs) > 2:
prob += language_detect(docs[len(docs)//2].page_content.replace("\n", "<br>"))["score"]
prob /= 3
else:
prob /= 2
if prob <= min_lang_prob:
red(f"Low language probability for {name}: prob={prob}<{min_lang_prob}.\nExample page: {docs[len(docs)//2]}")
raise Exception(f"Low language probability for {name}: prob={prob}.\nExample page: {docs[len(docs)//2]}")
return prob
def get_url_title(url):
"""if the title of the url is not loaded from the loader, trying as last
resort with this one"""
loader = WebBaseLoader(url, raise_for_status=True)
docs = loader.load()
if "title" in docs[0].metadata and docs[0].metadata["title"]:
return docs[0].metadata["title"]
else:
return None
def load_doc(filetype, debug, task, **kwargs):
"""load the input"""
text_splitter = get_splitter(task)
if "path" in kwargs and isinstance(kwargs["path"], str):
kwargs["path"] = kwargs["path"].strip()
if filetype == "infer":
assert "path" in kwargs, "if filetype is infer, path should be supplied"
for k, v in inference_rules.items():
for vv in inference_rules[k]:
if re.search(vv, kwargs["path"]):
filetype = k
break
if filetype != "infer":
break
if filetype in ["json_list", "recursive", "link_file", "youtube_playlist"]:
assert "path" in kwargs, "missing 'path' key in args"
path = kwargs["path"]
if filetype == "recursive":
whi(f"Loading recursive filetype: '{path}'")
assert "pattern" in kwargs, "missing 'pattern' key in args"
assert "recursed_filetype" in kwargs, "missing 'recursed_filetype' in args"
assert kwargs["recursed_filetype"] not in [
"recursive", "json_list", "youtube", "anki",
], "'recursed_filetype' cannot be 'recursive', 'json_list', 'anki' or 'youtube'"
pattern = kwargs["pattern"]
doclist = [p for p in Path(path).rglob(pattern)]
doclist = [str(p).strip() for p in doclist if p.is_file()]
doclist = [p for p in doclist if p]
doclist = [p[1:].strip() if p.startswith("-") else p.strip() for p in doclist]
# randomize order to even out the progress bar
doclist = sorted(doclist, key=lambda x: random.random())
def threaded_load_item(filetype, item, kwargs, pbar, q, lock):
kwargs["path"] = item
kwargs["filetype"] = kwargs["recursed_filetype"]
assert Path(kwargs["path"]).exists(), f"file '{item}' does not exist"
del kwargs["pattern"]
try:
res = load_doc(
task=task,
debug=debug,
**kwargs,
)
with lock:
pbar.update(1)
q.put(res)
return res
except Exception as err:
red(f"Error when loading '{item}': '{err}'")
if debug:
pdb.post_mortem()
else:
with lock:
pbar.update(1)
q.put(f"{item}: {err}")
return item
elif filetype == "json_list":
whi(f"Loading json_list: '{path}'")
doclist = str(Path(path).read_text()).splitlines()
doclist = [p[1:].strip() if p.startswith("-") else p.strip() for p in doclist]
doclist = [p.strip() for p in doclist if p.strip() and not p.strip().startswith("#")]
def threaded_load_item(filetype, item, kwargs, pbar, q, lock):
meta = json.loads(item.strip())
for k, v in kwargs.items():
if k not in meta:
meta[k] = v
assert isinstance(meta, dict), f"meta from line '{item}' is not dict but '{type(meta)}'"
assert "filetype" in meta, "no key 'filetype' in meta"
try:
res = load_doc(
task=task,
debug=debug,
**meta,
)
with lock:
pbar.update(1)
q.put(res)
return res
except Exception as err:
red(f"Error when loading '{item}': '{err}'")
if debug:
pdb.post_mortem()
else:
with lock:
pbar.update(1)
q.put(f"{item}: {err}")
return item
elif filetype == "link_file":
whi(f"Loading link_file: '{path}'")
doclist = str(Path(path).read_text()).splitlines()
doclist = [p[1:].strip() if p.startswith("-") else p.strip() for p in doclist]
doclist = [p.strip() for p in doclist if p.strip() and not p.strip().startswith("#") and "http" in p]
doclist = [re.findall(markdownlink_regex, d)[0] if re.search(markdownlink_regex, d) else d for d in doclist]
if task == "summarize_link_file":
# if summarize, start from bottom
doclist.reverse()
if "done_links" in kwargs:
# discard any links that are already present in the output
doclist = [d for d in doclist if d not in kwargs["done_links"]]
def threaded_load_item(filetype, item, kwargs, pbar, q, lock):
kwargs["path"] = item
if "http" not in item:
red(f"item does not appear to be a link: '{item}'")
q.put(f"{item}: does not appear to be a link")
return item
kwargs["filetype"] = "infer"
kwargs["subitem_link"] = item
try:
res = load_doc(
task=task,
debug=debug,
**kwargs,
)
with lock:
pbar.update(1)
q.put(res)
return res
except Exception as err:
red(f"Error when loading '{item}': '{err}'")
if debug:
pdb.post_mortem()
else:
with lock:
pbar.update(1)
q.put(f"{item}: {err}")
return item
elif filetype == "youtube_playlist":
assert "path" in kwargs, "missing 'path' key in args"
path = kwargs["path"]
whi(f"Loading youtube playlist: '{path}'")
video = load_youtube_playlist(path)
kwargs["playlist_title"] = video['title'].strip().replace("\n", "")
assert "duration" not in video, f'"duration" found when loading youtube playlist. This might not be a playlist: {path}'
doclist = [ent["webpage_url"] for ent in video["entries"]]
doclist = [li for li in doclist if re.search(yt_link_regex, li)]
def threaded_load_item(filetype, item, kwargs, pbar, q, lock):
kwargs["path"] = item
assert "http" in item, f"item does not appear to be a link: '{item}'"
kwargs["filetype"] = "youtube"
kwargs["subitem_link"] = item
try:
res = load_doc(
task=task,
debug=debug,
**kwargs,
)
with lock:
pbar.update(1)
q.put(res)
return res
except Exception as err:
red(f"Error when loading '{item}': '{err}'")
if debug:
pdb.post_mortem()
else:
with lock:
pbar.update(1)
q.put(f"{item}: {err}")
return item
else:
raise ValueError(filetype)
if "include" in kwargs:
for i, d in enumerate(doclist):
keep = True
for inc in kwargs["include"]:
if not re.search(inc, d):
keep = False
if not keep:
doclist[i] = None
doclist = [d for d in doclist if d]
del kwargs["include"]
if "exclude" in kwargs:
for exc in kwargs["exclude"]:
doclist = [d for d in doclist if not re.search(exc, d)]
del kwargs["exclude"]
# remove duplicate documents
temp = []
for d in doclist:
if d in temp:
red(f"Removed document {d} (duplicate)")
else:
temp.append(d)
doclist = temp
assert doclist, f"empty list of documents to load from filetype '{filetype}'"
q = queue.Queue()
global threads, lock, n_recursive
if "depth" in kwargs:
depth = kwargs["depth"]
kwargs["depth"] += 1
else:
depth = 0
kwargs["depth"] = 1
# if debugging, don't multithread
if not debug:
message = f"Loading documents using {max_threads} threads (depth={depth})"
pbar = tqdm(total=len(doclist), desc=message)
recursion_id = str(uuid.uuid4())
for doc in doclist:
thread = threading.Thread(
target=threaded_load_item,
args=(filetype, doc, kwargs.copy(), pbar, q, lock),
daemon=True, # exit when the main program exits
)
if depth > 0 and sum([t.is_alive() for t in threads.values() if t.is_started]) > max_threads:
thread.is_started = False
else:
if depth == 0:
n_recursive += 1
thread.start()
thread.is_started = True
thread.recursion_id = recursion_id
assert doc not in threads, f"{doc} already present as thread"
with lock:
threads[doc] = thread
# waiting for threads to finish
with lock:
n_threads_alive = sum([t.is_alive() for t in threads.values() if t.is_started])
n_subthreads_alive = sum([t.is_alive() for t in threads.values() if t.is_started and t.recursion_id == recursion_id])
n_subthreads_todo = len([t for t in threads.values() if not t.is_started and t.recursion_id == recursion_id])
i = 0
while n_subthreads_alive or n_subthreads_todo:
with lock:
n_subthreads_alive = sum([t.is_alive() for t in threads.values() if t.is_started and t.recursion_id == recursion_id])
n_threads_alive = sum([t.is_alive() for t in threads.values() if t.is_started])
n_subthreads_todo = len([t for t in threads.values() if not t.is_started and t.recursion_id == recursion_id])
if n_threads_alive < max_threads + n_recursive and n_subthreads_todo:
# launch one more thread
[t for t in threads.values() if not t.is_started and t.recursion_id == recursion_id][0].start()
[t for t in threads.values() if not t.is_started and t.recursion_id == recursion_id][0].is_started = True
continue
time.sleep(1)
# display progress every 10s
i += 1
if i % 10 == 0:
with lock:
doc_print = [k for k, v in threads.items() if v.is_alive() and v.recursion_id == recursion_id]
for ii, d in enumerate(doc_print):
d = d.strip()
if d.startswith("http"): # print only domain name
doc_print[ii] = tldextract.extract(d).registered_domain
continue
if d.startswith("{") and d.endswith("}"):
# print only path if recursive
try:
doc_print[ii] = json.loads(d)["path"].replace("../", "")
continue
except:
try: # for other recursion, show all key:values
temp = json.loads(d)
doc_print[ii] = ""
for k, v in temp.items():
doc_print[ii] += f"{k}:{v},"
doc_print[ii] = doc_print[ii][:-1] # remove comma
except:
pass
if "/" in d:
# print filename
try:
doc_print[ii] = Path(d).name
continue
except:
pass
whi(f"(Depth={depth}) Waiting for {n_subthreads_alive} subthreads to finish: {','.join(doc_print)}")
# check that all its subthreads are done
with lock:
assert sum([t.is_alive() for t in threads.values() if t.is_started and t.recursion_id == recursion_id]) == 0
assert len([t for t in threads.values() if not t.is_started and t.recursion_id == recursion_id]) == 0
# remove old finished threads
threads = {k: t for k, t in threads.items() if t.recursion_id != recursion_id}
# get the values from the queue
results = []
failed = []
while not q.empty():
doc = q.get()
if not isinstance(doc, str):
results.append(doc)
else:
# when failed: we returned the name of the item
failed.append(doc)
else:
message = "Loading documents without multithreading because debug is on"
pbar = tqdm(total=len(doclist), desc=message)
temp = []
for doc in doclist:
res = threaded_load_item(
filetype,
doc,
kwargs.copy(),
pbar,
q,
lock,
)
temp.append(res)
# get the values from the queue
results = []
failed = []
for doc in temp:
if not isinstance(doc, str):
results.append(doc)
else:
# when failed: we returned the name of the item
failed.append(doc)
n = len(doclist) - len(results)
if depth == 0 and failed:
red(f"List of {n} failed documents:\n")
for f in sorted(failed):
red(f"* {f}")
assert results, "Empty results after loading documents"
assert n == len(failed), "Unexpected number of failed documents"
docs = []
[docs.extend(x) for x in results if x]
pbar.close()
size = sum([get_tkn_length(d.page_content) for d in docs])
if size <= min_token:
raise Exception(f"The number of token from '{path}' is {size} <= {min_token} tokens, probably something went wrong?")
elif filetype == "youtube":
assert "path" in kwargs, "missing 'path' key in args"
path = kwargs["path"]
if "\\" in path:
red(f"Removed backslash found in '{path}'")
path = path.replace("\\", "")
assert re.search(yt_link_regex, path), f"youtube link is not valid: '{path}'"
if "language" not in kwargs:
lang = ["fr-FR", "fr", "en", "en-US", "en-UK"]
else:
lang = kwargs["language"]
if "translation" not in kwargs:
transl = "en"
else:
transl = kwargs["translation"]
whi(f"Loading youtube: '{path}'")
fyu = YoutubeLoader.from_youtube_url
docs = cached_yt_loader(
loader=fyu,
path=path,
add_video_info=True,
language=lang,
translation=transl,
)
docs = text_splitter.transform_documents(docs)
elif filetype == "online_pdf":
assert "path" in kwargs, "missing 'path' key in args"
path = kwargs["path"]
whi(f"Loading online pdf: '{path}'")
try:
loader = OnlinePDFLoader(path)
docs = loader.load()
docs = text_splitter.transform_documents(docs)
check_docs_tkn_length(docs, path)
except Exception as err:
red(f"Failed parsing online PDF {path} using only OnlinePDFLoader. Will try downloading it directly.")
response = requests.get(path)
with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as temp_file:
temp_file.write(response.content)
temp_file.flush()
meta = kwargs.copy()
meta["filetype"] = "pdf"
meta["path"] = temp_file.name
try:
return load_doc(
task=task,
debug=debug,
**meta,
)
except Exception as err:
red(f"Error when parsing online pdf from {path} downloaded to {temp_file.name}: '{err}'")
raise
elif filetype == "pdf":
assert "path" in kwargs, "missing 'path' key in args"
path = kwargs["path"]
whi(f"Loading pdf: '{path}'")
assert Path(path).exists(), f"file not found: '{path}'"
docs = cached_pdf_loader(
path=path,
text_splitter=text_splitter,
splitter_chunk_size=text_splitter._chunk_size,
debug=debug
)
elif filetype == "anki":
for nk in ["anki_deck", "anki_notetype", "anki_profile", "anki_fields"]:
assert nk in kwargs, f"Missing '{nk}' in arguments from load_doc"
profile = kwargs["anki_profile"]
deck = kwargs["anki_deck"]
notetype = kwargs["anki_notetype"]
fields = kwargs["anki_fields"]
whi(f"Loading anki profile: '{profile}'")
original_db = akp.find_db(user=profile)
name = f"{profile}".replace(" ", "_")
random_val = str(uuid.uuid4()).split("-")[-1]
new_db_path = f"./.cache/anki_collection_{name.replace('/', '_')}_{random_val}"
assert not Path(new_db_path).exists(), f"{new_db_path} already existing!"
shutil.copy(original_db, new_db_path)
col = akp.Collection(path=new_db_path)
cards = col.cards.merge_notes()
cards.loc[cards['codeck']=="", 'codeck'] = cards['cdeck'][cards['codeck']==""]
cards["codeck"] = cards["codeck"].apply(lambda x: x.replace("\x1f", "::"))
cards = cards[cards["codeck"].str.startswith(deck)]
cards["nmodel"] = cards["nmodel"].apply(lambda x: x.lower())
cards = cards[cards["nmodel"].str.startswith(notetype)]
cards["mid"] = col.cards.mid.loc[cards.index]
mid2fields = akp.raw.get_mid2fields(col.db)
mod2mid = akp.raw.get_model2mid(col.db)
cards["fields_name"] = cards["mid"].apply(lambda x: mid2fields[x])
assert cards.index.tolist(), "empty dataframe!"
cards["fields_dict"] = cards.apply(
lambda x: {
k: html_to_text(cloze_stripper(v), issoup=False).strip()
for k, v in zip(x["fields_name"], x["nflds"])
if k.lower() in fields
},
axis=1)
cards["text"] = cards["fields_dict"].apply(
lambda x: "\n".join(
f"{k}: {x[k]}" for k in fields
if x[k]
))
cards = cards[~cards["text"].str.contains("[IMAGE]")]
cards["text"] = cards["text"].apply(lambda x: x.strip())
cards.drop_duplicates(subset="text", inplace=True)
cards = cards.sort_index()
docs = []
# # load each card as a single document
# for cid in cards.index:
# c = cards.loc[cid, :]
# docs.append(
# Document(
# page_content=c["text"],
# metadata={
# "anki_tags": " ".join(c["ntags"]),
# "cid": cid,
# }
# )
# )
# # turn all cards into a single wall of text then use text_splitter
# pro: fill the context window as much I possible I guess
# con: - editing cards will force re-embedding a lot of cards
# - ignores tags
chunksize = text_splitter._chunk_size
full_text = ""
spacer = "\n\n#####\n\n"
metadata = {"anki_tags": "", "anki_cid": "", "anki_deck": ""}
for cid in cards.index:
c = cards.loc[cid, :]
tags = c["ntags"]
text = ftfy.fix_text(c["text"].strip())
card_deck = c["codeck"]
assert card_deck, f"empty card_deck for cid {cid}"
if not full_text: # always add first
full_text = text
metadata = {"anki_tags": " ".join(tags), "anki_cid": cid, "anki_deck": card_deck}
continue
# if too many token, add the current chunk of text and start
# the next chunk with this card
if get_tkn_length(full_text + spacer + text) >= chunksize:
assert full_text, f"An anki card is too large for the text splitter: {text}"
assert metadata["anki_cid"], "No anki_cid in metadata"
docs.append(
Document(
page_content=full_text,
metadata=metadata,
)
)
metadata = {"anki_tags": " ".join(tags), "anki_cid": cid, "anki_deck": card_deck}
full_text = text
else:
for t in tags:
if t not in metadata["anki_tags"]:
metadata["anki_tags"] += f" {t}"
metadata["anki_cid"] += " " + cid
if card_deck not in metadata["anki_deck"]:
metadata["anki_deck"] += " " + card_deck
full_text += spacer + text
if full_text: # add latest chunk
docs.append(
Document(
page_content=full_text,
metadata=metadata,
)
)
# # set window_size to X turn each X cards into one document, overlapping
# window_size = 5
# index_list = cards.index.tolist()
# n = len(index_list)
# cards["text_concat"] = ""
# cards["tags_concat"] = ""
# cards["ntags_t"] = cards["ntags"].apply(lambda x: " ".join(x))
# for i in tqdm(range(len(index_list)), desc="combining anki cards"):
# text_concat = ""
# tags_concat = ""
# skip = 0
# for w in range(0, window_size):
# if i + window_size + skip >= n:
# s = -1 # when at the end of the list, apply the window in reverse
# # s for 'sign'
# else:
# s = 1
# if cards.at[index_list[i+w*s], "text"] in cards.at[index_list[i], "text_concat"]:
# # skipping this card because it's a duplicate
# skip += 1
# text_concat += "\n\n" + cards.at[index_list[i+(w+skip)*s], "text"]
# tags_concat += cards.at[index_list[i+(w+skip)*s], "ntags_t"]
# cards.at[index_list[i], "text_concat"] = text_concat
# cards.at[index_list[i], "tags_concat"] = tags_concat
# for cid in cards.index:
# c = cards.loc[cid, ]
# docs.append(
# Document(
# page_content=c["text_concat"].strip(),
# metadata={
# "anki_tags": " ".join(list(set(c["tags_concat"].split(" ")))),
# "cid": cid,
# }
# )
# )
assert docs, "List of loaded anki document is empty!"
path = f"Anki_profile='{profile}',deck='{deck}'notetype={notetype},fields={','.join(fields)}"
for i in range(len(docs)):
docs[i].metadata["anki_profile"] = profile
docs[i].metadata["anki_topdeck"] = deck
docs[i].metadata["anki_notetype"] = notetype
docs[i].metadata["path"] = path
# try:
# check_docs_tkn_length(docs, f"{filetype}: {profile}")
# except Exception as err:
# red(f"Number of token in anki document is surprising. Not quitting because anki can cause this: '{err}'")
# delete temporary db file
Path(new_db_path).unlink()
Path(new_db_path + "-shm").unlink(missing_ok=True)
Path(new_db_path + "-wal").unlink(missing_ok=True)
elif filetype == "string":
whi("Loading string")
content = prompt(
"Paste your text content here then press esc+enter or meta+enter:\n>",
multiline=True,
)
log.info(f"Pasted string input:\n{content}")
texts = text_splitter.split_text(content)
docs = [Document(page_content=t) for t in texts]
path = "user_string"
elif filetype == "txt":
assert "path" in kwargs, "missing 'path' key in args"
path = kwargs["path"]
whi(f"Loading txt: '{path}'")
assert Path(path).exists(), f"file not found: '{path}'"
with open(path) as f:
content = f.read()
texts = text_splitter.split_text(content)
docs = [Document(page_content=t) for t in texts]
check_docs_tkn_length(docs, path)
elif filetype == "local_audio":
assert "path" in kwargs, "missing 'path' key in args"
path = kwargs["path"]
assert Path(path).exists(), f"file not found: '{path}'"
cache_transcribe = loaddoc_cache.cache(transcribe, ignore=["audio_path"])
assert "whisper_lang" in kwargs, (
f"No whisper_lang argument found in kwargs but is needed "
f"to transcribe '{path}'")
assert "whisper_prompt" in kwargs, (
f"No whisper_prompt argument found in kwargs but is needed "
f"to transcribe '{path}'")
# get audio hash
with open(path, "rb") as f:
audio_hash = hasher(str(f.read()))
content = cache_transcribe(
audio_path=path,
audio_hash=audio_hash,
language=kwargs["whisper_lang"],
prompt=kwargs["whisper_prompt"],
)
texts = text_splitter.split_text(content["text"])
docs = [
Document(
page_content=t,
metadata={
"duration": content["duration"],
"language": content["language"],
"whisper_task": content["task"],
"source": path,
},
)
for t in texts]
check_docs_tkn_length(docs, path)
elif filetype == "url":
assert "path" in kwargs, "missing 'path' key in args"
path = kwargs["path"]
whi(f"Loading url: '{path}'")
# even if loading fails the title might be found so trying to keep
# the first working title across trials
if "title" not in kwargs or kwargs["title"] == "Untitled":
title = None
else:
title = kwargs["title"]
# try with playwright
try:
loader = PlaywrightURLLoader(urls=[path], remove_selectors=["header", "footer"])
docs = text_splitter.transform_documents(loader.load())
assert docs, f"Empty docs when using playwright"
if not title and "title" in docs[0].metadata:
title = docs[0].metadata["title"]
check_docs_tkn_length(docs, path)
# try with selenium firefox
except Exception as err:
red(f"Exception when using playwright to parse text: '{err}'\nUsing selenium firefox as fallback")
time.sleep(1)
try:
loader = SeleniumURLLoader(urls=[path], browser="firefox")
docs = text_splitter.transform_documents(loader.load())
assert docs, f"Empty docs when using selenium firefox"
if not title and "title" in docs[0].metadata and docs[0].metadata["title"] != "No title found.":
title = docs[0].metadata["title"]
check_docs_tkn_length(docs, path)
# try with selenium chrome
except Exception as err:
red(f"Exception when using selenium firefox to parse text: '{err}'\nUsing selenium chrome as fallback")
time.sleep(1)
try:
loader = SeleniumURLLoader(urls=[path], browser="chrome")
docs = text_splitter.transform_documents(loader.load())
assert docs, f"Empty docs when using selenium chrome"
if not title and "title" in docs[0].metadata and docs[0].metadata["title"] != "No title found.":
title = docs[0].metadata["title"]
check_docs_tkn_length(docs, path)
# try with goose
except Exception as err:
red(f"Exception when using selenium chrome to parse text: '{err}'\nUsing goose as fallback")
time.sleep(1)
try:
g = Goose()
article = g.extract(url=path)
text = article.cleaned_text
texts = text_splitter.split_text(text)
docs = [Document(page_content=t) for t in texts]
assert docs, f"Empty docs when using goose"
if not title:
if "title" in docs[0].metadata and docs[0].metadata["title"]:
title = docs[0].metadata["title"]
elif article.title:
title = article.title
check_docs_tkn_length(docs, path)
# try with html
except Exception as err:
red(f"Exception when using goose to parse text: '{err}'\nUsing html as fallback")
time.sleep(1)
loader = WebBaseLoader(path, raise_for_status=True)
docs = text_splitter.transform_documents(loader.load())
assert docs, f"Empty docs when using html"
if not title and "title" in docs[0].metadata and docs[0].metadata["title"]:
title = docs[0].metadata["title"]
check_docs_tkn_length(docs, path)
# last resort, try to get the title from the most basic loader
if not title:
title = get_url_title(path)
# store the title as metadata if missing
if title:
for d in docs:
if "title" not in d.metadata or not d.metadata["title"]:
d.metadata["title"] = title
else:
if d.metadata["title"] != title:
d.metadata["title"] = f"{title} - {d.metadata['title']}"
else:
raise Exception(red(f"Unsupported filetype: '{filetype}'"))
# add and format metadata
total_reading_length = None
for i in range(len(docs)):
# if html, parse it
soup = BeautifulSoup(docs[i].page_content, "html.parser")
if bool(soup.find()):
docs[i].page_content = html_to_text(soup, issoup=True)
# fix text just in case
docs[i].page_content = ftfy.fix_text(docs[i].page_content)
if "hash" not in docs[i].metadata or not docs[i].metadata["hash"]:
docs[i].metadata["hash"] = hasher(docs[i].page_content)
if "Author" in docs[i].metadata:
docs[i].metadata["author"] = docs[i].metadata["Author"]
del docs[i].metadata["Author"]
if "authors" in docs[i].metadata:
docs[i].metadata["author"] = docs[i].metadata["authors"]
del docs[i].metadata["authors"]
if "Authors" in docs[i].metadata:
docs[i].metadata["author"] = docs[i].metadata["Authors"]
del docs[i].metadata["Authors"]
if "filetype" not in docs[i].metadata:
docs[i].metadata["filetype"] = filetype
if "path" not in docs[i].metadata and "path" in locals():
docs[i].metadata["path"] = path
if "subitem_link" in kwargs and "subitem_link" not in docs[i].metadata:
docs[i].metadata["subitem_link"] = kwargs["subitem_link"]
if "title" not in docs[i].metadata or docs[i].metadata["title"] == "Untitled":
if "title" in kwargs and kwargs["title"] and kwargs["title"] != "Untitled":
docs[i].metadata["title"] = kwargs["title"]
elif "http" in docs[i].metadata["path"].lower():
docs[i].metadata["title"] = get_url_title(docs[i].metadata["path"])
if not docs[i].metadata["title"]:
docs[i].metadata["title"] = "Untitled"
red(f"Could not get title from {path}")
if "title" in kwargs and kwargs["title"] != docs[i].metadata["title"] and kwargs["title"] not in docs[i].metadata["title"]:
docs[i].metadata["title"] += " - " + kwargs["title"]
if "playlist_title" in kwargs:
docs[i].metadata["title"] = kwargs["playlist_title"] + " - " + docs[i].metadata["title"]
if "docs_reading_time" not in docs[i].metadata:
if not total_reading_length:
total_reading_length = sum([len(d.page_content) for d in docs]) / average_word_length / wpm
assert total_reading_length > 0.5, f"Failing doc: total reading length is suspiciously low for {docs[i].metadata}"
docs[i].metadata["docs_reading_time"] = total_reading_length
if "source" not in docs[i].metadata:
if "path" in docs[i].metadata:
docs[i].metadata["source"] = docs[i].metadata["path"]
else:
docs[i].metadata["source"] = docs[i].metadata["title"]
assert docs, "empty list of loaded documents!"
docs = [d for d in docs if d.page_content]
assert docs, "empty list of loaded documents after removing empty docs!"
return docs
def load_embeddings(embed_model, loadfrom, saveas, debug, loaded_docs, kwargs):
"""loads embeddings for each document"""
if embed_model == "openai":
red("Using openai embedding model")
assert Path("API_KEY.txt").exists(), "No API_KEY.txt found"
embeddings = OpenAIEmbeddings(
openai_api_key = str(Path("API_KEY.txt").read_text()).strip()
)
else:
embeddings = RollingWindowEmbeddings(
model_name=embed_model,
encode_kwargs={
"batch_size": 1,
"show_progress_bar": True,
"normalize_embeddings": True,
},
)
lfs = LocalFileStore(f".cache/embeddings/{embed_model}")
cache_content = list(lfs.yield_keys())
red(f"Found {len(cache_content)} embeddings in local cache")
# cached_embeddings = embeddings
cached_embeddings = CacheBackedEmbeddings.from_bytes_store(
embeddings,
lfs,
namespace=embed_model,
)
# reload passed embeddings
if loadfrom:
red("Reloading documents and embeddings from file")
path = Path(loadfrom)
assert path.exists(), f"file not found at '{path}'"
db = FAISS.load_local(str(path), cached_embeddings)
return db, cached_embeddings
red("\nLoading embeddings.")
docs = loaded_docs
if len(docs) >= 50:
docs = sorted(docs, key=lambda x: random.random())
embeddings_cache = Path(f".cache/faiss_embeddings/{embed_model}")
embeddings_cache.mkdir(exist_ok=True)
t = time.time()
whi(f"Creating FAISS index for {len(docs)} documents")
in_cache = [p for p in embeddings_cache.iterdir()]
whi(f"Found {len(in_cache)} embeddings in cache")
db = None
to_embed = []
# load previous faiss index from cache
for doc in tqdm(docs, desc="Loading embeddings from cache"):
fi = embeddings_cache / str(doc.metadata["hash"] + ".faiss_index")
if fi.exists():
temp = FAISS.load_local(fi, cached_embeddings)
if not db and temp:
db = temp
else:
try:
db.merge_from(temp)
except Exception as err:
red(f"Error when loading cache from {fi}: {err}\nDeleting {fi}")
[p.unlink() for p in fi.iterdir()]
fi.rmdir()
else:
to_embed.append(doc)
whi(f"Docs left to embed: {len(to_embed)}")
# check price of embedding
full_tkn = sum([get_tkn_length(doc.page_content) for doc in to_embed])
red(f"Total number of tokens in documents (not checking if already present in cache): '{full_tkn}'")
if embed_model == "openai":
dol_price = full_tkn * 0.0001 / 1000
red(f"With OpenAI embeddings, the total cost for all tokens is ${dol_price:.4f}")
if dol_price > 1:
ans = input(f"Do you confirm you are okay to pay this? (y/n)\n>")
if ans.lower() not in ["y", "yes"]:
red("Quitting.")
raise SystemExit()
# create a faiss index for batch of documents, then save them
# as 1 document faiss index to cache
if to_embed:
batch_size = 1000
batches = [
[i * batch_size, (i + 1) * batch_size]
for i in range(len(to_embed) // batch_size + 1)
]
pbar = tqdm(total=len(to_embed), desc="Saving to cache")
for batch in tqdm(batches, desc="Embedding by batch"):
temp = FAISS.from_documents(
to_embed[batch[0]:batch[1]],
cached_embeddings,
normalize_L2=True
)
recursive_faiss_saver(temp, to_embed[batch[0]:batch[1]], embeddings_cache, 0, pbar)
if not db:
db = temp
else:
db.merge_from(temp)
# to get vectors from a faiss index
# vecs = faiss.rev_swig_ptr(temp.index.get_xb(), len(to_embed) * temp.index.d).reshape(len(to_embed), temp.index.d)
whi(f"Done creating index in {time.time()-t:.2f}s")
# saving embeddings
if saveas:
db.save_local(saveas)
return db, cached_embeddings
def recursive_faiss_saver(index, documents, path, depth, pbar):
"""split the faiss index by hand into 1 docstore index and save
it to cache. To split it, as the copy.deepcopy is long we
use a recursive call to only copy fewer times the full index"""
doc_ids = [k for k in index.docstore._dict.keys()]
assert doc_ids, "unexpected empty doc_ids"
n = 10
threads = []
le = len(doc_ids)
nn = len(doc_ids) // n
if depth:
spacer = " " * depth * 2
else:
spacer = ""
info = f"(n={n}, nn={nn}, le={le}, d={depth})"
if nn > n: # more than 1 order of magnitude
for i in range(len(doc_ids) // nn + 1):
whi(f"{spacer}Creating larger subindex #{i} {info}")
sub_index = copy.deepcopy(index)
sub_docids = doc_ids[i * nn: (i + 1) * nn]
to_del = [d for d in doc_ids if d not in sub_docids]
if not to_del or not sub_docids:
continue
sub_index.delete(to_del)
threads.extend(
recursive_faiss_saver(
sub_index, documents[i * nn:(i + 1) * nn], path, depth + 1, pbar)
)
elif len(doc_ids) > n:
for i in range(len(doc_ids) // n + 1):
whi(f"{spacer}Creating subindex #{i} {info}")
sub_index = copy.deepcopy(index)
sub_docids = doc_ids[i * n: (i + 1) * n]
to_del = [d for d in doc_ids if d not in sub_docids]
if not to_del or not sub_docids:
continue
sub_index.delete(to_del)
threads.extend(
recursive_faiss_saver(
sub_index, documents[i * n:(i + 1) * n], path, depth + 1, pbar)
)
while sum([t.is_alive() for t in threads]) > 3 * n:
time.sleep(0.1)
else:
for i, did in enumerate(doc_ids):
whi(f"{spacer}Saving {documents[i].metadata['hash']}.faiss_index {info}")
to_del = [d for d in doc_ids if d != did]
if not to_del:
continue
file = (path / str(documents[i].metadata["hash"] + ".faiss_index"))
assert not file.exists(), "cache file already exists!"
thread = threading.Thread(
target=save_one_index,
args=(copy.deepcopy(index), to_del, file, pbar),
)
thread.start()
threads.append(thread)
return threads
[t.join() for t in threads]
return []
def save_one_index(index, to_del, file, pbar):
index.delete(to_del)
index.save_local(file)
pbar.update(1)
@loaddoc_cache.cache
def load_youtube_playlist(playlist_url):
with youtube_dl.YoutubeDL({"quiet": False}) as ydl:
try:
loaded = ydl.extract_info(playlist_url, download=False)
except (KeyError, DownloadError, ExtractorError) as e:
raise Exception(red(f"ERROR: Youtube playlist link skipped because : error during information \
extraction from {playlist_url} : {e}"))
return loaded
@loaddoc_cache.cache(ignore=["loader"])
def cached_yt_loader(loader, path, add_video_info, language, translation):
yel(f"Not using cache for youtube {path}")
docs = loader(
path,
add_video_info=add_video_info,
language=language,
translation=translation,
).load()
return docs
@loaddoc_cache.cache(ignore=["text_splitter"])
def cached_pdf_loader(path, text_splitter, splitter_chunk_size, debug):
assert splitter_chunk_size == text_splitter._chunk_size, "unexpected error"
loaders = {
"PDFMiner": PDFMinerLoader,
"PyPDFLoader": PyPDFLoader,
# "Unstructured": UnstructuredPDFLoader,
"PyPDFium2": PyPDFium2Loader,
"PyMuPDF": PyMuPDFLoader,
# "PdfPlumber": PDFPlumberLoader,
}
loaded_docs = {}
# using language detection to keep the parsing with the highest lang
# probability
probs = {}
for loader_name, loader_func in loaders.items():
try:
if debug:
red(f"Trying to parse {path} using {loader_name}")
loader = loader_func(path)
content = loader.load()
content = "\n".join([d.page_content.strip() for d in content])
# remove empty lines. frequent in pdfs
content = re.sub(emptyline_regex, '', content)
content = re.sub(emptyline2_regex, '\n', content)
content = re.sub(linebreak_before_letter, r'\1', content)
texts = text_splitter.split_text(content)
docs = [Document(page_content=t) for t in texts]
prob = check_docs_tkn_length(docs, path)
probs[loader_name] = prob
loaded_docs[loader_name] = docs
except Exception as err:
red(f"Error when parsing '{path}' with {loader_name}: {err}")
# no loader worked, exiting
if not loaded_docs:
raise Exception(f"No pdf parser worked for {path}")
max_prob = max([v for v in probs.values()])
if debug:
red(f"Language probability after parsing {path}: {probs}")
return loaded_docs[[name for name in probs if probs[name] == max_prob][0]]
def create_hyde_retriever(
query,
filetype,
llm,
top_k,
embed_model,
embeddings,
embeddings_engine,
kwargs,
loadfrom,
debug
):
"""
create a retriever only for the subset of documents from the
loaded_embeddings that were found using HyDE technique (i.e. asking
the llm to create a hypothetical answer and use the embedding of this
answer to search similar content)
The code is a little strange because it actually reloads only a portion
of the embeddings from cache if possible.
https://python.langchain.com/docs/use_cases/question_answering/how_to/hyde
"""
HyDE_template = """Please imagine the answer to the user's question about a document:
Document type: [[filetype]]
User question: {question}
Answer:""".replace("[[filetype]]", filetype)
hyde_prompt = PromptTemplate(
input_variables=["question"],
template=HyDE_template,
)
hyde_chain = LLMChain(
llm=llm,
prompt=hyde_prompt,
)
hyde_embeddings = HypotheticalDocumentEmbedder(
llm_chain=hyde_chain,
base_embeddings=embeddings_engine,
)
hyde_vector = hyde_embeddings.embed_query(query)
hyde_doc = embeddings.similarity_search_by_vector(
embedding=hyde_vector,
k=top_k,
)
vecstore, _ = load_embeddings(
embed_model=embed_model,
loadfrom = loadfrom,
saveas=None,
debug=debug,
loaded_docs=hyde_doc,
kwargs=kwargs,
)
retriever = vecstore.as_retriever(
search_kwargs={"k": top_k, "distance_metric": "cos"}
)
return retriever
def create_parent_retriever(
task,
loaded_embeddings,
loaded_docs,
):
"https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever"
csp = get_splitter(task)
psp = get_splitter(task)
psp._chunk_size *= 4
parent = ParentDocumentRetriever(
vectorstore=loaded_embeddings,
docstore=LocalFileStore(".cache/parent_retriever"),
child_splitter=csp,
parent_splitter=psp,
)
parent.add_documents(loaded_docs)
return parent
| [
"[[filetype]]",
"Please imagine the answer to the user's question about a document:\n Document type: [[filetype]]\n User question: {question}\n Answer:",
"question"
] |
2024-01-10 | thiswillbeyourgithub/DocToolsLLM | utils~tasks.py | from tqdm import tqdm
from langchain import LLMChain
from langchain.chains.summarize import load_summarize_chain
from langchain import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from utils.prompts import (
summary_rules,
system_summary_template, human_summary_template,
# checksummary_rules,
# system_checksummary_template, human_checksummary_template,
)
from utils.logger import whi, yel, red
# prompts to summarize
summarize_prompt = PromptTemplate(
template=system_summary_template + "\n\n" + human_summary_template,
input_variables=["text", "previous_summary", "metadata", "rules"],
)
chatgpt_summary_messages = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(system_summary_template),
HumanMessagePromptTemplate.from_template(human_summary_template),
],
)
# # prompt to check the summarization quality
# checksummary_prompt = PromptTemplate(
# template=system_checksummary_template + "\n\n" + human_checksummary_template,
# input_variables=["summary_to_check", "rules"],
# )
# chatgpt_checksummary_messages = ChatPromptTemplate.from_messages(
# [
# SystemMessagePromptTemplate.from_template(system_checksummary_template),
# HumanMessagePromptTemplate.from_template(human_checksummary_template),
# ],
# )
def do_summarize(
docs,
metadata,
language,
model,
llm,
callback,
verbose,
):
"summarize each chunk of a long document"
summaries = []
previous_summary = ""
summarize_chain = load_summarize_chain(
llm,
chain_type="stuff",
prompt=chatgpt_summary_messages if model == "openai" else summarize_prompt,
verbose=verbose,
)
# checksumm_chain = LLMChain(
# llm=llm,
# prompt=chatgpt_checksummary_messages if model == "openai" else checksummary_prompt,
# verbose=verbose,
# )
assert "[PROGRESS]" in metadata
with callback() as cb:
for ird, rd in tqdm(enumerate(docs), desc="Summarising splits"):
fixed_index = f"{ird + 1}/{len(docs)}"
out = summarize_chain(
{
"input_documents": [rd],
"metadata": metadata.replace("[PROGRESS]", fixed_index),
"rules": summary_rules.replace("LANGUAGE", language),
"previous_summary": previous_summary,
},
return_only_outputs=False,
)
summaries.append(out["output_text"].rstrip())
# finding the end of the summary to give as context to the next one
lines = "\n".join(summaries).splitlines()
end_of_latest_summary = []
# add the lines of the previous summary in reverse order
# and stop when there is no indentation
for line in lines[::-1]:
end_of_latest_summary.insert(0, line.rstrip())
if not line.startswith("\t"):
break
end_of_latest_summary = "\n".join(end_of_latest_summary)
previous_summary = f"Here's the end of the summary of the previous section. Take this into consideration to avoid repeating information (there is a huge overlap between both sections). If relevant, you can start with the same indentation.\n'''\{end_of_latest_summary}\n'''"
if metadata:
previous_summary = "\n\n" + previous_summary
# for each summary, remove any empty lines:
for i, s in enumerate(summaries):
splits = s.split("\n")
new_sum = "\n".join(
[ss.rstrip()
for ss in splits
if any(char.isalpha() for char in ss)
]
).rstrip()
if new_sum:
summaries[i] = new_sum
else:
summaries[i] = None
summaries = [s for s in summaries if s]
# combine summaries as one string separated by markdown separator
n = len(summaries)
if n > 1:
outtext = f"- Chunk 1/{n}\n"
for i, s in enumerate(summaries):
outtext += s + "\n"
if n > 1 and s != summaries[-1]:
outtext += f"- ---\n- Chunk {i + 2}/{n}\n"
else:
outtext = "\n".join(summaries)
return outtext.rstrip(), n, cb.total_tokens, cb.total_cost
| [
"\n\n",
"previous_summary",
"PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | Frorozcoloa/langchain_test | users.py | from typing import List, Dict
from langchain.chat_models import ChatOpenAI
import json
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from config import codegpt_api_key, code_gpt_agent_id, codegpt_api_base
from utils import text2json, save_csv
def get_tamplate() -> ChatPromptTemplate:
"""Returns a ChatPromptTemplate object with the following template"""
template = "You are a helpful assistant. Your task is to analyze the users of an ecommerce."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = """
Please, identify the main topics mentioned in these users profile.
Return a list of 3-5 topics.
Output is a JSON list with the following format
[
{{"topic_name": "<topic1>", "topic_description": "<topic_description1>"}},
{{"topic_name": "<topic2>", "topic_description": "<topic_description2>"}},
...
]
Users profile:
{users_profile}
"""
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
return chat_prompt
def get_model() -> ChatOpenAI:
# Create a ChatOpenAI object with the retrieved API key, API base URL, and agent ID
llm = ChatOpenAI(
openai_api_key=codegpt_api_key,
openai_api_base=codegpt_api_base,
model=code_gpt_agent_id,
)
return llm
# Create a list of messages to send to the ChatOpenAI object
def run(text: str) -> List[Dict]:
"""Returns a list of topics, given a description of a product"""
llm = get_model()
chat_prompt = get_tamplate()
messages = chat_prompt.format_prompt(users_profile=text)
response = llm(messages.to_messages())
list_desc = text2json(response.content)
return list_desc
def example():
text = "I love biking, hiking and walking. I like to get to know new towns, talk to people. I hate when plans don't happen, I'm very strict with times. I love to eat, I always like to go to good restaurants and try the food, I don't like to see many dishes and I hate the noise, I like the countryside and live there."
list_desc = run(text)
save_csv(list_desc, "users_description")
if __name__ == "__main__":
example()
| [
"<topic_description2>",
"\n Please, identify the main topics mentioned in these users profile. \n\n Return a list of 3-5 topics. \n Output is a JSON list with the following format\n [\n {{\"topic_name\": \"<topic1>\", \"topic_description\": \"<topic_description1>\"}}, \n {{\"topic_name\": \"<topic2>\", \"topic_description\": \"<topic_description2>\"}},\n ...\n ]\n Users profile:\n {users_profile}\n ",
"You are a helpful assistant. Your task is to analyze the users of an ecommerce.",
"<topic_description1>",
"[PLACEHOLDER, PLACEHOLDER]",
"topic_name",
"topic_description"
] |
2024-01-10 | Frorozcoloa/langchain_test | products.py | from typing import List, Dict
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
import os
import json
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from config import codegpt_api_key, code_gpt_agent_id, codegpt_api_base
from utils import text2json, save_csv
def get_tamplate() -> ChatPromptTemplate:
"""Returns a ChatPromptTemplate object with the following template:"""
template = "You are a helpful assistant. Your task is to analyze the products of an e-commerce."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = """
Identify the primary subject discussed in the product description. Categorize the product and provide a portrayal of the target audience.
Return a list of 3-6 classes.
Output is a JSON list with the following format
[
{{"type_classes": "<class1>", "class_description": "<class_description1>"}},
{{"type_classes": "<class3>", "class_description": "<class_description2>"}},
...
]
description product:
{description_product}
"""
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
return chat_prompt
def get_model() -> ChatOpenAI:
"""Returns a ChatOpenAI"""
llm = ChatOpenAI(
openai_api_key=codegpt_api_key,
openai_api_base=codegpt_api_base,
model=code_gpt_agent_id,
)
return llm
def run(customer_reviews: str) -> List[Dict]:
"""Returns a list of topics, given a description of a product"""
llm = get_model()
chat_prompt = get_tamplate()
message = chat_prompt.format_prompt(description_product=customer_reviews)
response = llm(message.to_messages())
values = text2json(response.content)
return values
def example():
"""Example of use of the function get_topics"""
description_product = """
Small 10-liter hiking backpack nh100 quechua black, BENEFITS
Carrying comfort, COMFORT CARRYING COMFORT Spalder and padded straps
1 main compartment with double zipper
VOLUME
Volume: 10 liters | Weight: 145 g | Dimensions: 39 x 21 x 12 cm.friction resistance
FRICTION RESISTANCE
Durable, abrasion-resistant materials and joints. 10-year warranty. Warranty 10 years.Ventilation
VENTILATION
Simple to use backrest
EASE OF USE
Easy access to the external pocket by placing the backpack in a horizontal position while hiking.
"""
topics = run(description_product)
save_csv(topics, "products_classes")
if __name__ == "__main__":
example()
| [
"<class_description1>",
"class_description",
"\n Identify the primary subject discussed in the product description. Categorize the product and provide a portrayal of the target audience. \n\n Return a list of 3-6 classes. \n Output is a JSON list with the following format\n [\n {{\"type_classes\": \"<class1>\", \"class_description\": \"<class_description1>\"}}, \n {{\"type_classes\": \"<class3>\", \"class_description\": \"<class_description2>\"}},\n ...\n ]\n description product:\n {description_product}\n ",
"type_classes",
"You are a helpful assistant. Your task is to analyze the products of an e-commerce.",
"<class_description2>",
"[PLACEHOLDER, PLACEHOLDER]"
] |
2024-01-10 | Frorozcoloa/langchain_test | social_media.py | from openai import OpenAI
import base64
from pathlib import Path
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
from langchain.docstore.document import Document
from langchain.chat_models import ChatOpenAI
from tqdm import tqdm
from typing import List, Dict
from config import openai_api_key, codegpt_api_key, code_gpt_agent_id, codegpt_api_base
from utils import text2json, save_csv
model = OpenAI(api_key=openai_api_key)
def image_b64(image_path):
with open(image_path, "rb") as f:
return base64.b64encode(f.read()).decode()
def look(image_path, prompt="Describe this image"):
b64_image = image_b64(image_path)
response = model.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{b64_image}",
},
{
"type": "text",
"text": prompt,
}
]
}
],
max_tokens=1024,
)
message = response.choices[0].message
return message.content
def read_all_images():
images_paths = Path("images").iterdir()
description = {}
for image_path in tqdm(images_paths):
if image_path.is_dir():
read_images = image_path.glob("*.jpg")
for image in read_images:
describe = look(image)
doc = Document(page_content=describe, metadata={"source": image_path.name})
if description.get(image_path.name) is None:
description[image_path.name] = [doc]
else:
description[image_path.name].append(doc)
return description
def get_tamplate():
template = "You are a helpful assistant. Your task is to analyze to draw common topic from the given descriptions of the users"
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = """
Please, identify the main topics mentioned in the description of the users.
Return a list of 3-5 topics.
Output is a JSON list with the following format
[
{{"topic_name": "<topic1>", "topic_description": "<topic_description1>"}},
{{"topic_name": "<topic2>", "topic_description": "<topic_description2>"}},
...
]
user_1:
{user_1_description}
user_2:
{user_2_description}
"""
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
return chat_prompt
def get_model() -> ChatOpenAI:
# Create a ChatOpenAI object with the retrieved API key, API base URL, and agent ID
llm = ChatOpenAI(
openai_api_key=codegpt_api_key,
openai_api_base=codegpt_api_base,
model=code_gpt_agent_id,
)
return llm
# Create a list of messages to send to the ChatOpenAI object
def run(users_description: Dict[str,Document]) -> List[Dict]:
"""Returns a list of topics, given a description of a product"""
llm = get_model()
chat_prompt = get_tamplate()
messages = chat_prompt.format_prompt(user_1_description = users_description["user_1"], user_2_description = users_description["user_2"])
response = llm(messages.to_messages())
list_desc = text2json(response.content)
return list_desc
if __name__ == "__main__":
description = read_all_images()
topics = run(description)
save_csv(topics, "Anaslysis_social_meida.csv") | [
"\n Please, identify the main topics mentioned in the description of the users.\n\n Return a list of 3-5 topics. \n Output is a JSON list with the following format\n [\n {{\"topic_name\": \"<topic1>\", \"topic_description\": \"<topic_description1>\"}}, \n {{\"topic_name\": \"<topic2>\", \"topic_description\": \"<topic_description2>\"}},\n ...\n ]\n user_1:\n {user_1_description}\n user_2:\n {user_2_description}\n ",
"<topic_description2>",
"<topic_description1>",
"[{'type': 'image_url', 'image_url': 'data:image/jpeg;base64,PLACEHOLDER'}, {'type': 'text', 'text': PLACEHOLDER}]",
"[PLACEHOLDER, PLACEHOLDER]",
"topic_name",
"topic_description",
"You are a helpful assistant. Your task is to analyze to draw common topic from the given descriptions of the users"
] |
2024-01-10 | Frorozcoloa/langchain_test | trends.py | from googlesearch import search
from typing import List, Dict
from newspaper import Article
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import MapReduceDocumentsChain, ReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document
from config import codegpt_api_key, code_gpt_agent_id, codegpt_api_base
from utils import text2json, save_csv
def get_template() -> List[PromptTemplate]:
"""Returns a list of PromptTemplate objects with the following templates:"""
def get_topic():
"""Returns a PromptTemplate object with the following template:"""
template = """
Given the following docs about a sports e-commerce, conduct an analysis of potential future trends.
return a list of 10 topics.
Output is a JSON list with the following format
[
{{"product_decription": "<product_decription>", "product_to_sell": "<product_to_sell1>"}},}},
{{"product_decription": "<product_decription>", "product_to_sell": "<product_to_sell2>"}},}},
...
]
{docs}
"""
prompt = PromptTemplate(template=template, input_variables=["news"])
return prompt
def get_summary():
"""Returns a PromptTemplate object with the following template:"""
template = """
The following is a set of documents:
{docs}
Based on this list of docs, please identify the main themes
Helpful Answer:
"""
prompt = PromptTemplate(template=template, input_variables=["news"])
return prompt
template_summary = get_summary()
template_topic = get_topic()
return [template_summary, template_topic]
def get_model(prompt_summary, prompt_topic) -> List[LLMChain]:
"""Returns a list of LLMChain objects"""
llm = ChatOpenAI(
openai_api_key=codegpt_api_key,
openai_api_base=codegpt_api_base,
model=code_gpt_agent_id,
)
def get_chain(llm: ChatOpenAI, template: PromptTemplate):
"""Returns a LLMChain object"""
llm_chain = LLMChain(prompt=template, llm=llm)
return llm_chain
llm_summary = get_chain(llm, prompt_summary)
llm_topic = get_chain(llm, prompt_topic)
return [llm_summary, llm_topic]
def get_articles_trends(query: str = "Sports market trends", num_results: int = 50):
"""Found in google the articles related to the query and return a list of Document objects"""
list_text = []
for url in search(query, num_results=num_results):
article = Article(url)
article.download()
article.parse()
doc = Document(page_content=article.text, metadata={"source": url})
list_text.append(doc)
return list_text
def get_map_reduce(llm_summary: LLMChain):
"""Returns a summary of the list of documents"""
combine_documents_chain = StuffDocumentsChain(
llm_chain=llm_summary, document_variable_name="docs"
)
reduce_documents_chain = ReduceDocumentsChain(
# This is final chain that is called.
combine_documents_chain=combine_documents_chain,
# If documents exceed context for `StuffDocumentsChain`
collapse_documents_chain=combine_documents_chain,
# The maximum number of tokens to group documents into.
token_max=4000,
)
map_reduce_chain = MapReduceDocumentsChain(
# Map chain
llm_chain=llm_summary,
# Reduce chain
reduce_documents_chain=reduce_documents_chain,
# The variable name in the llm_chain to put the documents in
document_variable_name="docs",
# Return the results of the map steps in the output
return_intermediate_steps=False,
)
return map_reduce_chain
def get_splitter():
"""Returns a CharacterTextSplitter object"""
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=1000, chunk_overlap=0
)
return text_splitter
def get_summary_trends(llm_summary: LLMChain, list_docs: List[Document]) -> str:
"""Returns a summary of the list of documents"""
map_reduce_chain = get_map_reduce(llm_summary)
text_splitter = get_splitter()
split_docs = text_splitter.split_documents(list_docs)
text_summary = map_reduce_chain.run(split_docs)
return text_summary
def get_topics(llm_topic: LLMChain, text_summary: str) -> str:
"""Returns a list of topics"""
raw_topics = llm_topic.run(text_summary)
topics = text2json(raw_topics)
return topics
def get_analysis_trends(list_docs: list) -> List[Dict]:
"""Returns a list of topics, given a description of a product"""
llm_summary, llm_topic = get_model(*get_template())
text_summary = get_summary_trends(llm_summary, list_docs)
topics = get_topics(llm_topic, text_summary)
save_csv(topics, "trends")
return topics
def example():
"""Example of use"""
list_docs = get_articles_trends()
topics = get_analysis_trends(list_docs)
print(topics)
if __name__ == "__main__":
example()
| [
"\n Given the following docs about a sports e-commerce, conduct an analysis of potential future trends.\n return a list of 10 topics.\n Output is a JSON list with the following format\n [\n {{\"product_decription\": \"<product_decription>\", \"product_to_sell\": \"<product_to_sell1>\"}},}}, \n {{\"product_decription\": \"<product_decription>\", \"product_to_sell\": \"<product_to_sell2>\"}},}},\n ...\n ]\n {docs}\n ",
"news",
"\n The following is a set of documents:\n\n {docs}\n\n Based on this list of docs, please identify the main themes \n\n Helpful Answer:\n "
] |
2024-01-10 | TrentBrick/RewardConditionedUDRL | utils~buffer.py | # pylint: disable=no-member
import numpy as np
import torch
import random
import bisect
def combined_shape(length, shape=None):
# taken from openAI spinning up.
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
class SortedBuffer:
"""
Buffer that efficiently remains sorted.
"""
def __init__(self, obs_dim, act_dim, size,
use_td_lambda_buf=False ):
self.obs_buf = None
self.obs2_buf= None
self.act_buf= None
self.discounted_rew_to_go_buf= None
self.cum_rew= None
self.horizon= None
self.rollout_length = None
self.buffer_dict = dict(obs=self.obs_buf, obs2=self.obs2_buf,
act=self.act_buf,
discounted_rew_to_go=self.discounted_rew_to_go_buf,
cum_rew=self.cum_rew, horizon=self.horizon,
rollout_length=self.rollout_length
)
self.use_td_lambda_buf = use_td_lambda_buf
if self.use_td_lambda_buf:
self.rollout_end_ind_buf = None
self.raw_rew_buf = None
self.buffer_dict['rollout_end_ind'] = self.rollout_end_ind_buf
self.buffer_dict['raw_rew'] = self.raw_rew_buf
self.size, self.max_size = 0, size
self.total_num_steps_added = 0
def add_rollouts(self, list_of_rollout_dicts):
# sorted in ascending order. largest values at the back.
for rollout in list_of_rollout_dicts:
# dont bother adding rollouts that are worse than the worst in the buffer.
# but still count them to the overall number of rollouts seen.
len_rollout = len(rollout['terminal'])
self.total_num_steps_added += len_rollout
if self.size == self.max_size and rollout['cum_rew'][0] <= self.buffer_dict['cum_rew'][-1]:
continue
self.size = min(self.size+len_rollout, self.max_size)
if self.buffer_dict['obs'] is not None:
# find where everything from this rollout should be inserted into
# each of the numpy buffers. Uses the cumulative/terminal rewards
# minus so that highest values are at the front.
sort_ind = np.searchsorted(-self.buffer_dict['cum_rew'], -rollout['cum_rew'][0] )
end_ind = len_rollout+sort_ind
else:
end_ind = len_rollout
if self.use_td_lambda_buf:
# will be appended and treated like everything else.
end_ind = np.repeat(end_ind, len_rollout)
rollout['rollout_end_ind'] = end_ind
for key in self.buffer_dict.keys():
# NOTE: assumes that buffer and rollout use the same keys!
# needed at init!
if self.buffer_dict[key] is None:
self.buffer_dict[key] = rollout[key]
else:
self.buffer_dict[key] = np.insert(self.buffer_dict[key], sort_ind, rollout[key], axis=0)
if key == 'rollout_end_ind':
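                        # entries stored after the inserted rollout moved len_rollout slots to the right, so bump their saved end indices accordingly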
self.buffer_dict[key][end_ind[0]:] = self.buffer_dict[key][end_ind[0]:]+len_rollout
if self.size >= self.max_size:
# buffer is full. Need to trim!
# this will have a bias in that it will favour
# the longer horizons at the end of the training data
# but it shouldnt make a major diff.
self.buffer_dict[key] = self.buffer_dict[key][:self.max_size]
def retrieve_path(self, start_index):
end_index = self.buffer_dict['rollout_end_ind'][start_index]
if end_index<= start_index:
print("for sorted buffer shouldnt have looping here!")
# we have looping
obs = np.concatenate( [self.buffer_dict['obs'][start_index:], self.buffer_dict['obs'][:end_index]], axis=0)
rew = np.concatenate( [self.buffer_dict['raw_rew'][start_index:], self.buffer_dict['raw_rew'][:end_index]], axis=0)
else:
obs = self.buffer_dict['obs'][start_index:end_index]
rew = self.buffer_dict['raw_rew'][start_index:end_index]
return torch.as_tensor(obs, dtype=torch.float32), torch.as_tensor(rew, dtype=torch.float32)
def get_desires(self, last_few = 75):
"""
This function calculates the new desired reward and new desired horizon based on the replay buffer.
New desired horizon is calculted by the mean length of the best last X episodes.
New desired reward is sampled from a uniform distribution given the mean and the std calculated from the last best X performances.
where X is the hyperparameter last_few.
"""
# it is the first occurence of each cumulative ind.
unique_cum_rews, unique_cum_inds = np.unique(-self.buffer_dict['cum_rew'], return_index=True)
#unique returns a sorted dictionary need to reverse it.
unique_cum_rews, unique_cum_inds = -unique_cum_rews[:last_few], unique_cum_inds[:last_few]
#The exploratory desired horizon dh0 is set to the mean of the lengths of the selected episodes
new_desired_horizon = round( self.buffer_dict['rollout_length'][unique_cum_inds].mean() )
# from these returns calc the mean and std
mean_returns = np.mean(unique_cum_rews)
std_returns = np.std(unique_cum_rews)
return mean_returns, std_returns, new_desired_horizon
def __getitem__(self, idx):
# turn this into a random value!
#rand_ind = np.random.randint(0,self.size) # up to current max size.
return self.sample_batch(idxs=idx)
def __len__(self):
return self.size #self.num_batches_per_epoch
def sample_batch(self, idxs=None, batch_size=256):
if idxs is None:
idxs = np.random.randint(0, self.size, size=batch_size)
return {key:torch.as_tensor(arr[idxs],dtype=torch.float32) for key, arr in self.buffer_dict.items()}
class RingBuffer:
"""
A simple FIFO experience replay buffer for DDPG agents.
# Taken from OpenAI spinning up.
"""
def __init__(self, obs_dim, act_dim, size, use_td_lambda_buf=False):
self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
self.obs2_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
if act_dim==1:
self.act_buf = np.zeros(size, dtype=np.float32)
else:
self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)
self.discounted_rew_to_go_buf = np.zeros(size, dtype=np.float32)
self.cum_rew = np.zeros(size, dtype=np.float32)
self.horizon_buf = np.zeros(size, dtype=np.float32)
self.buf_list = [self.obs_buf,
self.obs2_buf,
self.discounted_rew_to_go_buf,
self.act_buf,
self.cum_rew,
self.horizon_buf,
]
self.value_names = ['obs',
'obs2',
'discounted_rew_to_go',
'act',
'cum_rew',
'horizon',
]
self.use_td_lambda_buf = use_td_lambda_buf
if self.use_td_lambda_buf:
self.rollout_end_ind_buf = np.zeros(size, dtype=np.int32)
self.raw_rew_buf = np.zeros(size, dtype=np.float32)
self.buf_list.append(self.raw_rew_buf)
self.value_names.append('raw_rew')
self.ptr, self.size, self.max_size = 0, 0, size
self.total_num_steps_added = 0
def retrieve_path(self, start_index):
end_index = self.rollout_end_ind_buf[start_index]
if end_index<= start_index:
# we have looping
obs = np.concatenate( [self.obs_buf[start_index:], self.obs_buf[:end_index]], axis=0)
rew = np.concatenate( [self.raw_rew_buf[start_index:], self.raw_rew_buf[:end_index]], axis=0)
else:
obs = self.obs_buf[start_index:end_index]
rew = self.raw_rew_buf[start_index:end_index]
return torch.as_tensor(obs, dtype=torch.float32), torch.as_tensor(rew, dtype=torch.float32)
def add_to_buffer(self,np_buf, iters_adding, data ):
if (self.ptr+iters_adding)>self.max_size:
amount_pre_loop = self.max_size-self.ptr
amount_post_loop = iters_adding-amount_pre_loop
np_buf[self.ptr:] = data[:amount_pre_loop]
np_buf[:amount_post_loop] = data[amount_pre_loop:]
else:
np_buf[self.ptr:self.ptr+iters_adding] = data
def add_rollouts(self, list_of_rollout_dicts):
for rollout in list_of_rollout_dicts:
iters_adding = len(rollout['terminal'])
self.total_num_steps_added += iters_adding
for np_buf, key in zip(self.buf_list,
self.value_names ):
self.add_to_buffer(np_buf, iters_adding, rollout[key])
if self.use_td_lambda_buf:
end_ind = int((self.ptr+iters_adding) % self.max_size)
assert end_ind >=0
end_ind = np.repeat(end_ind, iters_adding)
self.add_to_buffer(self.rollout_end_ind_buf, iters_adding, end_ind)
self.ptr = (self.ptr+iters_adding) % self.max_size
self.size = min(self.size+iters_adding, self.max_size)
def __getitem__(self, idx):
return self.sample_batch(idxs=idx)
def __len__(self):
return self.size
def sample_batch(self, idxs=None, batch_size=32):
if idxs is None:
idxs = np.random.randint(0, self.size, size=batch_size)
batch = dict(obs=self.obs_buf[idxs],
obs2=self.obs2_buf[idxs],
act=self.act_buf[idxs],
discounted_rew_to_go=self.discounted_rew_to_go_buf[idxs],
horizon=self.horizon_buf[idxs],
cum_rew=self.cum_rew[idxs],
)
if self.use_td_lambda_buf:
batch['start_index'] = idxs
batch['rollout_end_ind'] = self.rollout_end_ind_buf[idxs]
batch['raw_rew'] = self.raw_rew_buf[idxs]
return {k: torch.as_tensor(v, dtype=torch.float32) for k,v in batch.items()} | [] |
2024-01-10 | popeyeGOEL/GPTForSlcak | slack~ai_ops.py | import threading
import time
import re
from typing import List, Dict, Any, Generator, Tuple, Union
from slack_bolt import BoltContext
from slack_sdk import WebClient
from slack.markdown import slack_to_markdown, markdown_to_slack
from slack.slack_ops import update_wip_message
import g4f
from slack.env import AI_MODEL, MAX_TOKENS
def get_answer( *, messages, stream: bool = False, ignored: List[str] = None, ignore_working: bool = False, ignore_stream_and_auth: bool = False, **kwargs ):
try:
for result in g4f.ChatCompletion.create(
model=g4f.models.default,
provider=g4f.Provider.Bing,
messages=messages,
stream=stream,
ignored=ignored,
ignore_working=ignore_working,
ignore_stream_and_auth=ignore_stream_and_auth,
**kwargs
):
yield result
    except Exception as e:
        # Yield the error message so the consumer of the stream still sees it;
        # a bare `return <value>` inside a generator is swallowed by StopIteration.
        yield (
            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
        )
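# Hedged usage sketch (assumption: g4f and its Bing provider work in this environment).
# It shows how the generator returned by get_answer is consumed as a stream of chunks.
def _example_stream_answer():
    messages = [{"role": "user", "content": "Say hello"}]
    chunks = []
    for token in get_answer(messages=messages, stream=True):
        chunks.append(str(token))
    return "".join(chunks)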
def write_answer(
*,
client: WebClient,
wip_reply: dict,
context: BoltContext,
user_id: str,
answer: str,
messages: List[Dict[str, str]],
translate_markdown: bool
):
assistant_reply: Dict[str, str] = {"role": "assistant", "content": f"{answer}"}
messages.append(assistant_reply)
threads = []
try:
loading_character = " ... :writing_hand:"
def update_message():
assistant_reply_text = format_assistant_reply(
assistant_reply["content"], translate_markdown
)
wip_reply["message"]["text"] = assistant_reply_text
update_wip_message(
client=client,
channel=context.channel_id,
ts=wip_reply["message"]["ts"],
text=assistant_reply_text + loading_character,
messages=messages,
user=user_id,
)
thread = threading.Thread(target=update_message)
thread.daemon = True
thread.start()
threads.append(thread)
for t in threads:
try:
if t.is_alive():
t.join()
except Exception:
pass
assistant_reply_text = format_assistant_reply(
assistant_reply["content"], translate_markdown
)
wip_reply["message"]["text"] = assistant_reply_text
update_wip_message(
client=client,
channel=context.channel_id,
ts=wip_reply["message"]["ts"],
text=assistant_reply_text,
messages=messages,
user=user_id,
)
finally:
for t in threads:
try:
if t.is_alive():
t.join()
except Exception:
pass
def consume_custom_stream_to_write_reply(
*,
client: WebClient,
wip_reply: dict,
context: BoltContext,
user_id: str,
messages: List[Dict[str, str]],
stream: Generator[Any, Any, None],
timeout_seconds: int,
translate_markdown: bool
):
start_time = time.time()
assistant_reply: Dict[str, str] = {"role": "assistant", "content": ""}
messages.append(assistant_reply)
threads = []
word_count = 0
try:
loading_character = " ... :writing_hand:"
for chunk in stream:
spent_seconds = time.time() - start_time
if timeout_seconds < spent_seconds:
raise TimeoutError("Stream response timeout")
# Process the chunk (adapt this part to your specific stream structure)
            assistant_reply["content"] += chunk  # append the streamed chunk to the reply
word_count += len(chunk.split())
# Update message periodically or based on a condition
if word_count >= 10:
def update_message():
assistant_reply_text = format_assistant_reply(
assistant_reply["content"], translate_markdown
)
wip_reply["message"]["text"] = assistant_reply_text
update_wip_message(
client=client,
channel=context.channel_id,
ts=wip_reply["message"]["ts"],
text=assistant_reply_text + loading_character,
messages=messages,
user=user_id,
)
thread = threading.Thread(target=update_message)
thread.daemon = True
thread.start()
threads.append(thread)
word_count = 0
# Finalize after stream completion
assistant_reply_text = format_assistant_reply(
assistant_reply["content"], translate_markdown
)
wip_reply["message"]["text"] = assistant_reply_text
update_wip_message(
client=client,
channel=context.channel_id,
ts=wip_reply["message"]["ts"],
text=assistant_reply_text,
messages=messages,
user=user_id,
)
finally:
# Ensure all threads are joined and the stream is closed
for t in threads:
try:
if t.is_alive():
t.join()
except Exception:
pass
try:
stream.close()
except Exception:
pass
# Format message from Slack to send to AI
def format_ai_message_content(content: str, translate_markdown: bool) -> str:
if content is None:
return None
# Unescape &, < and >, since Slack replaces these with their HTML equivalents
# See also: https://api.slack.com/reference/surfaces/formatting#escaping
    content = content.replace("&lt;", "<").replace("&gt;", ">").replace("&amp;", "&")
# Convert from Slack mrkdwn to markdown format
if translate_markdown:
content = slack_to_markdown(content)
return content
# Format message from OpenAI to display in Slack
def format_assistant_reply(content: str, translate_markdown: bool) -> str:
for o, n in [
# Remove leading newlines
("^\n+", ""),
# Remove prepended Slack user ID
("^<@U.*?>\\s?:\\s?", ""),
# Remove OpenAI syntax tags since Slack doesn't render them in a message
("```\\s*[Rr]ust\n", "```\n"),
("```\\s*[Rr]uby\n", "```\n"),
("```\\s*[Ss]cala\n", "```\n"),
("```\\s*[Kk]otlin\n", "```\n"),
("```\\s*[Jj]ava\n", "```\n"),
("```\\s*[Gg]o\n", "```\n"),
("```\\s*[Ss]wift\n", "```\n"),
("```\\s*[Oo]objective[Cc]\n", "```\n"),
("```\\s*[Cc]\n", "```\n"),
("```\\s*[Cc][+][+]\n", "```\n"),
("```\\s*[Cc][Pp][Pp]\n", "```\n"),
("```\\s*[Cc]sharp\n", "```\n"),
("```\\s*[Mm][Aa][Tt][Ll][Aa][Bb]\n", "```\n"),
("```\\s*[Jj][Ss][Oo][Nn]\n", "```\n"),
("```\\s*[Ll]a[Tt]e[Xx]\n", "```\n"),
("```\\s*bash\n", "```\n"),
("```\\s*zsh\n", "```\n"),
("```\\s*sh\n", "```\n"),
("```\\s*[Ss][Qq][Ll]\n", "```\n"),
("```\\s*[Pp][Hh][Pp]\n", "```\n"),
("```\\s*[Pp][Ee][Rr][Ll]\n", "```\n"),
("```\\s*[Jj]ava[Ss]cript\n", "```\n"),
("```\\s*[Ty]ype[Ss]cript\n", "```\n"),
("```\\s*[Pp]ython\n", "```\n"),
]:
content = re.sub(o, n, content)
# Convert from OpenAI markdown to Slack mrkdwn format
if translate_markdown:
content = markdown_to_slack(content)
return content
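# Hedged illustration of format_assistant_reply: the language tag on a fenced block is stripped so
# Slack renders it, and (optionally) markdown is converted to Slack mrkdwn. The input is made up.
def _example_format_reply():
    raw = "```python\nprint('hi')\n```"
    return format_assistant_reply(raw, translate_markdown=False)  # expected: "```\nprint('hi')\n```"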
def build_system_text(
system_text_template: str, translate_markdown: bool, context: BoltContext
):
system_text = system_text_template.format(bot_user_id=context.bot_user_id)
# Translate format hint in system prompt
if translate_markdown is True:
system_text = slack_to_markdown(system_text)
return system_text
| [
"PLACEHOLDER"
] |
2024-01-10 | thinktecture-labs/llm-extract-structured-information-langchain-kor | server~filter.py | #! /usr/bin/env python
from kor.extraction import create_extraction_chain
from kor.nodes import Object
from langchain.chat_models import ChatOpenAI
from datetime import datetime
with open("schema.json", "r") as f:
schema = Object.parse_raw(f.read())
llm = ChatOpenAI(
model_name="gpt-3.5-turbo-16k",
temperature=0,
max_tokens=2000
)
chain = create_extraction_chain(
llm,
schema,
encoder_or_encoder_class="JSON",
verbose=False
)
def parse(query: str):
data = f"""
Current Date: {datetime.today()}
Query: {query}
"""
return chain.run(text=data)['data'] | [] |
2024-01-10 | HideLord/AlpacaDataCleaned | alpacaModifier.py | import gradio as gr
import openai
OAI_PROMPT = "You are a helpful assistant. You answer in a concise and accurate manner. Your responses are short and to the point."
class AlpacaModifier:
def __init__(self):
self.input = ''
self.instruction = ''
self.old_output = ''
self.modified_output = ''
def next_callback(self, instruction='', input='', old_output='', modified_output=''):
# returns the next instruction_text, input_text, old_output_text, modified_output_text.
pass
def save_callback(self, instruction='', input='', old_output='', modified_output=''):
# When this is called, all the changes done until this moment will be saved.
pass
def reset_callback(self, instruction='', input='', old_output='', modified_output=''):
        # Reset to the beginning of the file.
pass
def skip_ahead(self, steps, instruction='', input='', old_output='', modified_output=''):
while steps > 1:
steps -= 1
instruction, input, old_output, modified_output = self.next_callback(instruction, input, old_output, old_output)
if steps == 1:
return self.next_callback(instruction, input, old_output, old_output)
return instruction, input, old_output, modified_output
def ask_gpt(self, instruction='', input='', old_output='', modified_output='', key=''):
openai.api_key = key
composite_content = f"{instruction}\n\n{input}" if input else instruction
print(f'Sending:\n{composite_content}')
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": OAI_PROMPT},
{"role": "user", "content": composite_content}
]
)
modified_output = completion["choices"][0]["message"]["content"]
return instruction, input, old_output, modified_output
def modify_output(self):
# Automatically modify the output in some way or just return it as it is.
pass
def run(self):
with gr.Blocks() as demo:
with gr.Column():
gr.Markdown("""
## 🦙 Alpaca Dataset Editor
Cleaned Dataset: [Github](https://github.com/gururise/AlpacaDataCleaned) - [Hugging Face](https://huggingface.co/datasets/yahma/alpaca-cleaned)
*To use GPT to generate answers, OpenAI API key is required*
""")
instruction_text = gr.Textbox(lines=2, label="Instruction", value=self.instruction, interactive=True)
input_text = gr.Textbox(lines=1, label="Input", value=self.input, interactive=True)
old_output_text = gr.Textbox(lines=2, label="Old Output", value=self.old_output, interactive=False)
modified_output_text = gr.Textbox(lines=10, label="Modified Output", value=self.modified_output, interactive=True)
with gr.Row():
button_next = gr.Button(value="Next")
button_next.click(self.next_callback,
inputs=[instruction_text, input_text, old_output_text, modified_output_text],
outputs=[instruction_text, input_text, old_output_text, modified_output_text])
button_save = gr.Button(value="Save")
button_save.click(self.save_callback,
inputs=[instruction_text, input_text, old_output_text, modified_output_text])
button_reset = gr.Button(value="Reset To Begining")
button_reset.click(self.reset_callback,
inputs=[instruction_text, input_text, old_output_text, modified_output_text],
outputs=[instruction_text, input_text, old_output_text, modified_output_text])
with gr.Row():
skip_ahead = gr.Number(label="Items to skip", value=0, interactive=True)
button_skip = gr.Button(value="Skip Ahead")
button_skip.click(self.skip_ahead,
inputs=[skip_ahead, instruction_text, input_text, old_output_text, modified_output_text],
outputs=[instruction_text, input_text, old_output_text, modified_output_text])
with gr.Row():
gpt_api_key = gr.Textbox(label="API key", placeholder="Enter your OpenAI API Key (optional)")
button_ask_gpt = gr.Button(value="Ask GPT")
button_ask_gpt.click(self.ask_gpt,
inputs=[instruction_text, input_text, old_output_text, modified_output_text, gpt_api_key],
outputs=[instruction_text, input_text, old_output_text, modified_output_text])
demo.launch() | [
"You are a helpful assistant. You answer in a concise and accurate manner. Your responses are short and to the point."
] |
2024-01-10 | dataprofessor/langchain-ask-the-doc | app-v1.py | import streamlit as st
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
def generate_response(uploaded_file, openai_api_key, query_text):
# Load document if file is uploaded
if uploaded_file is not None:
documents = [uploaded_file.read().decode()]
# Split documents into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.create_documents(documents)
# Select embeddings
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
# Create a vectorstore from documents
db = Chroma.from_documents(texts, embeddings)
# Create retriever interface
retriever = db.as_retriever()
# Create QA chain
qa = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key=openai_api_key), chain_type='stuff', retriever=retriever)
return qa.run(query_text)
# Page title
st.set_page_config(page_title='🦜🔗 Ask the Doc App')
st.title('🦜🔗 Ask the Doc App')
# File upload
uploaded_file = st.file_uploader('Upload an article', type='txt')
# Query text
query_text = st.text_input('Enter your question:', placeholder = 'Please provide a short summary.', disabled=not uploaded_file)
# Form input and query
result = []
with st.form('myform', clear_on_submit=True):
openai_api_key = st.text_input('OpenAI API Key', type='password', disabled=not (uploaded_file and query_text))
submitted = st.form_submit_button('Submit', disabled=not(uploaded_file and query_text))
if submitted and openai_api_key.startswith('sk-'):
with st.spinner('Calculating...'):
response = generate_response(uploaded_file, openai_api_key, query_text)
result.append(response)
del openai_api_key
if len(result):
st.info(response)
| [] |
2024-01-10 | TOSKY94/Paddymi | utilities.py | import openai
from gtts import gTTS
from playsound import playsound
import config
from datetime import datetime
class Utilies():
def __init__(self) -> None:
pass
def datetime(self, mode):
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
months = ['','January','February','March','April','May','June','July','August','September','October','November','December']
date, time = now.split(' ')
year, month, day = date.split('-')
hour, mins, sec = time.split(':')
prompt = ['PM' if int(hour)>=12 else 'AM']
hour = [int(hour)%12 if int(hour)%12!=0 else 12]
today = f"Today's date is {day} of {months[int(month)]} {year}"
dTime = f"The time is {hour[0]} {mins} {prompt[0]}"
if mode=='time':
return dTime,hour[0], mins, prompt[0]
elif mode=='date':
return today
else:
return 'invalid prompt'
# Convert audio file to text using the Whisper ASR API
    def audio2text(self, audio_file_path):
        with open(audio_file_path, 'rb') as f:
            # openai-python 0.x exposes Whisper transcription via openai.Audio.transcribe
            response = openai.Audio.transcribe(model="whisper-1", file=f)
        return response['text']
# Use the transcribed text as a prompt to generate response from ChatGPT API
def getPromptResponce(self, prompt):
openai.api_key=config.API_KEY
# Generate a response from ChatGPT-3
response = openai.Completion.create(
engine='text-davinci-002', # Specify the engine to use
prompt=prompt,
max_tokens=100, # Set the maximum number of tokens for the response
n=1, # Specify the number of completions to generate
stop=None, # Specify a stop sequence to end the response (optional)
temperature=0.7 # Set the temperature for randomness in the response
)
return response.choices[0].text.strip()
# Convert text response to audio using gTTS
def text2audio(self, text):
tts = gTTS(text)
audio_file_path = 'response.mp3'
tts.save(audio_file_path)
return audio_file_path
# Play audio response to the user
def play_audio(self, audio_file_path):
playsound(audio_file_path)
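# Hedged usage sketch (assumptions: internet access for gTTS and an audio output device).
# It chains the text -> speech helpers; audio2text is skipped here because it needs a recorded file.
def _example_speak_time():
    util = Utilies()
    sentence, *_ = util.datetime('time')
    audio_path = util.text2audio(sentence)
    util.play_audio(audio_path)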
| [] |
2024-01-10 | nyanp/chat2plot | tests~test_chat2plot.py | import pandas as pd
import pydantic
import pytest
from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
from langchain.schema import FunctionMessage
from chat2plot import PlotConfig, chat2plot, schema
@pytest.mark.parametrize(
"prompt",
[
"Average price per category",
"カテゴリごとの平均価格",
"avg price for each category",
"Show me average price per category in bar chart.",
],
)
def test_plot_bar(prompt: str):
df = pd.DataFrame(
{
"category": ["A", "B", "C", "A", "B"],
"price": [100, 200, 100, 150, 250],
"x": [1, 2, 3, 4, 5],
}
)
plot = chat2plot(df)
ret = plot.query(prompt, config_only=True)
config = ret.config
assert isinstance(config, PlotConfig)
assert config.chart_type == schema.ChartType.BAR
assert config.x.column == "category"
assert config.y.column == "price"
assert config.y.aggregation == schema.AggregationType.AVG
def test_vega_json():
df = pd.DataFrame(
{
"date": [
"2021-01-01",
"2021-02-02",
"2021-02-03",
"2021-02-04",
"2021-02-05",
],
"price": [100, 200, 300, 400, 500],
"x": [1, 2, 3, 4, 5],
}
)
plot = chat2plot(df, schema_definition="vega")
ret = plot.query("Daily total sales in line chart", config_only=True)
assert isinstance(ret.config, dict)
# https://vega.github.io/vega-lite/docs/line.html#line-chart
expected = {
"mark": "line",
"encoding": {
"x": {"field": "date", "type": "temporal"},
"y": {"field": "price", "aggregate": "sum", "type": "quantitative"},
},
}
assert ret.config["mark"] == expected["mark"]
assert ret.config["encoding"]["x"] == expected["encoding"]["x"]
assert ret.config["encoding"]["y"] == expected["encoding"]["y"]
class CustomChartConfig(pydantic.BaseModel):
chart_type: str
x_axis_name: str
y_axis_name: str
y_axis_aggregate: str
def test_custom_schema():
df = pd.DataFrame(
{
"date": [
"2021-01-01",
"2021-02-02",
"2021-02-03",
"2021-02-04",
"2021-02-05",
],
"price": [100, 200, 300, 400, 500],
"x": [1, 2, 3, 4, 5],
}
)
plot = chat2plot(df, schema_definition=CustomChartConfig)
ret = plot.query("Daily total sales in line chart", config_only=True)
assert isinstance(ret.config, CustomChartConfig)
assert ret.config.chart_type == "line"
assert ret.config.x_axis_name == "date"
assert ret.config.y_axis_name == "price"
assert ret.config.y_axis_aggregate.lower() == "sum"
def test_function_call():
df = pd.DataFrame(
{
"date": [
"2021-01-01",
"2021-02-02",
"2021-02-03",
"2021-02-04",
"2021-02-05",
],
"price": [100, 200, 300, 400, 500],
"x": [1, 2, 3, 4, 5],
}
)
for function_call in [False, True, "auto"]:
plot = chat2plot(df, function_call=function_call)
if function_call == "auto":
assert plot.function_call
else:
assert plot.function_call == function_call
ret = plot.query("Daily total sales in line chart", config_only=True)
assert ret.config.chart_type == schema.ChartType.LINE
assert ret.config.x.column == "date"
assert ret.config.y.column == "price"
assert ret.config.y.aggregation == schema.AggregationType.SUM
if plot.function_call:
assert any(
isinstance(msg, FunctionMessage) for msg in ret.conversation_history
)
else:
assert not any(
isinstance(msg, FunctionMessage) for msg in ret.conversation_history
)
def test_function_call_auto():
chat = ChatOpenAI(model_name="gpt-3.5-turbo")
plot = chat2plot(pd.DataFrame(), chat=chat)
assert not plot.function_call
chat = ChatOpenAI(model_name="gpt-4")
plot = chat2plot(pd.DataFrame(), chat=chat)
assert not plot.function_call
chat = ChatOpenAI(model_name="gpt-3.5-turbo-0613")
plot = chat2plot(pd.DataFrame(), chat=chat)
assert plot.function_call
chat = AzureChatOpenAI(openai_api_base="azure", openai_api_version="dummy")
plot = chat2plot(pd.DataFrame(), chat=chat)
assert not plot.function_call
| [] |
2024-01-10 | nyanp/chat2plot | chat2plot~chat2plot.py | import copy
import re
import traceback
from dataclasses import dataclass
from logging import getLogger
from typing import Any, Callable, Literal, Type, TypeVar
import altair as alt
import commentjson
import jsonschema
import pandas as pd
import pydantic
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseMessage, FunctionMessage, HumanMessage, SystemMessage
from plotly.graph_objs import Figure
from chat2plot.dataset_description import description
from chat2plot.dictionary_helper import delete_null_field
from chat2plot.prompt import (
JSON_TAG,
error_correction_prompt,
explanation_prompt,
system_prompt,
)
from chat2plot.render import draw_altair, draw_plotly
from chat2plot.schema import PlotConfig, ResponseType, get_schema_of_chart_config
_logger = getLogger(__name__)
T = TypeVar("T", bound=pydantic.BaseModel)
ModelDeserializer = Callable[[dict[str, Any]], T]
# These errors are caught within the application.
# Other errors (e.g. openai.error.RateLimitError) are propagated to user code.
_APPLICATION_ERRORS = (
pydantic.ValidationError,
jsonschema.ValidationError,
ValueError,
KeyError,
AssertionError,
)
@dataclass(frozen=True)
class Plot:
figure: alt.Chart | Figure | None
config: PlotConfig | dict[str, Any] | pydantic.BaseModel | None
response_type: ResponseType
explanation: str
conversation_history: list[BaseMessage] | None
class ChatSession:
"""chat with conversasion history"""
def __init__(
self,
chat: BaseChatModel,
df: pd.DataFrame,
system_prompt_template: str,
user_prompt_template: str,
description_strategy: str = "head",
functions: list[dict[str, Any]] | None = None,
):
self._system_prompt_template = system_prompt_template
self._user_prompt_template = user_prompt_template
self._chat = chat
self._conversation_history: list[BaseMessage] = [
SystemMessage(
content=system_prompt_template.format(
dataset=description(df, description_strategy)
)
)
]
self._functions = functions
@property
def history(self) -> list[BaseMessage]:
return copy.deepcopy(self._conversation_history)
def query(self, q: str, raw: bool = False) -> BaseMessage:
prompt = q if raw else self._user_prompt_template.format(text=q)
response = self._query(prompt)
return response
def _query(self, prompt: str) -> BaseMessage:
self._conversation_history.append(HumanMessage(content=prompt))
kwargs = {}
if self._functions:
kwargs["functions"] = self._functions
response = self._chat(self._conversation_history, **kwargs) # type: ignore
self._conversation_history.append(response)
if response.additional_kwargs.get("function_call"):
name = response.additional_kwargs["function_call"]["name"]
arguments = response.additional_kwargs["function_call"]["arguments"]
self._conversation_history.append(
FunctionMessage(name=name, content=arguments)
)
return response
def last_response(self) -> str:
return self._conversation_history[-1].content
class Chat2PlotBase:
@property
def session(self) -> ChatSession:
raise NotImplementedError()
@property
def function_call(self) -> bool:
return False
def query(self, q: str, config_only: bool = False, show_plot: bool = False) -> Plot:
raise NotImplementedError()
def __call__(
self, q: str, config_only: bool = False, show_plot: bool = False
) -> Plot:
return self.query(q, config_only, show_plot)
class Chat2Plot(Chat2PlotBase):
def __init__(
self,
df: pd.DataFrame,
chart_schema: Literal["simple"] | Type[pydantic.BaseModel],
*,
chat: BaseChatModel | None = None,
function_call: bool | Literal["auto"] = False,
language: str | None = None,
description_strategy: str = "head",
verbose: bool = False,
custom_deserializer: ModelDeserializer | None = None,
):
self._target_schema: Type[pydantic.BaseModel] = (
PlotConfig if chart_schema == "simple" else chart_schema # type: ignore
)
chat_model = _get_or_default_chat_model(chat)
self._function_call = (
_has_function_call_capability(chat_model)
if function_call == "auto"
else function_call
)
self._session = ChatSession(
chat_model,
df,
system_prompt("simple", self._function_call, language, self._target_schema),
"<{text}>",
description_strategy,
functions=[
get_schema_of_chart_config(self._target_schema, as_function=True)
]
if self._function_call
else None,
)
self._df = df
self._verbose = verbose
self._custom_deserializer = custom_deserializer
self._language = language
@property
def session(self) -> ChatSession:
return self._session
@property
def function_call(self) -> bool:
return self._function_call
def query(self, q: str, config_only: bool = False, show_plot: bool = False) -> Plot:
raw_response = self._session.query(q)
try:
if self._verbose:
_logger.info(f"request: {q}")
_logger.info(f"first response: {raw_response}")
return self._parse_response(q, raw_response, config_only, show_plot)
except _APPLICATION_ERRORS as e:
if self._verbose:
_logger.warning(traceback.format_exc())
msg = e.message if isinstance(e, jsonschema.ValidationError) else str(e)
error_correction = error_correction_prompt(self._function_call).format(
error_message=msg,
)
corrected_response = self._session.query(error_correction)
if self._verbose:
_logger.info(f"retry response: {corrected_response}")
try:
return self._parse_response(
q, corrected_response, config_only, show_plot
)
except _APPLICATION_ERRORS as e:
if self._verbose:
_logger.warning(e)
_logger.warning(traceback.format_exc())
return Plot(
None,
None,
ResponseType.FAILED_TO_RENDER,
"",
self._session.history,
)
def __call__(
self, q: str, config_only: bool = False, show_plot: bool = False
) -> Plot:
return self.query(q, config_only, show_plot)
def _parse_response(
self, q: str, response: BaseMessage, config_only: bool, show_plot: bool
) -> Plot:
if self._function_call:
if not response.additional_kwargs.get("function_call"):
raise ValueError("Function should be called")
function_call = response.additional_kwargs["function_call"]
json_data = commentjson.loads(function_call["arguments"])
explanation = self._session.query(
explanation_prompt(self._language, q), raw=True
).content
else:
explanation, json_data = parse_json(response.content)
try:
if self._custom_deserializer:
config = self._custom_deserializer(json_data)
else:
config = self._target_schema.parse_obj(json_data)
except _APPLICATION_ERRORS:
_logger.warning(traceback.format_exc())
# To reduce the number of failure cases as much as possible,
# only check against the json schema when instantiation fails.
jsonschema.validate(json_data, self._target_schema.schema())
raise
if self._verbose:
_logger.info(config)
if config_only or not isinstance(config, PlotConfig):
return Plot(
None, config, ResponseType.SUCCESS, explanation, self._session.history
)
figure = draw_plotly(self._df, config, show_plot)
return Plot(
figure, config, ResponseType.SUCCESS, explanation, self._session.history
)
class Chat2Vega(Chat2PlotBase):
def __init__(
self,
df: pd.DataFrame,
chat: BaseChatModel | None = None,
language: str | None = None,
description_strategy: str = "head",
verbose: bool = False,
):
self._session = ChatSession(
_get_or_default_chat_model(chat),
df,
system_prompt("vega", False, language, None),
"<{text}>",
description_strategy,
)
self._df = df
self._verbose = verbose
@property
def session(self) -> ChatSession:
return self._session
def query(self, q: str, config_only: bool = False, show_plot: bool = False) -> Plot:
res = self._session.query(q)
try:
explanation, config = parse_json(res.content)
if "data" in config:
del config["data"]
if self._verbose:
_logger.info(config)
except _APPLICATION_ERRORS:
_logger.warning(f"failed to parse LLM response: {res}")
_logger.warning(traceback.format_exc())
return Plot(
None, None, ResponseType.UNKNOWN, res.content, self._session.history
)
if config_only:
return Plot(
None, config, ResponseType.SUCCESS, explanation, self._session.history
)
try:
plot = draw_altair(self._df, config, show_plot)
return Plot(
plot, config, ResponseType.SUCCESS, explanation, self._session.history
)
except _APPLICATION_ERRORS:
_logger.warning(traceback.format_exc())
return Plot(
None,
config,
ResponseType.FAILED_TO_RENDER,
explanation,
self._session.history,
)
def __call__(
self, q: str, config_only: bool = False, show_plot: bool = False
) -> Plot:
return self.query(q, config_only, show_plot)
def chat2plot(
df: pd.DataFrame,
schema_definition: Literal["simple", "vega"] | Type[pydantic.BaseModel] = "simple",
chat: BaseChatModel | None = None,
function_call: bool | Literal["auto"] = "auto",
language: str | None = None,
description_strategy: str = "head",
custom_deserializer: ModelDeserializer | None = None,
verbose: bool = False,
) -> Chat2PlotBase:
"""Create Chat2Plot instance.
Args:
df: Data source for visualization.
schema_definition: Type of json format; "vega" for vega-lite compliant json, "simple" for chat2plot built-in
data structure. If you want a custom schema definition, pass a type inheriting from pydantic.BaseModel
as your own chart setting.
chat: The chat instance for interaction with LLMs.
If omitted, `ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-0613")` will be used.
function_call:
language: Language of explanations. If not specified, it will be automatically inferred from user prompts.
description_strategy: Type of how the information in the dataset is embedded in the prompt.
Defaults to "head" which embeds the contents of df.head(5) in the prompt.
"dtypes" sends only columns and types to LLMs and does not send the contents of the dataset,
which allows for privacy but may reduce accuracy.
custom_deserializer: A custom function to convert the json returned by the LLM into a object.
verbose: If `True`, chat2plot will output logs.
Returns:
Chat instance.
"""
if schema_definition == "simple":
return Chat2Plot(
df,
"simple",
chat=chat,
language=language,
description_strategy=description_strategy,
verbose=verbose,
custom_deserializer=custom_deserializer,
function_call=function_call,
)
if schema_definition == "vega":
return Chat2Vega(df, chat, language, description_strategy, verbose)
elif issubclass(schema_definition, pydantic.BaseModel):
return Chat2Plot(
df,
schema_definition,
chat=chat,
language=language,
description_strategy=description_strategy,
verbose=verbose,
custom_deserializer=custom_deserializer,
function_call=function_call,
)
else:
raise ValueError(
f"schema_definition should be one of [simple, vega] or pydantic.BaseClass (given: {schema_definition})"
)
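# Hedged usage sketch (assumption: an OpenAI API key is configured in the environment).
# It mirrors the docstring above: build an instance with the default "simple" schema and query it;
# ret.config is a PlotConfig and ret.figure a plotly Figure.
def _example_chat2plot_usage():
    df = pd.DataFrame({"category": ["A", "B", "A"], "price": [100, 200, 150]})
    c2p = chat2plot(df)
    ret = c2p.query("Average price per category")
    return ret.config, ret.figure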
def _extract_tag_content(s: str, tag: str) -> str:
m = re.search(rf"<{tag}>(.*)</{tag}>", s, re.MULTILINE | re.DOTALL)
if m:
return m.group(1)
else:
m = re.search(rf"<{tag}>(.*)<{tag}>", s, re.MULTILINE | re.DOTALL)
if m:
return m.group(1)
return ""
def parse_json(content: str) -> tuple[str, dict[str, Any]]:
"""parse json and split contents by pre-defined tags"""
json_part = _extract_tag_content(content, "json") # type: ignore
if not json_part:
raise ValueError(f"failed to find {JSON_TAG[0]} and {JSON_TAG[1]} tags")
explanation_part = _extract_tag_content(content, "explain")
if not explanation_part:
explanation_part = _extract_tag_content(content, "explanation")
# LLM rarely generates JSON with comments, so use the commentjson package instead of json
return explanation_part.strip(), delete_null_field(commentjson.loads(json_part))
def _get_or_default_chat_model(chat: BaseChatModel | None) -> BaseChatModel:
if chat is None:
return ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-0613") # type: ignore
return chat
def _has_function_call_capability(chat: BaseChatModel) -> bool:
if not isinstance(chat, ChatOpenAI):
return False
return any(
chat.model_name.startswith(prefix)
for prefix in ["gpt-4-0613", "gpt-3.5-turbo-0613"]
)
| [] |
2024-01-10 | velocitatem/ai-hackathon-ie-2023 | clai.py | """
A collection of functions used to extract data from a PDF file and return a JSON object for the given schema under term-sheets.
"""
from rag import turn_path_to_json
from pydantic import BaseModel, Field
from typing import List, Optional
import json
from langchain.document_loaders import PyPDFLoader
from openai import OpenAI
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import re
import pandas as pd
class Beta(BaseModel):
Isin: str = Field(..., description="Unique identifier for the structured product, following the International Securities Identification Number (ISIN) format.")
Issuer: str = Field(..., description="Name of the entity issuing the structured product. This should be the full legal name of the issuer.")
Ccy: str = Field(..., description="The three-letter currency code representing the currency of the product, as per ISO 4217 standard. Example: 'EUR'.")
Underlying: List[str] = Field(..., description="List of underlying assets or indices associated with the product. Provide up to five valid tickers. Example: ['SX5E', 'UKX', 'SPX'].")
Launchdate: str = Field(..., description="The launch or initial valuation date of the product, marking the start of its lifecycle, in 'dd/mm/yyyy' format. Example: '31/12/2021'. This date sets the initial conditions for the product. Also called the Trade Date or Initial Valuation Date. ")
Maturity: str = Field(..., description="The maturity date of the product, indicating its expiration and the end of its term, in 'dd/mm/yyyy' format. It's the date when final settlements are made based on the final valuation. Example: '31/12/2023'.")
Barrier: int = Field(..., description="The barrier level of the product, specified in percentage terms. This represents a critical price level for features like knock-in. Example: 70 (indicating 70% of the initial price).")
def betas_to_csv(items: list, file_name : str) -> None:
"""
    Takes a list of Beta-shaped dicts and saves them to a CSV file with the given name.
"""
beta_field_to_csv = {
"Isin": "Isin",
"Issuer": "Issuer",
"Ccy": "Ccy",
"Underlying": "Underlying(s)",
"Strike": "Strike",
"Launchdate": "Launch Date",
"Finalvalday": "Final Val. Day",
"Maturity": "Maturity",
"Barrier": "Barrier",
"Cap": "Cap"
}
# some items might be missing fields
# we need to add them
for item in items:
for field in beta_field_to_csv:
if field not in item:
item[field] = "Nan"
# maintain the order of the fields as specified in the schema
beta_field_order = beta_field_to_csv.keys()
# create a dataframe
df = pd.DataFrame(items, columns=beta_field_order)
# rename to dict vals
df.rename(columns=beta_field_to_csv, inplace=True)
# save it to a csv file
df.to_csv(file_name, index=False)
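# Hedged usage sketch: betas_to_csv accepts plain dicts with (a subset of) the Beta fields and
# fills anything missing with "Nan" before writing. The values below are made up for illustration.
def _example_betas_to_csv():
    rows = [{"Isin": "XS0000000000", "Issuer": "Example Bank", "Ccy": "EUR",
             "Underlying": ["SX5E"], "Launchdate": "31/12/2021",
             "Maturity": "31/12/2023", "Barrier": 70}]
    betas_to_csv(rows, "term_sheets.csv")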
# keywords = ['isin','issuer','ccy','currency','underlying','underlyings','strike','strikes','launch','date','dates','final valuation','day','maturity','cap','barrier','redemption','amount']
keywords = [
'issuer', 'issuing','issuing entity', 'issuing company', 'issuing corporation', 'issuer firm', 'issuing institution',
'currency', 'ccy', 'money','monetary', 'monetary unit', 'legal tender', 'fiat currency', 'exchange medium',
    'underlying', 'assets', 'underlying assets', 'base assets', 'core assets', 'fundamental assets',
'strike date', 'strike day', 'exercise date', 'option strike date', 'option exercise date', 'strike',
'final valuation date', 'last valuation date', 'ultimate valuation date', 'end valuation date',
'launch date', 'start date', 'inception date', 'commencement date', 'beginning date', 'opening date',
'maturity date', 'expiration date', 'expiry date', 'termination date', 'end date', 'last date', 'due date',
'isin', 'international securities identification number', 'security identifier', 'stock identifier','instrument identifier',
'strike', 'strikes', 'strike price', 'exercise price', 'option price', 'target price',
    'launch', 'launch date', 'initiation date', 'start date', 'inception date', 'commence launch', 'begin launch', 'inaugurate launch',
'date', 'dates', 'day', 'days','time', 'period', 'periods', 'moment', 'calendar day',
'final valuation', 'last valuation', 'ultimate valuation', 'final assessment', 'end valuation',
'business day', 'trading day', 'working day',
'cap','cap level','boundary', 'ceiling', 'limit', 'maximum', 'upper bound', 'upper limit','top level',
'barrier', 'threshold', 'limit', 'boundary', 'obstacle', 'hindrance', 'trigger level','barrier point',
# hard coded values
'percent', 'max', ' x ', ' × ', 'redemption date', 'redemption amount', 'usd', 'eur', 'barrier event',
"%"
]
def count_words(text : str) -> int:
"""
Counts the number of words in a string.
"""
words = re.findall(r'\w+', text)
return len(words)
def count_file_words(data : list) -> int:
"""
Counts the number of words in a list of pages.
"""
word_count = 0
for page in data:
word_count += count_words(page.page_content)
print(word_count)
return word_count
def format_response_to_json(response_string : str, gpt4 : bool = False) -> dict:
"""
Takes a string and formats it into a JSON object. This is used to parse the output of the previous model.
"""
client = OpenAI()
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106" if not gpt4 else "gpt-4-1106-preview",
messages=[
{
"role": "system",
"content": "You are an assistant specialized in financial data analysis and extraction. Your task is to meticulously process a structured product schema and accurately populate a form with relevant data extracted from a provided document. It is your job to to extract a solid JSON from the provided message. If any values are speculative or uncertain, you should not include them in the JSON. If anything is yet to be extracted, ignore it."
},
{
"role": "user",
"content": "This is the message you need to extract a JSON from: " + response_string
},
{
"role": "user",
"content": "The following are fields that need to be extracted from the document: " + Beta.schema_json(indent=2)
},
{
"role": "user",
"content": "Think carefully and think step by step. Take a deep breath and give me an accurate JSON. DO NOT create any new fields. If you are not sure about a value, leave it blank."
}
],
response_format={'type': "json_object"}
)
data = completion.choices[0].message.content
parsed = json.loads(data)
return parsed
def extract_data(file_name : str, gpt4: bool = False) -> dict:
"""
Extracts data from a PDF file and returns a JSON object.
"""
questions = [
"Can you list the strike prices for each underlying asset for this product? The strike price is the set price at which an option contract can be bought or sold when it is exercised.",
"What is the final valuation day for this product? This is the date set for the final assessment of the product's value before its maturity.",
"Is there a cap on the product's return mentioned in the document? If so, what is it? The cap is the maximum limit on the return that the product can generate.",
]
# strike, final valuation, cap
# ['completed', 'completed', 'completed', 'completed', 'in_progress', 'completed', 'in_progress', 'completed', 'in_progress', 'completed']
hard = turn_path_to_json(file_name, questions)
client = OpenAI()
path = file_name
loader = PyPDFLoader(path)
data=loader.load()
#r'\b(?:\d{1,2}[-\/.]\d{1,2}[-\/.]\d{2,4}|\d{2,4}[-\/.]\d{1,2}[-\/.]\d{1,2}|(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+\d{1,2}[,.]?[-\s]*\d{2,4}|\d{1,2}\s+(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)[,.\s]+\d{2,4})\b'
stop_words = set(stopwords.words('english'))
regex_pattern = r'\b(?: '+'|'.join(map(re.escape, keywords)) + r')\b|\b(?:\d{1,2}[-\/.]\d{1,2}[-\/.]\d{2,4}|\d{2,4}[-\/.]\d{1,2}[-\/.]\d{1,2}|(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+\d{1,2}[,.]?[-\s]*\d{2,4}|\d{1,2}\s+(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)[,.\s]+\d{2,4})\b'
seen = set()
raw = ""
# TODO Here hte issue is that we are minifying all the pages, which is not optimal
# we should check if a whole document is too long, and only then minify it
# We might be able to do this quickly with the document object but im not sure
if gpt4 or count_file_words(data) < 10000:
# pass everything to the model
for page in data:
hasOccurence = page.page_content is not None
            shouldAdd = hasOccurence  # hasOccurence is already a boolean flag
if shouldAdd:
raw += page.page_content + " "
else:
print("Minifying")
# trim the data
for page in data:
filtered_page = re.search(regex_pattern, page.page_content, re.IGNORECASE)
hasOccurence = filtered_page is not None
            shouldAdd = hasOccurence  # only keep pages that matched a keyword or date pattern
if shouldAdd:
raw += page.page_content + " "
raw = raw.replace("\n", " ")
# add stop words
tokenized_raw = word_tokenize(raw)
raw = ""
for w in tokenized_raw:
if w not in stop_words:
raw += w
print("New length: ", count_words(raw))
print("Running Query")
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106" if not gpt4 else "gpt-4-1106-preview",
messages=[
{
"role": "system",
"content": "You are an assistant specialized in financial data analysis and extraction. Your task is to meticulously process a structured product schema and accurately populate a form with relevant data extracted from a provided document."
},
{
"role": "user",
"content": "The structured product schema is defined as follows:" + Beta.schema_json(indent=2)
},
{
"role": "user",
"content": "Here is the document with the necessary data:"
},
{
"role": "user",
"content": raw
},
{
"role": "user",
"content": "Please extract the data from the document"
}
],
)
# get the status of the completion
print(completion)
# combine the data
combined = {}
ct = completion.choices[0].message.content
parsed = format_response_to_json(ct, gpt4=gpt4)
for key in parsed:
combined[key] = parsed[key]
for key in hard:
combined[key] = hard[key]
return combined
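# Hedged end-to-end sketch (assumptions: an OpenAI key is configured and 'term_sheet.pdf' is a
# hypothetical input file). Runs the extraction on one PDF and writes the result with betas_to_csv.
def _example_extract_pipeline():
    import asyncio
    extracted = asyncio.run(extract_data("term_sheet.pdf", gpt4=False))
    betas_to_csv([extracted], "extracted_term_sheets.csv")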
| [
"Here is the document with the necessary data:",
"You are an assistant specialized in financial data analysis and extraction. Your task is to meticulously process a structured product schema and accurately populate a form with relevant data extracted from a provided document. It is your job to to extract a solid JSON from the provided message. If any values are speculative or uncertain, you should not include them in the JSON. If anything is yet to be extracted, ignore it.",
"This is the message you need to extract a JSON from: PLACEHOLDER",
"Please extract the data from the document",
"You are an assistant specialized in financial data analysis and extraction. Your task is to meticulously process a structured product schema and accurately populate a form with relevant data extracted from a provided document.",
"Think carefully and think step by step. Take a deep breath and give me an accurate JSON. DO NOT create any new fields. If you are not sure about a value, leave it blank."
] |
2024-01-10 | sghpjuikit/player | app~speech-recognition-whisper~util_llm.py | import gpt4all.gpt4all
from gpt4all import GPT4All # https://docs.gpt4all.io/index.html
from gpt4all.gpt4all import empty_chat_session
from threading import Thread
from queue import Queue
from util_tty_engines import Tty
from util_write_engine import Writer
from util_itr import teeThreadSafe, teeThreadSafeEager, progress, chain, SingleLazyIterator
class ChatProceed:
def __init__(self, sysPrompt: str, userPrompt: str | None):
self.sysPrompt = sysPrompt
self.userPrompt = userPrompt
self.messages = [ ]
self.messages.append({ "role": "system", "content": self.sysPrompt })
if (userPrompt is not None): self.messages.append({ "role": "user", "content": self.userPrompt })
@classmethod
def start(cls, sysPrompt: str):
return cls(sysPrompt, None)
class ChatIntentDetect(ChatProceed):
def __init__(self, userPrompt: str):
super().__init__(
"From now on, identify user intent by returning one of following functions. " +
"Only respond in format function: `COM-function-COM`. " +
"Funs: \n" +
"- open-weather-info\n" +
"- play-music\n" +
"- stop-music\n" +
"- play-previous-song\n" +
"- play-next-song\n" +
"- what-time-is-it\n" +
"- what-date-is-it\n" +
"- unidentified // no other intent seems probable",
userPrompt
)
class Chat:
def __init__(self, userPrompt: str):
self.userPrompt = userPrompt
class ChatStart:
def __init__(self):
pass
class ChatStop:
def __init__(self):
pass
class LlmBase:
def __init__(self):
self._stop = False
self.queue = Queue()
self.generating = False
def start(self):
pass
def stop(self):
"""
Stop processing all elements and release all resources
"""
self._stop = True
def __call__(self, prompt: ChatStart | Chat | ChatProceed | ChatStop):
self.queue.put(prompt)
def _loop(self):
pass
class LlmNone(LlmBase):
def __init__(self, speak: Tty, write: Writer):
super().__init__()
self.write = write
self.speak = speak
# home: https://github.com/nomic-ai/gpt4all
# doc https://docs.gpt4all.io/gpt4all_python.html
class LlmGpt4All(LlmBase):
def __init__(self, model: str, speak: Tty, write: Writer, sysPrompt: str, maxTokens: int, temp: float, topp: float, topk: int):
super().__init__()
self.write = write
        self.speak = speak
        self.model = model
# gpt4all.gpt4all.DEFAULT_MODEL_DIRECTORY = chatDir
self.sysPrompt = sysPrompt
self.maxTokens = maxTokens
self.temp = temp
self.topp = topp
self.topk = topk
def start(self):
Thread(name='LlmGpt4All', target=self._loop, daemon=True).start()
def _loop(self):
llm = None
while not self._stop:
e = self.queue.get()
# load model lazily
            if llm is None: llm = GPT4All(self.model, allow_download=False)
if isinstance(e, ChatStart):
with llm.chat_session(self.sysPrompt):
while not self._stop:
t = self.queue.get()
if isinstance(t, ChatStart):
pass
if isinstance(t, ChatStop):
break
if isinstance(t, Chat):
def stop_on_token_callback(token_id, token_string):
return not self._stop and self.generating
# generate & stream response
self.generating = True
tokens = llm.generate(t.userPrompt, streaming=True, max_tokens=self.maxTokens, top_p=self.topp, top_k=self.topk, temp=self.temp, callback=stop_on_token_callback)
consumer, tokensWrite, tokensSpeech, tokensText = teeThreadSafeEager(tokens, 3)
self.write(chain(['CHAT: '], progress(consumer, tokensWrite)))
self.speak(tokensSpeech)
consumer()
text_all = ''.join(tokensText)
self.generating = False
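# Hedged usage sketch (assumption: suitable speak/write objects from util_tty_engines and
# util_write_engine are constructed elsewhere). It shows the intended message protocol: the worker
# thread started by start() consumes ChatStart, Chat and ChatStop objects posted to the queue.
def _example_drive_llm(llm: LlmBase):
    llm.start()
    llm(ChatStart())
    llm(Chat("What is the weather like?"))
    llm(ChatStop())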
# home https://github.com/openai/openai-python
# howto https://cookbook.openai.com/examples/how_to_stream_completions
class LlmHttpOpenAi(LlmBase):
def __init__(self, url: str, bearer: str, modelName: str, speak: Tty, write: Writer, sysPrompt: str, maxTokens: int, temp: float, topp: float, topk: int):
super().__init__()
self.write = write
self.speak = speak
self.url = url
self.bearer = bearer
self.modelName = modelName
self.sysPrompt = sysPrompt
self.maxTokens = maxTokens
self.temp = temp
self.topp = topp
self.topk = topk
def start(self):
Thread(name='LlmHttpOpenAi', target=self._loop, daemon=True).start()
def _loop(self):
try:
from openai import OpenAI
from httpx import Timeout
import openai
except ImportError as e:
self.write("OpenAi python module failed to load")
return
chat: ChatProceed | None = None
client = OpenAI(api_key=self.bearer, base_url=self.url)
while not self._stop:
e = self.queue.get()
if isinstance(e, ChatStart):
if chat is not None:
chat = ChatProceed.start(self.sysPrompt)
if isinstance(e, ChatStop):
chat = None
if isinstance(e, Chat | ChatProceed):
try:
self.generating = True
isCommand = isinstance(e, ChatIntentDetect)
if isinstance(e, Chat):
if (chat is None): chat = ChatProceed.start(self.sysPrompt)
chat.messages.append({ "role": "user", "content": e.userPrompt })
def process():
messages = []
if isinstance(e, Chat): messages = chat.messages
if isinstance(e, ChatProceed): messages = e.messages
stream = client.chat.completions.create(
model=self.modelName, messages=messages, max_tokens=self.maxTokens, temperature=self.temp, top_p=self.topp,
stream=True, timeout=Timeout(None, connect=5.0),
stop = "-COM" if isCommand else [],
)
try:
for chunk in stream:
if self._stop or not self.generating: break
if chunk.choices[0].delta.content is not None: yield chunk.choices[0].delta.content
finally:
stream.response.close()
consumer, tokensWrite, tokensSpeech, tokensText = teeThreadSafeEager(process(), 3)
commandIterator = SingleLazyIterator()
if not isCommand: self.write(chain(['CHAT: '], progress(consumer, tokensWrite)))
if isCommand: self.write(chain(['COM-DET: '], progress(commandIterator, commandIterator)))
if not isCommand: self.speak(tokensSpeech)
consumer()
text = ''.join(tokensText)
if len(text)==0:
self.write("ERR: chat responded with empty message")
else:
if isinstance(e, Chat):
chat.messages.append({ "role": "assistant", "content": text })
if isCommand:
command = text.strip().lstrip("COM-").rstrip("-COM").strip()
command = command.replace('-', ' ')
command = command.replace('unidentified', e.userPrompt)
commandIterator.put(command)
self.generating = False
except openai.APIConnectionError as e:
self.write("OpenAI server could not be reached")
self.write(e.__cause__)
except openai.RateLimitError as e:
self.write("OpenAI returned 429 status code - rate limit error")
except openai.APIStatusError as e:
self.write(f"OpenAI returned non {e.status_code} status code")
self.write(e.response)
| [] |
2024-01-10 | burcuyilmaz-app/https---github-com-burcuyilmaz-app-kaareai | Hello.py | import streamlit as st
from langchain import OpenAI
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
import openai
# Function to generate an in-depth analysis response using OpenAI
def generate_response(chat_text, openai_api_key):
openai.api_key = openai_api_key
prompt = (
"Here is a conversation between two people. Analyze this conversation and "
"give insights into their communication style, tell them their key personal traits, "
"and provide recommendations to enhance their relationship:\n\n"
f"{chat_text}"
)
try:
response = openai.Completion.create(
engine="text-davinci-003", # Replace with the appropriate engine
prompt=prompt,
temperature=0.5,
max_tokens=1024,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return response.choices[0].text.strip()
except Exception as e:
return f"An error occurred: {e}"
# Placeholder function to analyze chat using OpenAI's API
def analyze_chat(chat_text, openai_api_key):
openai.api_key = openai_api_key
try:
response = openai.Completion.create(
engine="text-davinci-003", # Replace with the appropriate engine
prompt="Analyze the following chat and provide insights:\n" + chat_text,
temperature=0.7,
max_tokens=150,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return response.choices[0].text.strip()
except Exception as e:
return f"An error occurred: {e}"
# Main function where the Streamlit app is defined
def main():
st.set_page_config(page_title='👋 Welcome to the Chat Analyzer with Kaare')
st.title('👋 Welcome to the Chat Analyzer with Kaare')
# Intro and security assurance
st.markdown("""
## Introduction
Kaare AI provides insights into your personal patterns and communication styles by analyzing your chat history by using ChatGPTs APIs.
## Security and Privacy
**Your privacy is very important to us!** We ensure that:
- All conversations are processed securely.
- We do not store your conversations.
- We do not have access to any of the data you upload here.
For any questions or feedback, feel free to reach out at [[email protected]](mailto:[email protected]).
""")
# WhatsApp Chat Analyzer part
st.header('WhatsApp Chat Analyzer')
chat_file = st.file_uploader('Upload WhatsApp chat text file', type='txt')
openai_api_key = st.text_input('OpenAI API Key for Chat Analysis', type='password')
if st.button('Analyze Chat'):
if chat_file is not None and openai_api_key:
chat_text = chat_file.getvalue().decode("utf-8")
with st.spinner('Analyzing chat...'):
analysis_result = analyze_chat(chat_text, openai_api_key)
st.success('Chat analysis is complete!')
st.text_area("Analysis Result", analysis_result, height=300)
else:
st.error('Please upload a chat file and provide the OpenAI API key.')
# Part for summarization using Langchain and OpenAI
st.header('Analyze your Chat with Kaare AI')
txt_input = st.text_area('Enter your chat to analyze', '', height=200)
openai_api_key_summ = st.text_input('OpenAI API Key for Summarization', type='password')
if st.button('Submit for Analyze'):
if openai_api_key_summ and txt_input:
with st.spinner('Calculating...'):
response = generate_response(txt_input, openai_api_key_summ)
st.success('Analysis complete!')
st.text_area("Analysis Result", response, height=300)
else:
st.error('Please enter text to analyze and provide the OpenAI API key.')
# Run the main function
if __name__ == "__main__":
main() | [
"Here is a conversation between two people. Analyze this conversation and give insights into their communication style, tell them their key personal traits, and provide recommendations to enhance their relationship:\n\nPLACEHOLDER",
"Analyze the following chat and provide insights:\nPLACEHOLDER"
] |
2024-01-10 | iamcalledned/chatbot-with-login | delete_thread.py | from openai import OpenAI
from config import Config
def delete_thread():
# Initialize the OpenAI client with your API key
client = OpenAI()
OPENAI_API_KEY = Config.OPENAI_API_KEY
# Prompt the user to enter the thread ID
thread_id = input("Please enter the thread ID to delete: ")
# Attempt to delete the thread
try:
response = client.beta.threads.delete(thread_id)
print("Thread deleted successfully.")
print(response)
except Exception as e:
print(f"An error occurred: {e}")
if __name__ == "__main__":
delete_thread() | [] |
2024-01-10 | iamcalledned/chatbot-with-login | get_recipe_card.py | import time
import sys
import os
# Get the directory of the current script
current_script_path = os.path.dirname(os.path.abspath(__file__))
# Set the path to the parent directory (one folder up)
parent_directory = os.path.dirname(current_script_path)
# Add the config directory to sys.path
sys.path.append(os.path.join(parent_directory, 'database'))
sys.path.append(os.path.join(parent_directory, 'config'))
from openai_utils_new_thread import create_thread_in_openai, is_thread_valid
from openai_utils_send_message import send_message
from openai import OpenAI
from chat_bot_database import get_active_thread_for_user, insert_thread, insert_conversation, create_db_pool
import datetime
import logging
import asyncio
import aiomysql
from config import Config
from classify_content import classify_content
import re
import json
# Other imports as necessary
OPENAI_API_KEY = Config.OPENAI_API_KEY
# Initialize OpenAI client
openai_client = OpenAI()
openai_client.api_key = Config.OPENAI_API_KEY
client = OpenAI()
async def get_recipe_card(recipe_text):
prompt = """Format the following recipe into a JSON structure for database entry, with keys for 'name', 'time_requirements' (including 'prep', 'cook', 'total'), 'ingredients' (as an array, with separate arrays for multiple parts), and 'instructions' (as an array, with separate arrays for multiple parts)"""
# Append the prompt to the recipe text
modified_message = f"{prompt}{recipe_text}"
print("creating recipe card")
response = openai_client.chat.completions.create(
model='gpt-3.5-turbo',
messages=[
{"role": "user", "content": modified_message},
],
max_tokens=1000,
n=1,
stop=None,
temperature=0.7,
frequency_penalty=0.7,
presence_penalty=0.7
)
recipe_card = response.choices[0].message.content
print("recipe card", recipe_card)
return recipe_card
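# Hedged usage sketch: get_recipe_card is a coroutine, so it must be awaited or driven by asyncio.
# The recipe text below is a made-up stand-in; the call returns the model's JSON recipe card string.
def _example_recipe_card():
    sample = "Grilled cheese: butter two slices of bread, add cheese, grill 3 minutes per side."
    return asyncio.run(get_recipe_card(sample))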
| [
"Format the following recipe into a JSON structure for database entry, with keys for 'name', 'time_requirements' (including 'prep', 'cook', 'total'), 'ingredients' (as an array, with separate arrays for multiple parts), and 'instructions' (as an array, with separate arrays for multiple parts)PLACEHOLDER",
"Format the following recipe into a JSON structure for database entry, with keys for 'name', 'time_requirements' (including 'prep', 'cook', 'total'), 'ingredients' (as an array, with separate arrays for multiple parts), and 'instructions' (as an array, with separate arrays for multiple parts)"
] |
2024-01-10 | iamcalledned/chatbot-with-login | chat_bot_socket.py | #chat_bot_socket.py
import sys
import os
import asyncio
import json
import websockets
import ssl
import logging
from uuid import uuid4
from openai_utils_generate_answer import generate_answer
from config import Config
# Importing database functions from database.py
from chat_bot_database import create_db_pool, get_user_info_by_session_id
# Other imports as necessary
OPENAI_API_KEY = Config.OPENAI_API_KEY
log_file_path = '/home/ubuntu/whattogrill-backend/logs/chat_bot_logs.txt'
logging.basicConfig(
filename=log_file_path,
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Dictionary to store user_id: websocket mapping
connections = {}
import traceback # Import traceback for detailed error logging
# Async function to create a connection pool
async def chatbot_handler(websocket, path):
userID = None # Initialize userID to None
try:
print(f"New WebSocket connection request from {websocket.remote_address}")
initial_data = await websocket.recv()
#print(f"Received initial data: {initial_data}") # Log the received data
initial_data = json.loads(initial_data)
session_id = initial_data.get('session_id', '')
if session_id:
user_info = await get_user_info_by_session_id(session_id, app_state.pool)
#print(f"User info retrieved: {user_info}") # Log user info
if user_info:
userID = user_info['username']
connections[userID] = websocket
print(f"User {userID} connected with WebSocket from {websocket.remote_address}") # Log successful connection
else:
print(f"Invalid session ID: {session_id}") # Log invalid session
await websocket.send(json.dumps({'error': 'Invalid session'}))
return
else:
await websocket.send(json.dumps({'error': 'Session ID required'}))
return
while True:
data = await websocket.recv()
try:
data = json.loads(data)
except json.JSONDecodeError:
print(f"Received malformed data from {websocket.remote_address}")
continue
userID = user_info.get('username', '')
print("userID from gernerate answer", userID)
uuid = str(uuid4())
message = data.get('message', '')
user_ip = websocket.remote_address[0]
response_text = await generate_answer(app_state.pool, userID, message, user_ip, uuid)
response = {'response': response_text}
await websocket.send(json.dumps(response))
print(f"Processed message from user {userID} at IP {user_ip}")
except websockets.exceptions.ConnectionClosed as e:
print(f"WebSocket connection closed with exception for user {userID}: {e}. Reason: {e.reason}. Code: {e.code}")
if userID in connections:
del connections[userID]
except Exception as e:
print(f"Unhandled exception in chatbot_handler for user {userID}: {e}")
print("Exception Traceback: " + traceback.format_exc())
finally:
# Log when a WebSocket disconnects
print(f"WebSocket disconnected for user {userID}")
# Main function
if __name__ == '__main__':
server_address = '172.31.91.113'
server_port = 8055
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ssl_context.load_cert_chain('/home/ubuntu/whattogrill-backend/bot/fullchain.pem', '/home/ubuntu/whattogrill-backend/bot/privkey.pem')
pool = asyncio.get_event_loop().run_until_complete(create_db_pool())
print("created db pool")
app_state = type('obj', (object,), {'pool': pool})
start_server = websockets.serve(chatbot_handler, server_address, server_port, ssl=ssl_context)
print('Starting WebSocket server...')
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
| [] |
2024-01-10 | iamcalledned/chatbot-with-login | name_recipe.py | # generate_answer.py
import time
import sys
import os
# Get the directory of the current script
current_script_path = os.path.dirname(os.path.abspath(__file__))
# Set the path to the parent directory (one folder up)
parent_directory = os.path.dirname(current_script_path)
# Add the config directory to sys.path
sys.path.append(os.path.join(parent_directory, 'database'))
sys.path.append(os.path.join(parent_directory, 'config'))
from openai_utils_new_thread import create_thread_in_openai, is_thread_valid
from openai_utils_send_message import send_message
from openai import OpenAI
from chat_bot_database import get_active_thread_for_user, insert_thread, insert_conversation, create_db_pool
import datetime
import logging
import asyncio
import aiomysql
from config import Config
from classify_content import classify_content
import re
# Other imports as necessary
OPENAI_API_KEY = Config.OPENAI_API_KEY
# Initialize OpenAI client
openai_client = OpenAI(api_key=Config.OPENAI_API_KEY)
def name_recipe(recipe_text):
prompt = "Please give this recipe a fun name and only respond with the recipe name you pick"
# Append the prompt to the recipe text
modified_message = f"{prompt}{recipe_text}"
print("Naming recipe")
response = openai_client.chat.completions.create(
model='gpt-3.5-turbo',
messages=[
{"role": "user", "content": modified_message},
],
max_tokens=1000,
n=1,
stop=None,
temperature=0.7,
frequency_penalty=0.7,
presence_penalty=0.7
)
title = response.choices[0].message.content
return title
| [
"Please give this recipe a fun name and only respond with the recipe name you pick",
"Please give this recipe a fun name and only respond with the recipe name you pickPLACEHOLDER"
] |
2024-01-10 | iamcalledned/chatbot-with-login | websocket.py | import asyncio
import json
import logging
import ssl
from uuid import uuid4
import traceback
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
from starlette.endpoints import WebSocketEndpoint
from openai_utils_generate_answer import generate_answer
from config import Config
from chat_bot_database import create_db_pool, get_user_info_by_session_id, save_recipe_to_db, clear_user_session_id, get_user_id, favorite_recipe, get_recipe_for_printing
from process_recipe import process_recipe
from fastapi import APIRouter
from fastapi import Request
import redis
from redis.exceptions import RedisError
from get_recipe_card import get_recipe_card
import spacy
import re
from starlette.websockets import WebSocket
# Initialize Redis client
redis_client = redis.Redis(host='localhost', port=6379, db=0)
# Initialize FastAPI app
app = FastAPI()
router = APIRouter()
OPENAI_API_KEY = Config.OPENAI_API_KEY
connections = {}
tasks = {} # Dictionary to track scheduled tasks for session cleanup
log_file_path = Config.LOG_PATH
LOG_FORMAT = 'WEBSOCKET - %(asctime)s - %(processName)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(
filename=log_file_path,
level=logging.DEBUG,
format=LOG_FORMAT
)
# Async function to create a connection pool
async def create_pool():
return await create_db_pool()
@router.post("/logout")
async def logout(request: Request):
print("session_id passed in logout", session_id)
session_id = request.json().get('session_id', '')
if session_id:
# Remove session data from Redis
redis_client.delete(session_id)
# Additional cleanup if necessary
username = connections.pop(session_id, None)
if username:
print(f"User {username} logged out and disconnected.")
return {"message": "Logged out successfully"}
@app.on_event("startup")
async def startup_event():
app.state.pool = await create_pool()
print("Database pool created")
# Function to schedule session data cleanup
async def clear_session_data_after_timeout(session_id, username):
try:
await asyncio.sleep(3600) # wait an hour before cleaning
# Check if the session still exists before clearing
if redis_client.exists(session_id):
redis_client.delete(session_id)
await clear_user_session_id(app.state.pool, session_id)
print(f"Session data cleared for user {username}")
# Send a WebSocket message to the client to log out
if username in connections:
websocket = connections[username]
await websocket.send_text(json.dumps({'action': 'force_logout'}))
except Exception as e:
print(f"Error in session cleanup task for {username}: {e}")
@app.websocket("/")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
cookies = websocket.cookies
session_id_from_cookies = cookies.get('session_id')
print("session ID from cookies", session_id_from_cookies)
username = None
async def ping_client():
while True:
try:
await websocket.send_text(json.dumps({'action': 'ping'}))
await asyncio.sleep(30) # Send a ping every 30 seconds
except Exception as e:
print(f"Error sending ping: {e}")
break
ping_task = asyncio.create_task(ping_client())
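# The ping task is an application-level keepalive: the client is expected to reply with a 'pong' action, which is handled below by renewing the Redis session expiry.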
try:
initial_data = await websocket.receive_text()
initial_data = json.loads(initial_data)
#session_id = initial_data.get('session_id', '')
session_id = session_id_from_cookies
print("initial data", initial_data)
print("got a live one, welcome")
if session_id:
session_data = redis_client.get(session_id)
if session_data:
session_data = json.loads(session_data)
username = session_data['username']
# Renew the session expiry time upon successful connection
redis_client.expire(session_id, 3600) # Reset expiry to another hour
else:
await websocket.send_text(json.dumps({'action': 'redirect_login', 'error': 'Invalid session'}))
#await websocket.send_text(json.dumps({'error': 'Invalid session'}))
return
else:
await websocket.send_text(json.dumps({'action': 'redirect_login', 'error': 'Session ID required'}))
#await websocket.send_text(json.dumps({'error': 'Session ID required'}))
return
while True:
data = await websocket.receive_text()
data_dict = json.loads(data)
message = data_dict.get('message', '')
#session_id = data_dict.get('session_id', '')
session_id = session_id_from_cookies
# Validate session_id
if not session_id or not redis_client.exists(session_id):
await websocket.send_text(json.dumps({'action': 'redirect_login', 'error': 'Invalid or expired session'}))
continue
# Renew the session expiry time
redis_client.expire(session_id, 3600)
print("data dict", data_dict)
print("data", data)
#print("data_dict from receive_text:", data_dict)
if data_dict.get('action') == 'pong':
redis_client.expire(session_id, 3600) # Reset expiry to another hour
continue
# Renew the session expiry time after receiving each message
redis_client.expire(session_id, 3600) # Reset expiry to another hour
print("extended redis")
if 'action' in data_dict and data_dict['action'] == 'save_recipe':
# Handle the save recipe action
# Initialize save_result with a default value
save_result = 'not processed' # You can set a default value that makes sense for your application
userID = await get_user_id(app.state.pool, username)
recipe_id = data_dict.get('content')
print("recipe _ID", recipe_id)
save_result = await favorite_recipe(app.state.pool, userID, recipe_id)
print("save result from websocket:", save_result)
if save_result == 'Success':
save_result = 'success'
print("save result:", save_result)
await websocket.send_text(json.dumps({'action': 'recipe_saved', 'status': save_result}))
continue
if 'action' in data_dict and data_dict['action'] == 'print_recipe':
# Handle the print_recipe action
recipe_id = data_dict.get('content')
print("recipe _ID for printing", recipe_id)
print_result = await get_recipe_for_printing(app.state.pool, recipe_id)
print("print result from websocket:", print_result)
await websocket.send_text(json.dumps({'action': 'recipe_printed', 'data': print_result}))
continue
else:
# Handle regular messages
message = data_dict.get('message', '')
user_ip = "User IP" # Placeholder for user IP
uuid = str(uuid4())
print("session ID", session_id)
response_text, content_type, recipe_id = await generate_answer(app.state.pool, username, message, user_ip, uuid)
response = {
'response': response_text,
'type': content_type,
'recipe_id': recipe_id
}
await websocket.send_text(json.dumps(response))
except WebSocketDisconnect:
print(f"WebSocket disconnected for user {username}")
print(f"Connections: {connections}")
print(f"sessionid:", session_id)
# Attempt to clear user data from Redis
if session_id:
# Schedule the task instead of immediate deletion
task = asyncio.create_task(clear_session_data_after_timeout(session_id, username))
tasks[session_id] = task
connections.pop(username, None)
except Exception as e:
print(f"Unhandled exception for user {username}: {e}")
print("Exception Traceback: " + traceback.format_exc())
finally:
ping_task.cancel()
async def on_user_reconnect(username, session_id):
if session_id in tasks:
tasks[session_id].cancel()
del tasks[session_id]
print(f"Clear data task canceled for user {username}")
@router.post("/validate_session")
async def validate_session(request: Request):
session_id = (await request.json()).get('session_id', '')
if session_id and redis_client.exists(session_id):
return {"status": "valid"}
else:
return {"status": "invalid"}
# Run with Uvicorn
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| [] |
2024-01-10 | andrewhinh/scribble | gen~gen.py | # Libraries
import argparse
import cohere
import os
import pytesseract
import imutils
import cv2
from difflib import SequenceMatcher
from pathlib import Path
from typing import Union
from dotenv import load_dotenv
from PIL import Image
import numpy as np
# Main class
class Gen:
"""
Main inference class
"""
def __init__(self):
# Variables
self.model='small'
load_dotenv()
self.COHERE_APIKEY = os.getenv('COHERE_APIKEY')
self.co = cohere.Client(self.COHERE_APIKEY)
def is_good_word(self, s):
if len(s) == 0:
return False
if len(s) == 1 and s.lower() not in ['a', 'i']:
return False
return True
def predict(self, image: Union[str, Path, Image.Image], max_tokens: int) -> str:
if isinstance(image, Image.Image): img = np.asarray(image)
else: img = cv2.imread(image)
img = imutils.resize(img, width=500, height=500)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 41)
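# The strip has been resized, converted to grayscale, Otsu-binarized and adaptively thresholded above; pytesseract then returns word-level boxes, text and confidence scores.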
results = pytesseract.image_to_data(img, output_type=pytesseract.Output.DICT, lang='eng')
for i in range(0, len(results["text"])):
x = results["left"][i]
y = results["top"][i]
w = results["width"][i]
h = results["height"][i]
text = results["text"][i]
conf = int(results["conf"][i])
if conf > 0:
text = "".join(text).strip()
cv2.rectangle(img,
(x, y),
(x + w, y + h),
(0, 0, 255), 2)
prompt = ' '.join([i for i in results['text'] if self.is_good_word(i)])
response = self.co.generate(prompt=prompt, max_tokens=max_tokens, model=self.model)
return prompt, response.generations[0].text
def similar(self, a, b):
# return between 0 and 1
# 1 is identical, 0 is completely different
return SequenceMatcher(None, a, b).ratio()
def actual_text(self, path):
lines = []
with open(path, 'r') as f:
for line in f.readlines():
line = line[3:] # remove "1: "
line = line.strip()
lines.append(line)
return ' '.join(lines)
# i: 1-21
def comparing(self, i):
label = self.actual_text(f'./training-strips/labels/cartoon{i}.txt')
ocr = self.predict(f'./training-strips/images/cartoon{i}.png')
return self.similar(label, ocr)
# Running model
def main():
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument("--image", type=str, required=True)
parser.add_argument("--max_tokens", type=int, required=True)
args = parser.parse_args()
# Answering question
pipeline = Gen()
prompt, gen = pipeline.predict(args.image, args.max_tokens)
print("OCR:" + prompt + '\n' + "Generated text:" + gen)
"""
a = self.actual_text("./training-strips/labels/cartoon1.txt")
print(a)
accs = []
for i in range(1, 22):
acc = pipeline.comparing(i)
print(f"cartoon{i}: {acc}")
accs.append(acc)
print(f"\naverage: {sum(accs) / len(accs)}")
"""
if __name__ == '__main__':
main() | [
" "
] |
2024-01-10 | charlieWyatt/GPTCalendar | GPTCalendar.py | # Description: A chatbot that can organise your events for you!
from configs.config import OPENAI_API_KEY, PASSWORD, USERNAME, CALENDAR_URL, CREDENTIALS_FILE_PATH, TIMEZONE
import openai
import sys
import json
from openai_decorator import openaifunc, get_openai_funcs
import dateparser
from datetime import datetime, timedelta
from dateutil import parser
from openai_decorator import openaifunc
from Calendar import Calendar
import pytz
openai.api_key = OPENAI_API_KEY
@openaifunc
def check_calendar(startDate: str = "now", endDate: str = "tomorrow"):
"""
This function checks the calendar between two dates to see if the user is available or if they have anything planned
@param startDate: the start of the range
@param endDate: the end of the range
"""
credentials_file = CREDENTIALS_FILE_PATH
timezone = TIMEZONE
username = USERNAME
start_range = convert_conversation_dates_to_datetime(startDate)
if startDate == endDate:
end_range = start_range + timedelta(days=1)
else:
end_range = convert_conversation_dates_to_datetime(endDate)
calendar = Calendar(credentials_file, username, timezone)
events = calendar.get_calendar_events(start_range, end_range)
if events is None or len(events) == 0:
return "I'm free"
returnString = "I have "
i = 0
while i < len(events):
event = events[i]
returnString += event['summary'] + " from " + event['start'] + " to " + event['end']
i += 1
if i != len(events):
returnString += "and then "
return returnString
@openaifunc
def book_event(eventSummary: str = "AutoNamed", startDate: str = "NOT SET", endDate: str = "NOT SET", eventLocation: str = ""):
"""
When a person wants to organise a time, this function checks if the user is free and then books the event in the calendar if they are free
@param eventSummary: a summary of the event
@param startDate: the start of the range
@param endDate: the end of the range
@param eventLocation: the location where the event will be taking place
"""
if endDate == "NOT SET" or startDate == "NOT SET":
return "When do you want to start and finish?"
credentials_file = CREDENTIALS_FILE_PATH
timezone = TIMEZONE
username = USERNAME
calendar = Calendar(credentials_file, username, timezone)
availability = check_calendar(str(startDate), str(endDate))
if availability != "I'm free":
return "Sorry, I have " + availability
startDate = convert_conversation_dates_to_datetime(startDate)
endDate = convert_conversation_dates_to_datetime(endDate)
already_booked_events = calendar.get_calendar_events(startDate, endDate)
for event in already_booked_events:
# string_format = "%Y-%m-%d %H:%M:%S%z"
timezone = pytz.timezone(timezone)
formatted_event_end = parser.isoparse(event['end'])
formatted_event_start = parser.isoparse(event['start'])
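# Treat the slot as taken if an existing event starts or ends inside the requested window; both endpoints are localized to the configured timezone before comparison.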
if (formatted_event_end >= timezone.localize(startDate) and formatted_event_end <= timezone.localize(endDate)) or (formatted_event_start >= timezone.localize(startDate) and formatted_event_start <= timezone.localize(endDate)):
return "Sorry, I have " + event['summary'] # TO DO: Add in, "but i am free at ----"
calendar.add_event(eventSummary, startDate, endDate, eventLocation)
return "Great, booked in for " + str(startDate) + " to " + str(endDate)
@openaifunc
def edit_event(old_summary: str = "AutoNamed", old_start: str = "NOT SET", old_end: str = "NOT SET", old_location: str = "", new_summary: str = None, new_start: str = None, new_end: str = None, new_location: str = None):
"""
Updates an event which has already been put in the calendar. It can rename the event or change the time or place of an event. This should only be executed if the user confirms they want to change the event
@param old_summary: the old summary of the event
@param old_start: the old time the event started
@param old_end: the old time the event ended
@param old_location: the old location where the event was going to take place
@param new_summary: an updated summary of the event
@param new_start: the new time the event will start
@param new_end: the new time the event will end
@param new_location: the new location where the event will take place
"""
credentials_file = CREDENTIALS_FILE_PATH
timezone = TIMEZONE
username = USERNAME
calendar = Calendar(credentials_file, username, timezone)
calendar.update_event(old_summary, convert_conversation_dates_to_datetime(old_start).replace(tzinfo=None), convert_conversation_dates_to_datetime(old_end).replace(tzinfo=None), old_location, new_summary, convert_conversation_dates_to_datetime(new_start).replace(tzinfo=None), convert_conversation_dates_to_datetime(new_end).replace(tzinfo=None), new_location)
def convert_conversation_dates_to_datetime(natural_language_date):
parsed_date = dateparser.parse(natural_language_date)
# print(natural_language_date)
if parsed_date:
return parsed_date
else:
raise ValueError("Invalid date")
# ChatGPT API Function
def send_message(message, messages):
# add user message to message list
messages.append(message)
try:
# send prompt to chatgpt
response = openai.ChatCompletion.create(
# model="gpt-4-0613",
model="gpt-3.5-turbo-0613",
messages=messages,
functions=get_openai_funcs(),
function_call="auto",
)
except openai.error.AuthenticationError:
print("AuthenticationError: Check your API-key")
sys.exit(1)
# add response to message list
messages.append(response["choices"][0]["message"])
return messages
# MAIN FUNCTION
def run_conversation(prompt, messages=[]):
# add user prompt to chatgpt messages
messages = send_message({"role": "user", "content": prompt}, messages)
# get chatgpt response
message = messages[-1]
# loop until project is finished
while True:
if message.get("function_call"):
# get function name and arguments
function_name = message["function_call"]["name"]
arguments = json.loads(message["function_call"]["arguments"])
# call function dangerously
function_response = globals()[function_name](**arguments)
# send function result to chatgpt
messages = send_message(
{
"role": "function",
"name": function_name,
"content": function_response,
},
messages,
)
else:
# if chatgpt doesn't respond with a function call, ask user for input
print("ChatGPT: " + message["content"])
user_message = input("You: ")
# send user message to chatgpt
messages = send_message(
{
"role": "user",
"content": user_message,
},
messages,
)
# save last response for the while loop
message = messages[-1]
# ASK FOR PROMPT
print(
"I'm just a chatbot, but I can also organise your events for you!"
)
prompt = input("You: ")
# RUN CONVERSATION
run_conversation(prompt) | [
"You: "
] |
2024-01-10 | Gerqus/AI-chat-with-tools | src~send_query_to_open_ai.py | from dataclasses import dataclass
from datetime import datetime
import traceback
from typing import List
import openai
import torch
from src.constants import OpenAIRoles
from transformers import AutoTokenizer, pipeline, BitsAndBytesConfig, AutoModelForCausalLM
from src.constants import openai_system_message
from yaspin import yaspin
quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True)
device_map = {
"": "cpu",
}
# print("Loading tokenizer for chat bot...")
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# would "decapoda-research/llama-30b-hf" work?
print("Loading tokenizer for chat bot...")
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="EleutherAI/gpt-neox-20b")
print("Loading chat bot...")
chatbot = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neox-20b",
load_in_8bit=True,
device_map=device_map,
quantization_config=quantization_config,
)
pipe = pipeline(
"text-generation",
model=chatbot,
tokenizer=tokenizer,
max_length=2048,
temperature=0.6,
top_p=0.95,
repetition_penalty=1.2,
pad_token_id=tokenizer.eos_token_id,
)
# local_llm = HuggingFacePipeline(pipeline=pipe) # lang chain
system_message = {
"role": "system",
"content": openai_system_message,
}
TIMEOUT_SECS = 60
MESSAGES_COUNT_LIMIT = 4000
@dataclass
class MessageRepresentation:
content: str
role: OpenAIRoles
tokens_count: int
def __init__(self, content: str, role: OpenAIRoles, tokens_count: int = 0):
self.content = content
self.role = role
if tokens_count == 0:
self.tokens_count = count_message_tokens(content)
else:
self.tokens_count = tokens_count
def to_msg(self):
return {
"role": self.role.name,
"content": self.content
}
def to_text_completion_msg(self):
return self.role.name + ": " + self.content
def count_message_tokens(message: str) -> int:
# input_ids = torch.tensor(tokenizer.encode(message)).unsqueeze(0)
# num_tokens = input_ids.shape[1]
# return num_tokens
return 0
total_history_tokens_count = count_message_tokens(system_message["content"])
def add_message_to_history(message: str, role: OpenAIRoles, messages_history: List[MessageRepresentation]):
global total_history_tokens_count
message_tokens_count = count_message_tokens(message)
messages_history.append(MessageRepresentation(
content = message,
role = role,
tokens_count = message_tokens_count
))
total_history_tokens_count += message_tokens_count
def format_messages_into_text_completion_request(messages: List[MessageRepresentation]) -> str:
text = '''You are an AI assistant that is using
[retrieve], [store] and [delete] plugins to make better conversations with the user and manage the AI assistant's own memory,
[google] plugin to search the internet,
[open] plugin to read content summaries from urls,
[time] plugin to read the current user time.
If you do not know the answer to a question, truthfully say you do not know.
Below is the record of our conversation:
{history}
assistant:'''
history = "\n".join([message.to_text_completion_msg() for message in messages[-10:]])
return text.format(history=history)
@yaspin(text="Processing...", color="white", spinner="dots", attrs={"bold": True})
def send_messages_history_to_open_ai(messages_history: List[MessageRepresentation], model) -> str:
global total_history_tokens_count
while True:
if total_history_tokens_count > MESSAGES_COUNT_LIMIT:
# pop both chatbot and user messages
total_history_tokens_count -= messages_history[0].tokens_count
messages_history.pop(0)
if len(messages_history) > 0:
total_history_tokens_count -= messages_history[0].tokens_count
messages_history.pop(0)
else:
break
if len(messages_history) == 0:
raise Exception("Error: Could not create chat completion. Messages history is empty.")
messages = [system_message]
for message_from_history in messages_history:
messages.append({
"role": message_from_history.role.name,
"content": message_from_history.content
})
try:
# completion = openai.ChatCompletion.create(
# model=model,
# max_tokens=500,
# temperature=0.7,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0.6,
# timeout=TIMEOUT_SECS,
# messages=messages
# )
# return completion.choices[0].message.content # type: ignore
generation = pipe(format_messages_into_text_completion_request(messages_history))
print("--- DEBUG:")
print(generation)
return "".join(str(filter(lambda x: len(x) > 1, generation[0]["generated_text"].split("\n"))[-1]).split("assistant:", maxsplit=2)[1]) # type: ignore
except Exception as e:
global retries_count
print(e)
print("Error: Could not create chat completion.")
traceback.print_exc()
return ""
| [] |
2024-01-10 | ai-cfia/ailab-db | bin~seed-identification-api-for-nachet-frontend.py | import os
import json
import logging
import sys
import ailab.db as db
import ailab.db.nachet as nachet
from ailab.models import openai
from ailab.db.nachet.seed_queries import seeds_urls
from ailab.db.nachet.seed_queries import get_seed_name
from ailab.db.nachet.seed_queries import get_webpage
from ailab.db.nachet.seed_queries import get_images
logging.basicConfig(
filename="mylog.log",
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
WEBSITE_URL = "https://inspection.canada.ca"
def create_seed_url_mapping(cursor, list_seed_url):
### Get a name from the seed URL
url_to_seed_mapping = {}
for rows in list_seed_url:
seed_full_url = WEBSITE_URL + rows["seeds_url"]
seed_name_query = get_seed_name(cursor, seed_full_url)
if seed_name_query:
seed_name = seed_name_query[0]["sd_nme"]
url_to_seed_mapping[seed_full_url] = seed_name
return url_to_seed_mapping
def transform_seed_data_into_json(
cursor,
url_to_seed_mapping,
system_prompt,
load_user_prompt,
json_template,
seed_data_path,
):
"""
Process seed data using Azure OpenAI endpoint and save results as JSON files.
Args:
cursor: Database cursor used to query seed web pages and images.
url_to_seed_mapping (dict): Mapping of seed page URLs to seed names.
system_prompt (str): A system prompt for the OpenAI conversation.
load_user_prompt (str): A user prompt for the OpenAI conversation.
json_template (json): A JSON template for the OpenAI request.
seed_data_path (str): Directory where the resulting JSON files are written.
This function performs the following steps:
1. Iterates through a list of seed values.
2. Checks if a JSON file for each seed exists and skips if it does.
3. Constructs an SQL query to retrieve data related to the seed from a database.
4. Sends the query to the database and fetches the retrieved data.
5. Concatenates the cleaned content into a single 'page.'
6. Sends a request to the Azure OpenAI endpoint to get a response.
7. Processes the response, extracting the name and saving it as a JSON file.
"""
for url, seed_name in url_to_seed_mapping.items():
logging.info("Current seed: %s", seed_name)
seed_json_path = seed_name + ".json"
file_path = os.path.join(seed_data_path, seed_json_path)
if os.path.exists(file_path):
logging.info(
"JSON file %s exists in %s, skipping", seed_json_path, seed_data_path
)
else:
web_pages = get_webpage(cursor, url)
all_language_seed_page = ""
for row in web_pages:
web_text = row.get("cleaned_content")
all_language_seed_page += web_text
page = all_language_seed_page
md5hash = row.get("md5hash")
### Get the images corresponding to the current page
images_fetch = get_images(cursor, md5hash)
image_information = ""
for row in images_fetch:
image_links = row["photo_link"]
image_descriptions = row["photo_description"]
image_information += f"Image link: {image_links}"
image_information += f"\nImage description: {image_descriptions}\n\n"
logging.info("Sending request for summary to Azure OpenAI endpoint...\n")
user_prompt = (
load_user_prompt
+ "Return a JSON file that follows this template:\n\n"
+ json_template
+ "\n\nhere is the text to parse:\n"
+ page
+ "\n\nhere is the source url of the page:\n"
+ url
+ "\n\nAnd here is the images descriptions:\n"
+ image_information
)
response = openai.get_chat_answer(system_prompt, user_prompt, 2000)
data = json.loads(response.choices[0].message.content)
if isinstance(data, dict):
file_name = seed_name
file_name = file_name.encode("latin1").decode("unicode-escape")
file_name += ".json"
file_path = os.path.join(seed_data_path, file_name)
with open(file_path, "w") as json_file:
json.dump(data, json_file, ensure_ascii=False, indent=4)
logging.info("JSON data written to %s", file_path)
else:
logging.error(
"Error: not a dictionary, so it cannot be serialized to JSON."
)
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " SEED_DATA_PATH PROMPT_PATH")
print("SEED_DATA_PATH: Directory for storing seeds")
print("PROMPT_PATH: Directory containing the API prompt")
sys.exit(1)
SEED_DATA_PATH = sys.argv[1]
PROMPT_PATH = sys.argv[2]
if not os.path.exists(SEED_DATA_PATH):
print(f"The directory '{SEED_DATA_PATH}' does not exist.")
sys.exit(1)
if not os.path.exists(PROMPT_PATH):
print(f"The directory '{PROMPT_PATH}' does not exist.")
sys.exit(1)
system_prompt = nachet.load_prompt(PROMPT_PATH, "system_prompt.txt")
load_user_prompt = nachet.load_prompt(PROMPT_PATH, "user_prompt.txt")
json_template = nachet.load_json_template(PROMPT_PATH)
nachet_db = db.connect_db()
with nachet_db.cursor() as cursor:
seed_urls = seeds_urls(cursor, 10)
url_to_seed_mapping = create_seed_url_mapping(cursor, seed_urls)
logging.info("%s", url_to_seed_mapping)
logging.info("\nList of selected seeds :")
for url, seed_name in url_to_seed_mapping.items():
logging.info("%s", seed_name)
transform_seed_data_into_json(
cursor,
url_to_seed_mapping,
system_prompt,
load_user_prompt,
json_template,
SEED_DATA_PATH,
)
| [
"user_prompt.txt",
"PLACEHOLDERReturn a JSON file that follows this template:\n\nPLACEHOLDER\n\nhere is the text to parse:\nPLACEHOLDER\n\nhere is the source url of the page:\nPLACEHOLDER\n\nAnd here is the images descriptions:\nPLACEHOLDER",
"system_prompt.txt"
] |
2024-01-10 | ai-cfia/ailab-db | tests~fetch_embedding.py |
import openai
import os
import sys
import dotenv
dotenv.load_dotenv()
openai.api_type = "azure"
openai.api_key = os.environ["OPENAI_API_KEY"]
openai.api_base = f"https://{os.environ['AZURE_OPENAI_SERVICE']}.openai.azure.com"
openai.api_version = "2023-05-15"
def fetch_embedding(text):
"""Fetch embedding for a list of tokens from the Microsoft Azure OpenAI API"""
response = openai.Embedding.create(
input=text,
engine="ada"
)
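# NOTE: with api_type "azure", engine refers to the Azure deployment name; "ada" must match an existing embedding-model deployment in the configured service.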
embeddings = response['data'][0]['embedding']
return embeddings
if __name__ == '__main__':
text = " ".join(sys.argv[1:])
if len(text) == 0:
print('Please provide a text to embed')
raise SystemExit
print(fetch_embedding(text)) | [] |
2024-01-10 | OpenLemur/Lemur | xchat~eval~dispatch_openai_requests.py | # Copied and modified from https://gist.github.com/neubig/80de662fb3e225c18172ec218be4917a. Thanks to Graham Neubig for sharing the original code.
import asyncio
from typing import Any, Dict, List
import openai
async def dispatch_openai_chat_requesets(
messages_list: List[List[Dict[str, Any]]],
model: str,
**completion_kwargs: Any,
) -> List[str]:
"""Dispatches requests to OpenAI chat completion API asynchronously.
Args:
messages_list: List of messages to be sent to OpenAI chat completion API.
model: OpenAI model to use.
completion_kwargs: Keyword arguments to be passed to OpenAI ChatCompletion API. See https://platform.openai.com/docs/api-reference/chat for details.
Returns
-------
List of responses from OpenAI API.
"""
async_responses = [
openai.ChatCompletion.acreate(
model=model,
messages=x,
**completion_kwargs,
)
for x in messages_list
]
return await asyncio.gather(*async_responses)
async def dispatch_openai_prompt_requesets(
prompt_list: List[str],
model: str,
**completion_kwargs: Any,
) -> List[str]:
"""Dispatches requests to OpenAI text completion API asynchronously.
Args:
prompt_list: List of prompts to be sent to OpenAI text completion API.
model: OpenAI model to use.
completion_kwargs: Keyword arguments to be passed to OpenAI text completion API. See https://platform.openai.com/docs/api-reference/completions for details.
Returns
-------
List of responses from OpenAI API.
"""
async_responses = [
openai.Completion.acreate(
model=model,
prompt=x,
**completion_kwargs,
)
for x in prompt_list
]
return await asyncio.gather(*async_responses)
if __name__ == "__main__":
chat_completion_responses = asyncio.run(
dispatch_openai_chat_requesets(
messages_list=[
[{"role": "user", "content": "Write a poem about asynchronous execution."}],
[{"role": "user", "content": "Write a poem about asynchronous pirates."}],
],
model="gpt-3.5-turbo",
temperature=0.3,
max_tokens=200,
top_p=1.0,
)
)
for i, x in enumerate(chat_completion_responses):
print(f"Chat completion response {i}:\n{x['choices'][0]['message']['content']}\n\n")
prompt_completion_responses = asyncio.run(
dispatch_openai_prompt_requesets(
prompt_list=[
"Write a poem about asynchronous execution.\n",
"Write a poem about asynchronous pirates.\n",
],
model="text-davinci-003",
temperature=0.3,
max_tokens=200,
top_p=1.0,
)
)
for i, x in enumerate(prompt_completion_responses):
print(f"Prompt completion response {i}:\n{x['choices'][0]['text']}\n\n")
| [
"text-davinci-003",
"Write a poem about asynchronous execution.",
"Write a poem about asynchronous execution.\n",
"Write a poem about asynchronous pirates.",
"Write a poem about asynchronous pirates.\n"
] |
2024-01-10 | pedroteles17/bbg_fields_chat | biuld_index.py | #%%
import pandas as pd
import dotenv
import time
from tqdm import tqdm
import chromadb
from llama_index import Document, VectorStoreIndex, ServiceContext
from llama_index.text_splitter import SentenceSplitter
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index.llms import OpenAI
dotenv.load_dotenv()
#%%
# Load Data
all_fields_data = pd.read_parquet('data/clean_fields_docs.parquet')
all_fields_data['documentation'] = all_fields_data['documentation']\
.apply(lambda x: x.replace('\n', ' '))
documents = []
for i, row in all_fields_data.iterrows():
document = Document(
text=f"{row['description']}: {row['documentation']}",
metadata={
"field_mnemonic": row['mnemonic'],
"field_description": row['description'],
"field_category": row['category_name'],
"source": row['source'],
},
excluded_embed_metadata_keys=["source"]
)
documents.append(document)
#%%
# Create storage_context (Persistent Storage, ChromaDB)
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("bloomberg_fields")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# Create service_context (LLM, Embedding, Text Splitter)
text_splitter = SentenceSplitter(chunk_size=512, chunk_overlap=10)
llm = OpenAI(model="gpt-3.5-turbo-1106")
service_context = ServiceContext.from_defaults(
llm=llm, text_splitter=text_splitter,
)
#%%
# Create index
index = VectorStoreIndex(
[], service_context=service_context,
storage_context=storage_context, use_async=False
)
for document in tqdm(documents, total=len(documents)):
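# Documents are inserted one at a time with a short pause between calls, presumably to throttle requests to the embedding endpoint.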
time.sleep(0.01)
index.insert(document)
| [] |
2024-01-10 | parthiv11/mindsdb | mindsdb~integrations~handlers~writer_handler~settings.py | from dataclasses import dataclass
from functools import lru_cache
from typing import List, Union
import pandas as pd
from chromadb import Settings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.docstore.document import Document
from langchain.document_loaders import DataFrameLoader
from langchain.embeddings.base import Embeddings
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS, Chroma, VectorStore
from pydantic import BaseModel, Extra, validator
DEFAULT_EMBEDDINGS_MODEL = "sentence-transformers/all-mpnet-base-v2"
USER_DEFINED_WRITER_LLM_PARAMS = (
"model_id",
"max_tokens",
"temperature",
"top_p",
"stop",
"best_of",
"verbose",
"writer_org_id",
"writer_api_key",
"base_url",
)
SUPPORTED_VECTOR_STORES = ("chroma", "faiss")
EVAL_COLUMN_NAMES = (
"question",
"answers",
"context",
)
SUPPORTED_EVALUATION_TYPES = ("retrieval", "e2e")
SUMMARIZATION_PROMPT_TEMPLATE = """
Summarize the following texts for me:
{context}
When summarizing, please keep in mind the following question:
{question}
"""
GENERATION_METRICS = ("rouge", "meteor", "cosine_similarity", "accuracy")
RETRIEVAL_METRICS = ("cosine_similarity", "accuracy")
def is_valid_store(name):
return name in SUPPORTED_VECTOR_STORES
class VectorStoreFactory:
@staticmethod
def get_vectorstore_class(name):
if not isinstance(name, str):
raise TypeError("name must be a string")
if not is_valid_store(name):
raise ValueError(f"Invalid vector store {name}")
if name == "faiss":
return FAISS
if name == "chroma":
return Chroma
def get_chroma_settings(persist_directory: str = "chromadb") -> Settings:
return Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=persist_directory,
anonymized_telemetry=False,
)
@dataclass
class PersistedVectorStoreSaverConfig:
vector_store_name: str
persist_directory: str
collection_name: str
vector_store: VectorStore
@dataclass
class PersistedVectorStoreLoaderConfig:
vector_store_name: str
embeddings_model: Embeddings
persist_directory: str
collection_name: str
class PersistedVectorStoreSaver:
def __init__(self, config: PersistedVectorStoreSaverConfig):
self.config = config
def save_vector_store(self, vector_store: VectorStore):
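# Dispatch to save_chroma / save_faiss based on the configured vector store name.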
method_name = f"save_{self.config.vector_store_name}"
getattr(self, method_name)(vector_store)
def save_chroma(self, vector_store: Chroma):
vector_store.persist()
def save_faiss(self, vector_store: FAISS):
vector_store.save_local(
folder_path=self.config.persist_directory,
index_name=self.config.collection_name,
)
class PersistedVectorStoreLoader:
def __init__(self, config: PersistedVectorStoreLoaderConfig):
self.config = config
def load_vector_store_client(
self,
vector_store: str,
):
"""Load vector store client from the persisted vector store"""
if vector_store == "chroma":
return Chroma(
collection_name=self.config.collection_name,
embedding_function=self.config.embeddings_model,
client_settings=get_chroma_settings(
persist_directory=self.config.persist_directory
),
)
elif vector_store == "faiss":
return FAISS.load_local(
folder_path=self.config.persist_directory,
embeddings=self.config.embeddings_model,
index_name=self.config.collection_name,
)
else:
raise NotImplementedError(f"{vector_store} client is not yet supported")
def load_vector_store(self):
method_name = f"load_{self.config.vector_store_name}"
return getattr(self, method_name)()
def load_chroma(self) -> Chroma:
return self.load_vector_store_client(vector_store="chroma")
def load_faiss(self) -> FAISS:
return self.load_vector_store_client(vector_store="faiss")
class WriterLLMParameters(BaseModel):
"""Model parameters for the Writer LLM API interface"""
writer_api_key: str
writer_org_id: str = None
base_url: str = None
model_id: str = "palmyra-x"
callbacks: List[StreamingStdOutCallbackHandler] = [StreamingStdOutCallbackHandler()]
max_tokens: int = 1024
temperature: float = 0.0
top_p: float = 1
stop: List[str] = []
best_of: int = 5
verbose: bool = False
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
class MissingPromptTemplate(Exception):
pass
class UnsupportedVectorStore(Exception):
pass
class MissingUseIndex(Exception):
pass
# todo make a separate class for evaluation parameters
# todo use enum classes instead of iterables to control the values
class WriterHandlerParameters(BaseModel):
"""Model parameters for create model"""
prompt_template: str
llm_params: WriterLLMParameters
chunk_size: int = 500
chunk_overlap: int = 50
generation_evaluation_metrics: List[str] = list(GENERATION_METRICS)
retrieval_evaluation_metrics: List[str] = list(RETRIEVAL_METRICS)
evaluation_type: str = "e2e"
n_rows_evaluation: int = None # if None, evaluate on all rows
retriever_match_threshold: float = 0.7
generator_match_threshold: float = 0.8
evaluate_dataset: Union[List[dict], str] = None
run_embeddings: bool = True
external_index_name: str = None
top_k: int = 4
embeddings_model_name: str = DEFAULT_EMBEDDINGS_MODEL
context_columns: Union[List[str], str] = None
vector_store_name: str = "chroma"
vector_store: VectorStore = None
collection_name: str = "langchain"
summarize_context: bool = False
summarization_prompt_template: str = SUMMARIZATION_PROMPT_TEMPLATE
vector_store_folder_name: str = "chromadb"
vector_store_storage_path: str = None
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
use_enum_values = True
@validator("generation_evaluation_metrics")
def generation_evaluation_metrics_must_be_supported(cls, v):
for metric in v:
if metric not in GENERATION_METRICS:
raise ValueError(
f"generation_evaluation_metrics must be one of {', '.join(str(v) for v in GENERATION_METRICS)}, got {metric}"
)
return v
@validator("retrieval_evaluation_metrics")
def retrieval_evaluation_metrics_must_be_supported(cls, v):
for metric in v:
if metric not in RETRIEVAL_METRICS:
raise ValueError(
f"retrieval_evaluation_metrics must be one of {', '.join(str(v) for v in RETRIEVAL_METRICS)}, got {metric}"
)
return v
@validator("evaluation_type")
def evaluation_type_must_be_supported(cls, v):
if v not in SUPPORTED_EVALUATION_TYPES:
raise ValueError(
f"evaluation_type must be one of `retrieval` or `e2e`, got {v}"
)
return v
@validator("vector_store_name")
def name_must_be_lower(cls, v):
return v.lower()
@validator("prompt_template")
def prompt_template_must_be_provided(cls, v):
if not v:
raise MissingPromptTemplate(
"Please provide a `prompt_template` for this engine."
)
return v
@validator("vector_store_name")
def vector_store_must_be_supported(cls, v):
if not is_valid_store(v):
raise UnsupportedVectorStore(
f"currently we only support {', '.join(str(v) for v in SUPPORTED_VECTOR_STORES)} vector store"
)
return v
class DfLoader(DataFrameLoader):
"""
override the load method of langchain.document_loaders.DataFrameLoaders to ignore rows with 'None' values
"""
def __init__(self, data_frame: pd.DataFrame, page_content_column: str):
super().__init__(data_frame=data_frame, page_content_column=page_content_column)
self._data_frame = data_frame
self._page_content_column = page_content_column
def load(self) -> List[Document]:
"""Loads the dataframe as a list of documents"""
documents = []
for n_row, frame in self._data_frame[self._page_content_column].items():
if pd.notnull(frame):
# ignore rows with None values
column_name = self._page_content_column
document_contents = frame
documents.append(
Document(
page_content=document_contents,
metadata={
"source": "dataframe",
"row": n_row,
"column": column_name,
},
)
)
return documents
def df_to_documents(
df: pd.DataFrame, page_content_columns: Union[List[str], str]
) -> List[Document]:
"""Converts a given dataframe to a list of documents"""
documents = []
if isinstance(page_content_columns, str):
page_content_columns = [page_content_columns]
for _, page_content_column in enumerate(page_content_columns):
if page_content_column not in df.columns.tolist():
raise ValueError(
f"page_content_column {page_content_column} not in dataframe columns"
)
loader = DfLoader(data_frame=df, page_content_column=page_content_column)
documents.extend(loader.load())
return documents
# todo hard coding device to cpu, add support for gpu later on
# e.g. {"device": "gpu" if torch.cuda.is_available() else "cpu"}
@lru_cache()
def load_embeddings_model(embeddings_model_name):
try:
model_kwargs = {"device": "cpu"}
embedding_model = HuggingFaceEmbeddings(
model_name=embeddings_model_name, model_kwargs=model_kwargs
)
except ValueError:
raise ValueError(
f"The {embeddings_model_name} is not supported, please select a valid option from Hugging Face Hub!"
)
return embedding_model
| [
"Please provide a `prompt_template` for this engine.",
"\nSummarize the following texts for me:\n{context}\n\nWhen summarizing, please keep the following in mind the following question:\n{question}\n"
] |
2024-01-10 | Pawan300/transformers | src~transformers~configuration_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT configuration """
import logging
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"
}
class OpenAIGPTConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a :class:`~transformers.OpenAIGPTModel`.
It is used to instantiate an GPT model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the `GPT <https://huggingface.co/openai-gpt>`__ architecture from OpenAI.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, optional, defaults to 40478):
Vocabulary size of the GPT model. Defines the different tokens that
can be represented by the `input_ids` passed to the forward method of :class:`~transformers.OpenAIGPTModel`.
n_positions (:obj:`int`, optional, defaults to 512):
The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
n_ctx (:obj:`int`, optional, defaults to 512):
Dimensionality of the causal mask (usually same as n_positions).
n_embd (:obj:`int`, optional, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (:obj:`int`, optional, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (:obj:`int`, optional, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
afn (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
The non-linear activation function (function or string) in the encoder and pooler.
If string, "gelu", "relu", "swish" and "gelu_new" are supported.
resid_pdrop (:obj:`float`, optional, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (:obj:`int`, optional, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (:obj:`float`, optional, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (:obj:`float`, optional, defaults to 1e-5):
The epsilon to use in the layer normalization layers
initializer_range (:obj:`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
predict_special_tokens (:obj:`boolean`, optional, defaults to :obj:`True`):
Whether special tokens should be predicted when the model is has a language modeling head.
summary_type (:obj:`string`, optional, defaults to "cls_index"):
Argument used when doing sequence summary. Used in for the multiple choice head in
:class:`~transformers.OpenAIGPTDoubleHeadsModel`.
Is one of the following options:
- 'last' => take the last token hidden state (like XLNet)
- 'first' => take the first token hidden state (like Bert)
- 'mean' => take the mean of all tokens hidden states
- 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
- 'attn' => Not implemented now, use multi-head attention
summary_use_proj (:obj:`boolean`, optional, defaults to :obj:`True`):
Argument used when doing sequence summary. Used in for the multiple choice head in
:class:`~transformers.OpenAIGPTDoubleHeadsModel`.
Add a projection after the vector extraction
summary_activation (:obj:`string` or :obj:`None`, optional, defaults to :obj:`None`):
Argument used when doing sequence summary. Used in for the multiple choice head in
:class:`~transformers.OpenAIGPTDoubleHeadsModel`.
'tanh' => add a tanh activation to the output, Other => no activation.
summary_proj_to_labels (:obj:`boolean`, optional, defaults to :obj:`True`):
Argument used when doing sequence summary. Used in for the multiple choice head in
:class:`~transformers.OpenAIGPTDoubleHeadsModel`.
If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
summary_first_dropout (:obj:`float`, optional, defaults to 0.1):
Argument used when doing sequence summary. Used in for the multiple choice head in
:class:`~transformers.OpenAIGPTDoubleHeadsModel`.
Add a dropout before the projection and activation
Example::
from transformers import OpenAIGPTConfig, OpenAIGPTModel
# Initializing a GPT configuration
configuration = OpenAIGPTConfig()
# Initializing a model from the configuration
model = OpenAIGPTModel(configuration)
# Accessing the model configuration
configuration = model.config
"""
model_type = "openai-gpt"
def __init__(
self,
vocab_size=40478,
n_positions=512,
n_ctx=512,
n_embd=768,
n_layer=12,
n_head=12,
afn="gelu",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
predict_special_tokens=True,
summary_type="cls_index",
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
**kwargs
):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.afn = afn
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.predict_special_tokens = predict_special_tokens
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
@property
def max_position_embeddings(self):
return self.n_positions
@property
def hidden_size(self):
return self.n_embd
@property
def num_attention_heads(self):
return self.n_head
@property
def num_hidden_layers(self):
return self.n_layer
| [] |
2024-01-10 | rhakbari/ms-thesis-gpt-3 | model_testing.py | import openai
ft_discriminator = "davinci:ft-personal-2022-11-22-20-19-27"
ft_qa = "davinci:ft-personal-2022-11-22-21-04-53"
def apply_ft_discriminator(context, question, discriminator_model):
"""
Apply the fine tuned discriminator to a question, to assess whether it can be answered from the context.
"""
prompt = f"{context}\nQuestion: {question}\n Related:"
result = openai.Completion.create(model=discriminator_model, prompt=prompt, max_tokens=1, temperature=0, top_p=1, n=1, logprobs=2)
return result['choices'][0]['logprobs']['top_logprobs']
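# The returned top_logprobs maps the most likely single-token completions to their log-probabilities; their relative values indicate how confident the discriminator is that the question is answerable from the context.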
result = apply_ft_discriminator('Construction is the process of constructing a building or infrastructure. Construction differs from manufacturing in that manufacturing typically involves mass production of similar items without a designated purchaser, while construction typically takes place on location for a known client. Construction as an industry comprises six to nine percent of the gross domestic product of developed countries. Construction starts with planning,[citation needed] design, and financing and continues until the project is built and ready for use.',
'What typically involves mass production of similar items without a designated purchaser?', ft_discriminator)
print(result)
def apply_ft_qa_answer(context, question, answering_model):
"""
Apply the fine tuned discriminator to a question
"""
prompt = f"{context}\nQuestion: {question}\nAnswer:"
result = openai.Completion.create(model=answering_model, prompt=prompt, max_tokens=30, temperature=0, top_p=1, n=1, stop=['.','\n'])
return result['choices'][0]['text']
answer = apply_ft_qa_answer('The Amazon rainforest (Portuguese: Floresta Amazônica or Amazônia; Spanish: Selva Amazónica, Amazonía or usually Amazonia; French: Forêt amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. The Amazon represents over half of the planet\'s remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.',
'Which name is also used to describe the Amazon rainforest in English?', ft_qa)
print(answer) | [
"PLACEHOLDER\nQuestion: PLACEHOLDER\n Related:",
"PLACEHOLDER\nQuestion: PLACEHOLDER\nAnswer:"
] |
2024-01-10 | gustmd0121/DALLE_ECG | dalle_pytorch~dalle_pytorch.py | from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
import numpy as np
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from dalle_pytorch import distributed_utils
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from dalle_pytorch.transformer import Transformer, DivideMax
torch.manual_seed(0)
torch.set_num_threads(16)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class always():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return self.val
def is_empty(t):
return t.nelement() == 0
def masked_mean(t, mask, dim=1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim=1) / mask.sum(dim=1)[..., None]
def set_requires_grad(model, value):
for param in model.parameters():
param.requires_grad = value
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
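# top_k below keeps only the highest (1 - thres) fraction of logits and fills the rest with -inf,
# so a later softmax + multinomial only ever samples from that top slice.
# Illustrative sketch (not from the original source): top_k(torch.randn(1, 100), thres=0.9) leaves ~10 candidate logits.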
def top_k(logits, thres=0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# discrete vae class
class ResBlock(nn.Module):
def __init__(self, chan):
super().__init__()
self.net = nn.Sequential(
nn.Conv1d(chan, chan, 3, padding=1),
nn.ReLU(),
nn.Conv1d(chan, chan, 3, padding=1),
nn.ReLU(),
nn.Conv1d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class DiscreteVAE(nn.Module):
def __init__(
self,
image_size=312,
num_tokens=1024,
codebook_dim=512,
num_layers=4,
num_resnet_blocks=0,
hidden_dim=64,
channels=12,
smooth_l1_loss=False,
temperature=0.9,
straight_through=False,
kl_div_loss_weight=0.,
normalization=((0.5,) * 3, (0.5,) * 3)
):
super().__init__()
# assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
hdim = hidden_dim
enc_chans = [hidden_dim] * num_layers
dec_chans = list(reversed(enc_chans))
enc_chans = [channels, *enc_chans]
dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
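# zip consecutive channel counts into (in, out) pairs, one pair per encoder/decoder conv stage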
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
count = 0
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(nn.Conv1d(enc_in, enc_out, 4, stride=2, padding=1), nn.ReLU()))
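# only the first decoder stage adds output_padding=1, presumably so the stride-2 transposed convs
# can recover the odd-length temporal dimension produced by the encoder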
if count == 0:
dec_layers.append(
nn.Sequential(nn.ConvTranspose1d(dec_in, dec_out, 4, stride=2, padding=1, output_padding=1),
nn.ReLU()))
count += 1
continue
else:
dec_layers.append(nn.Sequential(nn.ConvTranspose1d(dec_in, dec_out, 4, stride=2, padding=1), nn.ReLU()))
count += 1
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1]))
enc_layers.append(ResBlock(enc_chans[-1]))
if num_resnet_blocks > 0:
dec_layers.insert(0, nn.Conv1d(codebook_dim, dec_chans[1], 1))
enc_layers.append(nn.Conv1d(enc_chans[-1], num_tokens, 1))
dec_layers.append(nn.Conv1d(dec_chans[-1], channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
# take care of normalization within class
self.normalization = normalization
self._register_external_parameters()
def _register_external_parameters(self):
"""Register external parameters for DeepSpeed partitioning."""
if (
not distributed_utils.is_distributed
or not distributed_utils.using_backend(
distributed_utils.DeepSpeedBackend)
):
return
deepspeed = distributed_utils.backend.backend_module
deepspeed.zero.register_external_parameter(self, self.codebook.weight)
def norm(self, images):
if not exists(self.normalization):
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self(images, return_logits=True)
codebook_indices = logits.argmax(dim=1).flatten(1)
return codebook_indices
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b t d -> b d t')
images = self.decoder(image_embeds)
return images
def forward(
self,
ecg,
return_loss=False,
return_recons=False,
return_logits=False,
temp=None
):
device, num_tokens, image_size, kl_div_loss_weight = ecg.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
# assert ecg.shape[-1] == image_size and ecg.shape[-2] == image_size, f'input must have the correct image size {image_size}'
# ecg = self.norm(ecg) #8, 3, 128, 128
logits = self.encoder(ecg) # 256, 512, 312
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
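# relax the discrete code choice with Gumbel-softmax over the codebook axis, then build the decoder
# input as a soft mixture of codebook vectors (straight_through gives a one-hot forward pass with soft gradients)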
soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=self.straight_through)
sampled = einsum('b n t, n d -> b d t', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(ecg, out)
# kl divergence
logits = rearrange(logits, 'b n t -> b t n') # [8, 256, 8192] [256, 312, 512]
log_qy = F.log_softmax(logits, dim=-1)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device=device))
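# KL divergence between the code posterior and a uniform prior over the codebook;
# log_target=True because both arguments are log-probabilities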
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target=True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
# main classes
class CLIP(nn.Module):
def __init__(
self,
*,
dim_text=512,
dim_image=512,
dim_latent=512,
num_text_tokens=10000,
text_enc_depth=6,
text_seq_len=257,
text_heads=8,
num_visual_tokens=512,
visual_enc_depth=6,
visual_heads=8,
visual_image_size=256,
visual_patch_size=32,
channels=3
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim_text)
self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
self.text_transformer = Transformer(causal=False, seq_len=text_seq_len, dim=dim_text, depth=text_enc_depth,
heads=text_heads, rotary_emb=False)
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias=False)
assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (visual_image_size // visual_patch_size) ** 2
patch_dim = channels * visual_patch_size ** 2
self.visual_patch_size = visual_patch_size
self.to_visual_embedding = nn.Linear(patch_dim, dim_image)
self.visual_pos_emb = nn.Embedding(num_patches, dim_image)
self.visual_transformer = Transformer(causal=False, seq_len=num_patches, dim=dim_image, depth=visual_enc_depth,
heads=visual_heads, rotary_emb=False)
self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias=False)
self.temperature = nn.Parameter(torch.tensor(1.))
def forward(
self,
text,
image,
text_mask=None,
return_loss=False
):
b, device, p = text.shape[0], text.device, self.visual_patch_size
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device=device))
image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
image_emb = self.to_visual_embedding(image_patches)
image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device=device))
enc_text = self.text_transformer(text_emb, mask=text_mask)
enc_image = self.visual_transformer(image_emb)
if exists(text_mask):
text_latents = masked_mean(enc_text, text_mask, dim=1)
else:
text_latents = enc_text.mean(dim=1)
image_latents = enc_image.mean(dim=1)
text_latents = self.to_text_latent(text_latents)
image_latents = self.to_visual_latent(image_latents)
text_latents, image_latents = map(lambda t: F.normalize(t, p=2, dim=-1), (text_latents, image_latents))
temp = self.temperature.exp()
if not return_loss:
sim = einsum('n d, n d -> n', text_latents, image_latents) * temp
return sim
sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp
labels = torch.arange(b, device=device)
loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
return loss
# main DALL-E class
class DALLE(nn.Module):
def __init__(
self,
*,
dim,
vae,
num_text_tokens=10000,
text_seq_len=256,
depth,
heads=8,
dim_head=64,
reversible=False,
attn_dropout=0.,
ff_dropout=0,
sparse_attn=False,
attn_types=None,
loss_img_weight=7,
stable=False,
sandwich_norm=False,
shift_tokens=True,
rotary_emb=True
):
super().__init__()
assert isinstance(vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE)), 'vae must be an instance of DiscreteVAE'
image_size = vae.image_size
num_image_tokens = 512
image_fmap_size = 312
image_seq_len = 312 # number of latent time steps produced by the ECG encoder (one image token each)
num_text_tokens = num_text_tokens + text_seq_len # reserve unique padding tokens for each position (text seq len)
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.image_emb = nn.Embedding(num_image_tokens, dim)
self.text_pos_emb = nn.Embedding(text_seq_len + 1, dim) if not rotary_emb else always(0) # +1 for <bos>
self.image_pos_emb = nn.Embedding(image_fmap_size, dim) if not rotary_emb else always(
0) # [12, 5000]
self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
self.num_image_tokens = num_image_tokens
self.text_seq_len = text_seq_len
self.image_seq_len = image_seq_len
seq_len = text_seq_len + image_seq_len
total_tokens = num_text_tokens + num_image_tokens
self.total_tokens = total_tokens
self.total_seq_len = seq_len
self.vae = vae
set_requires_grad(self.vae, False) # freeze VAE from being trained
self.transformer = Transformer(
dim=dim,
causal=True,
seq_len=seq_len,
depth=depth,
heads=heads,
dim_head=dim_head,
reversible=reversible,
attn_dropout=attn_dropout,
ff_dropout=ff_dropout,
attn_types=attn_types,
image_fmap_size=image_fmap_size,
sparse_attn=sparse_attn,
stable=stable,
sandwich_norm=sandwich_norm,
shift_tokens=shift_tokens,
rotary_emb=rotary_emb
)
self.stable = stable
if stable:
self.norm_by_max = DivideMax(dim=-1)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, self.total_tokens),
)
seq_range = torch.arange(seq_len)
logits_range = torch.arange(total_tokens)
seq_range = rearrange(seq_range, 'n -> () n ()')
logits_range = rearrange(logits_range, 'd -> () () d')
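# build a (sequence position, vocabulary index) mask: True marks forbidden combinations,
# i.e. text positions emitting image tokens or image positions emitting text tokens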
logits_mask = (
((seq_range >= text_seq_len) & (logits_range < num_text_tokens)) |
((seq_range < text_seq_len) & (logits_range >= num_text_tokens))
)
self.register_buffer('logits_mask', logits_mask, persistent=False)
self.loss_img_weight = loss_img_weight
@torch.no_grad()
@eval_decorator
def generate_texts(
self,
tokenizer,
text=None,
*,
filter_thres=0.5,
temperature=1.
):
text_seq_len = self.text_seq_len
if text is None or text == "":
text_tokens = torch.tensor([[0]]).cuda()
else:
text_tokens = torch.tensor(tokenizer.tokenizer.encode(text)).cuda().unsqueeze(0)
for _ in range(text_tokens.shape[1], text_seq_len):
device = text_tokens.device
tokens = self.text_emb(text_tokens)
tokens += self.text_pos_emb(torch.arange(text_tokens.shape[1], device=device))
seq_len = tokens.shape[1]
output_transf = self.transformer(tokens)
if self.stable:
output_transf = self.norm_by_max(output_transf)
logits = self.to_logits(output_transf)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
logits = logits[:, -1, :]
filtered_logits = top_k(logits, thres=filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
text_tokens = torch.cat((text_tokens, sample), dim=-1)
padding_tokens = set(np.arange(self.text_seq_len) + (self.num_text_tokens - self.text_seq_len))
texts = [tokenizer.tokenizer.decode(text_token, pad_tokens=padding_tokens) for text_token in text_tokens]
return text_tokens, texts
@torch.no_grad()
@eval_decorator
def generate_images(
self,
text,
*,
clip=None,
mask=None,
filter_thres=0.5,
temperature=1.,
img=None,
num_init_img_tokens=None
):
vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens
total_len = text_seq_len + image_seq_len
text = text[:, :text_seq_len] # make sure text is within bounds
out = text
if exists(img):
image_size = vae.image_size
assert img.shape[1] == 3 and img.shape[2] == image_size and img.shape[
3] == image_size, f'input image must have the correct image size {image_size}'
indices = vae.get_codebook_indices(img)
num_img_tokens = default(num_init_img_tokens,
int(0.4375 * image_seq_len)) # OpenAI used 14 * 32 initial tokens to prime
assert num_img_tokens < image_seq_len, 'number of initial image tokens for priming must be less than the total image token sequence length'
indices = indices[:, :num_img_tokens]
out = torch.cat((out, indices), dim=-1)
for cur_len in range(out.shape[1], total_len):
is_image = cur_len >= text_seq_len
text, image = out[:, :text_seq_len], out[:, text_seq_len:]
logits = self(text, image, mask=mask)[:, -1, :]
filtered_logits = top_k(logits, thres=filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
sample -= (
num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens
out = torch.cat((out, sample), dim=-1)
if out.shape[1] <= text_seq_len:
mask = F.pad(mask, (0, 1), value=True)
text_seq = out[:, :text_seq_len]
img_seq = out[:, -image_seq_len:]
images = vae.decode(img_seq)
if exists(clip):
scores = clip(text_seq, images, return_loss=False)
return images, scores
return images
def forward(
self,
text,
image=None,
mask=None,
return_loss=False
):
assert text.shape[
-1] == self.text_seq_len, f'the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})'
device, total_seq_len = text.device, self.total_seq_len
# make sure padding in text tokens get unique padding token id
text_range = torch.arange(self.text_seq_len, device=device) + (self.num_text_tokens - self.text_seq_len)
text = torch.where(text == 0, text_range, text)
# add <bos>
text = F.pad(text, (1, 0), value=0)
# text.shape = [64, 257]
tokens = self.text_emb(text) # [64, 257, 512]
tokens += self.text_pos_emb(torch.arange(text.shape[1], device=device)) # [257, 512]
seq_len = tokens.shape[1]
if exists(image) and not is_empty(image):
is_raw_image = len(image.shape) == 3
if is_raw_image:
image_size = self.vae.image_size
# assert tuple(image.shape[1:]) == (
# 3, image_size, image_size), f'invalid image of dimensions {image.shape} passed in during training'
image = image.to(torch.float)
image = self.vae.get_codebook_indices(image)
# image = F.pad(image, (1,0), value=0)
image_len = image.shape[1]
image_emb = self.image_emb(image) # [64, 313, 512]
image_emb += self.image_pos_emb(torch.arange(image.shape[1], device=device)) #[4, 256, 512]
tokens = torch.cat((tokens, image_emb), dim = 1) #[4, 513, 512]
seq_len += image_len
# when training, if the length exceeds the total text + image length,
# remove the last token, since it does not need to be trained
if tokens.shape[1] > total_seq_len:
seq_len -= 1
tokens = tokens[:, :-1]
if self.stable:
alpha = 0.1
tokens = tokens * alpha + tokens.detach() * (1 - alpha)
out = self.transformer(tokens)
if self.stable:
out = self.norm_by_max(out)
logits = self.to_logits(out)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
if not return_loss:
return logits
assert exists(image), 'when training, image must be supplied'
offsetted_image = image + self.num_text_tokens
labels = torch.cat((text[:, 1:], offsetted_image), dim=1)
logits = rearrange(logits, 'b n c -> b c n')
loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len]) #[64, 314]
loss_img = F.cross_entropy(logits[:, :, self.text_seq_len:], labels[:, self.text_seq_len:])
loss = (loss_text + self.loss_img_weight * loss_img) / (self.loss_img_weight + 1)
return loss
| [] |
2024-01-10 | gustmd0121/DALLE_ECG | train_dalle2.py | import argparse
from pathlib import Path
import time
from glob import glob
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import shutil
import torch
import wandb # Quit early if user doesn't have wandb installed.
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from dalle_pytorch import OpenAIDiscreteVAE, VQGanVAE, DiscreteVAE, DALLE
from dalle_pytorch import distributed_utils
from dalle_pytorch.loader import TextImageDataset
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, ChineseTokenizer, YttmTokenizer
import ecg_plot
# libraries needed for webdataset support
import webdataset as wds
from torchvision import transforms as T
from PIL import Image
from io import BytesIO
torch.manual_seed(0)
torch.set_num_threads(16)
# argument parsing
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--vae_path', type=str,
help='path to your trained discrete VAE')
group.add_argument('--dalle_path', type=str,
help='path to your partially trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default=None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default=None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
parser.add_argument('--image_text_folder', type=str, required=True,
help='path to your folder of images and text for learning the DALL-E')
parser.add_argument(
'--wds',
type=str,
default='',
help='Comma separated list of WebDataset (1) image and (2) text column names. Must contain 2 values, e.g. img,cap.'
)
parser.add_argument('--truncate_captions', dest='truncate_captions', action='store_true',
help='Captions passed in which exceed the max token length will be truncated if this is set.')
parser.add_argument('--random_resize_crop_lower_ratio', dest='resize_ratio', type=float, default=0.75,
help='Random resized crop lower ratio')
parser.add_argument('--chinese', dest='chinese', action='store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--hug', dest='hug', action='store_true')
parser.add_argument('--bpe_path', type=str,
help='path to your BPE json file')
parser.add_argument('--dalle_output_file_name', type=str, default="dalle",
help='output_file_name')
parser.add_argument('--fp16', action='store_true',
help='(experimental) - Enable DeepSpeed 16 bit precision. Reduces VRAM.')
parser.add_argument('--amp', action='store_true',
help='Apex "O1" automatic mixed precision. More stable than 16 bit precision. Can\'t be used in conjunction with deepspeed zero stages 1-3.')
parser.add_argument('--wandb_name', default='dalle_train_transformer',
help='Name W&B will use when saving results.\ne.g. `--wandb_name "coco2017-full-sparse"`')
parser.add_argument('--wandb_entity', default=None,
help='(optional) Name of W&B team/entity to log to.')
parser.add_argument('--stable_softmax', dest='stable_softmax', action='store_true',
help='Prevent values from becoming too large during softmax. Helps with stability in fp16 and Mixture of Quantization training.')
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--flops_profiler', dest='flops_profiler', action='store_true',
help='Exits after printing detailed flops/runtime analysis of forward/backward')
train_group.add_argument('--epochs', default=1000, type=int, help='Number of epochs')
train_group.add_argument('--save_every_n_steps', default=1000, type=int, help='Save a checkpoint every n steps')
train_group.add_argument('--keep_n_checkpoints', default=None, type=int,
help='(Careful) Deletes old deepspeed checkpoints if there are more than n')
train_group.add_argument('--batch_size', default=64, type=int, help='Batch size')
train_group.add_argument('--ga_steps', default=1, type=int,
help='Number of steps to accumulate gradients across per each iteration. DeepSpeed only.')
train_group.add_argument('--learning_rate', default=3e-4, type=float, help='Learning rate')
train_group.add_argument('--clip_grad_norm', default=0.5, type=float, help='Clip gradient norm')
train_group.add_argument('--lr_decay', dest='lr_decay', action='store_true')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--dim', default=512, type=int, help='Model dimension')
model_group.add_argument('--text_seq_len', default=256, type=int, help='Text sequence length')
model_group.add_argument('--depth', default=2, type=int, help='Model depth')
model_group.add_argument('--heads', default=8, type=int, help='Model number of heads')
model_group.add_argument('--dim_head', default=64, type=int, help='Model head dimension')
train_group.add_argument('--ff_dropout', default=0.0, type=float, help='Feed forward dropout.')
train_group.add_argument('--attn_dropout', default=0.0, type=float, help='Attention dropout.')
model_group.add_argument('--reversible', dest='reversible', action='store_true')
model_group.add_argument('--loss_img_weight', default=7, type=int, help='Image loss weight')
model_group.add_argument('--attn_types', default='full', type=str,
help='comma separated list of attention types. attention type can be: full or sparse or axial_row or axial_col or conv_like.')
model_group.add_argument('--shift_tokens', help='Use the shift tokens feature', action='store_true')
model_group.add_argument('--rotary_emb', help='Use rotary embeddings', action='store_true')
args = parser.parse_args()
# helpers
def exists(val):
return val is not None
def get_trainable_params(model):
return [params for params in model.parameters() if params.requires_grad]
def cp_path_to_dir(cp_path, tag):
"""Convert a checkpoint path to a directory with `tag` inserted.
If `cp_path` is already a directory, return it unchanged.
"""
if not isinstance(cp_path, Path):
cp_path = Path(cp_path)
if cp_path.is_dir():
return cp_path
path_sans_extension = cp_path.parent / cp_path.stem
cp_dir = Path(f'{path_sans_extension}-{tag}-cp')
return cp_dir
# constants
WEBDATASET_IMAGE_TEXT_COLUMNS = tuple(args.wds.split(','))
ENABLE_WEBDATASET = True if len(WEBDATASET_IMAGE_TEXT_COLUMNS) == 2 else False
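# WebDataset streaming is enabled only when --wds supplies exactly two column names (image, caption)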
DALLE_OUTPUT_FILE_NAME = args.dalle_output_file_name + ".pt"
VAE_PATH = args.vae_path
VQGAN_MODEL_PATH = args.vqgan_model_path
VQGAN_CONFIG_PATH = args.vqgan_config_path
DALLE_PATH = args.dalle_path
RESUME = exists(DALLE_PATH)
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
GRAD_CLIP_NORM = args.clip_grad_norm
LR_DECAY = args.lr_decay
SAVE_EVERY_N_STEPS = args.save_every_n_steps
KEEP_N_CHECKPOINTS = args.keep_n_checkpoints
MODEL_DIM = args.dim
TEXT_SEQ_LEN = args.text_seq_len
DEPTH = args.depth
HEADS = args.heads
DIM_HEAD = args.dim_head
REVERSIBLE = args.reversible
LOSS_IMG_WEIGHT = args.loss_img_weight
FF_DROPOUT = args.ff_dropout
ATTN_DROPOUT = args.attn_dropout
STABLE = args.stable_softmax
SHIFT_TOKENS = args.shift_tokens
ROTARY_EMB = args.rotary_emb
ATTN_TYPES = tuple(args.attn_types.split(','))
DEEPSPEED_CP_AUX_FILENAME = 'auxiliary.pt'
if not ENABLE_WEBDATASET:
# quit early if you used the wrong folder name
assert Path(args.image_text_folder).exists(), f'The path {args.image_text_folder} was not found.'
else:
# quit early if no tar files were found
if Path(args.image_text_folder).is_dir():
DATASET = [str(p) for p in Path(args.image_text_folder).glob("**/*") if ".tar" in str(p).lower()] # .name
assert len(DATASET) > 0, 'The directory ({}) does not contain any WebDataset/.tar files.'.format(
args.image_text_folder)
print('Found {} WebDataset .tar(.gz) file(s) under given path {}!'.format(len(DATASET), args.image_text_folder))
elif ('http://' in args.image_text_folder.lower()) | ('https://' in args.image_text_folder.lower()):
DATASET = f"pipe:curl -L -s {args.image_text_folder} || true"
print('Found http(s) link under given path: {}'.format(args.image_text_folder))
elif 'gs://' in args.image_text_folder.lower():
DATASET = f"pipe:gsutil cat {args.image_text_folder} || true"
print('Found GCS link under given path: {}'.format(args.image_text_folder))
elif '.tar' in args.image_text_folder:
DATASET = args.image_text_folder
print('Found WebDataset .tar(.gz) file under given path {}!'.format(args.image_text_folder))
else:
raise Exception('No folder, no .tar(.gz) and no url pointing to tar files provided under {}.'.format(
args.image_text_folder))
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# reconstitute vae
if RESUME:
dalle_path = Path(DALLE_PATH)
if using_deepspeed:
cp_dir = cp_path_to_dir(dalle_path, 'ds')
assert cp_dir.is_dir(), \
f'DeepSpeed checkpoint directory {cp_dir} not found'
dalle_path = cp_dir / DEEPSPEED_CP_AUX_FILENAME
else:
assert dalle_path.exists(), 'DALL-E model file does not exist'
loaded_obj = torch.load(str(dalle_path), map_location='cpu')
dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']
opt_state = loaded_obj.get('opt_state')
scheduler_state = loaded_obj.get('scheduler_state')
if vae_params is not None:
vae = DiscreteVAE(**vae_params)
else:
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
dalle_params = dict(
**dalle_params
)
IMAGE_SIZE = vae.image_size
resume_epoch = loaded_obj.get('epoch', 0)
else:
if exists(VAE_PATH):
vae_path = Path(VAE_PATH)
assert vae_path.exists(), 'VAE model file does not exist'
assert not vae_path.is_dir(), \
('Cannot load VAE model from directory; please use a '
'standard *.pt checkpoint. '
'Currently, merging a DeepSpeed-partitioned VAE into a DALLE '
'model is not supported.')
loaded_obj = torch.load(str(vae_path))
vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
vae.load_state_dict(weights)
else:
if distr_backend.is_root_worker():
print('using pretrained VAE for encoding images to tokens')
vae_params = None
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
IMAGE_SIZE = vae.image_size
dalle_params = dict(
num_text_tokens=tokenizer.vocab_size,
text_seq_len=TEXT_SEQ_LEN,
dim=MODEL_DIM,
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD,
reversible=REVERSIBLE,
loss_img_weight=LOSS_IMG_WEIGHT,
attn_types=ATTN_TYPES,
ff_dropout=FF_DROPOUT,
attn_dropout=ATTN_DROPOUT,
stable=STABLE,
shift_tokens=SHIFT_TOKENS,
rotary_emb=ROTARY_EMB,
)
resume_epoch = 0
# configure OpenAI VAE for float16s
if isinstance(vae, OpenAIDiscreteVAE) and args.fp16:
vae.enc.blocks.output.conv.use_float16 = True
# helpers
def group_weight(model):
group_decay, group_no_decay = [], []
for params in model.named_parameters():
if 'transformer' in params[0]:
if 'bias' in params[0] or 'norm' in params[0]:
group_no_decay.append(params[1])
continue
group_decay.append(params[1])
assert len(list(model.parameters())) == len(group_decay) + len(group_no_decay)
groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
return groups
# create dataset and dataloader
is_shuffle = not distributed_utils.using_backend(distributed_utils.HorovodBackend)
imagepreproc = T.Compose([
T.Lambda(lambda img: img.convert('RGB')
if img.mode != 'RGB' else img),
T.RandomResizedCrop(IMAGE_SIZE,
scale=(args.resize_ratio, 1.),
ratio=(1., 1.)),
T.ToTensor(),
])
def imagetransform(b):
return Image.open(BytesIO(b))
def tokenize(s):
return tokenizer.tokenize(
s.decode('utf-8'),
TEXT_SEQ_LEN,
truncate_text=args.truncate_captions).squeeze(0)
if ENABLE_WEBDATASET:
DATASET_SIZE = int(
1e9) # You need to set a nominal length for the Dataset in order to avoid warnings from DataLoader
myimg, mycap = WEBDATASET_IMAGE_TEXT_COLUMNS
image_text_mapping = {
myimg: imagetransform,
mycap: tokenize
}
image_mapping = {
myimg: imagepreproc
}
def filter_dataset(item): # For e.g. C@H which (rarely) has no caption available.
if mycap not in item:
return False
if myimg not in item:
return False
return True
w_dataset = wds.WebDataset(DATASET, handler=wds.warn_and_continue)
filtered_dataset = w_dataset.select(filter_dataset)
ds = filtered_dataset.map_dict(**image_text_mapping).map_dict(**image_mapping).to_tuple(mycap, myimg).batched(
BATCH_SIZE, partial=True)
else:
ds = TextImageDataset(
args.image_text_folder,
text_len=TEXT_SEQ_LEN,
image_size=IMAGE_SIZE,
resize_ratio=args.resize_ratio,
truncate_captions=args.truncate_captions,
tokenizer=tokenizer,
shuffle=is_shuffle,
)
assert len(ds) > 0, 'dataset is empty'
if distr_backend.is_root_worker():
if not ENABLE_WEBDATASET:
print(f'{len(ds)} image-text pairs found for training')
if not is_shuffle:
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds,
num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank()
)
else:
data_sampler = None
if ENABLE_WEBDATASET:
# WebLoader for WebDataset and DeepSpeed compatibility
dl = wds.WebLoader(ds, batch_size=None, shuffle=False) # optionally add num_workers=2 (n) argument
number_of_batches = DATASET_SIZE // (BATCH_SIZE * distr_backend.get_world_size())
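# repeat the shard stream and slice it to a fixed batch count so each epoch has a well-defined length
# (the true dataset size is unknown when streaming, hence the nominal DATASET_SIZE above)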
dl = dl.repeat(2).slice(number_of_batches)
dl.length = number_of_batches
else:
# Regular DataLoader for image-text-folder datasets
dl = DataLoader(ds, batch_size=BATCH_SIZE, shuffle=is_shuffle, drop_last=True, sampler=data_sampler)
# initialize DALL-E
dalle = DALLE(vae=vae, **dalle_params)
if not using_deepspeed:
if args.fp16:
dalle = dalle.half()
dalle = dalle.cuda()
if RESUME and not using_deepspeed:
dalle.load_state_dict(weights)
# optimizer
opt = Adam(get_trainable_params(dalle), lr=LEARNING_RATE)
if RESUME and opt_state:
opt.load_state_dict(opt_state)
if LR_DECAY:
scheduler = ReduceLROnPlateau(
opt,
mode="min",
factor=0.5,
patience=10,
cooldown=10,
min_lr=1e-6,
verbose=True,
)
if RESUME and scheduler_state:
scheduler.load_state_dict(scheduler_state)
else:
scheduler = None
if distr_backend.is_root_worker():
# experiment tracker
model_config = dict(
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD
)
run = wandb.init(
project=args.wandb_name,
entity=args.wandb_entity,
resume=False,
config=model_config,
)
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {
'train_batch_size': BATCH_SIZE,
'gradient_accumulation_steps': args.ga_steps,
'gradient_clipping': GRAD_CLIP_NORM,
'fp16': {
'enabled': args.fp16,
},
'amp': {
'enabled': args.amp,
'opt_level': 'O1',
},
"flops_profiler": {
"enabled": args.flops_profiler,
"profile_step": 200,
"module_depth": -1,
"top_modules": 1,
"detailed": True,
"output_file": None # TODO Can't get this to work.
},
}
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2:
print(f"Checkpoints made with DeepSpeed ZeRO Stages 2 and 3 will be stored in deepspeed checkpoint folder")
print(f"As such, they will require DeepSpeed as a dependency in order to resume from or generate with.")
print(
"See the deespeed conversion script for details on how to convert your ZeRO stage 2/3 checkpoint to a single file.")
print(
"If using a single GPU, consider running with apex automatic mixed precision instead for a similar speedup to ZeRO.")
time.sleep(2)
(distr_dalle, distr_opt, distr_dl, distr_scheduler) = distr_backend.distribute(
args=args,
model=dalle,
optimizer=opt,
model_parameters=get_trainable_params(dalle),
training_data=(
(None if ENABLE_WEBDATASET else ds)
if using_deepspeed
else dl
),
# Do not pass the LR scheduler to DeepSpeed so we can manually
# advance it.
lr_scheduler=scheduler if LR_DECAY and not using_deepspeed else None,
config_params=deepspeed_config,
)
# Prefer scheduler in `deepspeed_config`.
if LR_DECAY and distr_scheduler is None:
distr_scheduler = scheduler
avoid_model_calls = using_deepspeed and args.fp16
if RESUME and using_deepspeed:
distr_dalle.load_checkpoint(str(cp_dir))
def save_model(path, epoch=0):
save_obj = {
'hparams': dalle_params,
'vae_params': vae_params,
'epoch': epoch,
}
if using_deepspeed:
cp_dir = cp_path_to_dir(path, 'ds')
if KEEP_N_CHECKPOINTS is not None and distr_backend.is_root_worker():
checkpoints = sorted(glob(str(cp_dir / "global*")), key=os.path.getmtime, reverse=True)
for checkpoint in checkpoints[KEEP_N_CHECKPOINTS:]:
shutil.rmtree(checkpoint)
distr_dalle.save_checkpoint(cp_dir, client_state=save_obj)
if not distr_backend.is_root_worker():
return
# Save auxiliary values so we can reuse the standard routine
# for loading.
save_obj = {
**save_obj,
# Save a nonsense value that directs the user to
# further help.
'weights': (
'To get a working standard checkpoint, '
'look into consolidating DeepSpeed checkpoints.'
),
}
torch.save(save_obj, str(cp_dir / DEEPSPEED_CP_AUX_FILENAME))
if deepspeed_config.get('zero_optimization', {}).get('stage',
0) >= 2: # see https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
return
if not distr_backend.is_root_worker():
return
save_obj = {
**save_obj,
'weights': dalle.state_dict(),
'opt_state': opt.state_dict(),
}
save_obj['scheduler_state'] = (scheduler.state_dict() if scheduler else None)
torch.save(save_obj, path)
# training
# Save a checkpoint before training begins so that a mis-configured run fails early.
# See https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
save_model(DALLE_OUTPUT_FILE_NAME, epoch=resume_epoch)
for epoch in range(resume_epoch, EPOCHS):
if data_sampler:
data_sampler.set_epoch(epoch)
for i, (text, images) in enumerate((dl if ENABLE_WEBDATASET else distr_dl)):
if i % 10 == 0 and distr_backend.is_root_worker():
t = time.time()
if args.fp16:
images = images.half()
images = images.type(torch.LongTensor)
text, images = map(lambda t: t.cuda(), (text, images))
loss = distr_dalle(text, images, return_loss=True)
if using_deepspeed:
distr_dalle.backward(loss)
distr_dalle.step()
# Gradients are automatically zeroed after the step
else:
loss.backward()
clip_grad_norm_(distr_dalle.parameters(), GRAD_CLIP_NORM)
distr_opt.step()
distr_opt.zero_grad()
# Collective loss, averaged
avg_loss = distr_backend.average_all(loss)
log = {}
if i % 10 == 0 and distr_backend.is_root_worker():
print(epoch, i, f'loss - {avg_loss.item()}')
log = {
**log,
'epoch': epoch,
'iter': i,
'loss': avg_loss.item()
}
if i % SAVE_EVERY_N_STEPS == 0:
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if i % 1000 == 0:
if distr_backend.is_root_worker():
sample_text = text[:1]
token_list = sample_text.masked_select(sample_text != 0).tolist()
decoded_text = tokenizer.decode(token_list)
if not avoid_model_calls:
# CUDA index errors when we don't guard this
image = dalle.generate_images(text[:1], filter_thres=0.9) # topk sampling at 0.9
ecg_plot.plot(image.squeeze(0).detach().cpu().numpy(), sample_rate=500, title="Generated ECG")
ecg_plot.save_as_png("Generated_ECG")
log = {
**log,
}
if not avoid_model_calls:
log['generated_image'] = wandb.log(
{"Generated ECG": wandb.Image("Generated_ECG.png", caption=decoded_text)})
if i % 10 == 9 and distr_backend.is_root_worker():
sample_per_sec = BATCH_SIZE * 10 / (time.time() - t)
log["sample_per_sec"] = sample_per_sec
print(epoch, i, f'sample_per_sec - {sample_per_sec}')
if i == 201 and args.flops_profiler:
raise StopIteration("Profiler has finished running. Stopping training early.")
if distr_backend.is_root_worker():
wandb.log(log)
if LR_DECAY:
distr_scheduler.step(avg_loss)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if distr_backend.is_root_worker():
# save trained model to wandb as an artifact every epoch's end
model_artifact = wandb.Artifact('trained-dalle', type='model', metadata=dict(model_config))
model_artifact.add_file(DALLE_OUTPUT_FILE_NAME)
run.log_artifact(model_artifact)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if distr_backend.is_root_worker():
wandb.save(DALLE_OUTPUT_FILE_NAME)
model_artifact = wandb.Artifact('trained-dalle', type='model', metadata=dict(model_config))
model_artifact.add_file(DALLE_OUTPUT_FILE_NAME)
run.log_artifact(model_artifact)
wandb.finish()
| [] |
2024-01-10 | gustmd0121/DALLE_ECG | dalle_pytorch~dalle_pytorch_MRF.py | from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
import numpy as np
import os
from dalle_pytorch.residual import ResBlock1
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from dalle_pytorch import distributed_utils
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from dalle_pytorch.transformer import Transformer, DivideMax
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class always():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return self.val
def is_empty(t):
return t.nelement() == 0
def masked_mean(t, mask, dim=1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim=1) / mask.sum(dim=1)[..., None]
def set_requires_grad(model, value):
for param in model.parameters():
param.requires_grad = value
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
def top_k(logits, thres=0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# discrete vae class
class ResBlock(nn.Module):
def __init__(self, chan):
super().__init__()
self.net = nn.Sequential(
nn.Conv1d(chan, chan, 3, padding=1),
nn.ReLU(),
nn.Conv1d(chan, chan, 3, padding=1),
nn.ReLU(),
nn.Conv1d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class DiscreteVAE(nn.Module):
def __init__(
self,
image_size=5000,
num_tokens=512,
codebook_dim=512,
num_layers=4,
num_resnet_blocks=0,
hidden_dim=64,
channels=12,
smooth_l1_loss=False,
temperature=0.9,
straight_through=False,
kl_div_loss_weight=0.,
normalization=((0.5,) * 3, (0.5,) * 3)
):
super().__init__()
# assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
hdim = hidden_dim
enc_chans = [hidden_dim] * num_layers
dec_chans = list(reversed(enc_chans))
enc_chans = [channels, *enc_chans]
dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
count = 0
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(nn.Conv1d(enc_in, enc_out, 4, stride=2, padding=1), nn.ReLU()))
if count == 0:
dec_layers.append(
nn.Sequential(nn.ConvTranspose1d(dec_in, dec_out, 4, stride=2, padding=1, output_padding=1),
nn.ReLU()))
count += 1
continue
else:
dec_layers.append(nn.Sequential(nn.ConvTranspose1d(dec_in, dec_out, 4, stride=2, padding=1), nn.ReLU()))
count += 1
resblock = ResBlock1
self.resblocks = nn.ModuleList()
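# build 4 stages of 3 parallel residual branches with kernel sizes 3/7/11 and dilations (1, 3, 5),
# a multi-receptive-field (MRF) stack in the spirit of HiFi-GAN's generator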
for i in range(4):
for j, (k, d) in enumerate(zip([3, 7, 11], [[1,3,5], [1,3,5], [1,3,5]])):
self.resblocks.append(resblock(512, k, d)) # register the branch so forward() can index self.resblocks[i * 3 + j]
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1]))
# enc_layers.append(ResBlock(enc_chans[-1]))
if num_resnet_blocks > 0:
dec_layers.insert(0, nn.Conv1d(codebook_dim, dec_chans[1], 1))
enc_layers.append(nn.Conv1d(enc_chans[-1], num_tokens, 1))
dec_layers.append(nn.Conv1d(dec_chans[-1], channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
# take care of normalization within class
self.normalization = normalization
self._register_external_parameters()
def _register_external_parameters(self):
"""Register external parameters for DeepSpeed partitioning."""
if (
not distributed_utils.is_distributed
or not distributed_utils.using_backend(
distributed_utils.DeepSpeedBackend)
):
return
deepspeed = distributed_utils.backend.backend_module
deepspeed.zero.register_external_parameter(self, self.codebook.weight)
def norm(self, images):
if not exists(self.normalization):
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self(images, return_logits=True)
codebook_indices = logits.argmax(dim=1).flatten(1)
return codebook_indices
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b t d -> b d t')
images = self.decoder(image_embeds)
return images
def forward(
self,
ecg,
return_loss=False,
return_recons=False,
return_logits=False,
temp=None
):
device, num_tokens, image_size, kl_div_loss_weight = ecg.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
# assert ecg.shape[-1] == image_size and ecg.shape[-2] == image_size, f'input must have the correct image size {image_size}'
# ecg = self.norm(ecg) #8, 3, 128, 128
logits = self.encoder(ecg) # 256, 512, 312
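# refine the encoder output with the MRF stages: at each stage the three parallel residual branches
# are applied to the same features and their outputs are averaged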
for i in range(4):
logits = F.leaky_relu(logits, 0.1)
xs = None
for j in range(3):
if xs is None:
xs = self.resblocks[i * 3 + j](logits)
else:
xs += self.resblocks[i * 3 + j](logits)
logits = xs / 3
# print(logits)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=self.straight_through)
sampled = einsum('b n t, n d -> b d t', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(ecg, out)
# kl divergence
logits = rearrange(logits, 'b n t -> b t n') # [8, 256, 8192] [256, 312, 512]
log_qy = F.log_softmax(logits, dim=-1)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device=device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target=True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
# main classes
class CLIP(nn.Module):
def __init__(
self,
*,
dim_text=512,
dim_image=512,
dim_latent=512,
num_text_tokens=10000,
text_enc_depth=6,
text_seq_len=256,
text_heads=8,
num_visual_tokens=512,
visual_enc_depth=6,
visual_heads=8,
visual_image_size=256,
visual_patch_size=32,
channels=3
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim_text)
self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
self.text_transformer = Transformer(causal=False, seq_len=text_seq_len, dim=dim_text, depth=text_enc_depth,
heads=text_heads, rotary_emb=False)
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias=False)
assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (visual_image_size // visual_patch_size) ** 2
patch_dim = channels * visual_patch_size ** 2
self.visual_patch_size = visual_patch_size
self.to_visual_embedding = nn.Linear(patch_dim, dim_image)
self.visual_pos_emb = nn.Embedding(num_patches, dim_image)
self.visual_transformer = Transformer(causal=False, seq_len=num_patches, dim=dim_image, depth=visual_enc_depth,
heads=visual_heads, rotary_emb=False)
self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias=False)
self.temperature = nn.Parameter(torch.tensor(1.))
def forward(
self,
text,
image,
text_mask=None,
return_loss=False
):
b, device, p = text.shape[0], text.device, self.visual_patch_size
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device=device))
image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
image_emb = self.to_visual_embedding(image_patches)
image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device=device))
enc_text = self.text_transformer(text_emb, mask=text_mask)
enc_image = self.visual_transformer(image_emb)
if exists(text_mask):
text_latents = masked_mean(enc_text, text_mask, dim=1)
else:
text_latents = enc_text.mean(dim=1)
image_latents = enc_image.mean(dim=1)
text_latents = self.to_text_latent(text_latents)
image_latents = self.to_visual_latent(image_latents)
text_latents, image_latents = map(lambda t: F.normalize(t, p=2, dim=-1), (text_latents, image_latents))
temp = self.temperature.exp()
if not return_loss:
sim = einsum('n d, n d -> n', text_latents, image_latents) * temp
return sim
sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp
labels = torch.arange(b, device=device)
loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
return loss
# main DALL-E class
class DALLE(nn.Module):
def __init__(
self,
*,
dim,
vae,
num_text_tokens=10000,
text_seq_len=256,
depth,
heads=8,
dim_head=64,
reversible=False,
attn_dropout=0.,
ff_dropout=0,
sparse_attn=False,
attn_types=None,
loss_img_weight=7,
stable=False,
sandwich_norm=False,
shift_tokens=True,
rotary_emb=True
):
super().__init__()
assert isinstance(vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE)), 'vae must be an instance of DiscreteVAE'
image_size = vae.image_size
num_image_tokens = vae.num_tokens
image_fmap_size = (vae.image_size // (2 ** vae.num_layers))
image_seq_len = image_fmap_size ** 2
num_text_tokens = num_text_tokens + text_seq_len # reserve unique padding tokens for each position (text seq len)
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.image_emb = nn.Embedding(num_image_tokens, dim)
self.text_pos_emb = nn.Embedding(text_seq_len + 1, dim) if not rotary_emb else always(0) # +1 for <bos>
self.image_pos_emb = AxialPositionalEmbedding(dim, axial_shape=(
image_fmap_size, image_fmap_size)) if not rotary_emb else always(0)
self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
self.num_image_tokens = num_image_tokens
self.text_seq_len = text_seq_len
self.image_seq_len = image_seq_len
seq_len = text_seq_len + image_seq_len
total_tokens = num_text_tokens + num_image_tokens
self.total_tokens = total_tokens
self.total_seq_len = seq_len
self.vae = vae
set_requires_grad(self.vae, False) # freeze VAE from being trained
self.transformer = Transformer(
dim=dim,
causal=True,
seq_len=seq_len,
depth=depth,
heads=heads,
dim_head=dim_head,
reversible=reversible,
attn_dropout=attn_dropout,
ff_dropout=ff_dropout,
attn_types=attn_types,
image_fmap_size=image_fmap_size,
sparse_attn=sparse_attn,
stable=stable,
sandwich_norm=sandwich_norm,
shift_tokens=shift_tokens,
rotary_emb=rotary_emb
)
self.stable = stable
if stable:
self.norm_by_max = DivideMax(dim=-1)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, self.total_tokens),
)
seq_range = torch.arange(seq_len)
logits_range = torch.arange(total_tokens)
seq_range = rearrange(seq_range, 'n -> () n ()')
logits_range = rearrange(logits_range, 'd -> () () d')
logits_mask = (
((seq_range >= text_seq_len) & (logits_range < num_text_tokens)) |
((seq_range < text_seq_len) & (logits_range >= num_text_tokens))
)
self.register_buffer('logits_mask', logits_mask, persistent=False)
self.loss_img_weight = loss_img_weight
@torch.no_grad()
@eval_decorator
def generate_texts(
self,
tokenizer,
text=None,
*,
filter_thres=0.5,
temperature=1.
):
text_seq_len = self.text_seq_len
if text is None or text == "":
text_tokens = torch.tensor([[0]]).cuda()
else:
text_tokens = torch.tensor(tokenizer.tokenizer.encode(text)).cuda().unsqueeze(0)
for _ in range(text_tokens.shape[1], text_seq_len):
device = text_tokens.device
tokens = self.text_emb(text_tokens)
tokens += self.text_pos_emb(torch.arange(text_tokens.shape[1], device=device))
seq_len = tokens.shape[1]
output_transf = self.transformer(tokens)
if self.stable:
output_transf = self.norm_by_max(output_transf)
logits = self.to_logits(output_transf)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
logits = logits[:, -1, :]
filtered_logits = top_k(logits, thres=filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
text_tokens = torch.cat((text_tokens, sample), dim=-1)
padding_tokens = set(np.arange(self.text_seq_len) + (self.num_text_tokens - self.text_seq_len))
texts = [tokenizer.tokenizer.decode(text_token, pad_tokens=padding_tokens) for text_token in text_tokens]
return text_tokens, texts
@torch.no_grad()
@eval_decorator
def generate_images(
self,
text,
*,
clip=None,
mask=None,
filter_thres=0.5,
temperature=1.,
img=None,
num_init_img_tokens=None
):
vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens
total_len = text_seq_len + image_seq_len
text = text[:, :text_seq_len] # make sure text is within bounds
out = text
if exists(img):
image_size = vae.image_size
assert img.shape[1] == 3 and img.shape[2] == image_size and img.shape[
3] == image_size, f'input image must have the correct image size {image_size}'
indices = vae.get_codebook_indices(img)
num_img_tokens = default(num_init_img_tokens,
int(0.4375 * image_seq_len)) # OpenAI used 14 * 32 initial tokens to prime
assert num_img_tokens < image_seq_len, 'number of initial image tokens for priming must be less than the total image token sequence length'
indices = indices[:, :num_img_tokens]
out = torch.cat((out, indices), dim=-1)
for cur_len in range(out.shape[1], total_len):
is_image = cur_len >= text_seq_len
text, image = out[:, :text_seq_len], out[:, text_seq_len:]
logits = self(text, image, mask=mask)[:, -1, :]
filtered_logits = top_k(logits, thres=filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
sample -= (
num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens
out = torch.cat((out, sample), dim=-1)
if out.shape[1] <= text_seq_len:
mask = F.pad(mask, (0, 1), value=True)
text_seq = out[:, :text_seq_len]
img_seq = out[:, -image_seq_len:]
images = vae.decode(img_seq)
if exists(clip):
scores = clip(text_seq, images, return_loss=False)
return images, scores
return images
def forward(
self,
text,
image=None,
mask=None,
return_loss=False
):
assert text.shape[
-1] == self.text_seq_len, f'the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})'
device, total_seq_len = text.device, self.total_seq_len
# make sure padding in text tokens get unique padding token id
text_range = torch.arange(self.text_seq_len, device=device) + (self.num_text_tokens - self.text_seq_len)
text = torch.where(text == 0, text_range, text)
# add <bos>
text = F.pad(text, (1, 0), value=0)
tokens = self.text_emb(text)
tokens += self.text_pos_emb(torch.arange(text.shape[1], device=device))
seq_len = tokens.shape[1]
if exists(image) and not is_empty(image):
is_raw_image = len(image.shape) == 4
if is_raw_image:
image_size = self.vae.image_size
assert tuple(image.shape[1:]) == (
3, image_size, image_size), f'invalid image of dimensions {image.shape} passed in during training'
image = self.vae.get_codebook_indices(image)
image_len = image.shape[1]
image_emb = self.image_emb(image)
image_emb += self.image_pos_emb(image_emb)
tokens = torch.cat((tokens, image_emb), dim=1)
seq_len += image_len
# when training, if the length exceeds the total text + image length,
# remove the last token, since it does not need to be trained
if tokens.shape[1] > total_seq_len:
seq_len -= 1
tokens = tokens[:, :-1]
if self.stable:
alpha = 0.1
tokens = tokens * alpha + tokens.detach() * (1 - alpha)
out = self.transformer(tokens)
if self.stable:
out = self.norm_by_max(out)
logits = self.to_logits(out)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
if not return_loss:
return logits
assert exists(image), 'when training, image must be supplied'
offsetted_image = image + self.num_text_tokens
labels = torch.cat((text[:, 1:], offsetted_image), dim=1)
logits = rearrange(logits, 'b n c -> b c n')
loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len])
loss_img = F.cross_entropy(logits[:, :, self.text_seq_len:], labels[:, self.text_seq_len:])
loss = (loss_text + self.loss_img_weight * loss_img) / (self.loss_img_weight + 1)
return loss
| [] |
2024-01-10 | pbcpraveen/LLM-Saliency | factual_recall_qa~icl_query.py | import itertools
import sys
import os
from pathlib import Path
import pickle
import pandas as pd
import json
import openai
import pickle
import dotenv
import random
import time
path = Path(os.getcwd())
sys.path.append(str(path.parent.absolute()))
import propmts
from common_utils import *
from constants import *
from utils import *
load_dotenv('../api_key.env')
openai.api_key = os.getenv("OPENAI_API_KEY")
setup_directories()
logger = get_logger(F"{MOVIE}.log", depth="INFO")
with open("../knowledge_validation/dataset/nobel_laureates_attribute_verified.pickle", "rb") as file:
data = pickle.load(file)
target_attribute = Attribute.WORK_CITY.value
concept_class = None
meta = metadata[NOBEL_LAUREATES]
for i in meta[TARGET_ATTRIBUTES].keys():
if target_attribute in meta[TARGET_ATTRIBUTES][i]:
concept_class = i
entities = []
icl_examples = {}
for concept_class in meta[TARGET_ATTRIBUTES].keys():
icl_examples[concept_class] = []
for target_attribute in meta[TARGET_ATTRIBUTES][concept_class]:
entities = []
idx = 0
for entry in data:
if entry[ATTRIBUTE_VERIFIED][target_attribute]:
e = {}
e[INDEX_COLUMN] = idx
for attr in meta[CONTEXTUALISING_ATTRIBUTES]:
e[attr] = entry[CONTEXTUALISING_ATTRIBUTES][attr]
e[target_attribute] = entry[TARGET_ATTRIBUTES][concept_class][target_attribute]
entities.append(e)
idx += 1
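# turn up to 500 verified entities per target attribute into in-context-learning prompts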
icl_attr = get_icl_examples(entities, meta, target_attribute, count=min(500, len(entities)))
icl_attr[TARGET_ATTRIBUTES] = target_attribute
icl_examples[concept_class].append(icl_attr)
for concept_class in icl_examples.keys():
file = open(f"dataset/icl_query_{concept_class}_{meta[ENTITY]}.pickle", "wb")
data_concept = pd.concat(icl_examples[concept_class])
prompts = data_concept[ICL_PROMPT_COLUMN].to_list()
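# for year-valued concepts, reduce full dates in the ground truth to the bare year before storing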
if concept_class == ConceptClass.YEAR.value:
ground_truth = list(map(extract_year, data_concept[GROUND_TRUTH].to_list()))
else:
ground_truth = data_concept[GROUND_TRUTH].to_list()
target_attr = data_concept[TARGET_ATTRIBUTES].to_list()
responses = create_and_run_api_request_threads(prompts, 5, logger)
responses = list(itertools.chain(*responses))
df = pd.DataFrame()
df[ICL_PROMPT_COLUMN] = prompts
df[GROUND_TRUTH] = ground_truth
df[TARGET_ATTRIBUTES] = target_attr
df[GPT_4_RESPONSE] = responses
pickle.dump(df, file)
file.close()
logger.info(f"Finished writing {concept_class} ICL queries to file")
| [] |
2024-01-10 | pbcpraveen/LLM-Saliency | constants.py | from enum import Enum
import openai
from dotenv import load_dotenv
import os
load_dotenv('api_key.env')
openai.api_key = os.getenv("OPENAI_API_KEY")
PROMPT_COLUMN = 'prompt'
GPT_4_RESPONSE = 'GPT 4 Response'
INDEX_COLUMN = "index"
PROMPT_INDEX_COLUMN = "prompt_index"
GROUND_TRUTH = "ground_truth"
ICL_PROMPT_COLUMN = "icl_prompt"
ENTITY = "entity"
CONTEXTUALISING_ATTRIBUTES = "contextualising_attributes"
TARGET_ATTRIBUTES = "target_attributes"
VERIFIED = "verified"
# Sources
WIKIBIO = "wiki_bio"
NOBEL_LAUREATES_DATASET = ("https://public.opendatasoft.com/api/explore/v2.1/catalog/datasets/nobel-prize-laureates/exports/"
"json?lang=en&timezone=America%2FLos_Angeles")
NOBEL_LAUREATES = "nobel_laureates"
MOVIE_DATASET = ("harshitshankhdhar/imdb-dataset-of-top-1000-movies-and-tv-shows")
MOVIE = "movie"
COUNTRY_DATASET = ("https://raw.githubusercontent.com/bastianherre/global-leader-ideologies/"
"main/global_leader_ideologies.csv")
ATTRIBUTE_VERIFIED = "attribute_verified"
VERIFIED_RECORDS = {
WIKIBIO: "wikibio_verified.pickle",
NOBEL_LAUREATES: "nobel_laureates_verified.pickle",
MOVIE: "movies_verified.pickle"
}
class ConceptClass(Enum):
PLACE = "place"
PERSON_NAME = "name"
YEAR = "year"
class EntityClass(Enum):
PERSON = "person"
NOBEL_LAUREATES = "nobel_laureates"
MOVIE = "movie"
class Attribute(Enum):
NAME = "name"
NATIONALITY = "nationality"
OCCUPATION = "occupation"
BIRTH_DATE = "birth_date"
DEATH_DATE = "death_date"
BIRTH_PLACE = "birth_place"
DEATH_PLACE = "death_place"
MOTIVATION_NOBEL = "motivation"
CATEGORY_NOBEL = "category"
BIRTH_DATE_NOBEL = "born"
DEATH_DATE_NOBEL = "died"
YEAR = "year"
BIRTH_CITY = "borncity"
DEATH_CITY = "diedcity"
WORK_CITY = "city"
FIRST_NAME = "firstname"
SURNAME = "surname"
MOVIE_TITLE = "Series_Title"
MOVIE_DESCRIPTION = "Overview"
RELEASE_YEAR_MOVIE = "Released_Year"
CERTIFICATE_MOVIE = "Certificate"
GENRE_MOVIE = "Genre"
IMDB_RATING_MOVIE = "IMDB_Rating"
VOTES_COUNT_MOVIE = "No_of_Votes"
DIRECTOR_MOVIE = "Director"
STAR1_MOVIE = "Star1"
STAR2_MOVIE = "Star2"
STAR3_MOVIE = "Star3"
STAR4_MOVIE = "Star4"
COUNTRY_NAME = "country_name"
LEADER_NAME = "leader"
LEADER_POSITION = "leader_position"
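# Per-dataset metadata: the entity type, the attributes used to contextualise prompts, and the target attributes to predict, grouped by concept class.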
metadata = {
WIKIBIO: {
ENTITY: EntityClass.PERSON.value,
CONTEXTUALISING_ATTRIBUTES: [
Attribute.NAME.value,
Attribute.NATIONALITY.value,
Attribute.OCCUPATION.value
],
TARGET_ATTRIBUTES: {
ConceptClass.YEAR.value: [Attribute.BIRTH_DATE.value, Attribute.DEATH_DATE.value],
ConceptClass.PLACE.value: [Attribute.BIRTH_PLACE.value, Attribute.DEATH_PLACE.value]
}
},
NOBEL_LAUREATES: {
ENTITY: EntityClass.NOBEL_LAUREATES.value,
CONTEXTUALISING_ATTRIBUTES: [
Attribute.FIRST_NAME.value,
Attribute.SURNAME.value,
Attribute.MOTIVATION_NOBEL.value,
Attribute.CATEGORY_NOBEL.value
],
TARGET_ATTRIBUTES: {
ConceptClass.YEAR.value: [
Attribute.BIRTH_DATE_NOBEL.value,
Attribute.DEATH_DATE_NOBEL.value,
Attribute.YEAR.value
],
ConceptClass.PLACE.value: [
Attribute.BIRTH_CITY.value,
Attribute.DEATH_CITY.value,
Attribute.WORK_CITY.value
]
}
},
MOVIE: {
ENTITY: EntityClass.MOVIE.value,
CONTEXTUALISING_ATTRIBUTES: [
Attribute.MOVIE_TITLE.value,
Attribute.RELEASE_YEAR_MOVIE.value,
Attribute.GENRE_MOVIE.value,
Attribute.CERTIFICATE_MOVIE.value
],
TARGET_ATTRIBUTES: {
ConceptClass.PERSON_NAME.value: [Attribute.DIRECTOR_MOVIE.value,
Attribute.STAR1_MOVIE.value,
Attribute.STAR2_MOVIE.value,
Attribute.STAR3_MOVIE.value,
Attribute.STAR4_MOVIE.value]
}
}
}
| [
"prompt_index",
"icl_prompt",
"prompt"
] |
2024-01-10 | pbcpraveen/LLM-Saliency | knowledge_validation~noble_laureates.py | import itertools
import sys
import os
from pathlib import Path
import pickle
import pandas as pd
import json
import openai
from dotenv import load_dotenv
path = Path(os.getcwd())
sys.path.append(str(path.parent.absolute()))
import propmts
from common_utils import *
from constants import *
from utils import *
def get_prompt(row):
return propmts.NOBLE_LAUREATES_VALIDATION_QUERY.format(row[Attribute.YEAR.value],
row[Attribute.CATEGORY_NOBEL.value])
def format_name(record):
first_name = record[CONTEXTUALISING_ATTRIBUTES][Attribute.FIRST_NAME.value]
last_name = record[CONTEXTUALISING_ATTRIBUTES][Attribute.SURNAME.value]
name = ''
if first_name is not None:
name += first_name
if last_name is not None:
name += (' ' + last_name)
return name
load_dotenv('../api_key.env')
openai.api_key = os.getenv("OPENAI_API_KEY")
setup_directories()
logger = get_logger('noble_laureates.log')
logger.info("Checking if LLM has been already queried")
if "noble_laureates_LLM_recall.csv" not in os.listdir('dataset/'):
logger.info("Querying LLM for knowledge validation")
data = pickle.load(open("../data/dataset/noble_prize.pickle", "rb"))
record = set()
for i in data:
record.add((i[CONTEXTUALISING_ATTRIBUTES][Attribute.CATEGORY_NOBEL.value],
i[TARGET_ATTRIBUTES][ConceptClass.YEAR.value][Attribute.YEAR.value]))
df = pd.DataFrame()
df[Attribute.CATEGORY_NOBEL.value] = [i[0] for i in record]
df[Attribute.YEAR.value] = [i[1] for i in record]
df[PROMPT_COLUMN] = df.apply(get_prompt, axis = 1)
prompts = df.prompt.to_list()
responses = create_and_run_api_request_threads(prompts, 5, logger)
responses = list(itertools.chain(*responses))
df[GPT_4_RESPONSE] = responses
logger.info('Saving responses to ./dataset/noble_laureates_LLM_recall.csv')
df.to_csv("dataset/noble_laureates_LLM_recall.csv")
logger.info("Reading LLM responses from ./dataset/noble_laureates_LLM_recall.csv")
df = pd.read_csv("dataset/noble_laureates_LLM_recall.csv")
lookup = dict()
data = df.T.to_dict().values()
logger.info("Creating a lookup dictionary with LLM response")
for i in data:
if i[Attribute.YEAR.value] not in lookup.keys():
lookup[i[Attribute.YEAR.value]] = dict()
lookup[i[Attribute.YEAR.value]][i[Attribute.CATEGORY_NOBEL.value]] = get_names(i[GPT_4_RESPONSE])
logger.info("Reading prepared dataset from ../data/dataset/nobel_prize.pickle")
data = pickle.load(open("../data/dataset/noble_prize.pickle", "rb"))
logger.info("Creating verification flag")
for i in range(len(data)):
record = data[i]
name = format_name(record)
category = record[CONTEXTUALISING_ATTRIBUTES][Attribute.CATEGORY_NOBEL.value]
year = int(record[TARGET_ATTRIBUTES][ConceptClass.YEAR.value][Attribute.YEAR.value])
flag = name_match(candidate=name, responses=lookup[year][category])
data[i][VERIFIED] = flag
logger.info("Writing data with verified flag to dataset/noble_prize_verified.pickle")
with open(f'dataset/{VERIFIED_RECORDS[NOBEL_PRIZE]}', 'wb') as handle:
pickle.dump(data, handle)
| [] |
2024-01-10 | pbcpraveen/LLM-Saliency | common_utils.py | import logging
import threading
import time
import unicodedata
from textdistance import Levenshtein
from tqdm import tqdm
from constants import *
from logging.handlers import RotatingFileHandler
import sys
import os
import openai
from threading import Thread
from dotenv import load_dotenv
import re
responses = []
load_dotenv('api_key.env')
openai.api_key = os.getenv("OPENAI_API_KEY")
def chatgpt_query(query, model = "gpt-4-0314", temperature=0, replace_newline=True, max_tokens=150):
response = openai.ChatCompletion.create(
model=model,
messages=query,
temperature=temperature,
# request_timeout=90, # set timeout in the thread instead of the api call - to reduce charges for timed-out threads
max_tokens=max_tokens
)
if replace_newline:
return response.choices[0].message["content"].replace('\n', ' ')
else:
return response.choices[0].message["content"]
def setup_directories():
if not os.path.exists('dataset/'):
os.makedirs('dataset/')
if not os.path.exists('logs/'):
os.makedirs('logs/')
def get_logger(log_file, depth=logging.DEBUG):
logging.basicConfig(filename="logs/{}".format(log_file),
filemode='a')
logger = logging.getLogger()
logger.setLevel(depth)
log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s')
handler = RotatingFileHandler("logs/{}".format(log_file), maxBytes=1024*1024*5, backupCount=1)
handler.setFormatter(log_formatter)
handler.setLevel(depth)
logger.addHandler(handler)
logger.addHandler(logging.StreamHandler(sys.stdout))
return logger
def query_thread(prompts, global_index, model, temp):
global responses
count = len(prompts)
i = 0
responses_thread = []
pbar = tqdm(total=count)
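    # Keep retrying the current prompt: on an API error, wait 10 seconds and attempt the same prompt again.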
while i < count:
try:
query = [
{"role": "user", "content": prompts[i]}
]
response = chatgpt_query(query, model=model, temperature=temp)
i += 1
responses_thread.append(response)
pbar.update(1)
except Exception as e:
print(e)
time.sleep(10)
pbar.close()
responses[global_index] = responses_thread
print("Thread " + str(i) + "completed ")
print("==============================================================")
def create_and_run_api_request_threads(queries, n_threads, logger, model='gpt-4-0314', temperature=0):
global responses
count = len(queries)
responses = [[] for _ in range(n_threads)]
partitions = []
bin_size = count // n_threads
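    # Split the queries into n_threads contiguous partitions; the last partition also absorbs any remainder.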
for i in range(n_threads - 1):
partitions.append(queries[i * bin_size: (i + 1) * bin_size])
partitions.append(queries[(n_threads - 1) * bin_size:])
threads = []
for i in range(n_threads):
threads.append(threading.Thread(target=query_thread, args=(partitions[i], i,model, temperature)))
logger.info("starting API resquests to OPENAI's GPT 4 using " + str(n_threads) + " threads")
logger.info("Number of threads created: " + str(len(threads)))
logger.info("Number of partitions created: " + str(len(partitions)))
logger.info("Size of each partition: " + str(bin_size))
for i in range(n_threads):
threads[i].start()
for i in range(n_threads):
threads[i].join(timeout=90) # set timeout in the thread instead of the api call - to reduce charges for timed-out threads
return responses
def name_match(candidate, responses):
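    """Return True if candidate is similar to at least one name in responses."""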
for i in responses:
if name_similarity(candidate, i):
return True
return False
def normalize_string(input_str):
# Remove punctuation and extra whitespace
input_str = re.sub(r'[^\w\s]', '', input_str)
input_str = ' '.join(input_str.split())
# Remove diacritics (accent marks)
input_str = ''.join(c for c in unicodedata.normalize('NFKD', input_str) if not unicodedata.combining(c))
# Convert to lowercase for case-insensitive comparison
input_str = input_str.lower()
return input_str
def tokenize_string(s):
"""Split string into tokens."""
return s.split()
def is_abbreviation(abbr, word):
"""Check if `abbr` is an abbreviation of `word`."""
return word.startswith(abbr)
def name_similarity_exact(name1, name2):
"""Calculate similarity score between two names."""
# Normalizing the names
norm_name1 = normalize_string(name1)
norm_name2 = normalize_string(name2)
# Tokenizing the names
tokens1 = tokenize_string(norm_name1)
tokens2 = tokenize_string(norm_name2)
# Initial match based on abbreviations
for token1 in tokens1:
for token2 in tokens2:
if is_abbreviation(token1, token2) or is_abbreviation(token2, token1):
return 1
# Using Levenshtein distance as a similarity metric
levenshtein = Levenshtein()
distance = levenshtein.distance(norm_name1, norm_name2)
max_len = max(len(norm_name1), len(norm_name2))
similarity = (max_len - distance) / max_len
return similarity
def name_similarity(name1, name2):
"""Calculate similarity score between two names."""
return name_similarity_exact(name1, name2) > 0.8 # Threshold can be adjusted
| [] |