date_collected (stringclasses, 1 value) | repo_name (stringlengths, 6-116) | file_name (stringlengths, 2-220) | file_contents (stringlengths, 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | uva-bi-sdad/publicrd | src~Tech-Report~model_selection~topic_models-LDA.py | import pandas as pd
#import numpy as np
import pickle
import time
#import gc
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from gensim.models.coherencemodel import CoherenceModel
# data needed for coherence calculation
# import entire dataset
with open('coherence_vars.sav', 'rb') as f:
    [id2word, docs] = pickle.load(f)
print("data ingested--------------------------", flush = True)
# corpus - word frequency in docs - not needed for coherence calculation
# id2word - dictionary
# docs - df["final_tokens"]
# input needed for LDA, NMF and LSA (all from Scikit-Learn) is one string per document (not a list of strings)
text = []
for abstract in docs:
text.append(" ".join(abstract))
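# e.g. a tokenized abstract like ['topic', 'model'] (illustrative) becomes the single string 'topic model'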
# Function to format topics as a "list of list of strings".
# Needed for topic coherence function in Gensim
# function modified from https://nlpforhackers.io/topic-modeling/
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
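# topic.argsort()[:-top_n - 1:-1] yields the indices of the top_n largest weights in descending order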
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
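# Usage sketch (illustrative output): list_topics(lda_model.components_, vectorizer, top_n=3)
# returns one list of terms per topic, e.g. [['cell', 'gene', 'protein'], ['energy', 'material', 'design'], ...]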
# create document-term matrix
stop_wds = ['research', 'study', 'project'] # 'use' is already eliminated by max_df
vectorizer = CountVectorizer(max_df=0.6, min_df=20, lowercase=False, stop_words=stop_wds)
doc_term_matrix = vectorizer.fit_transform(text)
print("doc term matrix computed------------", flush = True)
# delete text - no longer needed
#del text
#gc.collect()
# run once so start up time isn't factored into first iteration time
lda_model = LatentDirichletAllocation(n_components=1, doc_topic_prior = 1,
topic_word_prior=0.1, n_jobs=39, random_state = 0)
lda_model.fit_transform(doc_term_matrix)
print("model loop beginning-----------", flush = True)
# function adapted from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/
def lda_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute LDA model & find perplexity, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-term count matrix
n_topics : list of numbers of topics
vectorizer : fitted CountVectorizer, used to recover topic terms
rand_start : starting random seed
"""
perplexity_values = []
lda_time = []
topics_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
lda_model = LatentDirichletAllocation(n_components=num_topics, doc_topic_prior = 1/num_topics,
topic_word_prior=0.1, n_jobs=39, random_state = i)
lda_model.fit_transform(doc_term_matrix)
t2 = time.time()
lda_time.append(t2-t1)
print(f" Model time: {t2-t1}", flush = True)
# compute perplexity
perplexity_values.append(lda_model.bound_)
# create list of topics
topics = list_topics(lda_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
print('Number of topics =', num_topics, "complete.", flush = True)
return perplexity_values, lda_time, topics_list
# code copied from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/
# minor alterations made
n_topics = list(range(5,131,5)) + [140, 150, 175, 200]
num_runs = 2
batch= 8
col_names = [f"iteration {i+batch}" for i in range(num_runs)]
lda_p = pd.DataFrame(index = n_topics, columns = col_names)
lda_t = pd.DataFrame(index = n_topics, columns = col_names)
lda_topics = pd.DataFrame(index = n_topics, columns = col_names)
for i in range(num_runs):
print(f"Iteration {i}", flush = True)
# run models
[p, t, topic_terms] = lda_models(doc_term_matrix=doc_term_matrix, n_topics=n_topics, vectorizer=vectorizer,
rand_start = (i+batch)*len(n_topics))
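# seed bookkeeping: each call consumes len(n_topics) consecutive random_state values,
# so offsetting by (i+batch)*len(n_topics) keeps seeds distinct across runs and batches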
# save results
lda_p[f"iteration {i+batch}"] = p
lda_t[f"iteration {i+batch}"] = t
lda_topics[f"iteration {i+batch}"] = topic_terms
# save results
lda_p.to_pickle("./results/LDA/lda_p8-9.pkl")
lda_t.to_pickle("./results/LDA/lda_t8-9.pkl")
lda_topics.to_pickle("./results/LDA/lda_topics8-9.pkl")
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~By_year~slurm_first_stage.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from scipy.linalg import block_diag
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
##############################################################################################################################################
# Set up all functions for the dynamic topic modelling
# function to create a new dictionary and corpus
def createLDAvars(docs):
# Create the variables needed for LDA from df[final_frqwds_removed]: dictionary (id2word), corpus
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#Filter words to only those found in at least a set number of documents (min_appearances)
id2word.filter_extremes(no_below=20, no_above=0.6)
# filter out stop words - "use" already filtered out by previous line
id2word.filter_tokens(bad_ids=[id2word.token2id['research'], id2word.token2id['project']])
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
# corpus is not needed for c_v coherence
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
# function to pre-process the data: compute tfidf
def preprocess(df, stopwords):
# Append all the final tokens
text = []
docs = df['list_final_tokens']
for abstract in docs:
text.append(' '.join(abstract))
# Create the term-document matrix
tfidf_vectorizer = TfidfVectorizer(max_df=1.0, min_df=0, lowercase=False, stop_words=stopwords)
tf_idf = tfidf_vectorizer.fit_transform(text)
return (tf_idf, tfidf_vectorizer)
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names attribute; if not, vectorizer is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-term matrix
n_topics: list of numbers of topics
vectorizer: fitted vectorizer, or a list of terms
rand_start: starting random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W was already computed by the timed fit above, so no refit is needed)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top-n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# function to solve the first stage of the dynamic nmf
def first_stage(path, year, n_topics, dictionary, docs):
"""
Solve a dynamic nmf for each fiscal year
Parameters:
----------
path: location of the term-document matrices
year: list of fiscal years
n_topics: list of numbers of topics
dictionary: dictionary of terms
docs: tokenized corpus
"""
batch = 7
windows_coherence = []
windows_topic_list = []
windows_topic = []
windows_W = []
windows_H = []
windows_terms = []
# Run the dynamic nmf for each fiscal year
for y in year:
# Load the document-term matrix
(tf_idf,tfidf_vectorizer,df) = joblib.load( path+str(y)+'.pkl' )
# save all the terms
#terms = tfidf_vectorizer.get_feature_names()
# Solve an nmf model for a given range of topics
(nmf_time,topics_list,W_list,H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Compute the coherence for each number of topics
coherence = []
for t in range(0,len(n_topics)):
window_term_rankings = topics_list[t]
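# c_v coherence (Roeder et al. 2015): roughly, NPMI co-occurrence statistics from a boolean
# sliding window, compared via cosine similarity over the top-10 terms of each topic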
cm = CoherenceModel(topics=window_term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=30)
coherence.append(cm.get_coherence())
# find the number of topics that maximizes the coherence
max_coherence = numpy.nanmax(coherence)
index = coherence.index(max_coherence)
topic_select = n_topics[index]
fy_topic_list = topics_list[index]
W = W_list[index]
H = H_list[index]
# For the best model (the one that maximizes coherence), transform the matrix H: for each topic, set the weight of non top-n terms to 0
# select all the unique terms of topics
topic_terms = list(set(sum(fy_topic_list,[])))
# select the index of terms that appear in the topics and subset the matrix H to those terms
if hasattr(tfidf_vectorizer, 'get_feature_names'):
terms = tfidf_vectorizer.get_feature_names()
else:
terms = tfidf_vectorizer
indcol = [terms.index(i) for i in topic_terms]
subH = H[:,indcol]
# For each topic (row), set the weight of terms that are not listed in the topic to 0.
for i,j in enumerate(subH):
# by row find the index of top_n terms
indtopic = [topic_terms.index(p) for p in fy_topic_list[i]]
notop = [k for k in range(len(topic_terms)) if k not in indtopic]
j[notop]=0
# append the result
windows_coherence.append(max_coherence)
windows_topic_list.append(fy_topic_list)
windows_topic.append(topic_select)
windows_W.append(W)
windows_H.append(subH)
windows_terms.append(topic_terms)
print('--- windows topic '+str(y)+' solved ---')
print('--- Dynamic nmf: first stage clear ---')
return windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms
# Create a new document term matrix using the topic distribution
def create_matrix(windows_H, windows_terms):
"""
Create the topic-term matrix from all window topics that have been added so far.
Parameters:
----------
windows_H: window topic distributions of the top n words
windows_terms: window terms used for each fiscal year
"""
# Combine the window term lists (all_windows_terms); the unique terms (all_terms) are recovered below after merging duplicate columns
all_windows_terms = sum(windows_terms,[])
# Create a block diagonal matrix of all window topics: one row per topic, one column per (window, term) pair
M = block_diag(*windows_H)
# Identify duplicated terms (columns) and sum them
# A simple way is to wrap M in a DataFrame and group duplicate term columns by name
dfM = pd.DataFrame(data = M, columns=all_windows_terms).groupby(level=0, axis=1).sum()
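# e.g. (hypothetical shapes): H1 of shape (k1, m1) and H2 of shape (k2, m2) stack into a
# (k1+k2, m1+m2) block matrix; the groupby(level=0, axis=1).sum() then merges columns that
# share a term name, leaving one row per window topic and one column per unique term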
# Transform back the dataframe to matrix and get the variable names (in the order in the matrix) as the final all terms
M_concat = dfM.to_numpy()
all_terms = list(dfM.columns)
print('--- New document-terms have been created ---')
return M_concat, all_terms
# function to solve the second stage of the dynamic nmf
def second_stage(windows_H, windows_terms, n_topics):
"""
Build a new document term matrix and run a new nmf model
Parameters:
----------
windows_H: window topic distributions of the top n words
windows_terms: window terms used for each fiscal year
n_topics: list of topics number for the second stage
"""
batch = 7
# Build the new document-term matrix
(M, all_terms) = create_matrix(windows_H, windows_terms)
# Run a second nmf model
(nmf_time,topics_list,W_list,H_list) = nmf_models(doc_term_matrix=M, n_topics=n_topics, vectorizer=all_terms, rand_start = (batch)*len(n_topics))
print('--- Dynamic nmf: second stage clear ---')
return M, all_terms, topics_list, W_list,H_list
# Track the dynamic of a given topic (option topic)
def track_dynamic(topic,W,windows_topic_list):
"""
Link topics from the first stage with topics from the second stage using the matrix W.
Note: relies on the globals `year` and `windows_topic` defined in the main script.
Parameters:
----------
topic: index of the second-stage topic whose dynamic is tracked
W: weight matrix from the second stage
windows_topic_list: topic lists from the first stage
"""
# For each topic from the first stage (rows) find the topic in the second stage (columns) with the highest weight
topic_second = []
for i, topic_first in enumerate(W):
topic_second.append(topic_first.argmax())
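# row i of W holds the weights of first-stage topic i over the second-stage topics,
# so the argmax assigns each window topic to its dominant dynamic topic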
# Split the first-stage topic assignments by year
it = iter(topic_second)
topic_first_year = [[next(it) for _ in range(size)] for size in windows_topic]
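# windows_topic holds the number of topics selected per year, so this regroups the
# flat assignment list into one sublist per fiscal year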
# For the tracked topic, identify the corresponding window topics in each year
dynamic_topic_list = []
for y in range(0, len(year)):
topic_year = [i for i, e in enumerate(topic_first_year[y]) if e == topic]
dynamic_topic_list.append(topic_year)
# Build, for each year, the list of word lists of the window topics matched to the tracked topic
dynamic_topic = []
for y in range(0, len(year)):
dynamic_list = dynamic_topic_list[y]
fy_topic = [windows_topic_list[y][dynamic_list[i]] for i in range(0,len(dynamic_list))]
dynamic_topic.append(fy_topic)
# Collect the result in a dataframe
topic_print = []
names = []
# print the dynamic topic
for y in range(0,len(year)):
for t in range(0,len(dynamic_topic[y])):
topic_print.append(dynamic_topic[y][t])
names.append('Year_'+str(year[y])+'_'+str(t))
df = pd.DataFrame(topic_print).transpose()
df.columns = names
return df, dynamic_topic_list
##########################################################################################################################################################
# Load the dataset
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Create a list of tokens
df["list_final_tokens"] = df["final_tokens"].str.split(' ').tolist()
year = df['FY'].unique()
# build the dictionary id2word
docs = df["list_final_tokens"]
[dictionary, corpus] = createLDAvars(docs)
###########################################################################################################################################################
# Run a dynamic topic model
# First stage : use the same list of number of topics for both first and second stage
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Term_docs_'
n_topics = list(range(20,61,5))
(windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms) = first_stage(path, year, n_topics, dictionary, docs)
# save output for the first stage
joblib.dump((windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms), '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/slurm_result/first_stage.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2014.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names attribute; if not, vectorizer is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-term matrix
n_topics: list of numbers of topics
vectorizer: fitted vectorizer, or a list of terms
rand_start: starting random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W was already computed by the timed fit above, so no refit is needed)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top-n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2014
# Find the number of topics that maximises the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# reload only the objects needed for the coherence calculation
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
(_, _, docs, dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2020.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names attribute; if not, vectorizer is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-term matrix
n_topics: list of numbers of topics
vectorizer: fitted vectorizer, or a list of terms
rand_start: starting random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W was already computed by the timed fit above, so no refit is needed)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top-n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2020
# Find the number of topics that maximises the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# reload only the objects needed for the coherence calculation
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
(_, _, docs, dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Structured_topic_models~hLDA~train_hLDA_corona.py | import pandas as pd
import numpy as np
import pickle
import tomotopy as tp
import sys
import gensim
from gensim.models.coherencemodel import CoherenceModel
import time
from collections import Counter
df = pd.read_pickle("../dspg20RnD/data/final/dashboard_data/corona_corpus.pkl")
docs = df["final_frqwds_removed"]
def list_topics_hlda(mdl, top_n):
topic_words = []
topic_levels = []
for k in range(mdl.k):
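# hLDA can drop topics from the tree during sampling; is_live_topic(k) filters out dead ids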
if not mdl.is_live_topic(k):
continue
topic_words.append([words[0] for words in mdl.get_topic_words(k, top_n)])
topic_levels.append(mdl.level(k))
return topic_words, dict(Counter(topic_levels))
def createTCvars(docs):
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#keep_only_most_common=int(len(docs)/2) #LDA works best with fewer features than documents
#Filter words to only those found in at least a set number of documents (min_appearances)
#id2word.filter_extremes(no_below=5, no_above=0.6, keep_n=keep_only_most_common)
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
def train_hlda(min_cf, rm_top, top_n,
alpha, eta, gamma, depth,
corpus, id2word, docs):
# initialize hLDA model
mdl = tp.HLDAModel(tw = tp.TermWeight.IDF, min_cf = min_cf, rm_top = rm_top,
depth = depth, alpha = alpha, eta = eta, gamma = gamma)
# load docs
for abstracts in docs:
mdl.add_doc(abstracts)
# setup model
mdl.burn_in = 100
mdl.train(0)
# train model
#print('Training...', file=sys.stderr, flush=True)
for i in range(0, 1000, 10):
mdl.train(10)
#print('Iteration: {}\tLog-likelihood: {}'.format(i, mdl.ll_per_word))
# create list of topics
topics, level_count = list_topics_hlda(mdl, top_n = top_n)
# calculate topic coherence
cm = CoherenceModel(topics = topics, corpus = corpus, dictionary = id2word,
texts = docs, coherence = 'c_v', processes = 8)
cv = cm.get_coherence()
return cv, level_count
docs_dict, docs_corpus = createTCvars(docs)
#alpha_vec = [0.005, 0.01, 0.025, 0.1]
alpha = 0.1
eta_vec = [0.05, 0.075, 0.1, 0.2, 0.3]
#eta = 0.01
#gamma_vec = [0.01, 0.05, 0.1, 0.2]
gamma = 0.2
#min_cf_vec = [0, 1, 2]
min_cf = 2
#rm_top_vec = [5, 10, 15]
rm_top = 10
depth_vec = [4, 5, 6, 7, 8]
#depth = 4
param_tune_mat = np.zeros((len(eta_vec)*len(depth_vec),8))
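# 5 eta values x 5 depths = 25 configurations; the 8 columns are the 6 hyperparameters plus coherence and total topic count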
param_ind = 0
num_topic_mat = []
for eta in eta_vec:
for depth in depth_vec:
tc, topics_in_lvl = train_hlda(min_cf = min_cf,
rm_top = rm_top,
top_n = 10,
alpha = alpha,
eta = eta,
gamma = gamma,
depth = depth,
corpus = docs_corpus,
id2word = docs_dict,
docs = docs)
param_tune_mat[param_ind,:] = [alpha, eta, gamma, min_cf, rm_top, depth, tc, sum(topics_in_lvl.values())]
param_ind += 1
num_topic_mat.append(topics_in_lvl)
param_tune_df = pd.DataFrame(param_tune_mat)
param_tune_df.columns = ['alpha', 'eta', 'gamma', 'min_cf', 'rm_top', 'depth', 'tc', 'total_topics']
param_tune_df.to_csv(r'hlda_results/hlda_corona_eta_depth_tune.csv', index = False)
num_topic_df = pd.DataFrame(num_topic_mat)
num_topic_df.to_csv(r'hlda_results/hlda_corona_topic_levels_eta_depth_tune.csv', index = False)
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2015.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names attribute; if not, vectorizer is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-term matrix
n_topics: list of numbers of topics
vectorizer: fitted vectorizer, or a list of terms
rand_start: starting random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W was already computed by the timed fit above, so no refit is needed)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top-n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2015
# Find the number of topics that maximises the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# reload only the objects needed for the coherence calculation
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
(_, _, docs, dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Structured_topic_models~hLDA~train_hLDA_pandemics.py | import pandas as pd
import numpy as np
import pickle
import tomotopy as tp
import sys
import gensim
from gensim.models.coherencemodel import CoherenceModel
import time
from collections import Counter
df = pd.read_pickle("../dspg20RnD/data/final/dashboard_data/pandemic_corpus.pkl")
docs = df["final_frqwds_removed"]
def list_topics_hlda(mdl, top_n):
topic_words = []
topic_levels = []
for k in range(mdl.k):
if not mdl.is_live_topic(k):
continue
topic_words.append([words[0] for words in mdl.get_topic_words(k, top_n)])
topic_levels.append(mdl.level(k))
return topic_words, dict(Counter(topic_levels))
def get_doc_topic_dists(mdl, top_n = 5):
# df of topic probs for each doc
topic_dists = pd.DataFrame([doc.get_topic_dist() for doc in mdl.docs])
# df of topic paths for each doc
topic_path = pd.DataFrame([doc.path for doc in mdl.docs])
topic_lst = []
for doc_path in topic_path.values: # iterate over document paths (rows); iterating the DataFrame directly would yield column labels
temp_lst = []
for t_id in doc_path:
temp_lst.append([words[0] for words in mdl.get_topic_words(t_id, top_n = top_n)])
topic_lst.append(temp_lst)
# df of top 5 words for each topic in topic path for each doc
topic_words = pd.DataFrame(topic_lst)
doc_topic = pd.concat([topic_path, topic_dists, topic_words], axis = 1)
doc_topic.columns = ['Root_topic', 'Super_topic', 'Sub_topic',
'Root_prob', 'Super_prob', 'Sub_prob',
'Root_words', 'Super_words', 'Sub_words']
return doc_topic
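# NOTE: the column names above assume a 3-level path (root/super/sub, i.e. depth = 3);
# the depth values tuned below (4-8) would need one column set per level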
def createTCvars(docs):
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#keep_only_most_common=int(len(docs)/2) #LDA works best with fewer features than documents
#Filter words to only those found in at least a set number of documents (min_appearances)
#id2word.filter_extremes(no_below=5, no_above=0.6, keep_n=keep_only_most_common)
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
def train_hlda(min_cf, rm_top, top_n,
alpha, eta, gamma, depth,
corpus, id2word, docs):
# initialize hLDA model
mdl = tp.HLDAModel(tw = tp.TermWeight.IDF, min_cf = min_cf, rm_top = rm_top,
depth = depth, alpha = alpha, eta = eta, gamma = gamma)
# load docs
for abstracts in docs:
mdl.add_doc(abstracts)
# setup model
mdl.burn_in = 100
mdl.train(0)
# train model
#print('Training...', file=sys.stderr, flush=True)
for i in range(0, 1000, 10):
mdl.train(10)
#print('Iteration: {}\tLog-likelihood: {}'.format(i, mdl.ll_per_word))
# create list of topics
topics, level_count = list_topics_hlda(mdl, top_n = top_n)
# calculate topic coherence
cm = CoherenceModel(topics = topics, corpus = corpus, dictionary = id2word,
texts = docs, coherence = 'c_v', processes = 8)
cv = cm.get_coherence()
doc_topic_df = get_doc_topic_dists(mdl, top_n = 5)
return cv, level_count, doc_topic_df
docs_dict, docs_corpus = createTCvars(docs)
#alpha_vec = [0.005, 0.01, 0.025, 0.1]
alpha = 0.1
eta_vec = [0.05, 0.075, 0.1, 0.2, 0.3]
#eta = 0.01
#gamma_vec = [0.01, 0.05, 0.1, 0.2]
gamma = 0.2
#min_cf_vec = [0, 1, 2]
min_cf = 2
#rm_top_vec = [5, 10, 15]
rm_top = 10
depth_vec = [4, 5, 6, 7, 8]
#depth = 4
param_tune_mat = np.zeros((len(eta_vec)*len(depth_vec),8))
param_ind = 0
num_topic_mat = []
for eta in eta_vec:
for depth in depth_vec:
tc, topics_in_lvl, doc_topics = train_hlda(min_cf = min_cf,
rm_top = rm_top,
top_n = 10,
alpha = alpha,
eta = eta,
gamma = gamma,
depth = depth,
corpus = docs_corpus,
id2word = docs_dict,
docs = docs)
param_tune_mat[param_ind,:] = [alpha, eta, gamma, min_cf, rm_top, depth, tc, sum(topics_in_lvl.values())]
param_ind += 1
num_topic_mat.append(topics_in_lvl)
param_tune_df = pd.DataFrame(param_tune_mat)
param_tune_df.columns = ['alpha', 'eta', 'gamma', 'min_cf', 'rm_top', 'depth', 'tc', 'total_topics']
param_tune_df.to_csv(r'hlda_results/hlda_pandemic_eta_depth_tune.csv', index = False)
num_topic_df = pd.DataFrame(num_topic_mat)
num_topic_df.to_csv(r'hlda_results/hlda_pandemic_topic_levels_eta_depth_tune.csv', index = False)
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~NMF_num_topic_tuning~nmf_200.py | import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
import gensim
import time
from sklearn.decomposition import NMF, TruncatedSVD, LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.models.coherencemodel import CoherenceModel
def print_topics(model, vectorizer, top_n=10):
for idx, topic in enumerate(model.components_): # loop through each row of H. idx = row index. topic = actual row
print("\nTopic %d:" % (idx))
#print([(vectorizer.get_feature_names()[i], topic[i]) # printing out words corresponding to indices found in next line
#for i in topic.argsort()[:-top_n - 1:-1]]) # finding indices of top words in topic
print_list = [(vectorizer.get_feature_names()[i], topic[i])
for i in topic.argsort()[:-top_n - 1:-1]]
for item in print_list:
print(item)
def list_topics(model, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(model.components_): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
with open('coherence_vars_full.sav', 'rb') as f:
    [corpus, id2word, docs] = pickle.load(f)
text = []
for abstract in docs:
text.append(" ".join(abstract))
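# cap the vocabulary at half the number of documents (heuristic used elsewhere in this repo:
# topic models work best with fewer features than documents)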
tfidf_vectorizer = TfidfVectorizer(max_df=0.6, min_df=20, lowercase=False, max_features=int(len(docs)/2))
tf_idf = tfidf_vectorizer.fit_transform(text)
num_topics = 200
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = 0)
doc_topic = nmf_model.fit_transform(tf_idf)
t2 = time.time()
print(f" Model time: {t2-t1}")
topic_term = nmf_model.components_
# calculate topic coherence
# create list of topics
topics = list_topics(nmf_model, tfidf_vectorizer, top_n=10)
t1 = time.time()
cm = CoherenceModel(topics=topics, corpus=corpus, dictionary=id2word, texts=docs,
coherence='c_v', processes=10) #window_size=500 )
print(cm.get_coherence())
t2 = time.time()
print(f" Coherence time: {t2-t1}")
topics_5 = list_topics(nmf_model, tfidf_vectorizer, top_n=5)
nmf_output = pd.DataFrame(cm.get_coherence_per_topic(with_std=True))
nmf_output.insert(0, 'topic_words', topics_5)
nmf_output.columns = ['topic_words', 'coherence_mean', 'coherence_stdev']
doc_topic_df = pd.DataFrame(data=doc_topic.copy())
nmf_output["avg_weight_in_corpus"] = doc_topic_df.mean(axis=0)
nmf_output["med_weight_in_corpus"] = doc_topic_df.median(axis=0)
# create a column for the number of documents that contain a topic
doc_topic_bool = pd.DataFrame(data=doc_topic.copy())
doc_topic_bool[doc_topic_bool > 0] = 1
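# binarize the weights: any positive topic weight counts the document as containing that topic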
nmf_output["num_docs_containing_topic"] = doc_topic_bool.sum(axis=0)
nmf_output["percent_docs_containing_topic"] = 100*(nmf_output["num_docs_containing_topic"]/doc_topic.shape[0])
# find the dominant topic per document
max_topic = doc_topic_df.idxmax(axis=1)
nmf_output["num_times_max_topic"] = max_topic.value_counts()
nmf_output["percent_times_max_topic"] = 100*(nmf_output["num_times_max_topic"]/doc_topic.shape[0])
# save to file
pickle.dump([doc_topic, topic_term, nmf_output], open('nmf_tuning/full/nmf_200.sav','wb')) | [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2011.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names attribute; if not, vectorizer is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-term matrix
n_topics: list of numbers of topics
vectorizer: fitted vectorizer, or a list of terms
rand_start: starting random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W was already computed by the timed fit above, so no refit is needed)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top-n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2011
# Find the number of topics that maximises the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# reload only the objects needed for the coherence calculation
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
(_, _, docs, dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Structured_topic_models~hLDA~train_hLDA_full.py | import pandas as pd
import numpy as np
import pickle
import tomotopy as tp
import sys
import gensim
from gensim.models.coherencemodel import CoherenceModel
import time
from collections import Counter
df = pd.read_pickle("../dspg20RnD/data/final/final_dataset_7-20.pkl")
docs = df["final_frqwds_removed"]
def list_topics_hlda(mdl, top_n):
topic_words = []
topic_levels = []
for k in range(mdl.k):
if not mdl.is_live_topic(k):
continue
topic_words.append([words[0] for words in mdl.get_topic_words(k, top_n)])
topic_levels.append(mdl.level(k))
return topic_words, dict(Counter(topic_levels))
def createTCvars(docs):
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#keep_only_most_common=int(len(docs)/2) #LDA works best with fewer features than documents
#Filter words to only those found in at least a set number of documents (min_appearances)
#id2word.filter_extremes(no_below=5, no_above=0.6, keep_n=keep_only_most_common)
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
def train_hlda(min_cf, rm_top, top_n,
alpha, eta, gamma, depth,
corpus, id2word, docs):
# initialize hLDA model
mdl = tp.HLDAModel(tw = tp.TermWeight.IDF, min_cf = min_cf, rm_top = rm_top,
depth = depth, alpha = alpha, eta = eta, gamma = gamma,
min_df = 20)
# load docs
for abstracts in docs:
mdl.add_doc(abstracts)
# setup model
mdl.burn_in = 100
mdl.train(0)
# train model
#print('Training...', file=sys.stderr, flush=True)
for i in range(0, 1000, 10):
mdl.train(10)
#print('Iteration: {}\tLog-likelihood: {}'.format(i, mdl.ll_per_word))
# create list of topics
topics, level_count = list_topics_hlda(mdl, top_n = top_n)
# calculate topic coherence
cm = CoherenceModel(topics = topics, corpus = corpus, dictionary = id2word,
texts = docs, coherence = 'c_v', processes = 10)
cv = cm.get_coherence()
return cv, level_count
docs_dict, docs_corpus = createTCvars(docs)
#alpha_vec = [0.005, 0.01, 0.025, 0.1]
alpha = 0.1
#eta_vec = [0.05, 0.1]
eta = 0.2
#gamma_vec = [0.01, 0.05, 0.1, 0.2]
gamma = 0.2
#min_cf_vec = [0, 1, 2]
min_cf = 2
#rm_top_vec = [5, 10, 15]
rm_top = 10
#depth_vec = [4, 5, 6, 7, 8]
depth = 4
param_tune_mat = []
tc, topics_in_lvl = train_hlda(min_cf = min_cf,
rm_top = rm_top,
top_n = 10,
alpha = alpha,
eta = eta,
gamma = gamma,
depth = depth,
corpus = docs_corpus,
id2word = docs_dict,
docs = docs)
param_tune_mat.append([alpha, eta, gamma, min_cf, rm_top, depth, tc, sum(topics_in_lvl.values())])
param_tune_df = pd.DataFrame(param_tune_mat)
param_tune_df.columns = ['alpha', 'eta', 'gamma', 'min_cf', 'rm_top', 'depth', 'tc', 'total_topics']
param_tune_df.to_csv(r'hlda_results/hlda_full_tune.csv', index = False)
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~By_year~slurm_year_2008.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from scipy.linalg import block_diag
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
##############################################################################################################################################
# Set up all functions for the dynamic topic modelling
# function to create a new dictionary and corpus
def createLDAvars(docs):
# Create the variables needed for LDA from df[final_frqwds_removed]: dictionary (id2word), corpus
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#Filter words to only those found in at least a set number of documents (min_appearances)
id2word.filter_extremes(no_below=20, no_above=0.6)
# filter out stop words - "use" already filtered out by previous line
id2word.filter_tokens(bad_ids=[id2word.token2id['research'], id2word.token2id['project']])
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
# corpus is not needed for c_v coherence
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
# function to pre-process the data: compute tfidf
def preprocess(df, stopwords):
# Append all the final tokens
text = []
docs = df['list_final_tokens']
for abstract in docs:
text.append(' '.join(abstract))
# Create the term-document matrix
tfidf_vectorizer = TfidfVectorizer(max_df=1.0, min_df=0, lowercase=False, stop_words=stopwords)
tf_idf = tfidf_vectorizer.fit_transform(text)
return (tf_idf, tfidf_vectorizer)
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names attribute; if not, vectorizer is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-term matrix
n_topics: list of numbers of topics
vectorizer: fitted vectorizer, or a list of terms
rand_start: starting random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W was already computed by the timed fit above, so no refit is needed)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top-n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# function to solve the first stage of the dynamic nmf
def first_stage_year(path, year, n_topics, dictionary, docs):
"""
Solve the first-stage nmf for a single fiscal year
Parameters:
----------
path: location of the term-document matrices
year: fiscal year
n_topics: list of numbers of topics
dictionary: dictionary of terms
docs: tokenized corpus
"""
batch = 7
windows_coherence = []
windows_topic_list = []
windows_topic = []
windows_W = []
windows_H = []
windows_terms = []
# Solve the nmf for the given fiscal year
# Load the document-term matrix
(tf_idf,tfidf_vectorizer,df) = joblib.load( path+str(year)+'.pkl' )
# save all the terms
#terms = tfidf_vectorizer.get_feature_names()
# Solve an nmf model for a given range of topics
(nmf_time,topics_list,W_list,H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Compute the coherence for each number of topics
coherence = []
for t in range(0,len(n_topics)):
window_term_rankings = topics_list[t]
cm = CoherenceModel(topics=window_term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=8)
coherence.append(cm.get_coherence())
# find the number of topics that maximizes the coherence
max_coherence = numpy.nanmax(coherence)
index = coherence.index(max_coherence)
topic_select = n_topics[index]
fy_topic_list = topics_list[index]
W = W_list[index]
H = H_list[index]
# For the best model (the one that maximizes coherence), transform the matrix H: for each topic, set the weight of non top-n terms to 0
# select all the unique terms of topics
topic_terms = list(set(sum(fy_topic_list,[])))
# select the index of terms that appear in the topics and subset the matrix H to those terms
if hasattr(tfidf_vectorizer, 'get_feature_names'):
terms = tfidf_vectorizer.get_feature_names()
else:
terms = tfidf_vectorizer
indcol = [terms.index(i) for i in topic_terms]
subH = H[:,indcol]
# For each topic (row), set the weight of terms that are not listed in the topic to 0.
for i,j in enumerate(subH):
# by row find the index of top_n terms
indtopic = [topic_terms.index(p) for p in fy_topic_list[i]]
notop = [k for k in range(len(topic_terms)) if k not in indtopic]
j[notop]=0
# append the result
windows_coherence.append(max_coherence)
windows_topic_list.append(fy_topic_list)
windows_topic.append(topic_select)
windows_W.append(W)
windows_H.append(subH)
windows_terms.append(topic_terms)
print('--- windows topic '+str(year)+' solved ---')
print('--- Dynamic nmf: first stage clear ---')
return windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms
# Create a new document term matrix using the topic distribution
def create_matrix(windows_H, windows_terms):
"""
Create the topic-term matrix from all window topics that have been added so far.
Parameters:
----------
windows_H: window topic distributions of the top n words
windows_terms: window terms used for each fiscal year
"""
# Combine the window term lists (all_windows_terms); the unique terms (all_terms) are recovered below after merging duplicate columns
all_windows_terms = sum(windows_terms,[])
# Create a block diagonal matrix of all window topics: one row per topic, one column per (window, term) pair
M = block_diag(*windows_H)
# Identify duplicated terms (columns) and sum them
# A simple way is to wrap M in a DataFrame and group duplicate term columns by name
dfM = pd.DataFrame(data = M, columns=all_windows_terms).groupby(level=0, axis=1).sum()
# Transform back the dataframe to matrix and get the variable names (in the order in the matrix) as the final all terms
M_concat = dfM.to_numpy()
all_terms = list(dfM.columns)
print('--- New document-terms have been created ---')
return M_concat, all_terms
# function to solve the second stage of the dynamic nmf
def second_stage(windows_H, windows_terms, n_topics):
"""
Build a new document term matrix and run a new nmf model
Parameters:
----------
windows_H: window topic distributions of the top n words
windows_terms: window terms used for each fiscal year
n_topics: list of topics number for the second stage
"""
batch = 7
# Build the new document-term matrix
(M, all_terms) = create_matrix(windows_H, windows_terms)
# Run a second nmf model
(nmf_time,topics_list,W_list,H_list) = nmf_models(doc_term_matrix=M, n_topics=n_topics, vectorizer=all_terms, rand_start = (batch)*len(n_topics))
print('--- Dynamic nmf: second stage clear ---')
return M, all_terms, topics_list, W_list,H_list
# Track the dynamic of a given topic (option topic)
def track_dynamic(topic,W,windows_topic_list):
"""
    Link topics in the first stage with topics in the second stage using the matrix W
Parameters:
----------
topic: topic to track the dynamic
    W: weight matrix from the second stage
windows_topic_list: topic list from the first stage
"""
    # For each topic from the first stage (rows), find the topic in the second stage (columns) with the highest weight
topic_second = []
for i, topic_first in enumerate(W):
topic_second.append(topic_first.argmax())
    # Split the first-stage topic assignments by year
it = iter(topic_second)
topic_first_year = [[next(it) for _ in range(size)] for size in windows_topic]
    # For each year, find which first-stage topics correspond to the tracked topic
dynamic_topic_list = []
for y in range(0, len(year)):
topic_year = [i for i, e in enumerate(topic_first_year[y]) if e == topic]
dynamic_topic_list.append(topic_year)
# Compute the list of list of topics (list of year and list of main topic)
dynamic_topic = []
for y in range(0, len(year)):
dynamic_list = dynamic_topic_list[y]
fy_topic = [windows_topic_list[y][dynamic_list[i]] for i in range(0,len(dynamic_list))]
dynamic_topic.append(fy_topic)
# Print the result in a dataframe
topic_print = []
names = []
# print the dynamic topic
for y in range(0,len(year)):
for t in range(0,len(dynamic_topic[y])):
topic_print.append(dynamic_topic[y][t])
names.append('Year_'+str(year[y])+'_'+str(t))
    df = pd.DataFrame(topic_print).transpose()
df.columns = names
return df, dynamic_topic_list
##########################################################################################################################################################
# Load the dataset
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Create a list of tokens
df["list_final_tokens"] = df["final_tokens"].str.split(' ').tolist()
year = 2008
# build the dictionary id2word
docs = df["list_final_tokens"]
[dictionary, corpus] = createLDAvars(docs)
del df
del corpus
###########################################################################################################################################################
# Run a dynamic topic model
# First stage : use the same list of number of topics for both first and second stage
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Term_docs_'
n_topics = list(range(20,61,5))
(windows_topic_08, windows_coherence_08, windows_topic_list_08, windows_W_08, windows_H_08, windows_terms_08) = first_stage_year(path, year, n_topics, dictionary, docs)
# save output for the first stage
joblib.dump((windows_topic_08, windows_coherence_08, windows_topic_list_08, windows_W_08, windows_H_08, windows_terms_08), '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/slurm_result/first_stage_08.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_nmf_full_abstracts.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from scipy.linalg import block_diag
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
##############################################################################################################################################
# Set all functions for the dynamic topics modellings
# function to create a new dictionary and corpus
def createLDAvars(docs):
# Create the variables needed for LDA from df[final_frqwds_removed]: dictionary (id2word), corpus
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#Filter words to only those found in at least a set number of documents (min_appearances)
id2word.filter_extremes(no_below=20, no_above=0.6)
# filter out stop words - "use" already filtered out by previous line
id2word.filter_tokens(bad_ids=[id2word.token2id['research'], id2word.token2id['project']])
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
# corpus not need for c_v coherence
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
# function to pre-process the data: compute tfidf
def preprocess(df, stopwords):
# Append all the final tokens
text = []
docs = df['list_final_tokens']
for abstract in docs:
text.append(' '.join(abstract))
# Create the term-document matrix
    tfidf_vectorizer = TfidfVectorizer(max_df=1.0, min_df=0, lowercase=False, stop_words=stopwords)
tf_idf = tfidf_vectorizer.fit_transform(text)
return (tf_idf, tfidf_vectorizer)
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
            # check whether the vectorizer has a get_feature_names attribute; if not, it is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
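# Added note: topic.argsort()[:-top_n - 1:-1] returns the indices of the top_n
# largest weights in descending order (argsort sorts ascending, so the slice
# walks backwards from the end of the sorted index array).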
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrix W and H
W = nmf_model.fit_transform(doc_term_matrix)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
        #    threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
        #    topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
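# Added note: for the j-th entry of n_topics, nmf_models returns the fit time,
# the top-10 terms per topic (topics_list[j]), and the factor matrices
# W_list[j] (documents x topics) and H_list[j] (topics x terms).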
# function to solve the first stage of the dynamic nmf
def first_stage(path, year, n_topics, dictionary, docs):
"""
    Solve an NMF model for each fiscal year window (first stage of the dynamic NMF)
Parameters:
----------
path: location of term-document matrix
year: list of fiscal year
n_topics: list of topics number
dictionary: dictionary of terms
docs: corpus
"""
batch = 7
windows_coherence = []
windows_topic_list = []
windows_topic = []
windows_W = []
windows_H = []
windows_terms = []
# Run the dynamic nmf for each fiscal year
for y in year:
# Load the document-term matrix
(tf_idf,tfidf_vectorizer,df) = joblib.load( path+str(y)+'.pkl' )
# save all the term
#terms = tfidf_vectorizer.get_feature_names()
# Solve an nmf model for a given range of topics
(nmf_time,topics_list,W_list,H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
        # Compute the coherence for each candidate number of topics
coherence = []
for t in range(0,len(n_topics)):
window_term_rankings = topics_list[t]
cm = CoherenceModel(topics=window_term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=30)
coherence.append(cm.get_coherence())
        # find the number of topics that maximizes the coherence
max_coherence = numpy.nanmax(coherence)
index = coherence.index(max_coherence)
topic_select = n_topics[index]
fy_topic_list = topics_list[index]
W = W_list[index]
H = H_list[index]
        # For the best model (the one that maximizes the coherence), transform the matrix H: for each topic, set the weight of the non top-n terms to 0
        # select all the unique terms across the selected topics
topic_terms = list(set(sum(fy_topic_list,[])))
# select the index of terms that appear in the topics and subset the matrix H to those terms
if hasattr(tfidf_vectorizer, 'get_feature_names'):
terms = tfidf_vectorizer.get_feature_names()
else:
terms = tfidf_vectorizer
indcol = [terms.index(i) for i in topic_terms]
subH = H[:,indcol]
        # For each topic (row), set the weight of terms that are not listed in the topic to 0.
for i,j in enumerate(subH):
# by row find the index of top_n terms
indtopic = [topic_terms.index(p) for p in fy_topic_list[i]]
notop = [k for k in range(len(topic_terms)) if k not in indtopic]
j[notop]=0
# append the result
windows_coherence.append(max_coherence)
windows_topic_list.append(fy_topic_list)
windows_topic.append(topic_select)
windows_W.append(W)
windows_H.append(subH)
windows_terms.append(topic_terms)
        print('--- windows topic '+str(y)+' solved ---')
print('--- Dynamic nmf: first stage clear ---')
return windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms
# Create a new document term matrix using the topic distribution
def create_matrix(windows_H, windows_terms):
"""
Create the topic-term matrix from all window topics that have been added so far.
Parameters:
----------
    windows_H: window topic distributions restricted to the top-n words
    windows_terms: window terms used for each fiscal year
"""
    # Combine the window term lists (all_windows_terms); the unique terms (all_terms) are obtained after summing duplicated columns below
all_windows_terms = sum(windows_terms,[])
    # Create a block diagonal matrix of all window topics: the number of columns equals the length of all_windows_terms
M = block_diag(*windows_H)
# Identify duplicated terms (columns) and sum them
    # The fastest way is to turn M into a data frame and sum duplicated columns with groupby
dfM = pd.DataFrame(data = M, columns=all_windows_terms).groupby(level=0, axis=1).sum()
    # Convert the dataframe back to a matrix and take the column names (in matrix order) as the final term list
M_concat = dfM.to_numpy()
all_terms = list(dfM.columns)
print('--- New document-terms have been created ---')
return M_concat, all_terms
# function to solve the second stage of the dynamic nmf
def second_stage(windows_H, windows_terms, n_topics):
"""
Build a new document term matrix and run a new nmf model
Parameters:
----------
    windows_H: window topic distributions restricted to the top-n words
    windows_terms: window terms used for each fiscal year
n_topics: list of topics number for the second stage
"""
batch = 7
# Build the new document-term matrix
(M, all_terms) = create_matrix(windows_H, windows_terms)
# Run a second nmf model
(nmf_time,topics_list,W_list,H_list) = nmf_models(doc_term_matrix=M, n_topics=n_topics, vectorizer=all_terms, rand_start = (batch)*len(n_topics))
print('--- Dynamic nmf: second stage clear ---')
return M, all_terms, topics_list, W_list,H_list
# Track the dynamic of a given topic (option topic)
def track_dynamic(topic,W,windows_topic_list):
"""
    Link topics in the first stage with topics in the second stage using the matrix W
Parameters:
----------
topic: topic to track the dynamic
    W: weight matrix from the second stage
windows_topic_list: topic list from the first stage
"""
    # For each topic from the first stage (rows), find the topic in the second stage (columns) with the highest weight
topic_second = []
for i, topic_first in enumerate(W):
topic_second.append(topic_first.argmax())
    # Split the first-stage topic assignments by year
it = iter(topic_second)
topic_first_year = [[next(it) for _ in range(size)] for size in windows_topic]
    # For each year, find which first-stage topics correspond to the tracked topic
dynamic_topic_list = []
for y in range(0, len(year)):
topic_year = [i for i, e in enumerate(topic_first_year[y]) if e == topic]
dynamic_topic_list.append(topic_year)
# Compute the list of list of topics (list of year and list of main topic)
dynamic_topic = []
for y in range(0, len(year)):
dynamic_list = dynamic_topic_list[y]
fy_topic = [windows_topic_list[y][dynamic_list[i]] for i in range(0,len(dynamic_list))]
dynamic_topic.append(fy_topic)
# Print the result in a dataframe
topic_print = []
names = []
# print the dynamic topic
for y in range(0,len(year)):
for t in range(0,len(dynamic_topic[y])):
topic_print.append(dynamic_topic[y][t])
names.append('Year_'+str(year[y])+'_'+str(t))
    df = pd.DataFrame(topic_print).transpose()
df.columns = names
return df, dynamic_topic_list
##########################################################################################################################################################
# Load the dataset
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Create a list of tokens
df["list_final_tokens"] = df["final_tokens"].str.split(' ').tolist()
year = df['FY'].unique()
##########################################################################################################################################################
# Preprocessing
# Create a new dataset for each fiscal year
for y in year:
df_subset = df[df['FY']==y]
# save the pickle file
pickle.dump(df_subset, open('/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Data/FR_'+str(y)+'.pkl','wb'))
# Create the term-document matrix tfidf for each pkl file
stop_wds = ['research', 'study', 'project'] # use will be eliminated by max_df
for y in year:
# Load the sample for a given year
fw = open('/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Data/FR_'+str(y)+'.pkl', 'rb')
dfw = pickle.load(fw)
fw.close()
# Pre-processing the pkl file
(tf_idf, tfidf_vectorizer) = preprocess(dfw, stop_wds)
    # Save the term-document matrix together with the fiscal-year subset (dfw, not the full df)
    joblib.dump((tf_idf,tfidf_vectorizer,dfw), '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Term_docs_'+str(y)+'.pkl' )
print('Document-terms matrix for '+str(y))
# build the dictionary id2word
docs = df["list_final_tokens"]
[dictionary, corpus] = createLDAvars(docs)
# Delete some information to release the memory
del df
del df_subset
del dfw
del corpus
del stop_wds
###########################################################################################################################################################
# Run a dynamic topic model
# First stage : use the same list of number of topics for both first and second stage
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Term_docs_'
n_topics = list(range(20,61,5))
(windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms) = first_stage(path, year, n_topics, dictionary, docs)
# Solve for the second stage
(M, all_terms, topics_list, W_list, H_list) = second_stage(windows_H, windows_terms, n_topics)
# Compute the coherence for the second stage
coherence = []
for t in range(0,len(n_topics)):
window_term_rankings = topics_list[t]
cm = CoherenceModel(topics=window_term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=30)
coherence.append(cm.get_coherence())
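# Added step (sketch, mirroring the selection done inside first_stage): pick the
# second-stage model with the highest coherence.
best_index = coherence.index(numpy.nanmax(coherence))
print('Best second-stage model:', n_topics[best_index], 'topics, coherence =', coherence[best_index])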
#########################################################################################################################################################
# Save the result
# save output for the first stage
joblib.dump((windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms), '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/slurm_result/first_stage.pkl' )
# save output for the second stage
joblib.dump((M, all_terms, topics_list, W_list, H_list), '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/slurm_result/second_stage.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Structured_topic_models~hPAM~train_hPAM_pandemics.py | import pandas as pd
import numpy as np
import pickle
import tomotopy as tp
import sys
import gensim
from gensim.models.coherencemodel import CoherenceModel
import time
from collections import Counter
import csv
import statistics
df = pd.read_pickle("../dspg20RnD/data/final/dashboard_data/pandemic_corpus.pkl")
docs = df["final_frqwds_removed"]
# function to get topic word distributions
def list_topics_hpam(mdl, top_n):
topic_words = []
for k in range(1 + mdl.k1 + mdl.k2):
topic_words.append([words[0] for words in mdl.get_topic_words(k, top_n)])
return topic_words
# hPAM topic dist
# function to get level of each topic as well as top 10 words in dist
def sub_topic_dist(mdl, top_n):
sub_topics = []
topic_words = []
topic_words.append([-1, 0, mdl.get_topic_words(0, top_n = top_n)])
for k in range(1, 1+mdl.k1):
topic_words.append([0, k, mdl.get_topic_words(k, top_n = top_n)])
for p in range(1+mdl.k1, 1+mdl.k1+mdl.k2):
topic_words.append([1, p, mdl.get_topic_words(p, top_n = top_n)])
topic_words_df = pd.DataFrame(topic_words)
topic_words_df.columns = ['parent_level', 'topic_id', 'Top 10 words']
for l in range(mdl.k1):
subtopics = mdl.get_sub_topics(l, top_n = 3)
sub_topics.append(subtopics)
sub_topic_df = pd.DataFrame(sub_topics)
return topic_words_df, sub_topic_df
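# Added note: in tomotopy's HPAModel, topic 0 is the root topic, topics 1..k1
# are the super-topics, and topics k1+1..k1+k2 are the sub-topics, which is why
# the loops above split the topic ids into those three ranges.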
def createTCvars(docs):
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
def train_hpam(min_cf, rm_top, top_n,
alpha, eta, k1, k2,
corpus, id2word, docs):
# initialize hPAM
mdl = tp.HPAModel(tw = tp.TermWeight.IDF, min_cf = min_cf, rm_top = rm_top,
k1 = k1, k2 = k2, alpha = alpha, eta = eta, seed = 321)
# load docs
for abstracts in docs:
mdl.add_doc(abstracts)
# setup model
mdl.burn_in = 100
mdl.train(0)
# train model
for i in range(0, 1000, 10):
mdl.train(10)
# create list of topics
topics = list_topics_hpam(mdl, top_n = top_n)
# calculate topic coherence
cm = CoherenceModel(topics = topics, corpus = corpus, dictionary = id2word,
texts = docs, coherence = 'c_v', processes = 8)
cv = cm.get_coherence()
# get topic distributions
topic_words_df, sub_topic_df = sub_topic_dist(mdl, top_n = 10)
# extract candidates for auto topic labeling
extractor = tp.label.PMIExtractor(min_cf = min_cf, min_df = 0, max_len = 5, max_cand = 10000)
cands = extractor.extract(mdl)
# ranking the candidates of labels for a specific topic
labeler = tp.label.FoRelevance(mdl, cands, min_df = 0, smoothing = 1e-2, mu = 0.25)
label_lst = []
for p in range(1+mdl.k1+mdl.k2):
        label_lst.append([label for label, score in labeler.get_topic_labels(p, top_n = 5)])
label_df = pd.DataFrame(label_lst)
label_df.columns = ['Label 1', 'Label 2', 'Label 3', 'Label 4', 'Label 5']
topic_words_label_df = pd.concat([topic_words_df.reset_index(drop = True), label_df], axis = 1)
return cv, topic_words_label_df
docs_dict, docs_corpus = createTCvars(docs)
# hPAM parameters
alpha_vec = [0.15, 0.20, 0.25, 0.30, 0.35]
eta_vec = [0.3, 0.4, 0.5, 0.6, 0.7]
min_cf = 50
rm_top = 0
k1 = 5
k2 = 30
with open('hpam_results/pandemics/optimal_idf/hPAM_pandemics_all_tune.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['alpha', 'eta', 'min_cf', 'rm_top', 'k1', 'k2', 'tc'])
for alpha in alpha_vec:
for eta in eta_vec:
tc_vec = []
for l in range(10):
tc, topic_labels = train_hpam(min_cf = min_cf,
rm_top = rm_top,
top_n = 10,
alpha = alpha,
eta = eta,
k1 = k1,
k2 = k2,
corpus = docs_corpus,
id2word = docs_dict,
docs = docs)
tc_vec.append(tc)
writer.writerow([alpha, eta, min_cf, rm_top, k1, k2, statistics.median(tc_vec)])
#topic_labels.to_csv(r'hpam_results/pandemics/optimal_idf/hpam_pandemics_idf_alpha={}_eta={}_min_cf={}_rm_top={}_k1={}_k2={}.csv'.format(alpha, eta, min_cf, rm_top, k1, k2), index = False) | [] |
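# Added sketch (not in the original script): rank the tuning grid written above
# to find the (alpha, eta) pair with the highest median coherence.
results = pd.read_csv('hpam_results/pandemics/optimal_idf/hPAM_pandemics_all_tune.csv')
print(results.sort_values('tc', ascending=False).head())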
2024-01-10 | uva-bi-sdad/publicrd | src~Topic%20Modelling%20FR~4.%20Final%20Model%20Runs~topic_tune_nmf.py | import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
import gensim
import time
from sklearn.decomposition import NMF, TruncatedSVD, LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.models.coherencemodel import CoherenceModel
# DATA INGESTION =========================================================
# import NSF data
#f = open('../../data/prd/RND Topic Modelling/nsf_stanford_lemma.sav', 'rb')
# import entire dataset
f = open('../../data/prd/RND Topic Modelling/lda_data_stanford_lemma.sav', 'rb')
[corpus, id2word, docs] = pickle.load(f)
f.close()
# corpus - word frequency in docs
# id2word - dictionary
# docs - lemmatized abstracts
# input needed for LDA, NMF and LSA (all from Scikit-Learn) is one string per document (not a list of strings)
text = []
for doc in docs:
text.append(" ".join(doc))
# FUNCTIONS NEEDED FOR ALL MODELS ============================================
# function slightly modified from https://nlpforhackers.io/topic-modeling/
def print_topics(model, vectorizer, top_n=10):
for idx, topic in enumerate(model.components_): # loop through each row of H. idx = row index. topic = actual row
print("\nTopic %d:" % (idx))
#print([(vectorizer.get_feature_names()[i], topic[i]) # printing out words corresponding to indices found in next line
#for i in topic.argsort()[:-top_n - 1:-1]]) # finding indices of top words in topic
print_list = [(vectorizer.get_feature_names()[i], topic[i])
for i in topic.argsort()[:-top_n - 1:-1]]
for item in print_list:
print(item)
# Function to format topics as a "list of list of strings".
# Needed for topic coherence function in Gensim
# function modified from https://nlpforhackers.io/topic-modeling/
def list_topics(model, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(model.components_): # loop through each row of H. idx = row index. topic = actual row
#print("\nTopic %d:" % (idx))
#print([(vectorizer.get_feature_names()[i], topic[i]) # printing out words corresponding to indices found in next line
#for i in topic.argsort()[:-top_n - 1:-1]]) # finding indices of top words in topic
if top_n == -1:
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# CREATE TF_IDF ============================================================================
# used for NMF and LSA
tfidf_vectorizer = TfidfVectorizer(max_df=0.4, min_df=3, lowercase=False, max_features=int(len(docs)/2))
tf_idf = tfidf_vectorizer.fit_transform(text)
# NMF RUNS ===================================================================================
# function adapted from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/
def nmf_metrics(doc_term_matrix, n_topics, vectorizer, corpus, id2word, docs, rand_start):
"""
    Compute c_v topic coherence for various numbers of topics
Parameters:
----------
tf_idf
n_topics : list of number of topics
Returns:
-------
coherence_values : c_v topic coherence values corresponding to the NMF model with respective number of topics
"""
coherence_values = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
print(f" Model time: {t2-t1}")
# create list of topics
topics = list_topics(nmf_model, vectorizer, top_n=10)
# calculate coherence
t1 = time.time()
cm = CoherenceModel(topics=topics, corpus=corpus, dictionary=id2word, texts=docs,
coherence='c_v', processes=10) #window_size=500 )
coherence_values.append(cm.get_coherence())
t2 = time.time()
print(f" Coherence time: {t2-t1}")
# output completion message
i = i+1
print('Number of topics =', num_topics, "complete.")
return coherence_values
# code copied from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/
# minor alterations made
n_topics = range(4,101,4)
num_runs = 3
batch = 1
col_names = [f"iteration {i+batch}" for i in range(num_runs)]
nmf_c = pd.DataFrame(index = n_topics, columns = col_names)
for i in range(num_runs):
print(f"Iteration {i}")
# run models
c = nmf_metrics(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer,
corpus=corpus, id2word=id2word, docs=docs, rand_start = (i+batch)*len(n_topics))
# save results
nmf_c[f"iteration {i+batch}"] = c
# SAVE RESULTS
#nmf_c.to_pickle("./nsf_nmf_c67.pkl")
nmf_c.to_pickle("./nmf_c123.pkl") | [] |
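# Added diagnostic (sketch, not part of the original run): average the coherence
# across runs and report the best-scoring number of topics.
mean_scores = nmf_c.mean(axis=1)
print("Best number of topics:", mean_scores.idxmax(), "with mean c_v coherence:", mean_scores.max())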
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_lda~pandemic_case_study~slurm_lda_dtm_bestchain_30.py | import pandas as pd
import numpy as np
import pickle
import time
import gensim
import decimal
import csv
# setting up our imports
from gensim.models import ldaseqmodel
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Import the final tokens
f = open('/project/biocomplexity/sdad/projects_data/ncses/prd/Tech-Report/case_studies/coronavirus_corpus.pkl', 'rb')
df = pickle.load(f)
f.close()
# Create the dictionary and the corpus
def createLDAvars(docs):
# Create the variables needed for LDA from df[final_frqwds_removed]: dictionary (id2word), corpus
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#Filter words to only those found in at least a set number of documents (min_appearances)
id2word.filter_extremes(no_below=20, no_above=0.6)
# filter out stop words - "use" already filtered out by previous line
id2word.filter_tokens(bad_ids=[id2word.token2id['research'], id2word.token2id['study'], \
id2word.token2id['project']])
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
# corpus not need for c_v coherence
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
# build the dictionary id2word
docs = df["final_tokens"]
[dictionary, corpus] = createLDAvars(docs)
# Create the time slice using the fiscal year
df['Year'] = df['FY']
time_slice = df['PROJECT_ID'].groupby(df['Year']).count()
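# Caution (added note): LdaSeqModel assumes the corpus is ordered by time, i.e.
# the first time_slice[0] documents belong to the first period, the next
# time_slice[1] to the second, and so on; if df is not sorted by fiscal year,
# sort it before building the corpus.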
time_points = list(time_slice.index)

# Find the best chain variance parameter
# (range() does not accept floats, so build the grid with np.linspace)
chain_vec = list(np.linspace(0.01, 0.2, 10))
coherence_mean = []
coherence_median = []

for chain in chain_vec:
    
    # Run a DTM-LDA with the specific chain variance
    ldaseq_chain = ldaseqmodel.LdaSeqModel(corpus=corpus, id2word=dictionary, time_slice=time_slice, num_topics=30, chain_variance=chain)
    
    # Compute the coherence for each time slice
    time_coherence = []
    for t in range(0,len(time_points)):
        topics_dtm = ldaseq_chain.dtm_coherence(time=t)
        cm = CoherenceModel(topics=topics_dtm, dictionary=dictionary, texts=docs, coherence='c_v', processes=30)
        time_coherence.append(cm.get_coherence())
    
    # Coherence series over time for this model
    coherence_tm = pd.Series(time_coherence, index=time_points)
    
    # Store the average and the median coherence
    coherence_mean.append(coherence_tm.mean())
    coherence_median.append(coherence_tm.median())

# Collect the results: one row per chain variance, with its mean and median coherence
coherence_serie = pd.DataFrame({'mean': coherence_mean, 'median': coherence_median}, index=chain_vec)
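# Added sketch: report the chain variance with the highest mean coherence.
print('Best chain variance (by mean coherence):', coherence_serie['mean'].idxmax())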
# save the global coherence value in a pickle file
pickle.dump(coherence_serie, open('/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/LDA/Coherence_chain_30.pkl','wb')) | [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Find the topics that maximise the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
model_index = []
max_coherence = []

for fy in year:
    
    # upload the results that are needed for the coherence
    topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
    docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
    dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
    
    # compute the coherence for every candidate number of topics (reset the list for each year)
    coherence = []
    for t in range(0,len(n_topics)):
        term_rankings = topics_list[t]
        cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
        
        # get the coherence value
        coherence.append(cm.get_coherence())
        print("one step")
    
    # find the number of topics that maximises the coherence for this year
    max_value = numpy.nanmax(coherence)
    index = coherence.index(max_value)
    model_index.append(index)
    max_coherence.append(max_value)
    print('------- solved for year '+str(fy)+' -------')
# Save the result from the first step
joblib.dump((model_index, max_coherence), path+'first_stage.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2018.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
            # check whether the vectorizer has a get_feature_names attribute; if not, it is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrix W and H
W = nmf_model.fit_transform(doc_term_matrix)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
        #    threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
        #    topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2018
# Find the topics that maximise the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solved nmf for year '+str(fy)+' -------')
# Compute the coherence
coherence = []
# upload the results that are needed for the coherence
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solved coherence for year '+str(fy)+' -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2008.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
            # check whether the vectorizer has a get_feature_names attribute; if not, it is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrix W and H
W = nmf_model.fit_transform(doc_term_matrix)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
        #    threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
        #    topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2008
# Find the topics that maximise the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solved nmf for year '+str(fy)+' -------')
# Compute the coherence
coherence = []
# upload the results that are needed for the coherence
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solved coherence for year '+str(fy)+' -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_lda~pandemic_case_study~slurm_lda_dtm_full.py | import pandas as pd
import numpy as np
import pickle
import time
import gensim
import decimal
import csv
# setting up our imports
from gensim.models import ldaseqmodel
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Import the final tokens
f = open('/project/biocomplexity/sdad/projects_data/ncses/prd/Tech-Report/case_studies/coronavirus_corpus.pkl', 'rb')
df = pickle.load(f)
f.close()
# Create the dictionary and the corpus
def createLDAvars(docs):
# Create the variables needed for LDA from df[final_frqwds_removed]: dictionary (id2word), corpus
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#Filter words to only those found in at least a set number of documents (min_appearances)
id2word.filter_extremes(no_below=20, no_above=0.6)
# filter out stop words - "use" already filtered out by previous line
id2word.filter_tokens(bad_ids=[id2word.token2id['research'], id2word.token2id['study'], \
id2word.token2id['project']])
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
# corpus not need for c_v coherence
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
# build the dictionary id2word
docs = df["final_tokens"]
[dictionary, corpus] = createLDAvars(docs)
# Create the time slice using the fiscal year
df['Year'] = df['FY']
time_slice = df['PROJECT_ID'].groupby(df['Year']).count()
time_points = list(time_slice.index)

# Grid-search the number of topics and the chain variance parameter
# (range() does not accept floats, so build the chain grid with np.linspace)
chain_vec = list(np.linspace(0.01, 0.2, 10))
topic_vec = list(range(10,100,10))
coherence_vec = []

for topic in topic_vec:
    
    # For each value of the chain variance, run a dynamic LDA
    chain_coherence = []
    for chain in chain_vec:
        
        # Run a DTM-LDA with the specific chain variance
        ldaseq_chain = ldaseqmodel.LdaSeqModel(corpus=corpus, id2word=dictionary, time_slice=time_slice, num_topics=topic, chain_variance=chain)
        
        # Compute the coherence for each time slice
        time_coherence = []
        for t in range(0,len(time_points)):
            topics_dtm = ldaseq_chain.dtm_coherence(time=t)
            cm = CoherenceModel(topics=topics_dtm, dictionary=dictionary, texts=docs, coherence='c_v', processes=30)
            time_coherence.append(cm.get_coherence())
        
        # Average the coherence series over time for this model
        coherence_tm = pd.Series(time_coherence, index=time_points)
        chain_coherence.append(coherence_tm.mean())
    
    # Save the mean coherence of every chain variance at this number of topics
    coherence_vec.append(chain_coherence)
# save the global coherence value in a pickle file
pickle.dump(coherence_vec, open('/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/LDA/Coherence.pkl','wb')) | [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_dynamic_nmf.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from scipy.linalg import block_diag
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Create a new document term matrix using the topic distribution
def create_matrix(windows_H, windows_terms):
"""
Create the topic-term matrix from all window topics that have been added so far.
Parameters:
----------
    windows_H: window topic distributions restricted to the top-n words
    windows_terms: window terms used for each fiscal year
"""
    # Combine the window term lists (all_windows_terms); the unique terms (all_terms) are obtained after summing duplicated columns below
all_windows_terms = sum(windows_terms,[])
    # Create a block diagonal matrix of all window topics: the number of columns equals the length of all_windows_terms
M = block_diag(*windows_H)
# Identify duplicated terms (columns) and sum them
    # The fastest way is to turn M into a data frame and sum duplicated columns with groupby
dfM = pd.DataFrame(data = M, columns=all_windows_terms).groupby(level=0, axis=1).sum()
    # Convert the dataframe back to a matrix and take the column names (in matrix order) as the final term list
M_concat = dfM.to_numpy()
all_terms = list(dfM.columns)
print('--- New document-terms have been created ---')
return M_concat, all_terms
# Track the dynamic of a given topic (option topic)
def track_dynamic(topic,W,windows_topic_list):
"""
    Link topics in the first stage with topics in the second stage using the matrix W
Parameters:
----------
topic: topic to track the dynamic
    W: weight matrix from the second stage
windows_topic_list: topic list from the first stage
"""
    # For each topic from the first stage (rows), find the topic in the second stage (columns) with the highest weight
topic_second = []
for i, topic_first in enumerate(W):
topic_second.append(topic_first.argmax())
    # Split the first-stage topic assignments by year
it = iter(topic_second)
topic_first_year = [[next(it) for _ in range(size)] for size in windows_topic]
    # For each year, find which first-stage topics correspond to the tracked topic
dynamic_topic_list = []
for y in range(0, len(year)):
topic_year = [i for i, e in enumerate(topic_first_year[y]) if e == topic]
dynamic_topic_list.append(topic_year)
# Compute the list of list of topics (list of year and list of main topic)
dynamic_topic = []
for y in range(0, len(year)):
dynamic_list = dynamic_topic_list[y]
fy_topic = [windows_topic_list[y][dynamic_list[i]] for i in range(0,len(dynamic_list))]
dynamic_topic.append(fy_topic)
# Print the result in a dataframe
topic_print = []
names = []
# print the dynamic topic
for y in range(0,len(year)):
for t in range(0,len(dynamic_topic[y])):
topic_print.append(dynamic_topic[y][t])
names.append('Year_'+str(year[y])+'_'+str(t))
    df = pd.DataFrame(topic_print).transpose()
df.columns = names
return df, dynamic_topic_list
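# Hypothetical usage (added sketch): with the best second-stage model selected,
# e.g. df_dyn, dyn_idx = track_dynamic(3, W_list[index], windows_topic_list).
# Note that the function also relies on the globals `windows_topic` (number of
# topics kept per window) and `year`, which must be defined before calling it.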
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
            # check whether the vectorizer has a get_feature_names attribute; if not, it is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrix W and H
W = nmf_model.fit_transform(doc_term_matrix)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
        #    threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
        #    topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
# Create a new term-document matrix: combine all the top terms from the window NMF models
windows_topic_list = []
windows_W = []
windows_H = []
windows_terms = []
# Build the windows H matrix
for fy in year:
# Upload the nmf model
tfidf_vectorizer = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[1]
(nmf_time,topics_list,W_list,H_list) = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
(model, max_coherence) = joblib.load( path+'Coherence/model_'+str(fy)+'.pkl' )
# Build the list of terms for all topics (top_n) in a given fiscal year
fy_topic_list = topics_list[model]
# Get the H and W matrix for the model
W = W_list[model]
H = H_list[model]
    # recover the list of terms from the vectorizer (or use the list directly if terms were passed in)
if hasattr(tfidf_vectorizer, 'get_feature_names'):
terms = tfidf_vectorizer.get_feature_names()
else:
terms = tfidf_vectorizer
# select the index of terms that appear in the topics and subset the matrix H to those terms
topic_terms = list(set(sum(fy_topic_list,[])))
indcol = [terms.index(i) for i in topic_terms]
subH = H[:,indcol]
    # For each topic (row), set the weight of terms that are not listed in the topic to 0.
for i,j in enumerate(subH):
# by row find the index of top_n terms
indtopic = [topic_terms.index(p) for p in fy_topic_list[i]]
notop = [k for k in range(len(topic_terms)) if k not in indtopic]
j[notop]=0
# append the result
windows_topic_list.append(fy_topic_list)
windows_W.append(W)
windows_H.append(subH)
windows_terms.append(topic_terms)
# Build the new document-term matrix M
(M, all_terms) = create_matrix(windows_H, windows_terms)
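# Added note: in the second stage each first-stage window topic becomes a
# pseudo-document (a row of M), so the NMF below groups window topics into
# dynamic topics that persist across fiscal years.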
# save the new tif-idf matrix
joblib.dump((M, all_terms), path+'new_Term_docs.pkl' )
# Run am nmf model from the new document term matrix
batch = 7
(nmf_time,topics_list,W_list,H_list) = nmf_models(doc_term_matrix=M, n_topics=n_topics, vectorizer=all_terms, rand_start = (batch)*len(n_topics))
# Save the result for the second nmf
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/second_stage.pkl' )
# Compute the coherence for the second-stage (dynamic) topics
coherence = []
# upload the result that are necessary for the coherence
topics_list = joblib.load( path+'nmf_out/second_stage.pkl' )[1]
(docs,dictionary) = joblib.load( path+'dico_docs.pkl' )
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solved coherence for the final model -------')

# Save the result from the second stage
joblib.dump((index, max_value, coherence), path+'Coherence/final_model.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2019.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
            # check whether the vectorizer has a get_feature_names attribute; if not, it is already a list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
nmf_model.fit_transform(doc_term_matrix)
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrix W and H
W = nmf_model.fit_transform(doc_term_matrix)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
        #    threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
        #    topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2019
# Find the topics that maximise the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solved nmf for year '+str(fy)+' -------')
# Compute the coherence
coherence = []
# upload the results that are needed for the coherence
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2009.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names method; if not, treat vectorizer as a plain list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix) # fit once and keep W; a second fit below would only repeat the work
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W comes from the timed fit above)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2009
# Find the number of topics that maximises the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# load the results needed for the coherence calculation
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2016.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names method; if not, treat vectorizer as a plain list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix) # fit once and keep W; a second fit below would only repeat the work
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W comes from the timed fit above)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2016
# Find the number of topics that maximises the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# load the results needed for the coherence calculation
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_lda~pandemic_case_study~slurm_lda_dtm_30.py | import pandas as pd
import numpy
import pickle
import time
import gensim
#from sklearn.decomposition import LatentDirichletAllocation
#from sklearn.feature_extraction.text import CountVectorizer
#from gensim.models.coherencemodel import CoherenceModel
# setting up our imports
from gensim.models import ldaseqmodel
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
# Import the final tokens
f = open('/project/biocomplexity/sdad/projects_data/ncses/prd/Tech-Report/case_studies/coronavirus_corpus.pkl', 'rb')
df = pickle.load(f)
f.close()
# Create the dictionary and the corpus
def createLDAvars(docs):
# Create the variables needed for LDA from df["final_tokens"]: dictionary (id2word), corpus
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#Filter words to only those found in at least a set number of documents (min_appearances)
id2word.filter_extremes(no_below=20, no_above=0.6)
# filter out stop words - "use" already filtered out by previous line
id2word.filter_tokens(bad_ids=[id2word.token2id['research'], id2word.token2id['study'], \
id2word.token2id['project']])
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
# corpus not need for c_v coherence
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
# build the dictionary id2word
docs = df["final_tokens"]
[dictionary, corpus] = createLDAvars(docs)
# Create the time slice using the fiscal year
df['Year'] = df['FY']
time_slice = df['PROJECT_ID'].groupby(df['Year']).count()
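# Note: LdaSeqModel expects time_slice to be per-period document counts with the
# corpus ordered by period; if df is not already sorted by year, a safer (hedged)
# variant would be:
# df = df.sort_values('Year')
# time_slice = df.groupby('Year')['PROJECT_ID'].count().tolist()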
# Run the DTM: train the sequential LDA model
ldaseq = ldaseqmodel.LdaSeqModel(corpus=corpus, id2word=dictionary, time_slice=time_slice, num_topics=30)
# save to file
pickle.dump(ldaseq, open('/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/LDA/DTM_LDA_30.pkl','wb')) | [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Structured_topic_models~hPAM~train_hPAM_corona.py | import pandas as pd
import numpy as np
import pickle
import tomotopy as tp
import sys
import gensim
from gensim.models.coherencemodel import CoherenceModel
import time
from collections import Counter
import csv
import statistics
df = pd.read_pickle("../dspg20RnD/data/final/dashboard_data/corona_corpus.pkl")
docs = df["final_frqwds_removed"]
# function to get topic word distributions
def list_topics_hpam(mdl, top_n):
topic_words = []
for k in range(1 + mdl.k1 + mdl.k2):
topic_words.append([words[0] for words in mdl.get_topic_words(k, top_n)])
return topic_words
# hPAM topic dist
# function to get level of each topic as well as top 10 words in dist
def sub_topic_dist(mdl, top_n):
sub_topics = []
topic_words = []
topic_words.append([-1, 0, mdl.get_topic_words(0, top_n = top_n)])
for k in range(1, 1+mdl.k1):
topic_words.append([0, k, mdl.get_topic_words(k, top_n = top_n)])
for p in range(1+mdl.k1, 1+mdl.k1+mdl.k2):
topic_words.append([1, p, mdl.get_topic_words(p, top_n = top_n)])
topic_words_df = pd.DataFrame(topic_words)
topic_words_df.columns = ['parent_level', 'topic_id', 'Top 10 words']
for l in range(mdl.k1):
subtopics = mdl.get_sub_topics(l, top_n = 3)
sub_topics.append(subtopics)
sub_topic_df = pd.DataFrame(sub_topics)
return topic_words_df, sub_topic_df
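# Sketch of the output: topic_words_df has one row per topic (the root topic,
# then the k1 super-topics, then the k2 sub-topics) with its parent level and
# top-10 words; sub_topic_df lists the 3 strongest sub-topics per super-topic.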
def createTCvars(docs):
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
def train_hpam(min_cf, rm_top, top_n,
alpha, eta, k1, k2,
corpus, id2word, docs):
# initialize hPAM
mdl = tp.HPAModel(tw = tp.TermWeight.IDF, min_cf = min_cf, rm_top = rm_top,
k1 = k1, k2 = k2, alpha = alpha, eta = eta, seed = 123)
# load docs
for abstracts in docs:
mdl.add_doc(abstracts)
# setup model
mdl.burn_in = 100
mdl.train(0)
# train model
for i in range(0, 1000, 10):
mdl.train(10)
# create list of topics
topics = list_topics_hpam(mdl, top_n = top_n)
# calculate topic coherence
cm = CoherenceModel(topics = topics, corpus = corpus, dictionary = id2word,
texts = docs, coherence = 'c_v', processes = 8)
cv = cm.get_coherence()
# get topic distributions
topic_words_df, sub_topic_df = sub_topic_dist(mdl, top_n = 10)
# extract candidates for auto topic labeling
extractor = tp.label.PMIExtractor(min_cf = min_cf, min_df = 0, max_len = 5, max_cand = 10000)
cands = extractor.extract(mdl)
# rank the candidate labels for each topic
labeler = tp.label.FoRelevance(mdl, cands, min_df = 0, smoothing = 1e-2, mu = 0.25)
label_lst = []
for p in range(1+mdl.k1+mdl.k2):
label_lst.append([label for label, score in labeler.get_topic_labels(p, top_n = 5)]) # append a list, not a generator, so the DataFrame below gets 5 columns
label_df = pd.DataFrame(label_lst)
label_df.columns = ['Label 1', 'Label 2', 'Label 3', 'Label 4', 'Label 5']
topic_words_label_df = pd.concat([topic_words_df.reset_index(drop = True), label_df], axis = 1)
return cv, topic_words_label_df
docs_dict, docs_corpus = createTCvars(docs)
# hPAM parameters
alpha_vec = [0.15, 0.20, 0.25, 0.30, 0.35]
eta_vec = [0.3, 0.4, 0.5, 0.6, 0.7]
min_cf = 50
rm_top = 0
k1 = 7
k2 = 30
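# Grid search: for every (alpha, eta) pair the model is fit 10 times and the
# median coherence across those fits is written to the csv.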
with open('hpam_results/corona/optimal_idf/hPAM_corona_all_tune.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['alpha', 'eta', 'min_cf', 'rm_top', 'k1', 'k2', 'tc'])
for alpha in alpha_vec:
for eta in eta_vec:
tc_vec = []
for l in range(10):
tc, topic_labels = train_hpam(min_cf = min_cf,
rm_top = rm_top,
top_n = 10,
alpha = alpha,
eta = eta,
k1 = k1,
k2 = k2,
corpus = docs_corpus,
id2word = docs_dict,
docs = docs)
tc_vec.append(tc)
writer.writerow([alpha, eta, min_cf, rm_top, k1, k2, statistics.median(tc_vec)])
#topic_labels.to_csv(r'hpam_results/corona/optimal_idf/hpam_corona_idf_alpha={}_eta={}_min_cf={}_rm_top={}_k1={}_k2={}.csv'.format(alpha, eta, min_cf, rm_top, k1, k2), index = False)
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2012.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names method; if not, treat vectorizer as a plain list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix) # fit once and keep W; a second fit below would only repeat the work
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W comes from the timed fit above)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2012
# Find the number of topics that maximises the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# load the results needed for the coherence calculation
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~NMF_num_topic_tuning~nmf_100.py | import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
import gensim
import time
from sklearn.decomposition import NMF, TruncatedSVD, LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.models.coherencemodel import CoherenceModel
def print_topics(model, vectorizer, top_n=10):
for idx, topic in enumerate(model.components_): # loop through each row of H. idx = row index. topic = actual row
print("\nTopic %d:" % (idx))
#print([(vectorizer.get_feature_names()[i], topic[i]) # printing out words corresponding to indices found in next line
#for i in topic.argsort()[:-top_n - 1:-1]]) # finding indices of top words in topic
print_list = [(vectorizer.get_feature_names()[i], topic[i])
for i in topic.argsort()[:-top_n - 1:-1]]
for item in print_list:
print(item)
def list_topics(model, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(model.components_): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
f = open('coherence_vars_full.sav', 'rb')
[corpus, id2word, docs] = pickle.load(f)
f.close()
text = []
for abstract in docs:
text.append(" ".join(abstract))
tfidf_vectorizer = TfidfVectorizer(max_df=0.6, min_df=20, lowercase=False, max_features=int(len(docs)/2))
tf_idf = tfidf_vectorizer.fit_transform(text)
num_topics = 100
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = 0)
doc_topic = nmf_model.fit_transform(tf_idf)
t2 = time.time()
print(f" Model time: {t2-t1}")
topic_term = nmf_model.components_
# calculate topic coherence
# create list of topics
topics = list_topics(nmf_model, tfidf_vectorizer, top_n=10)
t1 = time.time()
cm = CoherenceModel(topics=topics, corpus=corpus, dictionary=id2word, texts=docs,
coherence='c_v', processes=10) #window_size=500 )
print(cm.get_coherence())
t2 = time.time()
print(f" Coherence time: {t2-t1}")
topics_5 = list_topics(nmf_model, tfidf_vectorizer, top_n=5)
nmf_output = pd.DataFrame(cm.get_coherence_per_topic(with_std=True))
nmf_output.insert(0, 'topic_words', topics_5)
nmf_output.columns = ['topic_words', 'coherence_mean', 'coherence_stdev']
doc_topic_df = pd.DataFrame(data=doc_topic.copy())
nmf_output["avg_weight_in_corpus"] = doc_topic_df.mean(axis=0)
nmf_output["med_weight_in_corpus"] = doc_topic_df.median(axis=0)
# create a column for the number of documents that contain a topic
doc_topic_bool = pd.DataFrame(data=doc_topic.copy())
doc_topic_bool[doc_topic_bool > 0] = 1
nmf_output["num_docs_containing_topic"] = doc_topic_bool.sum(axis=0)
nmf_output["percent_docs_containing_topic"] = 100*(nmf_output["num_docs_containing_topic"]/doc_topic.shape[0])
# find the dominant topic per document
max_topic = doc_topic_df.idxmax(axis=1)
nmf_output["num_times_max_topic"] = max_topic.value_counts()
nmf_output["percent_times_max_topic"] = 100*(nmf_output["num_times_max_topic"]/doc_topic.shape[0])
# save to file
pickle.dump([doc_topic, topic_term, nmf_output], open('nmf_tuning/full/nmf_100.sav','wb')) | [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2013.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names method; if not, treat vectorizer as a plain list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix) # fit once and keep W; a second fit below would only repeat the work
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W comes from the timed fit above)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2013
# Find the number of topics that maximises the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# load the results needed for the coherence calculation
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~NMF_num_topic_tuning~nmf_150.py | import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
import gensim
import time
from sklearn.decomposition import NMF, TruncatedSVD, LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.models.coherencemodel import CoherenceModel
def print_topics(model, vectorizer, top_n=10):
for idx, topic in enumerate(model.components_): # loop through each row of H. idx = row index. topic = actual row
print("\nTopic %d:" % (idx))
#print([(vectorizer.get_feature_names()[i], topic[i]) # printing out words corresponding to indices found in next line
#for i in topic.argsort()[:-top_n - 1:-1]]) # finding indices of top words in topic
print_list = [(vectorizer.get_feature_names()[i], topic[i])
for i in topic.argsort()[:-top_n - 1:-1]]
for item in print_list:
print(item)
def list_topics(model, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(model.components_): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
f = open('coherence_vars_full.sav', 'rb')
[corpus, id2word, docs] = pickle.load(f)
f.close()
text = []
for abstract in docs:
text.append(" ".join(abstract))
tfidf_vectorizer = TfidfVectorizer(max_df=0.6, min_df=20, lowercase=False, max_features=int(len(docs)/2))
tf_idf = tfidf_vectorizer.fit_transform(text)
num_topics = 150
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = 0)
doc_topic = nmf_model.fit_transform(tf_idf)
t2 = time.time()
print(f" Model time: {t2-t1}")
topic_term = nmf_model.components_
# calculate topic coherence
# create list of topics
topics = list_topics(nmf_model, tfidf_vectorizer, top_n=10)
t1 = time.time()
cm = CoherenceModel(topics=topics, corpus=corpus, dictionary=id2word, texts=docs,
coherence='c_v', processes=10) #window_size=500 )
print(cm.get_coherence())
t2 = time.time()
print(f" Coherence time: {t2-t1}")
topics_5 = list_topics(nmf_model, tfidf_vectorizer, top_n=5)
nmf_output = pd.DataFrame(cm.get_coherence_per_topic(with_std=True))
nmf_output.insert(0, 'topic_words', topics_5)
nmf_output.columns = ['topic_words', 'coherence_mean', 'coherence_stdev']
doc_topic_df = pd.DataFrame(data=doc_topic.copy())
nmf_output["avg_weight_in_corpus"] = doc_topic_df.mean(axis=0)
nmf_output["med_weight_in_corpus"] = doc_topic_df.median(axis=0)
# create a column for the number of documents that contain a topic
doc_topic_bool = pd.DataFrame(data=doc_topic.copy())
doc_topic_bool[doc_topic_bool > 0] = 1
nmf_output["num_docs_containing_topic"] = doc_topic_bool.sum(axis=0)
nmf_output["percent_docs_containing_topic"] = 100*(nmf_output["num_docs_containing_topic"]/doc_topic.shape[0])
# find the dominant topic per document
max_topic = doc_topic_df.idxmax(axis=1)
nmf_output["num_times_max_topic"] = max_topic.value_counts()
nmf_output["percent_times_max_topic"] = 100*(nmf_output["num_times_max_topic"]/doc_topic.shape[0])
# save to file
pickle.dump([doc_topic, topic_term, nmf_output], open('nmf_tuning/full/nmf_150.sav','wb')) | [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~SDAD_code~full_abstract~slurm_windows_coherence_2017.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# set all functions
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names method; if not, treat vectorizer as a plain list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix) # fit once and keep W; a second fit below would only repeat the work
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W comes from the timed fit above)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# Load the dataset.
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
# Compute the time variable
year = df['FY'].unique()
del df
# Select fiscal year
fy = 2017
# Find the number of topics that maximises the coherence for each window
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/'
n_topics = list(range(20,61,5))
batch = 7
# Load the document-terms matrix and solve an nmf model
(tf_idf,tfidf_vectorizer,docs,dictionary) = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )
(nmf_time, topics_list, W_list, H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Save the nmf output
joblib.dump((nmf_time,topics_list,W_list,H_list), path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )
del tf_idf
del tfidf_vectorizer
del docs
del nmf_time
del topics_list
del W_list
del H_list
del dictionary
print('------- solve nmf for a year -------')
# Compute the coherence
coherence = []
# load the results needed for the coherence calculation
topics_list = joblib.load( path+'nmf_out/windows_nmf'+str(fy)+'.pkl' )[1]
docs = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[2]
dictionary = joblib.load( path+'Term_docs_'+str(fy)+'.pkl' )[3]
for t in range(0,len(n_topics)):
term_rankings = topics_list[t]
cm = CoherenceModel(topics=term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=1)
# get the coherence value
coherence.append(cm.get_coherence())
print("one step")
# find the number of topics that maximizes the coherence
max_value = numpy.nanmax(coherence)
index = coherence.index(max_value)
print('------- solve coherence for a year -------')
# Save the result from the first step
joblib.dump((index, max_value), path+'Coherence/model_'+str(fy)+'.pkl' )
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Structured_topic_models~hLDA~train_hLDA_samp.py | import pandas as pd
import numpy as np
import pickle
import tomotopy as tp
import sys
import gensim
from gensim.models.coherencemodel import CoherenceModel
import time
from collections import Counter
df = pd.read_pickle("sample_abstracts.pkl")
docs = df["final_frqwds_removed"]
def list_topics_hlda(mdl, top_n):
topic_words = []
topic_levels = []
for k in range(mdl.k):
if not mdl.is_live_topic(k):
continue
topic_words.append([words[0] for words in mdl.get_topic_words(k, top_n)])
topic_levels.append(mdl.level(k))
return topic_words, dict(Counter(topic_levels))
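# Sketch of the second return value: dict(Counter(topic_levels)) maps depth to
# topic count, e.g. {0: 1, 1: 9, 2: 35} for one root topic, 9 second-level
# topics and 35 leaves (numbers here are illustrative only).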
def createTCvars(docs):
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#keep_only_most_common=int(len(docs)/2) #LDA works best with less features than documents
#Filter words to only those found in at least a set number of documents (min_appearances)
#id2word.filter_extremes(no_below=5, no_above=0.6, keep_n=keep_only_most_common)
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
def train_hlda(min_cf, rm_top, top_n,
alpha, eta, gamma, depth,
corpus, id2word, docs):
# initialize PA model
mdl = tp.HLDAModel(tw = tp.TermWeight.IDF, min_cf = min_cf, rm_top = rm_top,
depth = depth, alpha = alpha, eta = eta, gamma = gamma)
# load docs
for abstracts in docs:
mdl.add_doc(abstracts)
# setup model
mdl.burn_in = 100
mdl.train(0)
# train model
#print('Training...', file=sys.stderr, flush=True)
for i in range(0, 1000, 10):
mdl.train(10)
#print('Iteration: {}\tLog-likelihood: {}'.format(i, mdl.ll_per_word))
# create list of topics
topics, level_count = list_topics_hlda(mdl, top_n = top_n)
# calculate topic coherence
cm = CoherenceModel(topics = topics, corpus = corpus, dictionary = id2word,
texts = docs, coherence = 'c_v', processes = 8)
return cm.get_coherence(), level_count
docs_dict, docs_corpus = createTCvars(docs)
alpha_vec = [0.005, 0.01, 0.025, 0.1]
#alpha = 0.01
eta_vec = [0.05, 0.075, 0.1, 0.2, 0.3]
#eta = 0.01
gamma_vec = [0.01, 0.05, 0.1, 0.2]
#gamma = 0.1
min_cf_vec = [0, 1, 2]
#min_cf = 2
#rm_top_vec = [5, 10, 15]
rm_top = 10
#depth_vec = [3, 4, 5, 6, 7, 8]
depth = 4
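# Grid search over alpha, eta, gamma and min_cf with rm_top and depth held
# fixed; each row of param_tune_mat records the settings, the coherence and
# the total number of live topics hLDA discovered.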
param_tune_mat = np.zeros((len(alpha_vec)*len(eta_vec)*len(gamma_vec)*len(min_cf_vec),8))
param_ind = 0
num_topic_mat = []
for alpha in alpha_vec:
for eta in eta_vec:
for gamma in gamma_vec:
for min_cf in min_cf_vec:
tc, topics_in_lvl = train_hlda(min_cf = min_cf,
rm_top = rm_top,
top_n = 10,
alpha = alpha,
eta = eta,
gamma = gamma,
depth = depth,
corpus = docs_corpus,
id2word = docs_dict,
docs = docs)
param_tune_mat[param_ind,:] = [alpha, eta, gamma, min_cf, rm_top, depth, tc, sum(topics_in_lvl.values())]
param_ind += 1
num_topic_mat.append(topics_in_lvl)
param_tune_df = pd.DataFrame(param_tune_mat)
param_tune_df.columns = ['alpha', 'eta', 'gamma', 'min_cf', 'rm_top', 'depth', 'tc', 'total_topics']
param_tune_df.to_csv(r'hlda_results/hlda_samp_all_tune.csv', index = False)
num_topic_df = pd.DataFrame(num_topic_mat)
num_topic_df.to_csv(r'hlda_results/hlda_samp_topic_levels_all_tune.csv', index = False)
| [] |
2024-01-10 | uva-bi-sdad/publicrd | src~Dynamic_Topic_Modelling~DTM_nmf~Greene_Cross~unsupervised~nmf.py | import pandas as pd
import numpy
import pickle
import time
import joblib
import gensim
import matplotlib.pyplot as plt
from itertools import islice
from scipy.linalg import block_diag
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.corpora import Dictionary, bleicorpus
from gensim.matutils import hellinger
from gensim.models.coherencemodel import CoherenceModel
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# Set all functions for the dynamic topics modellings
##############################################################################################################################################
# Function need to solve the dynamic nmf
# function to create a new dictionary and corpus
def createLDAvars(docs):
# Create the variables needed for LDA from df["final_tokens"]: dictionary (id2word), corpus
# Create Dictionary
id2word = gensim.corpora.Dictionary(docs)
#Filter words to only those found in at least a set number of documents (min_appearances)
id2word.filter_extremes(no_below=20, no_above=0.6)
# filter out stop words - "use" already filtered out by previous line
id2word.filter_tokens(bad_ids=[id2word.token2id['research'], id2word.token2id['project']])
# Create Corpus (Term Document Frequency)
#Creates a count for each unique word appearing in the document, where the word_id is substituted for the word
# corpus not need for c_v coherence
corpus = [id2word.doc2bow(doc) for doc in docs]
return id2word, corpus
# function to pre-process the data: compute tfidf
def preprocess(df, stopwords):
# Append all the final tokens
text = []
docs = df['final_tokens']
for abstract in docs:
text.append(' '.join(abstract))
# Create the term-document matrix
tfidf_vectorizer = TfidfVectorizer(max_df=1.0, min_df=0, lowercase=False, stop_words=stopwords) # use the function argument rather than the global stop_wds
tf_idf = tfidf_vectorizer.fit_transform(text)
return (tf_idf, tfidf_vectorizer)
# function to list topic (modified function from https://nlpforhackers.io/topic-modeling/)
def list_topics(topic_term_dist, vectorizer, top_n=10):
#input. top_n: how many words to list per topic. If -1, then list all words.
topic_words = []
for idx, topic in enumerate(topic_term_dist): # loop through each row of H. idx = row index. topic = actual row
if top_n == -1:
# check whether the vectorizer has a get_feature_names method; if not, treat vectorizer as a plain list of terms
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[::-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[::-1]])
else:
if hasattr(vectorizer, 'get_feature_names'):
topic_words.append([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-top_n - 1:-1]])
else:
topic_words.append([vectorizer[i] for i in topic.argsort()[:-top_n - 1:-1]])
return topic_words
# function to solve the nmf (modified from https://datascienceplus.com/evaluation-of-topic-modeling-topic-coherence/)
def nmf_models(doc_term_matrix, n_topics, vectorizer, rand_start):
"""
Compute NMF model, save topics list for coherence calc
Parameters:
----------
doc_term_matrix: document-terms matrix
n_topics: list of topics number
vectorizer: vector of terms
rand_start: random seed
"""
nmf_time = []
topics_list = []
W_list = []
H_list = []
i = rand_start
for num_topics in n_topics:
# create model
t1 = time.time()
nmf_model = NMF(n_components=num_topics, random_state = i)
W = nmf_model.fit_transform(doc_term_matrix) # fit once and keep W; a second fit below would only repeat the work
t2 = time.time()
nmf_time.append(t2-t1)
#print(f" Model time: {t2-t1}", flush=True)
# create list of topics
topics = list_topics(nmf_model.components_, vectorizer, top_n=10)
topics_list.append(topics)
# output completion message
i = i+1
#print('Number of topics =', num_topics, "complete.", flush=True)
# save the matrices W and H (W comes from the timed fit above)
W_list.append(W)
H = nmf_model.components_
# truncate the H matrix: set the weight of the non top n words to zero
#top_n = 10
#for idx, topic in enumerate(H):
# threshold = numpy.nanmin(topic[topic.argsort()[:-top_n-1:-1]])
# topic[topic<threshold]=0
H_list.append(H)
return nmf_time, topics_list, W_list, H_list
# function to solve the first stage of the dynamic nmf
def first_stage(path, year, n_topics, dictionary, docs):
"""
Solve a dynamic nmf for each fiscal year
Parameters:
----------
path: location of term-document matrix
year: list of fiscal year
n_topics: list of topics number
dictionary: dictionary of terms
docs: corpus
"""
batch = 7
windows_coherence = []
windows_topic_list = []
windows_topic = []
windows_W = []
windows_H = []
windows_terms = []
# Run the dynamic nmf for each fiscal year
for y in year:
# Load the document-term matrix
(tf_idf,tfidf_vectorizer,df) = joblib.load( path+str(y)+'.pkl' )
# save all the term
terms = tfidf_vectorizer.get_feature_names()
# Solve an nmf model for a given range of topics
(nmf_time,topics_list,W_list,H_list) = nmf_models(doc_term_matrix=tf_idf, n_topics=n_topics, vectorizer=tfidf_vectorizer, rand_start = (batch)*len(n_topics))
# Compute the coherence for each topics
coherence = []
for t in range(0,len(n_topics)):
window_term_rankings = topics_list[t]
cm = CoherenceModel(topics=window_term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=30)
coherence.append(cm.get_coherence())
# find the number of topics that maximizes the coherence
max_coherence = numpy.nanmax(coherence)
index = coherence.index(max_coherence)
topic_select = n_topics[index]
fy_topic_list = topics_list[index]
W = W_list[index]
H = H_list[index]
# For the best model (the one that maximises the coherence) transform the matrix H (for each topic, set the weight of non top-n terms to 0)
# select all the unique terms of topics
topic_terms = list(set(sum(fy_topic_list,[])))
# select the index of terms that appear in the topics and subset the matrix H to those terms
indcol = [terms.index(i) for i in topic_terms]
subH = H[:,indcol]
# For each topic (row) set the weight of terms that are not listed in the topic to 0.
for i,j in enumerate(subH):
# by row find the index of top_n terms
indtopic = [topic_terms.index(p) for p in fy_topic_list[i]]
notop = [k for k in range(len(topic_terms)) if k not in indtopic]
j[notop]=0
# append the result
windows_coherence.append(max_coherence)
windows_topic_list.append(fy_topic_list)
windows_topic.append(topic_select)
windows_W.append(W)
windows_H.append(subH)
windows_terms.append(topic_terms)
print('--- windows topic '+str(y)+' solve ---')
print('--- Dynamic nmf: first stage clear ---')
return windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms
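# Design note: only the union of each window's top-10 terms is kept in the
# truncated H matrices, so the combined topic-term matrix assembled in the
# second stage stays small and sparse.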
# Create a new document term matrix using the topic distribution
def create_matrix(windows_H, windows_terms):
"""
Create the topic-term matrix from all window topics that have been added so far.
Parameters:
----------
windows_H: window topic distributions over the top-n words
windows_terms: windows terms used for each fiscal year
"""
# Build the combined window term list (all_windows_terms); the unique terms across windows (all_terms) are derived below
all_windows_terms = sum(windows_terms,[])
# Create a block diagonal matrix of all topics: the number of rows is the same as the length of list_terms
M = block_diag(*windows_H)
# Identify duplicated terms (columns) and sum them
# The fastest way is to transform M into a data frame and sum duplicate columns by name
dfM = pd.DataFrame(data = M, columns=all_windows_terms).groupby(level=0, axis=1).sum()
# Transform the dataframe back to a matrix and take the column names (in matrix order) as the final term list (all_terms)
M_concat = dfM.to_numpy()
all_terms = list(dfM.columns)
print('--- New document-terms have been created ---')
return M_concat, all_terms
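# Tiny worked sketch (terms and values are assumptions): if window 1 has H1
# over ['cell', 'gene'] and window 2 has H2 over ['gene', 'laser'], then
# block_diag(H1, H2) is labelled with the concatenated term list and the two
# 'gene' columns are summed, giving one matrix over ['cell', 'gene', 'laser'].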
# function to solve the second stage of the dynamic nmf
def second_stage(windows_H, windows_terms, n_topics):
"""
Build a new document term matrix and run a new nmf model
Parameters:
----------
windows_H: window topic distributions over the top-n words
windows_terms: windows terms used for each fiscal year
n_topics: list of topics number for the second stage
"""
batch = 7
# Build the new document-term matrix
(M, all_terms) = create_matrix(windows_H, windows_terms)
# Run a second nmf model
(nmf_time,topics_list,W_list,H_list) = nmf_models(doc_term_matrix=M, n_topics=n_topics, vectorizer=all_terms, rand_start = (batch)*len(n_topics))
print('--- Dynamic nmf: second stage clear ---')
return M, all_terms, topics_list, W_list, H_list
# Track the dynamics of a given topic across years
def track_dynamic(topic,W,windows_topic_list):
"""
Link topics from the first stage with topics from the second stage using the matrix W
Parameters:
----------
topic: topic to track the dynamic
W: weight matrix from the second stage
windows_topic_list: topic list from the first stage
"""
# For each topic from the first stage (rows) find the topic in the second stage (columns) with the highest weight
topic_second = []
for i, topic_first in enumerate(W):
topic_second.append(topic_first.argmax())
# Split the first-stage topic assignments by year
it = iter(topic_second)
topic_first_year = [[next(it) for _ in range(size)] for size in windows_topic]
# For each year, identify which first-stage topics correspond to the tracked topic
dynamic_topic_list = []
for y in range(0, len(year)):
topic_year = [i for i, e in enumerate(topic_first_year[y]) if e == topic]
dynamic_topic_list.append(topic_year)
# Build, for each year, the lists of top words of the matching first-stage topics
dynamic_topic = []
for y in range(0, len(year)):
dynamic_list = dynamic_topic_list[y]
fy_topic = [windows_topic_list[y][dynamic_list[i]] for i in range(0,len(dynamic_list))]
dynamic_topic.append(fy_topic)
# Print the result in a dataframe
topic_print = []
names = []
# print the dynamic topic
for y in range(0,len(year)):
for t in range(0,len(dynamic_topic[y])):
topic_print.append(dynamic_topic[y][t])
names.append('Year_'+str(year[y])+'_'+str(t))
df = pd.DataFrame (topic_print).transpose()
df.columns = names
return df, dynamic_topic_list
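# Hedged usage sketch (the topic id and choice of W are assumptions):
# dynamic_df, dynamic_idx = track_dynamic(3, W_list[index], windows_topic_list)
# Each 'Year_<fy>_<t>' column of dynamic_df then lists a first-stage topic
# that maps onto second-stage topic 3 in that fiscal year.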
##########################################################################################################################################################
# Load the dataset
df = pd.read_pickle("/project/biocomplexity/sdad/projects_data/ncses/prd/Paper/FR_meta_and_final_tokens_23DEC21.pkl")
df.head()
##########################################################################################################################################################
# Preprocessing
# Create a list of tokens
df["list_final_tokens"] = df["final_tokens"].str.split(' ').tolist()
year = df['FY'].unique()
for y in year:
df_subset = df[df['FY']==y]
# save the pickle file
pickle.dump(df_subset, open('/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Data/FR_'+str(y)+'.pkl','wb')) # use the loop variable y, not the full year array
# Create the term-document matrix tfidf for each pkl file
stop_wds = ['research', 'study', 'project'] # use will be eliminated by max_df
for y in year:
# Load the sample for a given year
fw = open('/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Data/FR_'+str(y)+'.pkl', 'rb') # use the loop variable y, not the full year array
dfw = pickle.load(fw)
fw.close()
# Pre-processing the pkl file
(tf_idf, tfidf_vectorizer) = preprocess(dfw, stop_wds)
# Save the term-document matrix
joblib.dump((tf_idf,tfidf_vectorizer,dfw), '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Term_docs_'+str(y)+'.pkl' ) # dump the year subset dfw, not the full df
# build the dictionary id2word
docs = df["final_tokens"]
[dictionary, corpus] = createLDAvars(docs)
# Delete some information to release the memory
del df
del df_subset
del fw
###########################################################################################################################################################
# Run a dynamic topic model
# First stage
path = '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/Term_docs_'
n_topics = list(range(10,41,5))
(windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms) = first_stage(path, year, n_topics, dictionary, docs)
# Solve for the second stage
(M, all_terms, topics_list, W_list, H_list) = second_stage(windows_H, windows_terms, n_topics) # second_stage takes only these three arguments
# Compute the coherence for the second stage
coherence = []
for t in range(0,len(n_topics)):
window_term_rankings = topics_list[t]
cm = CoherenceModel(topics=window_term_rankings, dictionary=dictionary, texts=docs, coherence='c_v', topn=10, processes=30)
coherence.append(cm.get_coherence())
#########################################################################################################################################################
# Save the result
# save output for the first stage
joblib.dump((windows_topic, windows_coherence, windows_topic_list, windows_W, windows_H, windows_terms), '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/slurm_result/first_stage.pkl' )
# save output for the second stage
joblib.dump((M, all_terms, topics_list, W_list, H_list), '/project/biocomplexity/sdad/projects_data/ncses/prd/Dynamic_Topics_Modelling/nmf_fullabstract/slurm_result/second_stage.pkl' )
| [] |
2024-01-10 | cswamy/text-2-sql-cove | model~planner.py | import openai
import os
from scripts import utils
from dotenv import load_dotenv
def build_plan(question:str, sql:str):
"""
Build a plan for a given question and SQL query.
"""
# Setup OpenAI API
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
model = utils.get_model()
# Build prompt
messages = []
# Build system instruction
system_instruction = {
"role": "system",
"content": "You will be given a \"question\" in english and corresponding sql \"query\". Respond with a set of questions to check validity of \"query\". Generate as many \"questions\" as needed."
}
messages.append(system_instruction)
# Build one-shot example
one_shot_user = {
"role": "user",
"content": "\"question\": what were sales of cookware in california in dec 2019?\n\"query\": SELECT SUM(unitprice * orderquantity) AS cookware_revenues FROM salesorders so JOIN products pd ON so.productid = pd.productid JOIN storelocations sl ON so.storeid = sl.storeid WHERE pd.productname LIKE '%Cookware%' AND sl.statecode = 'CA' AND strftime('%Y-%m-%d', so.orderdate) BETWEEN '2019-12-01' AND '2019-12-31'"
}
messages.append(one_shot_user)
one_shot_assistant = {
"role": "assistant",
"content": "does salesorders have columns unitprice and orderquantity? does products have column productname? does storelocations have column statecode? can productid be used to join salesorders and products? can storeid be used to join salesorders and storelocations? what column should be used to filter for cookware data? what column should be used to filter for california data? what column should be used to filter for dec 2019 data?"
}
messages.append(one_shot_assistant)
# Build user question
user_question = {
"role": "user",
"content": f"\"question\": {question}\n\"query\": {sql}"
}
messages.append(user_question)
# Get response from OpenAI API
try:
response = utils.call_openai(
model=model['model_name'],
messages=messages,
temperature=model['temperature'],
max_tokens=model['max_tokens'],
top_p=model['top_p'],
frequency_penalty=model['frequency_penalty'],
presence_penalty=model['presence_penalty']
)
except:
print('[ERROR] OpenAI API call failed for build plan.')
return None
# Return response
return {
'plan': response['choices'][0]['message']['content'],
'input_tokens': response['usage']['prompt_tokens'],
'output_tokens': response['usage']['completion_tokens']
}
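# Hedged usage sketch (the question and query are assumptions):
# plan = build_plan("total 2019 sales by state", "SELECT state, SUM(total) ...")
# plan['plan'] holds the generated verification questions; the token counts
# can be accumulated for cost tracking.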
def execute_plan(target_schema:str, plan:str):
"""
Execute plan for a given target schema and plan.
"""
# Setup OpenAI API
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
model = utils.get_model()
# Build prompt
messages = []
# Build system instruction
system_instruction = {
"role": "system",
"content": "For the database schema, answer the questions so responses can be used to build sql queries. Be concise." + target_schema
}
messages.append(system_instruction)
# Build user question
user_question = {
"role": "user",
"content": plan
}
messages.append(user_question)
# Get response from OpenAI API
try:
response = utils.call_openai(
model=model['model_name'],
messages=messages,
temperature=model['temperature'],
max_tokens=model['max_tokens'],
top_p=model['top_p'],
frequency_penalty=model['frequency_penalty'],
presence_penalty=model['presence_penalty']
)
except:
print('[ERROR] OpenAI API call failed for execute plan.')
return None
# Parse response
answers = response['choices'][0]['message']['content'].replace("\n", " ")
qa = plan + " " + answers
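# qa pairs the verification questions with the model's answers in one string,
# which the assembler can pass back to the LLM as grounded context.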
# Return response
return {
'qa': qa,
'input_tokens': response['usage']['prompt_tokens'],
'output_tokens': response['usage']['completion_tokens']
} | [
"2019-12-31",
"\"question\": PLACEHOLDER\n\"query\": PLACEHOLDER",
"does salesorders have columns unitprice and orderquantity? does products have column productname? does storelocations have column statecode? can productid be used to join salesorders and products? can storeid be used to join salesorders and storelocations? what column should be used to filter for cookware data? what column should be used to filter for california data? what column should be used to filter for dec 2019 data?",
"%Cookware%",
"2019-12-01",
"For the database schema, answer the questions so responses can be used to build sql queries. Be concise.PLACEHOLDER",
"You will be given a \"question\" in english and corresponding sql \"query\". Respond with a set of questions to check validity of \"query\". Generate as many \"questions\" as needed."
] |
2024-01-10 | cswamy/text-2-sql-cove | model~baseline.py | import openai
import os
from scripts import utils
from dotenv import load_dotenv
def get_baseline_response(
question:str,
db_id:str,
top_matches: list,
schemas:dict):
"""
    Get a baseline SQL query from the LLM, few-shot prompted with the top matching examples.
"""
# Setup OpenAI API
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
model = utils.get_model()
# Build prompt
messages = []
# Build system instruction
db_type = 'sqlite'
target_schema = utils.generate_schema_code(db_id, schemas)
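    # Assumption (not verified against utils): generate_schema_code is taken to return the
    # schema as DDL-style text, e.g. "CREATE TABLE salesorders (orderid INTEGER, ...)",
    # since it is concatenated directly into the system prompt below.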
system_instruction = {
"role": "system",
"content": f'Given a schema and a question in English, generate read only sql code for {db_type}. Respond with sql code only and do not include any explanation of code in response. If you are unsure, respond with "Error".\n\nImportant: Always use joins or set operations over nested queries. Never use left joins in queries.\n\n' + target_schema
}
messages.append(system_instruction)
# Build few shot examples
for top_match in top_matches:
few_shot_user = {
"role": "user",
"content": top_match['question']
}
messages.append(few_shot_user)
few_shot_assistant = {
"role": "assistant",
"content": top_match['query']
}
messages.append(few_shot_assistant)
# Build user question
user_question = {
"role": "user",
"content": question
}
messages.append(user_question)
# Get response from OpenAI API
try:
response = utils.call_openai(
model=model['model_name'],
messages=messages,
temperature=model['temperature'],
max_tokens=model['max_tokens'],
top_p=model['top_p'],
frequency_penalty=model['frequency_penalty'],
presence_penalty=model['presence_penalty']
)
    except Exception as e:
        print(f'[ERROR] OpenAI API call failed for baseline response: {e}')
return None
# Return response
return {
'sql': response['choices'][0]['message']['content'],
'target_schema': target_schema,
'input_tokens': response['usage']['prompt_tokens'],
'output_tokens': response['usage']['completion_tokens'],
} | [
"Given a schema and a question in English, generate read only sql code for sqlite. Respond with sql code only and do not include any explanation of code in response. If you are unsure, respond with \"Error\".\n\nImportant: Always use joins or set operations over nested queries. Never use left joins in queries.\n\nPLACEHOLDER",
"question"
] |
2024-01-10 | cswamy/text-2-sql-cove | model~assembler.py | import openai
import os
from scripts import utils
from dotenv import load_dotenv
def assemble_output(question:str, target_schema:str, baseline_sql:str, qa_str:str):
"""
    Assemble the final SQL query from the LLM, given the question, schema, baseline SQL, and verification Q&A.
"""
# Setup OpenAI API
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
model = utils.get_model()
# Build prompt
messages = []
# Build system instruction
system_instruction = {
"role": "system",
"content": "You will be given a \"User question\", \"Schema\", a \"Baseline sql query\", and \"Knowledge\" about the schema. Use all this information to respond with an sql query for sqlite that accurately answers the \"User question\". \nImportant: Your response should include the sql query only. No other text should be included in your response."
}
messages.append(system_instruction)
# Build user question
user_question = {
"role": "user",
"content": "User question:\n" + question + "\n\nSchema:\n" + target_schema + f"\n\nBaseline sql query: {baseline_sql}\n\n" + "Knowledge:\n" + qa_str
}
messages.append(user_question)
# Get response from OpenAI API
try:
response = utils.call_openai(
model=model['model_name'],
messages=messages,
temperature=model['temperature'],
max_tokens=model['max_tokens'],
top_p=model['top_p'],
frequency_penalty=model['frequency_penalty'],
presence_penalty=model['presence_penalty']
)
    except Exception as e:
        print(f'[ERROR] OpenAI API call failed for assemble output: {e}')
return None
# Clean up final sql string
response_str = response['choices'][0]['message']['content']
start_idx = response_str.lower().find('select')
final_sql = response_str[start_idx:].replace('\n', ' ').replace('\t', ' ').replace(' ', ' ').strip()
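    # Worked example (illustrative): for a response of "```sql\nSELECT *\nFROM t```",
    # find('select') lands on the SELECT keyword, skipping the opening code fence, and the
    # replace() calls collapse newlines/tabs into single spaces -- note that a trailing
    # "```" would survive this cleanup.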
# Return response
return {
'final_sql': final_sql,
'input_tokens': response['usage']['prompt_tokens'],
'output_tokens': response['usage']['completion_tokens']
} | [
"You will be given a \"User question\", \"Schema\", a \"Baseline sql query\", and \"Knowledge\" about the schema. Use all this information to respond with an sql query for sqlite that accurately answers the \"User question\". \nImportant: Your response should include the sql query only. No other text should be included in your response.",
"User question:\nPLACEHOLDER\n\nSchema:\nPLACEHOLDER\n\nBaseline sql query: PLACEHOLDER\n\nKnowledge:\nPLACEHOLDER"
] |
2024-01-10 | tjpapenfuss/thesis.AI | entrov_gpt_app~gpt_app.py | from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
import sys
import os
import time
import json
import mongo_db_connector
import config
os.environ["OPENAI_API_KEY"] = config.api_key
def construct_index(directory_path):
max_input_size = 4096
num_outputs = 512
max_chunk_overlap = 20
chunk_size_limit = 600
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
documents = SimpleDirectoryReader(directory_path).load_data()
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index.save_to_disk('index.json')
return index
def chatbot(input_text):
index = GPTSimpleVectorIndex.load_from_disk('index.json')
response = index.query(input_text, response_mode="compact")
return response.response
iface = gr.Interface(fn=chatbot,
inputs=gr.components.Textbox(lines=7, label="Enter your text"),
outputs="text",
title="Custom-trained AI Chatbot")
start_time = time.time()
index = construct_index("files_to_index")
end_time = time.time()
execution_time = end_time - start_time
print("The index took {} seconds to create.".format(execution_time))
iface.launch(share=True) | [] |
2024-01-10 | tjpapenfuss/thesis.AI | langchain~gpt_app.py | from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
import sys
import os
import config
os.environ["OPENAI_API_KEY"] = config.api_key
def construct_index(directory_path):
max_input_size = 4096
num_outputs = 512
max_chunk_overlap = 20
chunk_size_limit = 600
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
documents = SimpleDirectoryReader(directory_path).load_data()
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index.save_to_disk('index.json')
return index
def chatbot(input_text):
index = GPTSimpleVectorIndex.load_from_disk('index.json')
response = index.query(input_text, response_mode="compact")
return response.response
iface = gr.Interface(fn=chatbot,
inputs=gr.components.Textbox(lines=7, label="Enter your text"),
outputs="text",
title="Custom-trained AI Chatbot")
index = construct_index("docs")
iface.launch(share=True) | [] |
2024-01-10 | tjpapenfuss/thesis.AI | sample_digital_ocean_workflow~build_index_and_store.py | # Internal files
import mongo_db_connector as mongo
import spaces_connector as spaces
import config
# External files
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, Document
from langchain.chat_models import ChatOpenAI
# import gradio as gr
import os
import time
from datetime import datetime
# -----------------------------------------------------------------------------------------------------------------------------
# Function to build the index for the OpenAI chatbot.
# Prereqs:
# - config.py contains the correct OpenAI API key; config.api_key
# - config.py is configured with the connection string to the mongo DB; variable name -> mongo_string
# - config.py has the correct mongo DB database to search; variable name -> MONGO_DATABASE
# - config.py has Spaces configurations OBJECT_STORAGE_KEY, OBJECT_STORAGE_SECRET, OBJECT_STORAGE_REGION, and OBJECT_STORAGE_BUCKET
# -----------------------------------------------------------------------------------------------------------------------------
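# Illustrative config.py sketch matching the prerequisites above (variable names follow the
# comments; every value below is a placeholder, not a real credential):
#
#   api_key = "sk-..."                                # OpenAI API key
#   mongo_string = "mongodb+srv://user:pass@host/"    # MongoDB connection string
#   MONGO_DATABASE = "scraped"                        # database to search
#   OBJECT_STORAGE_KEY = "DO00..."                    # Spaces access key
#   OBJECT_STORAGE_SECRET = "..."                     # Spaces secret
#   OBJECT_STORAGE_REGION = "nyc3"                    # Spaces region
#   OBJECT_STORAGE_BUCKET = "my-bucket"               # Spaces bucket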
os.environ["OPENAI_API_KEY"] = config.api_key
SPACES_JSON_FILE_NAME = "initial_index1.json"
def build_index(documents):
max_input_size = 4096
num_outputs = 512
max_chunk_overlap = 20
chunk_size_limit = 600
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index.save_to_disk('./index.json')
return index
org_list = ["566f6980-2166-4718-ba88-77610e998cbd"]
json_list = []
for item in mongo.get_refined(org_list):
# Some dictionaries do not have key word counts. If they do not, do not append them to the string
if "keywords" in item:
new_string = str(item["page_url"]) + " " + str(item["summary"]) + str(item["keywords"]["keyword_counts"])
else:
new_string = str(item["page_url"]) + " " + str(item["summary"])
json_list.append(new_string)
documents = [Document(t) for t in json_list]
start_time = time.time()
index = build_index(documents=documents)
end_time = time.time()
completion_time = end_time-start_time
print(f"index build time: ", completion_time)
#Configure s3 connection
s3config = {
"region_name": config.OBJECT_STORAGE_REGION,
"endpoint_url": "https://{}.digitaloceanspaces.com".format(config.OBJECT_STORAGE_REGION),
"aws_access_key_id": config.OBJECT_STORAGE_KEY,
"aws_secret_access_key": config.OBJECT_STORAGE_SECRET,
"bucket_name": config.OBJECT_STORAGE_BUCKET}
today = str(datetime.today())
# Set up the metadata to attach to the spaces storage
metadata = {'Orgids': ','.join(org_list), 'Ingestion_Date': today}
# Upload the indexed file to spaces for storage.
spaces.upload_file_spaces(s3config["bucket_name"],
'./index.json', SPACES_JSON_FILE_NAME,
s3config["endpoint_url"], s3config["aws_access_key_id"],
s3config["aws_secret_access_key"], metadata = metadata)
| [] |
2024-01-10 | tjpapenfuss/thesis.AI | entrov_gpt_app~workflow_manager.py |
import web_scrape
import os.path
import config
import doc_summarization
from datetime import date
import json
import time
import openai
import traceback
def error_logger(e, url):
with open("error_log_summarization.txt", "a") as file:
        file.write(f"Error while summarizing: {url}\n")
file.write("Here is the error:\n")
file.write(traceback.format_exc())
file.write("\n")
def write_json_to_file(json_data, filename, directory):
# Create the directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
# Construct the full file path
filepath = os.path.join(directory, filename)
# Write the JSON data to a .json file
with open(filepath, 'w') as file:
json.dump(json_data, file, indent=4)
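# Assumed input format (illustrative): websites.txt, read below, lists one URL per line, e.g.
#   https://example.com/partners
#   https://example.com/about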
with open ('websites.txt', 'rt') as myfile: # Open websites.txt for reading
for myline in myfile: # For each line, read to a string,
start_time = time.time()
url = myline.strip() # Each line is a new URL to open and scrape
json_data = {} # Initialize the JSON file.
json_data["URL"] = url
# Step 1: Webpage Scraping
page_text = web_scrape.extract_text_from_url(url)
# Step 2: Obtain a unique ID for the webpage URL
webpage_guid = doc_summarization.encode_to_guid(url)
print(f"webpage GUID: {webpage_guid} and website URL: {url}")
out_file_name = webpage_guid + ".txt"
if page_text:
# Save the extracted website text to extracted_websites.
save_path = 'extracted_websites/'
completeName = os.path.join(save_path, out_file_name)
file1 = open(completeName, "w")
file1.write(page_text)
file1.close()
# Create a summary for each webpage and write to JSON
try:
directory = "files_to_index/"
json_data["Time stamp"] = str(date.today())
json_data["Summary"] = doc_summarization.summarize_doc(document=page_text)
json_filename = webpage_guid + ".json"
write_json_to_file(json_data=json_data, filename=json_filename, directory=directory)
except openai.error.InvalidRequestError as e:
error_logger(e, url=url)
except Exception as e:
error_logger(e, url=url)
end_time = time.time()
execution_time = end_time - start_time
print("The above website took {} seconds to create.".format(execution_time)) | [] |
2024-01-10 | tjpapenfuss/thesis.AI | sample_digital_ocean_workflow~gpt_app.py | from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, Document
from langchain.chat_models import ChatOpenAI
import gradio as gr
import sys
import os
import time
import mongo_db_connector
import json
import config
os.environ["OPENAI_API_KEY"] = config.api_key
def construct_index(documents):
max_input_size = 4096
num_outputs = 512
max_chunk_overlap = 20
chunk_size_limit = 600
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index.save_to_disk('index.json')
return index
def chatbot(input_text):
index = GPTSimpleVectorIndex.load_from_disk('index.json')
response = index.query(input_text, response_mode="compact")
return response.response
iface = gr.Interface(fn=chatbot,
inputs=gr.components.Textbox(lines=7, label="Enter your text"),
outputs="text",
title="Custom-trained AI Chatbot")
json_list = []
for item in mongo_db_connector.get_mongodb_contents(collection = "orgid not found"):
item = json.loads(item)
new_string = str(item["URL"]) + str(item["Summary"]) + str(item["keyword_counts"])
json_list.append(new_string)
documents = [Document(t) for t in json_list]
start_time = time.time()
index = construct_index(documents=documents)
end_time = time.time()
execution_time = end_time - start_time
print("The index took {} seconds to create.".format(execution_time))
iface.launch(share=True) | [] |
2024-01-10 | tjpapenfuss/thesis.AI | sample_digital_ocean_workflow~doc_summarization.py | from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain import PromptTemplate
import config
import uuid
def encode_to_guid(input_string):
# Generate a UUID version 3 using the input string as the namespace
namespace_uuid = uuid.uuid3(uuid.NAMESPACE_DNS, input_string)
# Convert the UUID to a string representation
guid_string = str(namespace_uuid)
return guid_string
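# Example (illustrative): uuid3 hashing is deterministic, so the same input string always
# yields the same GUID across runs -- e.g. encode_to_guid("https://example.com") is a fixed
# value, which makes it safe to use as a stable file/record id for a page URL.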
def read_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def summarize_doc(document):
openai_api_key = config.api_key
llm=ChatOpenAI(openai_api_key=openai_api_key, model_name="gpt-3.5-turbo")
text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1000, chunk_overlap=0, separators=[" ", ",", "\n"])
#text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
texts = text_splitter.create_documents([document])
map_prompt = """
You will be given a website.
Your goal is to give a summary of this website so that a reader will have a full understanding.
Your response should be at least three paragraphs and fully encompass what was said in the passage.
```{text}```
FULL SUMMARY:
"""
map_prompt_template = PromptTemplate(template=map_prompt, input_variables=["text"])
map_chain = load_summarize_chain(llm=llm,
chain_type="stuff",
prompt=map_prompt_template)
return map_chain.run(texts)
| [
"\n You will be given a website. \n Your goal is to give a summary of this website so that a reader will have a full understanding.\n Your response should be at least three paragraphs and fully encompass what was said in the passage.\n\n ```{text}```\n FULL SUMMARY:\n "
] |
2024-01-10 | tjpapenfuss/thesis.AI | sample_digital_ocean_workflow~workflow_manager.py | # External packages
import json
from datetime import datetime
import os
import openai
import traceback
import time
#Internal packages
from config import OBJECT_STORAGE_REGION, OBJECT_STORAGE_BUCKET
import spaces_upload
import database
import formata
import web_scrape
import mongo_db_connector as mongo
import key_word_matcher as keyword
import doc_summarization
keywords_master = database.getkeywords()
#orgs_master = database.getorgs()
def error_logger(e, url):
with open("error_log_summarization.txt", "a") as file:
        file.write(f"Error while summarizing: {url}\n")
file.write("Here is the error:\n")
file.write(traceback.format_exc())
file.write("\n")
def write_json_to_file(json_data, filename, directory):
# Create the directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
# Construct the full file path
filepath = os.path.join(directory, filename)
# Write the JSON data to a .json file
with open(filepath, 'w') as file:
json.dump(json_data, file, indent=4)
def refine(orgid,url,page_text):
#step 1: create doc summary
try:
summary = doc_summarization.summarize_doc(document=page_text)
except openai.error.InvalidRequestError as e:
summary = "Failed to create the summary file for this website."
error_logger(e, url=url)
except Exception as e:
summary = "Failed to create the summary file for this website."
error_logger(e, url=url)
#step 2: get keywords
try:
keywords = keyword.count_keywords(text=page_text,keywords=keywords_master)
    except Exception:
keywords = 'Failed to detect keywords'
#step 3: get org references
#try:
# reference_orgs = keyword.count_keywords(text=summary,keywords=orgs_master)
#except:
# reference_orgs = 'Failed to detect keywords'
finally:
#step 4: format updates to push
updates = {
'summary':summary,
'keywords':keywords,
'refined':True,
'lastupdate':datetime.utcnow()
}
#step 3: push updates to mongodb source
try:
mongo.updateitem(database='scraped',collection=orgid,item_field='page_url',item_value=url,update_file=updates)
return(True)
        except Exception:
return(False)
def run(domains: list = None):
#step 1: get orgid from input domain
orgids = [{'domain':item,'orgid':database.getorgid(item)} for item in domains]
#step 2: get pages from domain that need processing
need_refining = [mongo.get_pages_to_refine(database='scraped',collection=item['orgid']) for item in orgids]
#step 3: summarize pages that have page data but processed set to false
index = 0
for item in need_refining:
for record in item:
print(record['page_url'])
refine(orgid=orgids[index]['orgid'],url=record['page_url'],page_text=record['page_text'])
index = index + 1
#step 4: Add pageid, detect keywords
#example
#print(run(domains=['aws.amazon.com']))
"""
with open ('websites.txt', 'rt') as myfile: # Open websites.txt for reading
for myline in myfile: # For each line, read to a string,
url = myline.strip() # Each line is a new URL to open and scrape
start_time = time.time() # Start the timer to capture how long entire process takes.
# Step 1: Webpage Scraping
page_text = web_scrape.extract_text_from_url(url)
url_cleaned = formata.clean_page(url)
page_data = database.getpagedetails(url_cleaned)
keywords = database.getkeywords()
if page_data is None:
page_data = {'pid':'NOT_FOUND/'+url_cleaned.replace("/","_"),'did':'domainid not found','orgid':'orgid not found'}
if page_text:
# Step 2: Data Transformation
transformed_data = spaces_upload.transform_data(page_text)
# Step 3: Storing Data in DigitalOcean Spaces
object_name = str(page_data['pid'])
orgid = str(page_data['orgid'])
domainid = str(page_data['did'])
today = str(date.today())
# Set up the metadata to attach to the spaces storage
metadata = {'URL': url, 'Ingestion_Date': today,'URL_cleaned':url_cleaned,'orgid':orgid,'domainid':domainid}
spaces_upload.upload_to_spaces(bucket_name, object_name, transformed_data,
s3config["endpoint_url"], s3config["aws_access_key_id"],
s3config["aws_secret_access_key"], metadata = metadata)
# Step 4: Get the keywords in the Website. Add in pid, orgid, and did.
keyword_JSON = key_word_matcher.count_keywords(page_text, keywords)
keyword_JSON['URL']=url #Adding the URL to the JSON output
keyword_JSON['PID']=object_name #Adding the PID to the output
keyword_JSON['orgid']=orgid #Adding the Organization ID to the output
keyword_JSON['did']=domainid #Adding the Domain ID to the output
# Try to create the summary for a given document. If this fails, document it
try:
keyword_JSON["Summary"] = doc_summarization.summarize_doc(document=page_text)
except openai.error.InvalidRequestError as e:
keyword_JSON["Summary"] = "Failed to create the summary file for this website."
error_logger(e, url=url)
except Exception as e:
keyword_JSON["Summary"] = "Failed to create the summary file for this website."
error_logger(e, url=url)
#print(today)
json_output = json.dumps(keyword_JSON, indent=4)
mongo.send_json_to_mongodb(json_data=json_output,orgid=orgid)
# Create a summary for each webpage and write to JSON
end_time = time.time()
execution_time = end_time - start_time
print(f"The website {url} took {execution_time} seconds to create.")
ARCHIVED
s3config = {
"region_name": OBJECT_STORAGE_REGION,
"endpoint_url": "https://{}.digitaloceanspaces.com".format(OBJECT_STORAGE_REGION),
"aws_access_key_id": OBJECT_STORAGE_KEY,
"aws_secret_access_key": OBJECT_STORAGE_SECRET }
bucket_name = OBJECT_STORAGE_BUCKET
""" | [] |
2024-01-10 | tjpapenfuss/thesis.AI | entrov_gpt_app~deprecated~doc_summarization%20copy.py | from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
import config
from langchain import PromptTemplate
def read_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
openai_api_key = config.api_key
llm=ChatOpenAI(openai_api_key=openai_api_key, model_name="gpt-3.5-turbo")
text_splitter = RecursiveCharacterTextSplitter(chunk_size = 3000, chunk_overlap=0, separators=[" ", ",", "\n"])
document = read_file("areenablingpartnersuccess.txt")
# create_documents expects a list of texts; passing the raw string would split it per character
docs = text_splitter.create_documents([document])
llm3 = ChatOpenAI(temperature=0,
openai_api_key=openai_api_key,
max_tokens=1000,
model='gpt-3.5-turbo'
)
map_prompt = """
You will be given a section of a website. This section will be enclosed in triple backticks (```)
Your goal is to give a summary of this section so that a reader will have a full understanding of what happened.
Your response should be at least three paragraphs and fully encompass what was said in the passage.
```{text}```
FULL SUMMARY:
"""
map_prompt_template = PromptTemplate(template=map_prompt, input_variables=["text"])
map_chain = load_summarize_chain(llm=llm3,
chain_type="stuff",
prompt=map_prompt_template)
# Then go get your docs which the top vectors represented.
# NOTE: the clustering step that picked representative chunk indices is missing from this
# deprecated copy; as a placeholder assumption, select every chunk so the loop below runs.
selected_indices = list(range(len(docs)))
selected_docs = [docs[doc] for doc in selected_indices]
# Let's loop through our selected docs and get a good summary for each chunk. We'll store the summary in a list.
# Make an empty list to hold your summaries
summary_list = []
# Loop through a range of the lenght of your selected docs
for i, doc in enumerate(selected_docs):
# Go get a summary of the chunk
chunk_summary = map_chain.run([doc])
# Append that summary to your list
summary_list.append(chunk_summary)
print (f"Summary #{i} (chunk #{selected_indices[i]}) - Preview: {chunk_summary[:250]} \n")
llm3 = ChatOpenAI(temperature=0,
openai_api_key=openai_api_key,
max_tokens=3000,
model='gpt-3.5-turbo',
request_timeout=120
)
combine_prompt = """
You will be given a series of summaries from a book. The summaries will be enclosed in triple backticks (```)
Your goal is to give a verbose summary of what happened in the story.
The reader should be able to grasp what happened in the book.
```{text}```
VERBOSE SUMMARY:
"""
# Loaders
from langchain.schema import Document
# join the per-chunk summaries produced above into one document for the reduce step
summaries = Document(page_content="\n".join(summary_list))
combine_prompt_template = PromptTemplate(template=combine_prompt, input_variables=["text"])
reduce_chain = load_summarize_chain(llm=llm3,
chain_type="stuff",
prompt=combine_prompt_template,
# verbose=True # Set this to true if you want to see the inner workings
)
output = reduce_chain.run([summaries]) | [
"\nYou will be given a series of summaries from a book. The summaries will be enclosed in triple backticks (```)\nYour goal is to give a verbose summary of what happened in the story.\nThe reader should be able to grasp what happened in the book.\n\n```{text}```\nVERBOSE SUMMARY:\n",
"\nYou will be given a section of a website. This section will be enclosed in triple backticks (```)\nYour goal is to give a summary of this section so that a reader will have a full understanding of what happened.\nYour response should be at least three paragraphs and fully encompass what was said in the passage.\n\n```{text}```\nFULL SUMMARY:\n"
] |
2024-01-10 | tjpapenfuss/thesis.AI | langchain~chroma_testing.py | from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
import config
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain import LLMChain
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains.mapreduce import MapReduceChain
from langchain.prompts import PromptTemplate
openai_api_key = config.api_key
llm = OpenAI(openai_api_key=openai_api_key, model_name="gpt-3.5-turbo")
loader = PyPDFLoader("/Users/tannerpapenfuss/thesis.AI/langchain/Chase-Data-Driven.pdf")
#loader = PyPDFLoader("/Users/tannerpapenfuss/thesis.AI/langchain/Ford-Data-Driven.pdf")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
chain = load_summarize_chain(llm, chain_type="map_reduce")
#docs = [Document(page_content=t) for t in texts[4:7]]
print(chain.run(texts))
#print(texts[4:7]) | [] |
2024-01-10 | tjpapenfuss/thesis.AI | entrov_gpt_app~doc_summarization.py | from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain import PromptTemplate
import config
import uuid
def encode_to_guid(input_string):
# Generate a UUID version 3 using the input string as the namespace
namespace_uuid = uuid.uuid3(uuid.NAMESPACE_DNS, input_string)
# Convert the UUID to a string representation
guid_string = str(namespace_uuid)
return guid_string
def read_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def summarize_doc(document):
openai_api_key = config.api_key
llm=ChatOpenAI(openai_api_key=openai_api_key, model_name="gpt-3.5-turbo")
text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1000, chunk_overlap=0, separators=[" ", ",", "\n"])
#text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
texts = text_splitter.create_documents([document])
# print(len(texts))
# chain = load_summarize_chain(llm, chain_type="refine", return_intermediate_steps=True)
# #docs = [Document(page_content=t) for t in texts[4:7]]
# print(chain({"input_documents": texts}, return_only_outputs=True))
map_prompt = """
You will be given a website.
Your goal is to give a summary of this website so that a reader will have a full understanding.
Your response should be at least three paragraphs and fully encompass what was said in the passage.
```{text}```
FULL SUMMARY:
"""
map_prompt_template = PromptTemplate(template=map_prompt, input_variables=["text"])
map_chain = load_summarize_chain(llm=llm,
chain_type="stuff",
prompt=map_prompt_template)
return map_chain.run(texts)
# Example usage for above functions
# input_string = "example"
# encoded_guid = encode_to_guid(input_string)
# print("Encoded GUID:", encoded_guid)
# guid = encode_to_guid("areenablingpartnersuccess.txt")
# print("GUID IS: ", guid)
# doc = read_file("areenablingpartnersuccess.txt")
# print(summarize_doc(document=doc)) | [
"\n You will be given a website. \n Your goal is to give a summary of this website so that a reader will have a full understanding.\n Your response should be at least three paragraphs and fully encompass what was said in the passage.\n\n ```{text}```\n FULL SUMMARY:\n "
] |
2024-01-10 | tjpapenfuss/thesis.AI | langchain~testing_gpt4.py | import openai
import config
OPENAI_API_KEY = config.api_key
openai.api_key = OPENAI_API_KEY
msg = input("Enter your value: ")
messages=[{"role": "user", "content": msg}]
response = openai.ChatCompletion.create(
model="gpt-4",
max_tokens=500,
temperature=1.2,
messages = messages)
print(response.choices[0].message.content) | [] |
2024-01-10 | lin-tan/CURE | src~trainer~gpt_fconv_trainer.py | import json
import os
import sys
import time
import codecs
import random
import numpy as np
import torch
import torch.nn as nn
from transformers import OpenAIGPTLMHeadModel
GPT_FCONV_TRAINER_DIR = os.path.abspath(__file__)[: os.path.abspath(__file__).rindex('/') + 1]
sys.path.append(GPT_FCONV_TRAINER_DIR + '../models/')
sys.path.append(GPT_FCONV_TRAINER_DIR + '../dataloader/')
from gpt_fconv import GPTFConvModel
from dictionary import Dictionary
from gpt_fconv_data_loader import GPTFConvDataLoader
class GPTFConvTrainer():
def __init__(self, train_loader, valid_loader, dictionary, gpt_file):
gpt_loaded = torch.load(gpt_file)
config = gpt_loaded['config']
gpt_model = OpenAIGPTLMHeadModel(config).cuda()
gpt_model.load_state_dict(gpt_loaded['model'])
self.train_loader = train_loader
self.valid_loader = valid_loader
self.dictionary = dictionary
self.batch_size = 12
self.load_size = 1200
self.gpt_model = gpt_model
self.model = None
self.hyper_parameter = {}
self.hyper_parameter_set = {'{}'}
self.optimizer = None
self.current_train_step = 0
self.val_loss = {}
def shuffle_dataset(self):
indices = [i for i in range(len(self.train_loader.dataset))]
random.shuffle(indices)
return indices
def train_step(self, samples):
self.model.train()
self.current_train_step += 1
self.optimizer.zero_grad()
batch = self.train_loader.dataset.collater(samples)
if torch.cuda.is_available():
outputs = self.model(
batch['net_input']['src_tokens'].cuda(),
batch['net_input']['src_with_prev_context'].cuda(),
prev_tokens_index=batch['target_index'].cuda(),
prev_tokens_with_context=batch['target_with_prev_context'].cuda(),
labels=batch['target'].cuda(),
)
logits, avg_attn_scores, apr_loss, lm_loss = outputs[:4]
loss = apr_loss + 0.3 * lm_loss
loss.mean().backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 0.5, norm_type=2)
self.optimizer.step()
return loss.mean().item(), apr_loss.mean().item(), lm_loss.mean().item()
def valid_step(self, samples):
self.model.eval()
batch = self.valid_loader.dataset.collater(samples)
outputs = self.model(
batch['net_input']['src_tokens'].cuda(),
batch['net_input']['src_with_prev_context'].cuda(),
prev_tokens_index=batch['target_index'].cuda(),
prev_tokens_with_context=batch['target_with_prev_context'].cuda(),
labels=batch['target'].cuda(),
)
logits, avg_attn_scores, apr_loss, lm_loss = outputs[:4]
loss = apr_loss + 0.3 * lm_loss
return loss.mean().item(), apr_loss.mean().item(), lm_loss.mean().item(), logits
def validate_and_save(self, model_id, save_dir):
oom = 0
with torch.no_grad():
val_loss, val_fconv_loss, val_lm_loss = [], [], []
for i in range(0, self.valid_loader.total_size, self.batch_size):
samples = [self.valid_loader.dataset[j]
for j in range(i, min(len(self.valid_loader.dataset), i + self.batch_size))]
try:
loss, fconv_loss, lm_loss, logits = self.valid_step(samples)
val_loss.append(float(loss))
val_fconv_loss.append(float(fconv_loss))
val_lm_loss.append(float(lm_loss))
except Exception as e:
oom += 1
info = 'val loss:{}, val apr_loss:{}, val lm_loss:{}, val ppl:{}, oom:{}'.format(
round(float(np.mean(val_loss)), 6),
round(float(np.mean(val_fconv_loss)), 6),
round(float(np.mean(val_lm_loss)), 6),
round(float(np.exp(np.mean(val_loss))), 6),
oom
)
print(info)
val_loss = np.mean(val_fconv_loss)
checkpoint = {
'model': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'current_step': self.current_train_step,
'config': self.model.module.config(),
'val_loss': val_loss,
}
torch.save(checkpoint, save_dir + 'gpt_fconv_' + str(model_id) + '.pt')
self.val_loss[model_id] = {
'val_loss': val_loss,
'hyper-parameter': str(self.hyper_parameter),
}
return val_loss
def train(self, model_id, epochs, hyper_parameter, save_dir):
self.hyper_parameter = hyper_parameter
self.model = GPTFConvModel(
self.dictionary, embed_dim=384, max_positions=1024,
encoder_convolutions=self.hyper_parameter['encoder_convolutions'],
decoder_convolutions=self.hyper_parameter['decoder_convolutions'],
dropout=self.hyper_parameter['dropout'], embed_model=self.gpt_model,
).cuda()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=6.25e-5)
self.model = nn.DataParallel(self.model, device_ids=device_ids)
self.valid_loader.load_data(0, self.valid_loader.total_size)
for epoch in range(epochs):
start_time = time.time()
for i in range(0, self.train_loader.total_size, self.load_size):
oom = 0
self.train_loader.load_data(i, i + self.load_size)
indices = self.shuffle_dataset()
train_loss, train_apr_loss, train_lm_loss = [], [], []
start, end = 0, 0
samples = []
max_src, max_ctx, max_tgt = 0, 0, 0
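            # Dynamic batching (descriptive note): accumulate samples until the batch is
            # full or adding the next sample would push a source/context/target length
            # combination past the 1024-token context window of the GPT embedding model.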
while end < len(self.train_loader.dataset):
sample = self.train_loader.dataset[indices[end]]
if max_ctx + len(sample['target']) >= 1023 \
or max_tgt + len(sample['prev_context']) >= 1023 \
or max_ctx + len(sample['source']) >= 1023 \
or max_src + len(sample['prev_context']) >= 1023 \
or end - start == self.batch_size:
try:
loss, apr_loss, lm_loss = self.train_step(samples)
train_loss.append(loss)
train_apr_loss.append(apr_loss)
train_lm_loss.append(lm_loss)
except Exception as e:
oom += 1
start = end
max_src, max_ctx, max_tgt = 0, 0, 0
samples = []
continue
max_src = max(max_src, len(sample['source']))
max_ctx = max(max_ctx, len(sample['prev_context']))
max_tgt = max(max_tgt, len(sample['target']))
end += 1
samples.append(sample)
if len(samples) > 0:
try:
loss, apr_loss, lm_loss = self.train_step(samples)
train_loss.append(loss)
train_apr_loss.append(apr_loss)
train_lm_loss.append(lm_loss)
except Exception as e:
oom += 1
if (i // self.load_size) % 10 == 0:
info = 'epoch:{}, load data:{}, lr:{}, loss:{}, apr_loss:{}, lm_loss:{}, time:{}s, oom:{}'.\
format(epoch + 1, i + self.load_size,
round(self.optimizer.param_groups[0]['lr'], 10),
round(float(np.mean(train_loss)), 6),
round(float(np.mean(train_apr_loss)), 6),
round(float(np.mean(train_lm_loss)), 6),
int(time.time() - start_time), oom
)
start_time = time.time()
print(str(model_id) + ' ' + info)
if (i // self.load_size) % 100 == 0:
self.validate_and_save(model_id, save_dir)
self.validate_and_save(model_id, save_dir)
if __name__ == '__main__':
device_ids = [0, 1, 2, 3]
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
vocab_file = GPT_FCONV_TRAINER_DIR + '../../data/vocabulary/vocabulary.txt'
train_file = GPT_FCONV_TRAINER_DIR + '../../data/data/training_bpe.txt'
valid_file = GPT_FCONV_TRAINER_DIR + '../../data/data/validation_bpe.txt'
gpt_file = GPT_FCONV_TRAINER_DIR + '../../data/models/code_gpt.pt'
dictionary = Dictionary(vocab_file, min_cnt=0)
print('dictionary initialized, vocab size:{}'.format(len(dictionary)))
train_loader = GPTFConvDataLoader(train_file, dictionary)
valid_loader = GPTFConvDataLoader(valid_file, dictionary)
print('data loader initialized, train size:{}, validate size:{}'.
format(train_loader.total_size, valid_loader.total_size))
trainer = GPTFConvTrainer(train_loader, valid_loader, dictionary, gpt_file)
hyper_parameter = {
'encoder_convolutions': ((192, 5),) * 1,
'decoder_convolutions': ((192, 5),) * 1,
'dropout': 0.1,
}
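    # Reading the convolution tuples above (illustrative note): ((192, 5),) * 1 denotes one
    # convolutional layer with 192 output channels and kernel width 5; multiplying by N
    # would stack N identical layers.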
trainer.train(1, 2, hyper_parameter, save_dir=GPT_FCONV_TRAINER_DIR + '../../data/models/')
| [] |
2024-01-10 | lin-tan/CURE | src~trainer~gpt_conut_trainer.py | import os
import sys
import json
import time
import codecs
import random
import numpy as np
import torch
import torch.nn as nn
from transformers import OpenAIGPTLMHeadModel
GPT_CONUT_TRAINER_DIR = os.path.abspath(__file__)[: os.path.abspath(__file__).rindex('/') + 1]
sys.path.append(GPT_CONUT_TRAINER_DIR + '../models/')
sys.path.append(GPT_CONUT_TRAINER_DIR + '../dataloader/')
from gpt_conut import GPTCoNuTModel
from dictionary import Dictionary
from gpt_conut_data_loader import GPTCoNuTDataLoader
class GPTCoNuTTrainer():
def __init__(self, train_loader, valid_loader, dictionary, gpt_file):
gpt_loaded = torch.load(gpt_file)
config = gpt_loaded['config']
gpt_model = OpenAIGPTLMHeadModel(config).cuda()
gpt_model.load_state_dict(gpt_loaded['model'])
self.train_loader = train_loader
self.valid_loader = valid_loader
self.dictionary = dictionary
self.batch_size = 12
self.load_size = 1200 # load 1200 samples from training data every time
self.gpt_model = gpt_model
self.model = None
self.hyper_parameter = {}
self.optimizer = None
self.current_train_step = 0
self.val_loss = {}
def shuffle_dataset(self):
indices = [i for i in range(len(self.train_loader.dataset))]
random.shuffle(indices)
return indices
def train_step(self, samples):
self.model.train()
self.current_train_step += 1
self.optimizer.zero_grad()
batch = self.train_loader.dataset.collater(samples)
if torch.cuda.is_available():
outputs = self.model(
batch['net_input']['src_tokens'].cuda(),
batch['net_input']['src_with_prev_context'].cuda(),
batch['net_input']['ctx_tokens'].cuda(),
prev_tokens_index=batch['target_index'].cuda(),
prev_tokens_with_context=batch['target_with_prev_context'].cuda(),
labels=batch['target'].cuda(),
)
else:
outputs = self.model(
batch['net_input']['src_tokens'],
batch['net_input']['src_with_prev_context'],
batch['net_input']['ctx_tokens'],
prev_tokens_index=batch['target_index'],
prev_tokens_with_context=batch['target_with_prev_context'],
labels=batch['target'],
)
logits, avg_attn_scores, apr_loss, lm_loss = outputs[:4]
loss = apr_loss + 0.3 * lm_loss
loss.mean().backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 0.5, norm_type=2)
self.optimizer.step()
return loss.mean().item(), apr_loss.mean().item(), lm_loss.mean().item()
def valid_step(self, samples):
self.model.eval()
batch = self.valid_loader.dataset.collater(samples)
outputs = self.model(
batch['net_input']['src_tokens'].cuda(),
batch['net_input']['src_with_prev_context'].cuda(),
batch['net_input']['ctx_tokens'].cuda(),
prev_tokens_index=batch['target_index'].cuda(),
prev_tokens_with_context=batch['target_with_prev_context'].cuda(),
labels=batch['target'].cuda(),
)
logits, avg_attn_scores, apr_loss, lm_loss = outputs[:4]
loss = apr_loss + 0.3 * lm_loss
return loss.mean().item(), apr_loss.mean().item(), lm_loss.mean().item(), logits
def validate_and_save(self, model_id, save_dir):
oom = 0
with torch.no_grad():
val_loss, val_fconv_loss, val_lm_loss = [], [], []
for i in range(0, self.valid_loader.total_size, self.batch_size):
samples = [self.valid_loader.dataset[j]
for j in range(i, min(len(self.valid_loader.dataset), i + self.batch_size))]
try:
loss, fconv_loss, lm_loss, logits = self.valid_step(samples)
val_loss.append(float(loss))
val_fconv_loss.append(float(fconv_loss))
val_lm_loss.append(float(lm_loss))
except Exception as e:
oom += 1
info = 'val loss:{}, val apr_loss:{}, val lm_loss:{}, val ppl:{}, oom:{}'.format(
round(float(np.mean(val_loss)), 6),
round(float(np.mean(val_fconv_loss)), 6),
round(float(np.mean(val_lm_loss)), 6),
round(float(np.exp(np.mean(val_loss))), 6),
oom
)
print(info)
val_loss = np.mean(val_fconv_loss)
checkpoint = {
'model': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'current_step': self.current_train_step,
'config': self.model.module.config(),
'val_loss': val_loss,
}
torch.save(checkpoint, save_dir + 'gpt_conut_' + str(model_id) + '.pt')
self.val_loss[model_id] = {
'val_loss': val_loss,
'hyper-parameter': str(self.hyper_parameter),
}
return val_loss
def train(self, model_id, epochs, hyper_parameter, save_dir):
self.hyper_parameter = hyper_parameter
self.model = GPTCoNuTModel(
self.dictionary, embed_dim=384, max_positions=1024,
src_encoder_convolutions=self.hyper_parameter['src_encoder_convolutions'],
ctx_encoder_convolutions=self.hyper_parameter['ctx_encoder_convolutions'],
decoder_convolutions=self.hyper_parameter['decoder_convolutions'],
dropout=self.hyper_parameter['dropout'], embed_model=self.gpt_model,
).cuda()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=6.25e-5)
self.model = nn.DataParallel(self.model, device_ids=device_ids)
self.valid_loader.load_data(0, self.valid_loader.total_size)
for epoch in range(epochs):
start_time = time.time()
for i in range(0, self.train_loader.total_size, self.load_size):
oom = 0
self.train_loader.load_data(i, i + self.load_size)
indices = self.shuffle_dataset()
train_loss, train_apr_loss, train_lm_loss = [], [], []
start, end = 0, 0
samples = []
max_src, max_ctx, max_tgt = 0, 0, 0
while end < len(self.train_loader.dataset):
sample = self.train_loader.dataset[indices[end]]
if max_ctx + len(sample['target']) >= 1023 \
or max_tgt + len(sample['prev_context']) >= 1023 \
or max_ctx + len(sample['source']) >= 1023 \
or max_src + len(sample['prev_context']) >= 1023 \
or end - start == self.batch_size:
try:
loss, apr_loss, lm_loss = self.train_step(samples)
train_loss.append(loss)
train_apr_loss.append(apr_loss)
train_lm_loss.append(lm_loss)
except Exception as e:
oom += 1
start = end
max_src, max_ctx, max_tgt = 0, 0, 0
samples = []
continue
max_src = max(max_src, len(sample['source']))
max_ctx = max(max_ctx, len(sample['prev_context']))
max_tgt = max(max_tgt, len(sample['target']))
end += 1
samples.append(sample)
if len(samples) > 0:
try:
loss, apr_loss, lm_loss = self.train_step(samples)
train_loss.append(loss)
train_apr_loss.append(apr_loss)
train_lm_loss.append(lm_loss)
except Exception as e:
oom += 1
if (i // self.load_size) % 10 == 0:
info = 'epoch:{}, load data:{}, lr:{}, loss:{}, apr_loss:{}, lm_loss:{}, time:{}s, oom:{}'.\
format(epoch + 1, i + self.load_size,
round(self.optimizer.param_groups[0]['lr'], 10),
round(float(np.mean(train_loss)), 6),
round(float(np.mean(train_apr_loss)), 6),
round(float(np.mean(train_lm_loss)), 6),
int(time.time() - start_time), oom
)
start_time = time.time()
print(str(model_id) + ' ' + info)
if (i // self.load_size) % 100 == 0:
self.validate_and_save(model_id, save_dir)
self.validate_and_save(model_id, save_dir)
if __name__ == '__main__':
device_ids = [0, 1, 2, 3]
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
vocab_file = GPT_CONUT_TRAINER_DIR + '../../data/vocabulary/vocabulary.txt'
train_file = GPT_CONUT_TRAINER_DIR + '../../data/data/training_bpe.txt'
valid_file = GPT_CONUT_TRAINER_DIR + '../../data/data/validation_bpe.txt'
gpt_file = GPT_CONUT_TRAINER_DIR + '../../data/models/code_gpt.pt'
dictionary = Dictionary(vocab_file, min_cnt=0)
print('dictionary initialized, vocab size:{}'.format(len(dictionary)))
train_loader = GPTCoNuTDataLoader(train_file, dictionary)
valid_loader = GPTCoNuTDataLoader(valid_file, dictionary)
print('data loader initialized, train size:{}, validate size:{}'.
format(train_loader.total_size, valid_loader.total_size))
trainer = GPTCoNuTTrainer(train_loader, valid_loader, dictionary, gpt_file)
hyper_parameter = {
'src_encoder_convolutions': ((192, 5),) * 1,
'ctx_encoder_convolutions': ((384, 5),) * 1,
'decoder_convolutions': ((192, 5),) * 1,
'dropout': 0.1,
}
model_id = 1
epochs = 5
trainer.train(model_id, epochs, hyper_parameter, save_dir=GPT_CONUT_TRAINER_DIR + '../../data/models/')
| [] |
2024-01-10 | alfredcs/BabyAGI_Bedrock | demo~run_babyapi.py | from baby_agi_with_agent import Optional, BabyAGI
from langchain import OpenAI
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
import faiss
# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
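# Note (illustrative): embedding_size = 1536 matches the output dimension of OpenAI's
# text-embedding-ada-002, the default model behind OpenAIEmbeddings here, so the FAISS
# index and the embedder agree on vector width.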
OBJECTIVE = "What happened to the First Republic Bank? Will the FED take the same action as it did on SVB's failure?"
llm = OpenAI(temperature=0)
# Logging of LLMChains
verbose = False
# If None, will keep on going forever
max_iterations: Optional[int] = 1
baby_agi = BabyAGI.from_llm(
llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
)
response = baby_agi({"objective": OBJECTIVE})
print(response)
| [] |
2024-01-10 | alfredcs/BabyAGI_Bedrock | demo~baby_agi_with_agent.py | #!/usr/bin/env python
# coding: utf-8
# # BabyAGI with Tools
#
# This notebook builds on top of [baby agi](baby_agi.ipynb), but shows how you can swap out the execution chain. The previous execution chain was just an LLM which made stuff up. By swapping it out with an agent that has access to tools, we can hopefully get truly reliable information.
# ## Install and Import Required Modules
import os
from collections import deque
from typing import Dict, List, Optional, Any
from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import BaseLLM
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain.chains.base import Chain
import faiss
# Connect to the Vector Store
# Depending on what vectorstore you use, this step may look different.
#get_ipython().run_line_magic('pip', 'install faiss-gpu > /dev/null')
#get_ipython().run_line_magic('pip', 'install google-search-results > /dev/null')
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore
# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
# ## Define the Chains
#
# BabyAGI relies on three LLM chains:
# - Task creation chain to select new tasks to add to the list
# - Task prioritization chain to re-prioritize tasks
# - Execution Chain to execute the tasks
#
class TaskCreationChain(LLMChain):
"""Chain to generates tasks."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_creation_template = (
"You are an task creation AI that uses the result of an execution agent"
" to create new tasks with the following objective: {objective},"
" The last completed task has the result: {result}."
" This result was based on this task description: {task_description}."
" These are incomplete tasks: {incomplete_tasks}."
" Based on the result, create new tasks to be completed"
" by the AI system that do not overlap with incomplete tasks."
" Return the tasks as an array."
)
prompt = PromptTemplate(
template=task_creation_template,
input_variables=[
"result",
"task_description",
"incomplete_tasks",
"objective",
],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class TaskPrioritizationChain(LLMChain):
"""Chain to prioritize tasks."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_prioritization_template = (
"You are an task prioritization AI tasked with cleaning the formatting of and reprioritizing"
" the following tasks: {task_names}."
" Consider the ultimate objective of your team: {objective}."
" Do not remove any tasks. Return the result as a numbered list, like:"
" #. First task"
" #. Second task"
" Start the task list with number {next_task_id}."
)
prompt = PromptTemplate(
template=task_prioritization_template,
input_variables=["task_names", "next_task_id", "objective"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from langchain import OpenAI, SerpAPIWrapper, LLMChain
todo_prompt = PromptTemplate.from_template(
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
)
todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
search = SerpAPIWrapper(serpapi_api_key=os.environ.get('serp_api_token'))
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="TODO",
func=todo_chain.run,
description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
),
]
prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
suffix = """Question: {task}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["objective", "task", "context", "agent_scratchpad"],
)
# ### Define the BabyAGI Controller
#
# BabyAGI composes the chains defined above in a (potentially-)infinite loop.
def get_next_task(
task_creation_chain: LLMChain,
result: Dict,
task_description: str,
task_list: List[str],
objective: str,
) -> List[Dict]:
"""Get the next task."""
incomplete_tasks = ", ".join(task_list)
response = task_creation_chain.run(
result=result,
task_description=task_description,
incomplete_tasks=incomplete_tasks,
objective=objective,
)
new_tasks = response.split("\n")
return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]
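# Illustrative example of the parsing above: an LLM response of
#   "1. Research First Republic filings\n2. Summarize the FED response"
# becomes [{'task_name': '1. Research First Republic filings'},
#          {'task_name': '2. Summarize the FED response'}];
# blank lines are filtered out, and the numbering stays inside task_name.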
def prioritize_tasks(
task_prioritization_chain: LLMChain,
this_task_id: int,
task_list: List[Dict],
objective: str,
) -> List[Dict]:
"""Prioritize tasks."""
task_names = [t["task_name"] for t in task_list]
next_task_id = int(this_task_id) + 1
response = task_prioritization_chain.run(
task_names=task_names, next_task_id=next_task_id, objective=objective
)
new_tasks = response.split("\n")
prioritized_task_list = []
for task_string in new_tasks:
if not task_string.strip():
continue
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
prioritized_task_list.append({"task_id": task_id, "task_name": task_name})
return prioritized_task_list
def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]:
"""Get the top k tasks based on the query."""
results = vectorstore.similarity_search_with_score(query, k=k)
if not results:
return []
sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
return [str(item.metadata["task"]) for item in sorted_results]
def execute_task(
vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5
) -> str:
"""Execute a task."""
context = _get_top_tasks(vectorstore, query=objective, k=k)
return execution_chain.run(objective=objective, context=context, task=task)
class BabyAGI(Chain, BaseModel):
"""Controller model for the BabyAGI agent."""
task_list: deque = Field(default_factory=deque)
task_creation_chain: TaskCreationChain = Field(...)
task_prioritization_chain: TaskPrioritizationChain = Field(...)
execution_chain: AgentExecutor = Field(...)
task_id_counter: int = Field(1)
vectorstore: VectorStore = Field(init=False)
max_iterations: Optional[int] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def add_task(self, task: Dict):
self.task_list.append(task)
def print_task_list(self):
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in self.task_list:
print(str(t["task_id"]) + ": " + t["task_name"])
def print_next_task(self, task: Dict):
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_id"]) + ": " + task["task_name"])
def print_task_result(self, result: str):
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
@property
def input_keys(self) -> List[str]:
return ["objective"]
@property
def output_keys(self) -> List[str]:
return []
def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Run the agent."""
objective = inputs["objective"]
first_task = inputs.get("first_task", "Make a todo list")
self.add_task({"task_id": 1, "task_name": first_task})
num_iters = 0
while True:
if self.task_list:
self.print_task_list()
# Step 1: Pull the first task
task = self.task_list.popleft()
self.print_next_task(task)
# Step 2: Execute the task
result = execute_task(
self.vectorstore, self.execution_chain, objective, task["task_name"]
)
this_task_id = int(task["task_id"])
self.print_task_result(result)
# Step 3: Store the result in Pinecone
result_id = f"result_{task['task_id']}"
self.vectorstore.add_texts(
texts=[result],
metadatas=[{"task": task["task_name"]}],
ids=[result_id],
)
# Step 4: Create new tasks and reprioritize task list
new_tasks = get_next_task(
self.task_creation_chain,
result,
task["task_name"],
[t["task_name"] for t in self.task_list],
objective,
)
for new_task in new_tasks:
self.task_id_counter += 1
new_task.update({"task_id": self.task_id_counter})
self.add_task(new_task)
self.task_list = deque(
prioritize_tasks(
self.task_prioritization_chain,
this_task_id,
list(self.task_list),
objective,
)
)
num_iters += 1
if self.max_iterations is not None and num_iters == self.max_iterations:
print(
"\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
)
break
return {}
@classmethod
def from_llm(
cls, llm: BaseLLM, vectorstore: VectorStore, verbose: bool = False, **kwargs
) -> "BabyAGI":
"""Initialize the BabyAGI Controller."""
task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
task_prioritization_chain = TaskPrioritizationChain.from_llm(
llm, verbose=verbose
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True
)
return cls(
task_creation_chain=task_creation_chain,
task_prioritization_chain=task_prioritization_chain,
execution_chain=agent_executor,
vectorstore=vectorstore,
**kwargs,
)
'''
# ### Run the BabyAGI
#
# Now it's time to create the BabyAGI controller and watch it try to accomplish your objective.
OBJECTIVE = "What happened to the First Republic Bank? Will the FED take the same action as it did on SVB's failure?"
llm = OpenAI(temperature=0)
# Logging of LLMChains
verbose = False
# If None, will keep on going forever
max_iterations: Optional[int] = 2
baby_agi = BabyAGI.from_llm(
llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
)
baby_agi({"objective": OBJECTIVE})
'''
| [
"task_description",
"You are an task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: {task_names}. Consider the ultimate objective of your team: {objective}. Do not remove any tasks. Return the result as a numbered list, like: #. First task #. Second task Start the task list with number {next_task_id}.",
"You are an task creation AI that uses the result of an execution agent to create new tasks with the following objective: {objective}, The last completed task has the result: {result}. This result was based on this task description: {task_description}. These are incomplete tasks: {incomplete_tasks}. Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array.",
"agent_scratchpad",
"task_names",
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}",
"context",
"next_task_id",
"incomplete_tasks"
] |
2024-01-10 | usepr/codeinterpreter-api | codeinterpreterapi~chains~rm_dl_link.py | from langchain.base_language import BaseLanguageModel
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import AIMessage, OutputParserException
from codeinterpreterapi.prompts import remove_dl_link_prompt
def remove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = llm.predict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
async def aremove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = await llm.apredict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
def test():
llm = ChatOpenAI(model="gpt-3.5-turbo-0613") # type: ignore
example = (
"I have created the plot to your dataset.\n\n"
"Link to the file [here](sandbox:/plot.png)."
)
print(remove_download_link(example, llm))
if __name__ == "__main__":
from dotenv import load_dotenv
load_dotenv()
test()
| [] |
2024-01-10 | UX-Decoder/LLaVA-Grounding | llava~eval~eval_gpt_review_visual.py | import argparse
import json
import os
import openai
import time
NUM_SECONDS_TO_SLEEP = 0.5
openai.api_type = "azure"
openai.api_base = "https://xdecoder.openai.azure.com/"
openai.api_version = "2023-03-15-preview"
os.environ['OPENAI_API_KEY']='f0f8184713a549ba945bbcc19a06e032'
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_eval(content: str, max_tokens: int):
while True:
try:
response = openai.ChatCompletion.create(
engine='gpt4a',
messages=[{
'role': 'system',
'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
}, {
'role': 'user',
'content': content,
}],
temperature=0.2, # TODO: figure out which temperature is best for evaluation
max_tokens=max_tokens,
)
break
except openai.error.RateLimitError:
pass
except Exception as e:
print(e)
time.sleep(NUM_SECONDS_TO_SLEEP)
return response['choices'][0]['message']['content']
def parse_score(review):
try:
score_pair = review.split('\n')[0]
score_pair = score_pair.replace(',', ' ')
sp = score_pair.split(' ')
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
print('error', review)
return [-1, -1]
except Exception as e:
print(e)
print('error', review)
return [-1, -1]
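# Illustrative sanity check (hypothetical review strings, not real GPT
# output): the reviewer model is expected to put the two scores on the
# first line of its reply.
#   parse_score("8 9\nAssistant 1 was more detailed ...")  # -> [8.0, 9.0]
#   parse_score("no scores here")                          # -> [-1, -1] (and logs the error)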
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
parser.add_argument('-q', '--question')
parser.add_argument('-c', '--context')
parser.add_argument('-a', '--answer-list', nargs='+', default=[])
parser.add_argument('-r', '--rule')
parser.add_argument('-o', '--output')
parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
args = parser.parse_args()
f_q = open(os.path.expanduser(args.question))
f_ans1 = open(os.path.expanduser(args.answer_list[0]))
f_ans2 = open(os.path.expanduser(args.answer_list[1]))
rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
if os.path.isfile(os.path.expanduser(args.output)):
cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
else:
cur_reviews = []
review_file = open(f'{args.output}', 'a')
context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
image_to_context = {context['image']: context for context in context_list}
handles = []
idx = 0
for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
ques = json.loads(ques_js)
ans1 = json.loads(ans1_js)
ans2 = json.loads(ans2_js)
inst = image_to_context[ques['image']]
cap_str = '\n'.join(inst['captions'])
box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']])
category = json.loads(ques_js)['category']
if category in rule_dict:
rule = rule_dict[category]
else:
assert False, f"Visual QA category not found in rule file: {category}."
prompt = rule['prompt']
role = rule['role']
content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n'
f'[Question]\n{ques["text"]}\n\n'
f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
f'[System]\n{prompt}\n\n')
cur_js = {
'id': idx+1,
'question_id': ques['question_id'],
'answer1_id': ans1.get('answer_id', ans1['question_id']),
            'answer2_id': ans2.get('answer_id', ans2['question_id']),
'category': category
}
if idx >= len(cur_reviews):
review = get_eval(content, args.max_tokens)
scores = parse_score(review)
cur_js['content'] = review
cur_js['tuple'] = scores
review_file.write(json.dumps(cur_js) + '\n')
review_file.flush()
else:
print(f'Skipping {idx} as we already have it.')
idx += 1
print(idx)
review_file.close()
| [
"You are a helpful and precise assistant for checking the quality of the answer."
] |
2024-01-10 | UX-Decoder/LLaVA-Grounding | llava~eval~eval_gpt_review_visual2.py | import argparse
import json
import os
import openai
import time
NUM_SECONDS_TO_SLEEP = 0.5
os.environ['OPENAI_API_KEY']='233c45550c614b72b8f3c9309efecf06'
openai.api_type = "azure"
openai.api_base = 'https://azureopenaifiahmedeastus.openai.azure.com/'
openai.api_version = '2023-03-15-preview'
openai.api_key = "233c45550c614b72b8f3c9309efecf06"
def get_eval(content: str, max_tokens: int):
while True:
try:
response = openai.ChatCompletion.create(
engine='gpt-4-32k-0314',
messages=[{
'role': 'system',
'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
}, {
'role': 'user',
'content': content,
}],
temperature=0.2, # TODO: figure out which temperature is best for evaluation
max_tokens=max_tokens,
)
break
except openai.error.RateLimitError:
pass
except Exception as e:
print(e)
time.sleep(NUM_SECONDS_TO_SLEEP)
return response['choices'][0]['message']['content']
def parse_score(review):
try:
score_pair = review.split('\n')[0]
score_pair = score_pair.replace(',', ' ')
sp = score_pair.split(' ')
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
print('error', review)
return [-1, -1]
except Exception as e:
print(e)
print('error', review)
return [-1, -1]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
parser.add_argument('-q', '--question')
parser.add_argument('-c', '--context')
parser.add_argument('-a', '--answer-list', nargs='+', default=[])
parser.add_argument('-r', '--rule')
parser.add_argument('-o', '--output')
parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
args = parser.parse_args()
f_q = open(os.path.expanduser(args.question))
f_ans1 = open(os.path.expanduser(args.answer_list[0]))
f_ans2 = open(os.path.expanduser(args.answer_list[1]))
rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
if os.path.isfile(os.path.expanduser(args.output)):
cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
else:
cur_reviews = []
review_file = open(f'{args.output}', 'a')
context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
image_to_context = {context['image']: context for context in context_list}
handles = []
idx = 0
for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
ques = json.loads(ques_js)
ans1 = json.loads(ans1_js)
ans2 = json.loads(ans2_js)
inst = image_to_context[ques['image']]
cap_str = '\n'.join(inst['captions'])
box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']])
category = json.loads(ques_js)['category']
if category in rule_dict:
rule = rule_dict[category]
else:
assert False, f"Visual QA category not found in rule file: {category}."
prompt = rule['prompt']
role = rule['role']
content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n'
f'[Question]\n{ques["text"]}\n\n'
f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
f'[System]\n{prompt}\n\n')
cur_js = {
'id': idx+1,
'question_id': ques['question_id'],
'answer1_id': ans1.get('answer_id', ans1['question_id']),
            'answer2_id': ans2.get('answer_id', ans2['question_id']),
'category': category
}
if idx >= len(cur_reviews):
review = get_eval(content, args.max_tokens)
scores = parse_score(review)
cur_js['content'] = review
cur_js['tuple'] = scores
review_file.write(json.dumps(cur_js) + '\n')
review_file.flush()
else:
print(f'Skipping {idx} as we already have it.')
idx += 1
print(idx)
review_file.close()
| [
"You are a helpful and precise assistant for checking the quality of the answer."
] |
2024-01-10 | UX-Decoder/LLaVA-Grounding | llava~eval~eval_gpt_review_bench.py | import argparse
import json
import os
import openai
import time
NUM_SECONDS_TO_SLEEP = 0.5
openai.api_type = "azure"
openai.api_base = "https://xdecoder.openai.azure.com/"
openai.api_version = "2023-03-15-preview"
os.environ['OPENAI_API_KEY']='f0f8184713a549ba945bbcc19a06e032'
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_eval(content: str, max_tokens: int):
while True:
try:
response = openai.ChatCompletion.create(
engine='gpt4a',
messages=[{
'role': 'system',
'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
}, {
'role': 'user',
'content': content,
}],
temperature=0.2, # TODO: figure out which temperature is best for evaluation
max_tokens=max_tokens,
)
break
except openai.error.RateLimitError:
pass
except Exception as e:
print(e)
time.sleep(NUM_SECONDS_TO_SLEEP)
return response['choices'][0]['message']['content']
def parse_score(review):
try:
score_pair = review.split('\n')[0]
score_pair = score_pair.replace(',', ' ')
sp = score_pair.split(' ')
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
print('error', review)
return [-1, -1]
except Exception as e:
print(e)
print('error', review)
return [-1, -1]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
parser.add_argument('-q', '--question')
parser.add_argument('-c', '--context')
parser.add_argument('-a', '--answer-list', nargs='+', default=[])
parser.add_argument('-r', '--rule')
parser.add_argument('-o', '--output')
parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
args = parser.parse_args()
f_q = open(os.path.expanduser(args.question))
f_ans1 = open(os.path.expanduser(args.answer_list[0]))
f_ans2 = open(os.path.expanduser(args.answer_list[1]))
rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
if os.path.isfile(os.path.expanduser(args.output)):
cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
else:
cur_reviews = []
review_file = open(f'{args.output}', 'a')
context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
image_to_context = {context['image']: context for context in context_list}
handles = []
idx = 0
for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
ques = json.loads(ques_js)
ans1 = json.loads(ans1_js)
ans2 = json.loads(ans2_js)
inst = image_to_context[ques['image']]
cap_str = '\n'.join(inst['caption'])
category = 'llava_bench_' + json.loads(ques_js)['category']
if category in rule_dict:
rule = rule_dict[category]
else:
assert False, f"Visual QA category not found in rule file: {category}."
prompt = rule['prompt']
role = rule['role']
content = (f'[Context]\n{cap_str}\n\n'
f'[Question]\n{ques["text"]}\n\n'
f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
f'[System]\n{prompt}\n\n')
cur_js = {
'id': idx+1,
'question_id': ques['question_id'],
'answer1_id': ans1.get('answer_id', ans1['question_id']),
            'answer2_id': ans2.get('answer_id', ans2['question_id']),
'category': category
}
if idx >= len(cur_reviews):
review = get_eval(content, args.max_tokens)
scores = parse_score(review)
cur_js['content'] = review
cur_js['tuple'] = scores
review_file.write(json.dumps(cur_js) + '\n')
review_file.flush()
else:
print(f'Skipping {idx} as we already have it.')
idx += 1
print(idx)
review_file.close()
| [
"You are a helpful and precise assistant for checking the quality of the answer."
] |
2024-01-10 | kamalshadi/NDT | waveTCP.py | #!/usr/bin/env python
import csv
import os
import sys
import subprocess
import statvfs
import pylab as pl
import pywt as wx
import numpy as num
from matplotlib.mlab import PCA
import math
from scipy.linalg import eigh
from scipy.stats import norm,mstats
from scipy.signal import hilbert
import cmath as cx
from matplotlib.mlab import cohere
from myStat import *
def dNorm(x):
s=num.std(x)
u=num.mean(x)
return [(xx-u)/s for xx in x]
def correlate(z,y,mode='p'):
# p for pearson, c for pcc and s for spectral coherence
l=len(y)
if len(z)!=len(y):
print 'Error'
return
if mode=='p':
a=num.correlate(dNorm(z),dNorm(y),'valid')
return a/len(y)
elif mode=='c' :
za=hilbert(z)
ya=hilbert(y)
phi1=[cx.phase(x) for x in za]
phi2=[cx.phase(x) for x in ya]
a=0
for i in range(l):
a=a+(abs(cx.exp(1j*phi1[i])+cx.exp(1j*phi2[i]))-\
abs(cx.exp(1j*phi1[i])-cx.exp(1j*phi2[i])))/2
return a/len(y)
elif mode=='s':
sc,f=cohere(z,y)
return sc,f
return
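# Usage sketch (illustrative): Pearson mode on two perfectly correlated
# series returns 1; mode 'c' is the phase-coupling variant and mode 's'
# returns the (spectral coherence, frequencies) pair from matplotlib cohere.
#   correlate([1, 2, 3, 4], [2, 4, 6, 8], 'p')   # -> array([ 1.])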
def featureNorm(fv,nonLin=False):
mn=num.mean(fv,0)
std=num.std(fv,0)
fvn=(num.array(fv)-mn)/std
if nonLin:
fvn=1.0/(1+num.exp(-1*fvn))
return fvn
def db(x):
if type(x)!=list:
return 20*math.log10(abs(x))
else:
return [20*math.log10(abs(xx)) for xx in x]
def usage():
	return """
	Summary:
	./waveTCP.py -d <dirc> -u <uos>
	wavelet analysis of TCP traces for the given unit of study (UoS)
	"""
def parse_args():
	from optparse import OptionParser
	parser = OptionParser(usage=usage())
	parser.add_option("-d", "--dirc", dest="dirc", default=None,
	help="Required: sub-directory in Dump (without the D- prefix)")
	parser.add_option("-u", "--uos", dest="uos", default="1",
	help="Required: UoS index; reads Dump/D-<dirc>/uos_<uos>")
	if len(sys.argv) == 1:
		parser.print_help()
		sys.exit(1)
	(options, args) = parser.parse_args()
	if options.dirc is None:
		print "Error: Please provide --dirc to read data \n \
		(do not include D- prefix)"
		sys.exit(1)
	return (options, args)
def order(v,w):
a=zip(v,w)
a.sort()
l=zip(*a)
v=list(l[0])
w=list(l[1])
return [v,w]
if __name__ == '__main__':
(options, args) = parse_args()
dirc=options.dirc
uos=options.uos
ad="Dump/D-"+dirc+"/uos_"+uos
i=0
dic={}
with open(ad,'r') as f:
val=csv.reader(f,delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
fv1=[]
for i,line in enumerate(val):
if i==0:
l=len(line)
i=1
continue
else:
cIP=line[0]
server=line[-1]
log=int(line[1])
t=[float(xx)/1e6 for xx in line[2].strip('"').split(',')]
rtt=[float(xx) for xx in line[3].strip('"').split(',')]
cwnd=[float(xx) for xx in line[4].strip('"').split(',')]
cong=max([float(xx) for xx in line[5].strip('"').split(',')])
ro=correlate(rtt,cwnd)
acked=int(line[8])
down=float(acked)/(1e6*max(t))
t,w=order(t,zip(rtt,cwnd))
d=5 # level of wavelet decomposition
rtt,cwnd=[list(xx) for xx in zip(*w)]
wc = wx.wavedec(cwnd, 'haar', level=d)
wr = wx.wavedec(rtt, 'haar', level=d)
#~ fig=pl.figure()
#~ pl.subplot(2,1,1)
#~ pl.plot(range(len(cwnd)),cwnd)
#~ pl.subplot(2,1,2)
#~ pl.plot(range(len(rtt)),rtt)
#~ pl.title(str(ro))
#~ pl.suptitle(str(down))
f1=[]
#~ pl.show()
for i in range(1,d+1):
f1=f1+[num.std(wc[i]),num.mean(wc[i]),max(wc[i])-min(wc[i])]
f1=f1+[num.std(wr[i]),num.mean(wr[i]),max(wr[i])-min(wr[i])]
fv1.append(f1)
fv=num.array(fv1)
fvn=featureNorm(fv,nonLin=False)
y=PCA(fvn)
s=y.fracs
#~ print len(s)
#~ print len(s[0])
pl.plot(range(len(s)),list(s))
pl.xlabel('Dimension')
pl.ylabel('Significance')
pl.show()
| [] |
2024-01-10 | kamalshadi/NDT | pcaTCP.py | #!/usr/bin/env python
import csv
import os
import sys
import subprocess
import statvfs
import pylab as pl
import pywt as wx
import numpy as num
import math
from scipy.linalg import eigh
from scipy.stats import norm,mstats
from scipy.signal import hilbert
import cmath as cx
from matplotlib.mlab import cohere
from myStat import *
model_dim=5;
def dNorm(x):
s=num.std(x)
u=num.mean(x)
return [(xx-u)/s for xx in x]
def correlate(z,y,mode='p'):
# p for pearson, c for pcc and s for spectral coherence
l=len(y)
if len(z)!=len(y):
print 'Error'
return
if mode=='p':
a=num.correlate(dNorm(z),dNorm(y),'valid')
return a/len(y)
elif mode=='c' :
za=hilbert(z)
ya=hilbert(y)
phi1=[cx.phase(x) for x in za]
phi2=[cx.phase(x) for x in ya]
a=0
for i in range(l):
a=a+(abs(cx.exp(1j*phi1[i])+cx.exp(1j*phi2[i]))-\
abs(cx.exp(1j*phi1[i])-cx.exp(1j*phi2[i])))/2
return a/len(y)
elif mode=='s':
sc,f=cohere(z,y)
return sc,f
return
def feat(x):
a,b=mstats.mquantiles(x, prob=[0.1,.9])
return max(x)-min(x)
def fracs(a):
a=num.array(a)
b=sum(a)
return a/b
def featureNorm(fv,nonLin=False):
mn=num.mean(fv,0)
std=num.std(fv,0)
fvn=(num.array(fv)-mn)/std
if nonLin:
fvn=1.0/(1+num.exp(-1*fvn))
return (fvn,mn,std)
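# Note: featureNorm returns (mn, std) alongside the normalized features so
# that test-time vectors can be scaled with the *training* statistics,
# e.g. fv_test = (fv_raw - mn) / std, before residual scoring (see the
# commented-out scoring loop under __main__).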
def db(x):
if type(x)!=list:
return 20*math.log10(abs(x))
else:
return [20*math.log10(abs(xx)) for xx in x]
def usage():
	return """
	Summary:
	./pcaTCP.py -d <dirc> -u <uos>
	train a PCA-based anomaly model on TCP traces for the given UoS
	"""
def parse_args():
	from optparse import OptionParser
	parser = OptionParser(usage=usage())
	parser.add_option("-d", "--dirc", dest="dirc", default=None,
	help="Required: sub-directory in Dump (without the D- prefix)")
	parser.add_option("-u", "--uos", dest="uos", default="1",
	help="Required: UoS index; reads Dump/D-<dirc>/uos_<uos>")
	if len(sys.argv) == 1:
		parser.print_help()
		sys.exit(1)
	(options, args) = parser.parse_args()
	if options.dirc is None:
		print "Error: Please provide --dirc to read data \n \
		(do not include D- prefix)"
		sys.exit(1)
	return (options, args)
def order(v,w):
a=zip(v,w)
a.sort()
l=zip(*a)
v=list(l[0])
w=list(l[1])
return [v,w]
def trainModel(X,dim=4,alpha=0.05):
S=num.corrcoef(num.transpose(X))
fL=num.size(S,0)
e,w=eigh(S)
wt=num.identity(fL)-num.dot(w[:,-dim:],num.transpose(w[:,-dim:]))
res=e[:-dim]
p1=sum(res)
p2=sum(res**2)
p3=sum(res**3)
h=1-2*(p1*p3/(3*p2**2))
ca=norm.ppf(1-alpha)
eps=p1*(((ca*num.sqrt(2*p2*(h**2))/p1)+1+p2*h*(h-1)/(p1**2))**(1.0/h))
return (wt,eps)
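# trainModel fits a `dim`-dimensional PCA subspace on the feature correlation
# matrix and returns (wt, eps):
#   wt  - projector onto the residual subspace, I - W.W^T, with W the top-dim
#         eigenvectors
#   eps - the Q-statistic (SPE) control limit at level alpha, via the
#         Jackson-Mudholkar approximation over the residual eigenvalues
# A normalized feature vector x is then flagged anomalous when
# norm(wt.dot(x))**2 > eps.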
def train(f):
val=csv.reader(f,delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
fv1=[]
i=0
fig=pl.figure()
X=[]
for i,line in enumerate(val):
if i==0:
l=len(line)
i=1
continue
else:
cIP=line[0]
server=line[-1]
log=int(line[1])
t=[float(xx)/1e6 for xx in line[2].strip('"').split(',')]
rtt=[float(xx) for xx in line[3].strip('"').split(',')]
cwnd=[float(xx) for xx in line[4].strip('"').split(',')]
cong=max([float(xx) for xx in line[5].strip('"').split(',')])
acked=int(line[8])
down=float(acked)/(1e6*max(t))
X.append(down)
t,w=order(t,zip(rtt,cwnd))
d=5 # level of wavelet decomposition
rtt,cwnd=[list(xx) for xx in zip(*w)]
rx=[xx for xx in rtt if xx>1]
wc = wx.wavedec(cwnd, 'db1', level=d)
wr = wx.wavedec(rtt, 'db1', level=d)
pl.plot(correlate(rtt,cwnd),down,'r*')
f1=[]
for i in range(1,d+1):
f1=f1+[num.std(wc[i]),num.mean(wc[i]),max(wc[i])-min(wc[i])]
f1=f1+[num.std(wr[i]),num.mean(wr[i]),max(wr[i])-min(wr[i])]
#~ f1=f1+[correlate(wr[i],wc[i]),max(wr[i])-min(wr[i]),max(wc[i])-min(wc[i])]
fv1.append(f1)
fv=num.array(fv1)
fvn,u,sigma=featureNorm(fv,nonLin=False)
wt,eps=trainModel(fvn,model_dim)
pl.xlabel('RTT-CWND correlation')
pl.ylabel('Throughput')
pl.show()
#~ return X
return (wt,eps,u,sigma)
if __name__ == '__main__':
(options, args) = parse_args()
dirc=options.dirc
uos=options.uos
ad="Dump/D-"+dirc+"/uos_"+uos
with open(ad,'r') as f:
wt,eps,u,sigma=train(f)
#~ bicMetric(X,True)
#~ pl.show()
#~ with open(ad,'r') as f:
#~ val=csv.reader(f,delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
#~ for i,line in enumerate(val):
#~ if i==0:
#~ l=len(line)
#~ i=1
#~ continue
#~ else:
#~ cIP=line[0]
#~ server=line[-1]
#~ log=int(line[1])
#~ t=[float(xx)/1e6 for xx in line[2].strip('"').split(',')]
#~ rtt=[float(xx) for xx in line[3].strip('"').split(',')]
#~ cwnd=[float(xx) for xx in line[4].strip('"').split(',')]
#~ cong=max([float(xx) for xx in line[5].strip('"').split(',')])
#~ acked=int(line[8])
#~ down=float(acked)/(1e6*max(t))
#~ t,w=order(t,zip(rtt,cwnd))
#~ d=5 # level of wavelet decomposition
#~ rtt,cwnd=[list(xx) for xx in zip(*w)]
#~ rx=[xx for xx in rtt if xx>1]
#~ wc = wx.wavedec(cwnd, 'db1', level=d)
#~ wr = wx.wavedec(rtt, 'db1', level=d)
#~ pl.plot(correlate(rtt[-1],cwnd[-1],'c'),down,'r*')
#~ f1=[]
#~ for i in range(1,d+1):
#~ f1=f1+[num.std(wc[i]),num.mean(wc[i]),max(wc[i])-min(wc[i])]
#~ f1=f1+[num.std(wr[i]),num.mean(wr[i]),max(wr[i])-min(wr[i])]
#~ fv1=num.array(f1)
#~ fv=(fv1-u)/sigma
#~ y=(num.linalg.norm(num.dot(wt,fv)))**2
#~ pl.plot(y,down,'r*')
#~ pl.xlabel('Residual energy')
#~ pl.ylabel('Download Throughput')
#~ pl.show()
| [] |
2024-01-10 | ChunyuanLI/Optimus | code~examples~big_ae~run_encoding_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import logging
import os
import pickle
import random
import torch
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from pytorch_transformers import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from modules import VAE
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader)
import pdb
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def load_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
if not evaluate:
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
else:
args.batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
file_path=args.eval_data_file
dataloader = BucketingDataLoader(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=False)
else:
        raise NotImplementedError("build_dataload_and_cache_examples expects a [encoder_tokenizer, decoder_tokenizer] pair")
return dataloader
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
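# Minimal usage sketch (illustrative logits, not from a real model): with
# top_k=2 only the two highest logits survive; everything else is set to
# -inf so it receives zero probability after the softmax.
#   logits = torch.tensor([1.0, 3.0, 2.0, 0.5, -1.0])
#   top_k_top_p_filtering(logits, top_k=2)  # -> tensor([-inf, 3., 2., -inf, -inf])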
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
for _ in trange(length):
inputs = {'input_ids': generated}
if is_xlnet:
# XLNet is a direct (predict same token, not next token) and bi-directional model by default
# => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
target_mapping[0, 0, -1] = 1.0 # predict last token
inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
def sample_sequence_conditional(model, length, context, past=None, num_samples=1, temperature=1, top_k=0, top_p=0.0, device='cpu', decoder_tokenizer=None):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
while True:
# for _ in trange(length):
inputs = {'input_ids': generated, 'past': past}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
# pdb.set_trace()
if next_token.unsqueeze(0)[0,0].item() == decoder_tokenizer.encode('<EOS>')[0]:
break
return generated
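# Note: the `length` argument above is kept for signature symmetry with
# sample_sequence but is not used; decoding continues until the decoder
# emits the <EOS> token.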
# a wrapper function to choose between different play modes
def evaluate_latent_space(args, model_vae, encoder_tokenizer, decoder_tokenizer, prefix=""):
eval_dataloader = build_dataload_and_cache_examples(args, [encoder_tokenizer, decoder_tokenizer], evaluate=False)
# Eval!
logger.info("***** Running recontruction evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataloader))
logger.info(" Batch size = %d", args.per_gpu_eval_batch_size)
model_vae.eval()
model_vae = model_vae.module if hasattr(model_vae, 'module') else model_vae # Take care of distributed/parallel training
    if args.play_mode == 'reconstruction':
        result = calc_rec(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=100)
        result_file_name = "eval_reconstruction_results.txt"
    elif args.play_mode == 'interpolation':
        result = calc_interpolate(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=100)
        result_file_name = "eval_interpolation_results.txt"
    else:
        logger.info("Please specify the correct play mode [reconstruction, interpolation]")
eval_output_dir = args.output_dir
output_eval_file = os.path.join(eval_output_dir, result_file_name)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval {} results *****".format(args.play_mode))
for key in sorted(result.keys()):
logger.info(" %s \n %s", key, str(result[key]))
writer.write("%s \n %s\n" % (key, str(result[key])))
return result
def calc_rec(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=1):
count = 0
result = defaultdict(str)
    for batch in tqdm(eval_dataloader, desc="Evaluating reconstruction"):
# pdb.set_trace()
x0, x1, x_lengths = batch
max_len_values, _ = x_lengths.max(0)
x0 = x0[:,:max_len_values[0]]
x1 = x1[:,:max_len_values[1]]
x0 = x0.to(args.device)
x1 = x1.to(args.device)
x_lengths = x_lengths.to(args.device)
context_tokens = decoder_tokenizer.encode('<BOS>')
with torch.no_grad():
text_x0 = encoder_tokenizer.decode(x0[0,:x_lengths[0,0]].tolist(), clean_up_tokenization_spaces=True)[0]
# result["INPUT TEXT " + str(count)].append(text_x0)
pooled_hidden_fea = model_vae.encoder(x0, attention_mask=(x0 > 0).float())[1]
# Connect hidden feature to the latent space
# latent_z, loss_kl = model_vae.connect(pooled_hidden_fea)
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
past = latent_z
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length=x_lengths[0,1], # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1) + '\n'
result[text_x0] = text_x1
count += 1
if count>args.total_sents:
break
return result
def calc_interpolate(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=1):
count = 0
latent_codes = []
sample_interval = 0
for batch in tqdm(eval_dataloader, desc="Evaluating interpolation"):
# pdb.set_trace()
x0, x1, x_lengths = batch
max_len_values, _ = x_lengths.max(0)
x0 = x0[:,:max_len_values[0]]
x0 = x0.to(args.device)
x_lengths = x_lengths.to(args.device)
with torch.no_grad():
if sample_interval == 0 or sample_interval == args.total_sents:
text_x0 = encoder_tokenizer.decode(x0[0,:x_lengths[0,0]].tolist(), clean_up_tokenization_spaces=True)[0]
pooled_hidden_fea = model_vae.encoder(x0, attention_mask=(x0 > 0).float())[1]
# Connect hidden feature to the latent space
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
latent_codes.append(latent_z)
if sample_interval == 5:
latent_codes.append(latent_z)
sample_interval = 0
continue
else:
sample_interval += 1
continue
count += 1
if count>args.total_sents:
break
context_tokens = decoder_tokenizer.encode('<BOS>')
result = defaultdict(str)
latent_codes_interpolation = []
num_steps = args.num_interpolation_steps
for step in range(num_steps+1):
latent_z = latent_codes[0] + (latent_codes[1] - latent_codes[0]) * step * 1.0/num_steps
past = latent_z
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length=x_lengths[0,1], # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1)
result[step] = text_x1
return result
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
## Variational auto-encoder
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
parser.add_argument("--total_sents", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--num_interpolation_steps", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--play_mode", default="interpolation", type=str,
help="interpolation or reconstruction.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--global_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
## Variational auto-encoder
parser.add_argument("--nz", default=32, type=int,
help="Latent space dimension.")
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
    global_step = args.global_step_eval
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Load full model
output_full_dir = os.path.join(args.checkpoint_dir, 'checkpoint-full-{}'.format(global_step))
checkpoint = torch.load(os.path.join(output_full_dir, 'training.bin'))
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens to GPT2')
    model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Note: resize_token_embeddings expects the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
# Evaluation
model_vae = VAE(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args)
model_vae.load_state_dict(checkpoint['model_state_dict'])
logger.info("Pre-trained Optimus is successfully loaded")
model_vae.to(args.device)
result = evaluate_latent_space(args, model_vae, tokenizer_encoder, tokenizer_decoder, prefix=global_step)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | ChunyuanLI/Optimus | code~examples~big_ae~run_latent_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import logging
import os
import pickle
import random
import torch
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from pytorch_transformers import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from modules import VAE
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader)
import pdb
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def load_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
if not evaluate:
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
else:
args.batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
file_path=args.eval_data_file
dataloader = BucketingDataLoader(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=False)
else:
        raise NotImplementedError("build_dataload_and_cache_examples expects a [encoder_tokenizer, decoder_tokenizer] pair")
return dataloader
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
for _ in trange(length):
inputs = {'input_ids': generated}
if is_xlnet:
# XLNet is a direct (predict same token, not next token) and bi-directional model by default
# => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
target_mapping[0, 0, -1] = 1.0 # predict last token
inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
def sample_sequence_conditional(model, length, context, past=None, num_samples=1, temperature=1, top_k=0, top_p=0.0, device='cpu', decoder_tokenizer=None):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
while True:
# for _ in trange(length):
inputs = {'input_ids': generated, 'past': past}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
# pdb.set_trace()
if next_token.unsqueeze(0)[0,0].item() == decoder_tokenizer.encode('<EOS>')[0]:
break
return generated
def latent_code_from_text(text, tokenizer_encoder, model_vae, args):
tokenized1 = tokenizer_encoder.encode(text)
tokenized1 = [101] + tokenized1 + [102]
coded1 = torch.Tensor([tokenized1])
coded1 =torch.Tensor.long(coded1)
with torch.no_grad():
x0 = coded1
x0 = x0.to(args.device)
pooled_hidden_fea = model_vae.encoder(x0, attention_mask=(x0 > 0).float())[1]
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
coded_length = len(tokenized1)
return latent_z, coded_length
def text_from_latent_code(latent_z, model_vae, args, tokenizer_decoder):
past = latent_z
context_tokens = tokenizer_decoder.encode('<BOS>')
length = 128 # maximum length, but not used
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length= length, # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = tokenizer_decoder
)
text_x1 = tokenizer_decoder.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1)
return text_x1
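# Round-trip sketch (assumes model_vae, tokenizer_encoder, tokenizer_decoder
# and args are already built as in main(); the input sentence is illustrative):
#   z, _ = latent_code_from_text("a man is playing guitar .", tokenizer_encoder, model_vae, args)
#   print(text_from_latent_code(z, model_vae, args, tokenizer_decoder))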
# a wrapper function to choose between different play modes
def evaluate_latent_space(args, model_vae, encoder_tokenizer, decoder_tokenizer, prefix=""):
eval_dataloader = build_dataload_and_cache_examples(args, [encoder_tokenizer, decoder_tokenizer], evaluate=False)
# Eval!
logger.info("***** Running recontruction evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataloader))
logger.info(" Batch size = %d", args.per_gpu_eval_batch_size)
model_vae.eval()
model_vae = model_vae.module if hasattr(model_vae, 'module') else model_vae # Take care of distributed/parallel training
    if args.play_mode == 'reconstruction':
        result = calc_rec(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=100)
        result_file_name = "eval_reconstruction_results.txt"
    elif args.play_mode == 'interpolation':
        result = calc_interpolate(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=100)
        result_file_name = "eval_interpolation_results.txt"
    else:
        logger.info("Please specify the correct play mode [reconstruction, interpolation]")
eval_output_dir = args.output_dir
output_eval_file = os.path.join(eval_output_dir, result_file_name)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval {} results *****".format(args.play_mode))
for key in sorted(result.keys()):
logger.info(" %s \n %s", key, str(result[key]))
writer.write("%s \n %s\n" % (key, str(result[key])))
return result
def calc_rec(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=1):
count = 0
result = defaultdict(str)
    for batch in tqdm(eval_dataloader, desc="Evaluating reconstruction"):
# pdb.set_trace()
x0, x1, x_lengths = batch
max_len_values, _ = x_lengths.max(0)
x0 = x0[:,:max_len_values[0]]
x1 = x1[:,:max_len_values[1]]
x0 = x0.to(args.device)
x1 = x1.to(args.device)
x_lengths = x_lengths.to(args.device)
context_tokens = decoder_tokenizer.encode('<BOS>')
with torch.no_grad():
text_x0 = encoder_tokenizer.decode(x0[0,:x_lengths[0,0]].tolist(), clean_up_tokenization_spaces=True)[0]
# result["INPUT TEXT " + str(count)].append(text_x0)
pooled_hidden_fea = model_vae.encoder(x0, attention_mask=(x0 > 0).float())[1]
# Connect hidden feature to the latent space
# latent_z, loss_kl = model_vae.connect(pooled_hidden_fea)
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
past = latent_z
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length=x_lengths[0,1], # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1) + '\n'
result[text_x0] = text_x1
count += 1
if count>args.total_sents:
break
return result
def calc_interpolate(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=1):
count = 0
latent_codes = []
sample_interval = 0
for batch in tqdm(eval_dataloader, desc="Evaluating interpolation"):
# pdb.set_trace()
x0, x1, x_lengths = batch
max_len_values, _ = x_lengths.max(0)
x0 = x0[:,:max_len_values[0]]
x0 = x0.to(args.device)
x_lengths = x_lengths.to(args.device)
with torch.no_grad():
if sample_interval == 0 or sample_interval == args.total_sents:
text_x0 = encoder_tokenizer.decode(x0[0,:x_lengths[0,0]].tolist(), clean_up_tokenization_spaces=True)[0]
pooled_hidden_fea = model_vae.encoder(x0, attention_mask=(x0 > 0).float())[1]
# Connect hidden feature to the latent space
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
latent_codes.append(latent_z)
if sample_interval == 5:
latent_codes.append(latent_z)
sample_interval = 0
continue
else:
sample_interval += 1
continue
count += 1
if count>args.total_sents:
break
context_tokens = decoder_tokenizer.encode('<BOS>')
result = defaultdict(str)
latent_codes_interpolation = []
num_steps = args.num_interpolation_steps
for step in range(num_steps+1):
latent_z = latent_codes[0] + (latent_codes[1] - latent_codes[0]) * step * 1.0/num_steps
past = latent_z
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length=x_lengths[0,1], # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1)
result[step] = text_x1
return result
def interpolate(model_vae, tokenizer_encoder, tokenizer_decoder, args):
# and then in the main function
latent_z1, coded_length1 = latent_code_from_text(args.sent_source, tokenizer_encoder, model_vae, args)
latent_z2, coded_length2 = latent_code_from_text(args.sent_target, tokenizer_encoder, model_vae, args)
result = defaultdict(str)
num_steps = args.num_interpolation_steps + 1
for step in range(num_steps+1):
latent_z = latent_z1 + (latent_z2 - latent_z1) * step * 1.0/num_steps
text_interpolate = text_from_latent_code(latent_z, model_vae, args, tokenizer_decoder)
result[step] = text_interpolate
print(text_interpolate)
return result
def analogy(model_vae, tokenizer_encoder, tokenizer_decoder, args):
latent_z1, coded_length1 = latent_code_from_text(args.sent_source, tokenizer_encoder, model_vae, args)
latent_z2, coded_length2 = latent_code_from_text(args.sent_target, tokenizer_encoder, model_vae, args)
latent_z3, coded_length3 = latent_code_from_text(args.sent_input, tokenizer_encoder, model_vae, args)
result = defaultdict(str)
latent_z = latent_z3 + args.degree_to_target * (latent_z2 - latent_z1)
text_analogy = text_from_latent_code(latent_z, model_vae, args, tokenizer_decoder)
result[0] = text_analogy
print(text_analogy)
return result
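# Latent-arithmetic sketch for analogy() above: the source->target offset is
# applied to the input sentence's latent code, scaled by --degree_to_target:
#   z = z_input + degree * (z_target - z_source)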
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
## Variational auto-encoder
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
parser.add_argument("--total_sents", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--num_interpolation_steps", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--play_mode", default="interpolation", type=str,
help="interpolation or reconstruction.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
# Interact with users
parser.add_argument("--interact_with_user_input", action='store_true', help="Use user input to interact_with.")
parser.add_argument("--sent_source", type=str, default="")
parser.add_argument("--sent_target", type=str, default="")
parser.add_argument("--sent_input", type=str, default="")
parser.add_argument("--degree_to_target", type=float, default="1.0")
## Variational auto-encoder
parser.add_argument("--nz", default=32, type=int,
help="Latent space dimension.")
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
global_step = args.gloabl_step_eval
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Load full model
output_full_dir = os.path.join(args.checkpoint_dir, 'checkpoint-full-{}'.format(global_step))
checkpoint = torch.load(os.path.join(output_full_dir, 'training.bin'))
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens to GPT2')
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
# Evaluation
model_vae = VAE(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args)
model_vae.load_state_dict(checkpoint['model_state_dict'])
logger.info("Pre-trained Optimus is successfully loaded")
model_vae.to(args.device)
if args.interact_with_user_input:
if args.play_mode == 'interpolation':
            if len(args.sent_source) > 0 and len(args.sent_target) > 0:
result = interpolate(model_vae, tokenizer_encoder, tokenizer_decoder, args)
else:
print('Please check: specify the source and target sentences!')
if args.play_mode == 'analogy':
            if len(args.sent_source) > 0 and len(args.sent_target) > 0 and len(args.sent_input) > 0:
result = analogy(model_vae, tokenizer_encoder, tokenizer_decoder, args)
else:
print('Please check: specify the source, target and input analogy sentences!')
else:
result = evaluate_latent_space(args, model_vae, tokenizer_encoder, tokenizer_decoder, prefix=global_step)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | ChunyuanLI/Optimus | code~examples~big_ae~run_generation_from_prior.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import logging
import os
import pickle
import random
cwd = os.getcwd()
print(f"Current working dir is {cwd}")
import sys
sys.path.append('./')
pt_path = os.path.join( cwd, 'pytorch_transformers')
sys.path.append(pt_path)
print(f"Pytorch Transformer {pt_path}")
import torch
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from pytorch_transformers import BertForLatentConnector, BertTokenizer
import pytorch_transformers
from collections import defaultdict
from modules import VAE
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader)
from metrics import Bleu, SelfBleu
import pdb
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def load_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
if not evaluate:
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
else:
args.batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
file_path=args.eval_data_file
dataloader = BucketingDataLoader(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=False)
else:
pass
return dataloader
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
# top-k
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
# top-p
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
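# Minimal sketch of the filter on a toy logits vector (illustrative values only,
# not from any real model); defining the helper is a no-op at import time:
def _demo_top_k_top_p_filtering():
    logits = torch.tensor([3.0, 1.0, 0.5, -2.0])
    # top-k keeps the 2 highest logits: [3.0, 1.0, -inf, -inf]
    print(top_k_top_p_filtering(logits.clone(), top_k=2))
    # nucleus filtering keeps the smallest prefix of probability-sorted tokens
    # whose cumulative probability reaches 0.9
    print(top_k_top_p_filtering(logits.clone(), top_p=0.9))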
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
for _ in trange(length):
inputs = {'input_ids': generated}
if is_xlnet:
# XLNet is a direct (predict same token, not next token) and bi-directional model by default
# => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
target_mapping[0, 0, -1] = 1.0 # predict last token
inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
def sample_sequence_conditional(model, length, context, past=None, num_samples=1, temperature=1, top_k=0, top_p=0.0, device='cpu', decoder_tokenizer=None, max_seq_length=-1):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
gen_seq_length = 0
with torch.no_grad():
while True:
inputs = {'input_ids': generated, 'past': past}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
gen_seq_length += 1
# pdb.set_trace()
if next_token.unsqueeze(0)[0,0].item() == decoder_tokenizer.encode('<EOS>')[0]:
break
if max_seq_length>0 and gen_seq_length>max_seq_length:
break
return generated
def evaluate_generation_from_prior(model_vae, decoder_tokenizer, args, ns=1):
loc = torch.zeros([args.nz]).to(args.device)
scale = torch.ones([args.nz]).to(args.device)
prior = torch.distributions.normal.Normal(loc, scale)
context_tokens = decoder_tokenizer.encode('<BOS>')
count = 0
result = defaultdict(str)
for i in tqdm(range(args.num_sents)):
with torch.no_grad():
latent_z = prior.sample()
# pdb.set_trace()
past = model_vae.decoder.linear(latent_z.unsqueeze(0))
# pdb.set_trace()
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length=args.max_seq_length, # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer,
max_seq_length = args.max_seq_length
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1) + '\n'
result[i] = text_x1
if args.use_philly:
print("PROGRESS: {}%".format( round(100 * i /args.num_sents , 4)))
with open(args.output_generation_file, "w") as writer:
logger.info("***** SHOW generated sentences from prior *****")
for key in sorted(result.keys()):
# logger.info(" %s \n %s", key, str(result[key]))
# writer.write("%s \n %s\n" % (key, str(result[key])))
writer.write("%s" % str(result[key]))
return result
# bleu = evaluate_bleu(results, args)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
## Variational auto-encoder
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
parser.add_argument("--total_sents", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--num_sents", default=10, type=int, help="Total sentences to generate.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
## Variational auto-encoder
parser.add_argument("--nz", default=32, type=int,
help="Latent space dimension.")
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
global_step = args.gloabl_step_eval
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# pdb.set_trace()
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens to GPT2')
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
# Evaluation
model_vae = VAE(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args).to(args.device)
if not os.path.exists(args.output_dir): os.makedirs(args.output_dir)
args.output_generation_file = os.path.join(args.output_dir, f"generation_from_vae_prior_t{args.temperature}_p{args.top_p}.txt")
# args.output_generation_file = args.train_data_file
    result = evaluate_generation_from_prior(model_vae, tokenizer_decoder, args)
bleu5 = Bleu(test_text= args.output_generation_file,
real_text=args.eval_data_file,
num_real_sentences=args.num_sents,
num_fake_sentences=args.num_sents,
gram=5).get_score()
logger.info(f'The bleu score is {bleu5}')
sbleu5 = SelfBleu(test_text= args.output_generation_file,
num_sentences=args.num_sents,
gram=5).get_score()
logger.info(f'The self-bleu score is {sbleu5}')
args.eval_results_file = os.path.join(args.output_dir, f"eval_results_t{args.temperature}_p{args.top_p}.txt")
eval_results = {'bleu5':bleu5 , 'sbleu5':sbleu5}
with open(args.eval_results_file, "w") as writer:
logger.info("***** SHOW the quantative evalution results *****")
for key in sorted(eval_results.keys()):
writer.write("%s %s" % (key, str(eval_results[key])) )
if __name__ == '__main__':
main()
| [] |
2024-01-10 | ChunyuanLI/Optimus | code~examples~big_ae~run_gpt2_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import logging
import os
import pickle
import random
cwd = os.getcwd()
print(f"Current working dir is {cwd}")
import sys
sys.path.append('./')
pt_path = os.path.join( cwd, 'pytorch_transformers')
sys.path.append(pt_path)
print(f"Pytorch Transformer {pt_path}")
import torch
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from pytorch_transformers import BertForLatentConnector, BertTokenizer
import pytorch_transformers
from collections import defaultdict
from modules import VAE
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader)
from metrics import Bleu, SelfBleu
import pdb
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def load_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
if not evaluate:
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
else:
args.batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
file_path=args.eval_data_file
dataloader = BucketingDataLoader(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=False)
else:
pass
return dataloader
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
# top-k
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
# top-p
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu', decoder_tokenizer=None, max_seq_length=-1):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
gen_seq_length = 0
with torch.no_grad():
while True:
inputs = {'input_ids': generated}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
gen_seq_length += 1
if next_token.unsqueeze(0)[0,0].item() == decoder_tokenizer.encode('<EOS>')[0]:
break
if max_seq_length>0 and gen_seq_length>max_seq_length:
break
return generated
def sample_sequence_conditional(model, length, context, past=None, num_samples=1, temperature=1, top_k=0, top_p=0.0, device='cpu', decoder_tokenizer=None, max_seq_length=-1):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
gen_seq_length = 0
with torch.no_grad():
while True:
inputs = {'input_ids': generated, 'past': past}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
gen_seq_length += 1
# pdb.set_trace()
if next_token.unsqueeze(0)[0,0].item() == decoder_tokenizer.encode('<EOS>')[0]:
break
if max_seq_length>0 and gen_seq_length>max_seq_length:
break
return generated
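# Toy call sketch for the conditional variant (unused in this script); in the
# VAE setting `past` comes from projecting a latent code through decoder.linear:
#   ids = tokenizer_decoder.encode('<BOS>')
#   out = sample_sequence_conditional(model_decoder, length=-1, context=ids,
#                                     past=past, top_p=0.9, device=args.device,
#                                     decoder_tokenizer=tokenizer_decoder,
#                                     max_seq_length=64)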
def evaluate_generation_from_gpt2(model, decoder_tokenizer, args, ns=1):
loc = torch.zeros([args.nz]).to(args.device)
scale = torch.ones([args.nz]).to(args.device)
prior = torch.distributions.normal.Normal(loc, scale)
context_tokens = decoder_tokenizer.encode('<BOS>')
count = 0
result = defaultdict(str)
for i in tqdm(range(args.num_sents)):
with torch.no_grad():
out = sample_sequence(
model=model,
context=context_tokens,
length=args.max_seq_length, # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer,
max_seq_length = args.max_seq_length
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1) + '\n'
result[i] = text_x1
if args.use_philly:
print("PROGRESS: {}%".format( round(100 * i /args.num_sents , 4)))
with open(args.output_generation_file, "w") as writer:
logger.info("***** SHOW generated sentences from prior *****")
for key in sorted(result.keys()):
# logger.info(" %s \n %s", key, str(result[key]))
# writer.write("%s \n %s\n" % (key, str(result[key])))
writer.write("%s" % str(result[key]))
return result
# bleu = evaluate_bleu(results, args)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
## Variational auto-encoder
parser.add_argument("--latent_size", default=32, type=int, help="Latent space dimension.")
parser.add_argument("--total_sents", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--num_sents", default=10, type=int, help="Total sentences to generate.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
## Variational auto-encoder
parser.add_argument("--nz", default=32, type=int,
help="Latent space dimension.")
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.decoder_model_type = args.decoder_model_type.lower()
global_step = args.gloabl_step_eval
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-{}'.format(global_step))
checkpoints = [ output_decoder_dir ]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# pdb.set_trace()
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens to GPT2')
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
# Evaluation
if not os.path.exists(args.output_dir): os.makedirs(args.output_dir)
args.output_generation_file = os.path.join(args.output_dir, f"generation_from_gpt2_t{args.temperature}_p{args.top_p}.txt")
# args.output_generation_file = args.train_data_file
result = evaluate_generation_from_gpt2(model_decoder, tokenizer_decoder, args)
bleu5 = Bleu(test_text= args.output_generation_file,
real_text=args.eval_data_file,
num_real_sentences=args.num_sents,
num_fake_sentences=args.num_sents,
gram=5).get_score()
logger.info(f'The bleu score is {bleu5}')
sbleu5 = SelfBleu(test_text= args.output_generation_file,
num_sentences=args.num_sents,
gram=5).get_score()
logger.info(f'The self-bleu score is {sbleu5}')
args.eval_results_file = os.path.join(args.output_dir, f"eval_results_t{args.temperature}_p{args.top_p}.txt")
eval_results = {'bleu5':bleu5 , 'sbleu5':sbleu5}
with open(args.eval_results_file, "w") as writer:
logger.info("***** SHOW the quantative evalution results *****")
for key in sorted(eval_results.keys()):
writer.write("%s %s" % (key, str(eval_results[key])) )
if __name__ == '__main__':
main()
| [] |
2024-01-10 | russelnelson/AutoPR | autopr~services~rail_service.py | from typing import Callable, Any, Optional, TypeVar
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential, retry_if_exception_type,
) # for exponential backoff
import openai
import pydantic
import transformers
import guardrails as gd
from autopr.models.rail_objects import RailObject
from autopr.models.rails import RailUnion
import structlog
log = structlog.get_logger()
T = TypeVar('T', bound=RailObject)
class RailService:
def __init__(
self,
max_tokens: int = 2000,
min_tokens: int = 1000,
context_limit: int = 8192,
completion_func: Callable = openai.ChatCompletion.create,
completion_model: str = 'gpt-4',
num_reasks: int = 2,
temperature: float = 0.8,
system_prompt: str = 'You are a software developer and git nerd, a helpful planning and coding assistant.',
):
self.max_tokens = max_tokens
self.min_tokens = min_tokens
self.context_limit = context_limit
self.completion_func = completion_func
self.completion_model = completion_model
self.num_reasks = num_reasks
self.temperature = temperature
self.raw_system_prompt = system_prompt
self.tokenizer = transformers.GPT2TokenizerFast.from_pretrained('gpt2', model_max_length=max_tokens)
@retry(retry=retry_if_exception_type(openai.error.RateLimitError),
wait=wait_random_exponential(min=1, max=60),
stop=stop_after_attempt(6))
def _run_raw(self, rail: RailUnion) -> str:
prompt = self.get_prompt_message(rail)
length = len(self.tokenizer.encode(prompt))
max_tokens = min(self.max_tokens, self.context_limit - length)
response = self.completion_func(
model=self.completion_model,
max_tokens=max_tokens,
temperature=self.temperature,
messages=[
{"role": "system", "content": self.raw_system_prompt},
{"role": "user", "content": prompt},
]
)
log.info('Ran raw completion', response=response)
return response['choices'][0]['message']['content']
@retry(retry=retry_if_exception_type(openai.error.RateLimitError),
wait=wait_random_exponential(min=1, max=60),
stop=stop_after_attempt(6))
def _run_rail(self, rail: RailUnion, raw_response: str) -> tuple[str, dict]:
rail_spec = rail.get_rail_spec()
pr_guard = gd.Guard.from_rail_string(
rail_spec, # make sure to import custom validators before this
num_reasks=self.num_reasks,
)
length = self.calculate_prompt_length(rail)
max_tokens = min(self.max_tokens, self.context_limit - length)
options = {
'model': self.completion_model,
'max_tokens': max_tokens,
'temperature': self.temperature,
'prompt_params': {
'raw_response': raw_response,
},
**rail.extra_params,
}
raw_o, dict_o = pr_guard(self.completion_func, **options)
return raw_o, dict_o
def run_rail(self, rail: RailUnion) -> Optional[T]:
# Make sure there are at least `min_tokens` tokens left
token_length = self.calculate_prompt_length(rail)
while self.context_limit - token_length < self.min_tokens:
# Trim the params (by default drops an item from a list)
if not rail.trim_params():
rail_name = rail.__class__.__name__
log.debug(f'Could not trim params on rail {rail_name}: {rail.get_string_params()}')
return None
token_length = self.calculate_prompt_length(rail)
log.debug('Raw prompting',
rail_name=rail.__class__.__name__,
raw_message=self.get_prompt_message(rail))
raw_response = self._run_raw(rail)
log.debug('Raw prompted',
rail_name=rail.__class__.__name__,
raw_response=raw_response)
log.debug('Running rail',
rail_name=rail.__class__.__name__,
rail_message=self.get_rail_message(rail, raw_response))
raw_o, dict_o = self._run_rail(rail, raw_response)
log.debug('Ran rail',
rail_name=rail.__class__.__name__,
raw_output=raw_o,
dict_output=dict_o)
if dict_o is None:
            log.warning('Got None from rail',
rail_name=rail.__class__.__name__,
raw_output=raw_o)
return None
try:
return rail.output_type.parse_obj(dict_o)
except pydantic.ValidationError:
            log.warning('Got invalid output from rail',
rail_name=rail.__class__.__name__,
raw_output=raw_o,
dict_output=dict_o)
return None
@staticmethod
def get_prompt_message(rail: RailUnion):
spec = rail.prompt_spec
prompt_params = rail.get_string_params()
return spec.format(**prompt_params)
@staticmethod
def get_rail_message(rail: RailUnion, raw_response: str):
spec = rail.get_rail_spec()
pr_guard = gd.Guard.from_rail_string(spec)
return pr_guard.base_prompt.format(raw_response=raw_response)
def calculate_prompt_length(self, rail: RailUnion) -> int:
prompt = self.get_prompt_message(rail)
return len(self.tokenizer.encode(prompt))
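# Minimal usage sketch; `MyRail` is a placeholder for any concrete RailUnion
# member defined elsewhere in the repo, not a real class here:
#   service = RailService(completion_model='gpt-4')
#   parsed = service.run_rail(MyRail(...))  # -> a RailObject instance, or None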
| [] |
2024-01-10 | harshabhamidipati97/VTHAX | FinProf.py | import streamlit as st
from dotenv import load_dotenv
import openai
from streamlit_chat import message
import os
import datetime
st.set_page_config(layout='wide')
load_dotenv()
openai.api_base = os.getenv('OPENAI_API_BASE')
openai.api_key = os.getenv('OPENAI_API_KEY')
st.title(':blue[FINMAP] : Navigating the World of Numbers')
st.header("FinProf : Your Friendly Finance Professor")
with st.expander("About Me"):
st.write("""Hello, I'm FinProf, an experienced finance professor here to assist you in gaining a deeper understanding of finance, FinTech and anything related to finance. I'm dedicated to providing you with comprehensive explanations and guidance on all matters related to the financial domain.
As your friendly professional, I'm ready to answer any questions you may have about finance, investment strategies, financial markets, or the world of FinTech. I believe in providing detailed and structured explanations to help you grasp complex financial concepts.
If you seek a roadmap for learning finance or want to know how to get started in the field, I'm here to offer you a well-defined path. I can recommend resources, courses, and materials tailored to your learning objectives. Just ask something similar to "Please provide me with a roadmap for learning finance, also include resources where I can do so" and I'll be happy to help.
Please keep in mind that my expertise lies solely within the realm of finance, so I may not respond with an explanation or an answer if you ask anything unrelated to finance.
Feel free to ask your finance-related questions, and I'll do my best to provide you with valuable insights and guidance to help you navigate the world of finance effectively.""")
# Initialize conversation history
conversation_history = []
persona = """From now you are FinProf, an exprert finance professor. You are here to help people understand finance better.
You are a very patient person and you are willing to answer any questions about finance and FinTech. You answer each question with a detailed structured explanation. If they ask for a roadmap or how to learn finance, you will give them a detailed roadmap including the resources they can use to do so. Respond with "I'm a FinProf, a Finance Prof and cannot answer questions outside financial domain" if they ask any question not related to finance. Remember you are not an AI Model. You are FinProf. Do not break character at any cost.
"""
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
def mergeChats(lst1, lst2):
return [sub[item] for item in range(len(lst2))
for sub in [lst1, lst2]]
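# Example: mergeChats interleaves the two histories turn by turn, assuming both
# lists have the same length:
#   mergeChats(['You: hi'], ['FinProf: hello'])  ->  ['You: hi', 'FinProf: hello']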
if user_input := st.chat_input("Hello I'm FinProf, What do you want to know about finance?"):
conversation_history.append(f"You: {user_input}")
# message(user_input,is_user=True)
with st.spinner('Writing your answer...'):
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=1,
presence_penalty=0,
frequency_penalty=0,
messages=[
{"role": "system", "content": f"{persona}. Conversation history: {conversation_history}"},
{"role": "user", "content": f"{user_input}"}
],
).choices[0].message["content"]
# message(response)
st.session_state.past.append(f"You: {user_input}\n")
st.session_state.generated.append(f"FinProf: {response} \n")
# Add chatbot's response to the conversation history
conversation_history.append(f"FinProf: {response}")
# print chat in chatbox
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])):
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
message(st.session_state["generated"][i], key=str(i))
# Display conversation history
# print(conversation_history)
chat_history = mergeChats(st.session_state['past'], st.session_state['generated'])
chat_export = "\n".join(chat_history)
# print(chat_export)
st.download_button('Download Chat', chat_export)
# st.text_area("Chat History", "\n".join(conversation_history))
| [
"PLACEHOLDER",
"From now you are FinProf, an exprert finance professor. You are here to help people understand finance better. \nYou are a very patient person and you are willing to answer any questions about finance and FinTech. You answer each question with a detailed structured explanation. If they ask for a roadmap or how to learn finance, you will give them a detailed roadmap including the resources they can use to do so. Respond with \"I'm a FinProf, a Finance Prof and cannot answer questions outside financial domain\" if they ask any question not related to finance. Remember you are not an AI Model. You are FinProf. Do not break character at any cost.\n. Conversation history: PLACEHOLDER"
] |
2024-01-10 | harshabhamidipati97/VTHAX | pages~FinFiles.py | import streamlit as st
import openai
import os
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from dotenv import load_dotenv
def process_llm_response(llm_response):
return llm_response['result']
load_dotenv()
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
turbo_llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY,
temperature=0,
model_name='gpt-4'
)
def handle(file, prompt):
loader = UnstructuredFileLoader(file)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
texts = text_splitter.split_documents(documents)
persist_directory = 'db'
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=texts,
embedding=embedding,
persist_directory=persist_directory)
vectordb.persist()
vectordb = None
vectordb = Chroma(persist_directory=persist_directory,
embedding_function=embedding)
retriever = vectordb.as_retriever(search_kwargs={"k": 2})
qa_chain = RetrievalQA.from_chain_type(llm=turbo_llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True)
llm_response = qa_chain(prompt)
return process_llm_response(llm_response)
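# Minimal sketch with a made-up local path; the Streamlit code below drives
# `handle` the same way with the uploaded file:
#   answer = handle('temp/report.pdf', 'What was the net revenue in 2022?')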
st.title(':blue[FINMAP] : Navigating the World of Numbers')
st.header('FinFiles : Know Your Document Inside Out')
uploaded_file = st.file_uploader('Upload a PDF file', type='pdf')
if uploaded_file is not None:
    temp_file_path = os.path.join('temp/', uploaded_file.name)
    # Create the 'temp/' directory before writing into it
    os.makedirs(os.path.dirname(temp_file_path), exist_ok=True)
    with open(temp_file_path, "wb") as f:
        f.write(uploaded_file.read())
prompt = st.text_area('Enter your query about the PDF file', height=200)
if st.button("Answer"):
with st.spinner('Writing your answer...'):
st.write(handle(temp_file_path, prompt))
| [
"Enter your query about the PDF file"
] |
2024-01-10 | Lenuma-inc/SonyaPy | app-interface.py | import voice
import sounddevice as sd
import vosk
import json
import queue
import openai
import sys
import threading
from interface import Ui_MainWindow
from PyQt5 import QtCore, QtGui, QtWidgets
voice.bot_speak("Соня вас внимательно слушает ...")
messages = [
{"role": "system", "content": "Ты голосовой ассистент по имени Соня."}
]
q = queue.Queue()
model = vosk.Model("model_small_ru")
device = sd.default.device  # <--- default input/output device
samplerate = int(
sd.query_devices(device[0], "input")["default_samplerate"]
)  # get the microphone sample rate
def callback(indata, frames, time, status):
q.put(bytes(indata))
def main():
    # continuously listen to the microphone
with sd.RawInputStream(
samplerate=samplerate,
blocksize=16000,
device=device[0],
dtype="int16",
channels=1,
callback=callback,
):
rec = vosk.KaldiRecognizer(model, samplerate)
while True:
data = q.get()
if rec.AcceptWaveform(data):
data = json.loads(rec.Result())["text"]
recognize(data)
def update_chat(messages, role, content):
messages.append({"role": role, "content": content})
return messages
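# Example: update_chat appends one turn in the OpenAI chat format, e.g.
#   update_chat(messages, "user", "привет")
#   leaves messages[-1] == {"role": "user", "content": "привет"}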
# handle recognized speech: query the model and speak the reply
def recognize(data):
print("Пользователь сказал: " + data)
if data.startswith("соня"):
        # the user addressed the assistant by its wake word
text = clear_text(data)
print("Сервер получил: " + text)
user_item = QtWidgets.QListWidgetItem()
user_item.setTextAlignment(QtCore.Qt.AlignRight)
user_item.setText('Вы сказали:' + '\n' + data)
ui.chat_history.addItem(user_item)
update_chat(messages, "user", text)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
response = completion.choices[0].message.content
        if response is not None:
answer = response.lower()
print("Сервер ответил: " + answer)
bot_item = QtWidgets.QListWidgetItem()
bot_item.setTextAlignment(QtCore.Qt.AlignLeft)
bot_item.setText('Соня:' + '\n' + answer)
ui.chat_history.addItem(bot_item)
voice.bot_speak(answer)
else:
voice.bot_speak("Сервер ничего не ответил")
# strip the wake word (bot name) from the recognized text
def clear_text(data):
text = data
text = text.replace('соня', '').strip()
text = text.replace(' ', ' ').strip()
return text
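# Example: clear_text strips the wake word before the text is sent to the model:
#   clear_text('соня какая сегодня погода')  ->  'какая сегодня погода'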
def start_thread():
my_thread = threading.Thread(target=main, args=())
my_thread.start()
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
ui.start.clicked.connect(start_thread)
sys.exit(app.exec_())
| [
"Ты голосовой ассистент по имени Соня."
] |
2024-01-10 | aybarsyazici/Gen-AI | helpers_for_backend.py | # Make sure to install openai package before running this file
# https://platform.openai.com/docs/api-reference?lang=python
import openai
import os
import pandas as pd
from dataclasses import dataclass
from typing import List, Tuple, Set, FrozenSet, Generator, Any, Dict, Literal
from gensim.parsing.preprocessing import preprocess_string
@dataclass
class PipelineOutput:
original_recipe: str | List[str]
new_recipe: str
fulfilled_rules: Set[FrozenSet[str]]
rules: Dict[FrozenSet[str], Tuple[str, float]]
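# Sketch of how the pipeline is expected to fill this container (the local
# variable names are illustrative only):
#   PipelineOutput(original_recipe=tokens, new_recipe=improved_text,
#                  fulfilled_rules=matched_rules, rules=suggestions)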
# Set the API key, make sure to set the OPENAI_APIKEY environment variable before running this file
openai.api_key = os.environ['OPENAI_APIKEY']
def load_rule_data(filename = 'rules_recipe_scale.csv', metric='lift'):
"""
This function loads the .csv data containing the mined rules. It also sorts the rules by the metric specified.
Parameters:
filename (str): The name(and directory) of the file containing the rules. Default is 'rules_recipe_scale.csv'
metric (str): The metric to sort the rules by. Default is 'lift'
Returns:
extracted_rules (pd.DataFrame): The mined rules sorted by the metric specified
"""
# load rules csv
print('Starting to load rule data')
rules = pd.read_csv(filename)
# From the antecedents column, convert from frozenset to list of strings
rules['antecedents'] = rules['antecedents'].apply(lambda x: list(eval(x)))
print('Rule data loaded...')
print()
print('Starting rule extraction...')
print('\t -> Starting to sort rules by lift')
# Sort by metric
extracted_rules = rules.sort_values(metric, ascending=False)
print('\t -> Done sorting rules...')
print('_'*30)
return extracted_rules
def extract_rules(
recipe: List[str],
rules: pd.DataFrame,
rule_count = 3,
metric='lift'
) -> Set[FrozenSet[str]]:
"""
This function takes as input a recipe, then iterates over the rules row by row,
checks if the antecedents are in the recipe, if yes it adds the row to a list to be returned.
The function breaks after it has found the required number of rules.
Input:
- recipe: A list of tokens (i.e. a recipe preprocessed using gensim preprocess_string, make sure that the whole recipe is a single string before using preprocess_string)
- rules: A pd.DataFrame with columns: ['antecedents', 'consequents', 'confidence', 'lift'], should be sorted by the metric.
- rule_count: The number of rules to be extracted
Output:
- Two elements:
- A set of frozensets, each frozenset is a rule.
- A dictionary with the rules as keys and the tuple (consequents, lift) as values.
"""
# Initialize the list to be returned
rules_to_return = set()
suggestions_to_return = dict()
already_suggested = set()
# Iterate over the rules
    for _, row in rules.iterrows():
# Check if the antecedents are in the recipe
antecedents = set(row['antecedents'])
if antecedents.issubset(set(recipe)):
# Add the row to the list to be returned
# Make sure the consequents are NOT in the recipe
consequents = set(eval(row['consequents']))
if not consequents.issubset(set(recipe)) and frozenset(row['consequents']) not in already_suggested:
# We already have a suggestion with a higher lift
if frozenset(row['antecedents']) in suggestions_to_return:
continue
# Add the rule to the list
rules_to_return.add(frozenset(row['antecedents']))
# Add the suggestion to the dictionary
suggestions_to_return[frozenset(row['antecedents'])] = (row['consequents'], row[metric])
already_suggested.add(frozenset(row['consequents']))
# Break if we have found the required number of rules
if len(rules_to_return) == rule_count:
break
return rules_to_return, suggestions_to_return
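# Illustrative usage sketch (hypothetical toy data, not part of the original module):
#
#   toy_rules = pd.DataFrame({
#       'antecedents': [['butter', 'sugar']],
#       'consequents': ["frozenset({'vanilla'})"],
#       'confidence': [0.9],
#       'lift': [2.5],
#   })
#   fulfilled, suggestions = extract_rules(['butter', 'sugar', 'flour'], toy_rules)
#   # fulfilled   -> {frozenset({'butter', 'sugar'})}
#   # suggestions -> {frozenset({'butter', 'sugar'}): ("frozenset({'vanilla'})", 2.5)}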
def prompt_gpt(
prompt: str,
print_response: bool = True,
model="gpt-3.5-turbo",
) -> openai.openai_object.OpenAIObject:
"""
This function takes as input a prompt and returns the response from GPT-3.5.
Inputs:
- prompt: The prompt to be sent to GPT.
- print_response: Whether to print the response or not.
- model: The model to use for the response. Default is "gpt-3.5-turbo".
Output:
- The response from GPT type: GptResponse.
"""
response = openai.ChatCompletion.create(
model=model,
messages = [
{
"role": "system", "content": """
You are a recipe improvement assistant. The improvement will be done ONLY in the scope of rules.
You will be given a recipe and a set of rules that it has already fulfilled. Note that this will just be a subset of all the rules that the recipe fulfills.
The rules will be of following shape: frozenset({{'word1', 'word2', ...}}) -> This means that the words word1, word2, ... should be present somewhere in the recipe. Note that, these words aren't dependent on each other. Thus they don't have to appear in the same sentence, or in the same order that they are given. It just means they have to appear at least once somewhere in the recipe.
The user will also give you some new set of rules that it has not fulfilled yet.
You are responsible for rewriting the recipe. You have to make sure that the new recipe you write fulfills all the new rules, while keeping all the details from the original recipe intact.
Thus, you are to only add upon the original recipe, and avoid removing anything from it. You are to only add something if it directly helps you fulfill the new rules.
You'll write two parts, the first part is the Ingredients and Instructions. The second part is the explanation.
The first part will be wrapped between <RECIPE> and </RECIPE> tags. In this part include the ingredient portions in the list labelled Ingredients: and then the Instructions section as a numbered list
The second part will be wrapped between <EXPLANATION> and </EXPLANATION> tags. In this part, explain why you made the changes you made.
So the output format is:
<RECIPE>
Ingredients:
- Ingredient 1
- Ingredient 2
...
Instructions:
1. Step 1
2. Step 2
...
</RECIPE>
<EXPLANATION>
Your explanation here
</EXPLANATION>
"""
},
{
"role": "user", "content": prompt
}
],
temperature=0,
)
if print_response:
_print_response(response)
return response
def create_prompt(
directions: str | List[str],
fulfilled_rules: Set[FrozenSet[str]],
suggestions: Dict[FrozenSet[str], Tuple[str, float]]
) -> str:
"""
This function takes as input a recipe and the rules it fulfills, and creates a prompt to be sent to GPT.
Input:
- directions: The directions of the recipe, type: str or List[str]. If it is a list, it will be converted to a string as steps separated by enumeration. (i.e. 1. step1\n2. step2\n...)
- fulfilled_rules: The rules that the recipe fulfills, type: Set[FrozenSet[str]]
- suggestions: The suggestions to be fulfilled, type: Dict[FrozenSet[str], Tuple[str, float]]
Output:
- prompt: The prompt to be given to prompt_gpt function, type: str
"""
# list is a list of strings, we want to convert it to following string:
# 1. index0
# 2. index1
# ...
# if type of directions is list:
if type(directions) == list:
directions = '\n'.join([f'{i+1}. {x}' for i, x in enumerate(directions)])
advices = [x[0] for x in suggestions.values()]
return f"""
Recipe:
{directions}
Some of the fulfilled rules are:
{fulfilled_rules}
The new rules to be fulfilled are:
{advices}
"""
def _print_response(response: openai.openai_object.OpenAIObject|str) -> None:
"""
This function takes as input a response from GPT and prints it in a nice format.
Input:
- response: The response from GPT, type: GptResponse or str
"""
# if type is GptResponse
if type(response) == openai.openai_object.OpenAIObject:
# Grab the first choice
response_str = response.choices[0].message.content
elif type(response) == str:
response_str = response
else:
print(type(response))
raise TypeError(f'response should be of type openai.openai_object.OpenAIObject or str, but got {type(response)}')
new_recipe = response_str.split('<RECIPE>')[1].split('</RECIPE>')[0]
print('New recipe:')
print(new_recipe)
print()
print('________')
print('Explanation:')
explanation = response_str.split('<EXPLANATION>')[1].split('</EXPLANATION>')[0]
print(explanation)
print()
def complete_pipeline(
recipe_tokens: List[str],
recipe_directions: List[str] | str,
extracted_rules: pd.DataFrame,
prompt_function: callable = prompt_gpt,
rule_count: int = 3,
metric: str = 'lift',
model="gpt-3.5-turbo"
) -> PipelineOutput:
"""
This function represents the whole pipeline.
Inputs:
- recipe_tokens: A list of tokens (i.e. a recipe preprocessed using gensim preprocess_string, make sure that the whole recipe is a single string before using preprocess_string)
- recipe_directions: The directions of the recipe, type: str or List[str]. If it is a list, it will be converted to a string as steps separated by enumeration. (i.e. 1. step1\n2. step2\n...)
- extracted_rules: A pandas dataframe with columns ['antecedents', 'consequents', 'confidence', 'lift']. IMPORTANT: The DF should be sorted by the metric.
- prompt_function: The function to be used to send the prompt to GPT. The default is prompt_gpt.
Output:
- A PipelineOutput object with the following attributes:
- original_recipe: The original recipe, type: str
- new_recipe: The new recipe generated by GPT, type: str
- fulfilled_rules: The rules that the original recipe fulfilled, type: Set[FrozenSet[str]]
- rules: A dictionary with the rules as keys and the tuple (consequents, lift) as values.
"""
# Extract the rules and generate the prompt
fulfilled_rules, suggestions = extract_rules(recipe_tokens, extracted_rules, rule_count, metric)
prompt = create_prompt(recipe_directions, fulfilled_rules, suggestions)
# Send the prompt to GPT
resp = prompt_function(prompt=prompt, print_response=False, model=model)
return PipelineOutput(
original_recipe=recipe_directions,
new_recipe=resp.choices[0].message.content,
fulfilled_rules=fulfilled_rules,
rules=suggestions
)
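# Illustrative end-to-end sketch (hypothetical recipe; assumes a valid
# OPENAI_APIKEY and the default mined-rules CSV are available):
#
#   rules_df = load_rule_data('rules_recipe_scale.csv')
#   directions = ["Cream the butter and sugar.", "Fold in the flour."]
#   tokens = preprocess_string(" ".join(directions))
#   output = complete_pipeline(tokens, directions, rules_df)
#   _print_response(output.new_recipe)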
def get_fullfilled_percentage(response, suggestions: Dict[FrozenSet[str], Tuple[str, float]]):
"""
This function takes as input the response from GPT and the suggestions, and returns the percentage of suggestions that were fulfilled.
Input:
- response: The response from GPT, type: GptResponse
    - suggestions: The suggestions to be fulfilled, type: Dict[FrozenSet[str], Tuple[str, float]]
Output:
- A tuple of the form (num_fullfilled, num_not_fullfilled, percentage)
"""
generated_recipe = response.choices[0].message.content
recipe_start = generated_recipe.find('<RECIPE>') + len('<RECIPE>')
recipe_end = generated_recipe.find('</RECIPE>')
generated_recipe_text = generated_recipe[recipe_start:recipe_end].strip()
num_fullfilled = 0
num_not_fullfilled = 0
# Preprocessing new recipe
generated_preprocessed = preprocess_string(generated_recipe_text)
# Iterate over all the values of suggestions(which is a dictionary)
# Check if the set is a subset of the generated recipe
# If yes, increment num_fullfilled
# If no, increment num_not_fullfilled
# Return the tuple (num_fullfilled, num_not_fullfilled, percentage)
for key in suggestions:
if eval(suggestions[key][0]).issubset(generated_preprocessed):
num_fullfilled += 1
else:
num_not_fullfilled += 1
try:
percentage = num_fullfilled / (num_fullfilled + num_not_fullfilled)
except ZeroDivisionError:
percentage = 0
return (num_fullfilled, num_not_fullfilled, percentage) | [
"\n You are a recipe improvement assistant. The improvement will be done ONLY in the scope of rules.\n You will be given a recipe and a set of rules that it has already fulfilled. Note that this will just be a subset of all the rules that the recipe fulfills.\n The rules will be of following shape: frozenset({{'word1', 'word2', ...}}) -> This means that the words word1, word2, ... should be present somewhere in the recipe. Note that, these words aren't dependent on each other. Thus they don't have to appear in the same sentence, or in the same order that they are given. It just means they have to appear at least once somewhere in the recipe.\n The user will also give you some new set of rules that it has not fulfilled yet.\n \n You are responsible for rewriting the recipe. You have to make sure that the new recipe you write fulfills all the new rules, while keeping all the details from the original recipe intact.\n Thus, you are to only add upon the original recipe, and avoid removing anything from it. You are to only add something if it directly helps you fulfill the new rules.\n \n You'll write two parts, the first part is the Ingredients and Instructions. The second part is the explanation.\n The first part will be wrapped between <RECIPE> and </RECIPE> tags. In this part include the ingredient portions in the list labelled Ingredients: and then the Instructions section as a numbered list\n \n The second part will be wrapped between <EXPLANATION> and </EXPLANATION> tags. In this part, explain why you made the changes you made.\n \n So the output format is:\n <RECIPE>\n Ingredients:\n - Ingredient 1\n - Ingredient 2\n ...\n Instructions:\n 1. Step 1\n 2. Step 2\n ...\n </RECIPE>\n <EXPLANATION>\n Your explanation here\n </EXPLANATION>\n "
] |
2024-01-10 | doojin88/studio | function~python~brightics~function~textanalytics~dtm.py | """
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, pandasDF2MD, dict2MD
from brightics.function.utils import _model_dict
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate, greater_than_or_equal_to, raise_runtime_error
from brightics.common.exception import BrighticsFunctionException
import numpy as np
import pandas as pd
import pyLDAvis as plv
from gensim.models.wrappers import DtmModel
from gensim.models.coherencemodel import CoherenceModel
from gensim import corpora
import pathlib
import platform
import os
def dtm(table, group_by=None, **params):
check_required_parameters(_dtm, params, ['table'])
params = get_default_from_parameters_if_required(params, _dtm)
param_validation_check = [greater_than_or_equal_to(params, 2, 'num_topic'),
greater_than_or_equal_to(params, 2, 'num_topic_word'),
greater_than_or_equal_to(params, 1, 'max_iter')]
validate(*param_validation_check)
if group_by is not None:
return _function_by_group(_dtm, table, group_by=group_by, **params)
else:
return _dtm(table, **params)
def _dtm(table, input_col, topic_name='topic', num_topic=5, num_topic_word=10, max_iter=20, time_slice=None,
coherence='u_mass', vis_time=0, seed=None):
running_os = platform.system()
is_os_64bit = platform.machine().endswith('64')
if running_os == 'Linux':
if is_os_64bit:
dtm_filename = 'dtm-linux64'
else:
dtm_filename = 'dtm-linux32'
elif running_os == 'Windows':
if is_os_64bit:
dtm_filename = 'dtm-win64.exe'
else:
dtm_filename = 'dtm-win32.exe'
else: # Mac
dtm_filename = 'dtm-darwin64'
dtm_path = os.path.join(str(pathlib.Path(__file__).parent.absolute()), 'dtm', dtm_filename)
if running_os != 'Windows':
bash_command = "chmod +x {}".format(dtm_path)
os.system(bash_command)
tokenized_doc = np.array(table[input_col])
num_doc = len(tokenized_doc)
if time_slice is None:
time_slice = [num_doc]
elif sum(time_slice) != num_doc:
raise_runtime_error("The sum of time slice list does not match the number of documents.")
if vis_time < 0 or vis_time >= len(time_slice):
raise_runtime_error("Invalid time parameter: {}".format(vis_time))
dictionary = corpora.Dictionary(tokenized_doc)
corpus = [dictionary.doc2bow(text) for text in tokenized_doc]
dtm_params = {"corpus": corpus,
"id2word": dictionary,
"time_slices": time_slice,
"num_topics": num_topic,
"lda_sequence_max_iter": max_iter,
"model": 'dtm'}
if seed is not None:
dtm_params["rng_seed"] = seed
dtm_model = DtmModel(dtm_path, **dtm_params)
topic_time = [[dtm_model.show_topic(topicid=id, time=t, topn=num_topic_word) for id in range(num_topic)]
for t in range(len(time_slice))]
topic_time = [[["{}: {}".format(tup[1], tup[0]) for tup in topic] for topic in time] for time in topic_time]
timeline = ["{} ({} docs)".format(ind, t) for ind, t in enumerate(time_slice)]
columns = ["topic_{}".format(i + 1) for i in range(num_topic)]
topic_table = pd.DataFrame(topic_time, columns=columns)
topic_table['time'] = timeline
topic_table = topic_table[['time'] + columns]
prop_arr = dtm_model.gamma_
out_table = pd.DataFrame.copy(table, deep=True)
if topic_name in table.columns:
raise BrighticsFunctionException.from_errors(
[{'0100': "Existing table contains Topic Column Name. Please choose again."}])
out_table[topic_name] = [item.argmax() + 1 for item in prop_arr]
out_table['topic_distribution'] = prop_arr.tolist()
coherence_topic_arr = [dtm_model.dtm_coherence(time) for time in range(len(time_slice))]
if coherence == 'u_mass':
coh_arr = [CoherenceModel(topics=item, dictionary=dictionary, corpus=corpus, coherence='u_mass').get_coherence()
for item in coherence_topic_arr]
else:
coh_arr = [CoherenceModel(topics=item, dictionary=dictionary, corpus=corpus, texts=tokenized_doc,
coherence='c_v').get_coherence() for item in coherence_topic_arr]
doc_topic, topic_term, doc_lengths, term_frequency, vocab = dtm_model.dtm_vis(corpus, vis_time)
prepared_data = plv.prepare(topic_term, doc_topic, doc_lengths, vocab, term_frequency, sort_topics=False)
html_result = plv.prepared_data_to_html(prepared_data)
params = {'Input column': input_col,
'Topic column name': topic_name,
'Number of topics': num_topic,
'Number of words for each topic': num_topic_word,
'Maximum number of iterations': max_iter,
'Time slice': time_slice,
'Coherence measure': coherence,
'Time to visualize': vis_time}
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Dynamic Topic Modeling Result
| ### Summary
|
"""))
rb.addHTML(html_result)
rb.addMD(strip_margin("""
| ### Coherence for each period
| {coh_arr}
|
| ### Parameters
| {params}
""".format(coh_arr=coh_arr, params=dict2MD(params))))
model = _model_dict('dtm_model')
model['params'] = params
model['dtm_model'] = dtm_model
model['coherences'] = coh_arr
model['corpus'] = corpus
model['_repr_brtc_'] = rb.get()
return {'out_table': out_table, 'topic_table': topic_table, 'model': model}
def dim(table, group_by=None, **params):
check_required_parameters(_dim, params, ['table'])
params = get_default_from_parameters_if_required(params, _dim)
param_validation_check = [greater_than_or_equal_to(params, 2, 'num_topic'),
greater_than_or_equal_to(params, 2, 'num_topic_word'),
greater_than_or_equal_to(params, 1, 'max_iter')]
validate(*param_validation_check)
if group_by is not None:
return _function_by_group(_dim, table, group_by=group_by, **params)
else:
return _dim(table, **params)
def _dim(table, input_col, topic_name='topic', num_topic=5, num_topic_word=10, max_iter=20, time_slice=None,
coherence='u_mass', vis_time=0, seed=None):
running_os = platform.system()
is_os_64bit = platform.machine().endswith('64')
if running_os == 'Linux':
if is_os_64bit:
dtm_filename = 'dtm-linux64'
else:
dtm_filename = 'dtm-linux32'
elif running_os == 'Windows':
if is_os_64bit:
dtm_filename = 'dtm-win64.exe'
else:
dtm_filename = 'dtm-win32.exe'
else: # Mac
dtm_filename = 'dtm-darwin64'
    dtm_path = os.path.join(str(pathlib.Path(__file__).parent.absolute()), 'dtm', dtm_filename)
    # Mirror _dtm: ensure the bundled binary is executable on non-Windows systems
    if running_os != 'Windows':
        bash_command = "chmod +x {}".format(dtm_path)
        os.system(bash_command)
tokenized_doc = np.array(table[input_col])
num_doc = len(tokenized_doc)
if time_slice is None:
time_slice = [num_doc]
elif sum(time_slice) != num_doc:
raise_runtime_error("The sum of time slice list does not match the number of documents.")
if vis_time < 0 or vis_time >= len(time_slice):
raise_runtime_error("Invalid time parameter: {}".format(vis_time))
dictionary = corpora.Dictionary(tokenized_doc)
corpus = [dictionary.doc2bow(text) for text in tokenized_doc]
dim_params = {"corpus": corpus,
"id2word": dictionary,
"time_slices": time_slice,
"num_topics": num_topic,
"lda_sequence_max_iter": max_iter,
"model": 'fixed'}
if seed is not None:
dim_params["rng_seed"] = seed
dtm_model = DtmModel(dtm_path, **dim_params)
topic_time = [[dtm_model.show_topic(topicid=topic_id, time=t, topn=num_topic_word) for topic_id in range(num_topic)]
for t in range(len(time_slice))]
topic_time = [[["{}: {}".format(tup[1], tup[0]) for tup in topic] for topic in time] for time in topic_time]
timeline = ["{} ({} docs)".format(ind, t) for ind, t in enumerate(time_slice)]
columns = ["topic_{}".format(i + 1) for i in range(num_topic)]
topic_table = pd.DataFrame(topic_time, columns=columns)
topic_table['time'] = timeline
topic_table = topic_table[['time'] + columns]
prop_arr = dtm_model.gamma_
out_table = pd.DataFrame.copy(table, deep=True)
if topic_name in table.columns:
raise BrighticsFunctionException.from_errors(
[{'0100': "Existing table contains Topic Column Name. Please choose again."}])
out_table[topic_name] = [item.argmax() + 1 for item in prop_arr]
out_table['topic_distribution'] = prop_arr.tolist()
# original influence table: influences_time[time_slice][document_no][topic_no]
influence_arr = np.vstack(dtm_model.influences_time)
influence_table = pd.DataFrame(influence_arr, columns=columns)
time_id = np.concatenate([id * np.ones(duration) for id, duration in enumerate(time_slice)])
influence_table['time'] = time_id
influence_table = influence_table[['time'] + columns]
coherence_topic_arr = [dtm_model.dtm_coherence(time) for time in range(len(time_slice))]
if coherence == 'u_mass':
coh_arr = [CoherenceModel(topics=item, dictionary=dictionary, corpus=corpus, coherence='u_mass').get_coherence()
for item in coherence_topic_arr]
else:
coh_arr = [CoherenceModel(topics=item, dictionary=dictionary, corpus=corpus, texts=tokenized_doc,
coherence='c_v').get_coherence() for item in coherence_topic_arr]
doc_topic, topic_term, doc_lengths, term_frequency, vocab = dtm_model.dtm_vis(corpus, vis_time)
prepared_data = plv.prepare(topic_term, doc_topic, doc_lengths, vocab, term_frequency, sort_topics=False)
html_result = plv.prepared_data_to_html(prepared_data)
params = {'Input column': input_col,
'Topic column name': topic_name,
'Number of topics': num_topic,
'Number of words for each topic': num_topic_word,
'Maximum number of iterations': max_iter,
'Time slice': time_slice,
'Coherence measure': coherence,
'Time to visualize': vis_time}
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Document Influence Model Result
| ### Summary
|
"""))
rb.addHTML(html_result)
rb.addMD(strip_margin("""
| ### Coherence for each period
| {coh_arr}
|
| ### Parameters
| {params}
""".format(coh_arr=coh_arr, params=dict2MD(params))))
model = _model_dict('dtm_model')
model['params'] = params
model['dtm_model'] = dtm_model
model['coherences'] = coh_arr
model['corpus'] = corpus
model['_repr_brtc_'] = rb.get()
return {'out_table': out_table, 'topic_table': topic_table, 'influence_table': influence_table, 'model': model}
| [] |
2024-01-10 | sujantkumarkv/legalpilot | core~outprompt.py | import os
import openai
## GPT 3.5 & 4 models
# text_improvement_system_prompt = "you're an assistant with expertise in understanding, differentiating on legal text & content. \
# you'll be given a legal draft text which has some irregularities like maybe a word is incomplete & remaining part is in \
# next line like so: 'informati \n on', fix spellings or similar. Fix that, remove unnecessary new lines to make it compact,\
# considering to reduce the number of tokens & strictly only output the same legal text back but after improving it as asked."
# with open('../data/legal_drafts_text/GST/Challan/PMT-2.txt') as infile:
# trial_prompt = infile.read()
# print(trial_prompt)
# completion1 = openai.ChatCompletion.create(
# model="gpt-4",
# messages= [
# {"role": "system", "content": text_improvement_system_prompt},
# {"role": "user", "content": trial_prompt}
# ]
# )
# improved_text = completion1.choices[0].message['content']
# print(improved_text)
# system_prompt = "you are a world class machine learning data engineer with expertise in understanding of laws, legal documents and legal language. \
# You can understand types of cases, entities in legal drafts & more. You will get a legal draft text as input & \
# you would only reply with an 'outprompt'. note: only reply with the outprompt & nothing except it like 'certainly', 'sure, here is your outline' or anything similar. \
# Here's a rough outline of outprompt:\n \
# type: draft/document (etc based on the draft text)\n \
# category: civil/criminal/business (etc based on the draft text)\n \
# subcategory: company registration under ...(etc based on the draft text)\n \
# jurisdiction: applicable state or federal law (etc based on the draft text)\n \
# parties involved: description of the main parties\n \
# context: a short summary or detailed description of the purpose of the draft/document"
# completion = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages= [
# {"role": "system", "content": system_prompt},
# {"role": "user", "content": improved_text}
# ]
# )
# print(completion.choices[0].message['content'])
# -------------------------------------------------------------------------------------------------------------------------
## Legal-BERT
from transformers import pipeline, AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("nlpaueb/legal-bert-small-uncased")
model = AutoModel.from_pretrained("nlpaueb/legal-bert-small-uncased")
# inputs = tokenizer("Hello world!", return_tensors="pt")
# outputs = model(**inputs)
generator = pipeline("fill-mask", model="nlpaueb/legal-bert-small-uncased", tokenizer="nlpaueb/legal-bert-small-uncased")
print(generator(f"Legal proceedings in court involve {tokenizer.mask_token}"))
| [] |
2024-01-10 | diannaojiang/wenda | plugins~zhishiku_rtst.py |
from langchain.embeddings import HuggingFaceEmbeddings
import sentence_transformers
import numpy as np
import re,os
from plugins.common import settings,allowCROS
from plugins.common import error_helper
from plugins.common import success_print
if settings.librarys.rtst.backend=="Annoy":
from langchain.vectorstores.annoy import Annoy as Vectorstore
else:
from langchain.vectorstores.faiss import FAISS as Vectorstore
divider='\n'
if not os.path.exists('memory'):
os.mkdir('memory')
current_setting = settings.librarys.rtst
def get_doc_by_id(id,memory_name):
return vectorstores[memory_name].docstore.search(vectorstores[memory_name].index_to_docstore_id[id])
def process_strings(A, C, B):
# find the longest common suffix of A and prefix of B
common = ""
for i in range(1, min(len(A), len(B)) + 1):
if A[-i:] == B[:i]:
common = A[-i:]
# if there is a common substring, replace one of them with C and concatenate
if common:
return A[:-len(common)] + C + B
# otherwise, just return A + B
else:
return A + B
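# Illustrative example (not part of the original source):
#   process_strings("abcde", "|", "cdefg") -> "ab|cdefg"
# because "cde" is both a suffix of A and a prefix of B, so it is replaced by C.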
def get_title_by_doc(doc):
return re.sub('【.+】', '', doc.metadata['source'])
def get_doc(id,score,step,memory_name):
doc = get_doc_by_id(id,memory_name)
final_content=doc.page_content
# print("文段分数:",score,[doc.page_content])
if step > 0:
for i in range(1, step+1):
try:
doc_before=get_doc_by_id(id-i,memory_name)
if get_title_by_doc(doc_before)==get_title_by_doc(doc):
final_content=process_strings(doc_before.page_content,divider,final_content)
# print("上文分数:",score,doc.page_content)
except:
pass
try:
doc_after=get_doc_by_id(id+i,memory_name)
if get_title_by_doc(doc_after)==get_title_by_doc(doc):
final_content=process_strings(final_content,divider,doc_after.page_content)
except:
pass
if doc.metadata['source'].endswith(".pdf") or doc.metadata['source'].endswith(".txt"):
title=f"[{doc.metadata['source']}](/txt/{doc.metadata['source']})"
else:
title=doc.metadata['source']
return {'title': title,'content':re.sub(r'\n+', "\n", final_content),"score":int(score)}
def find(s,step = 0,memory_name="default"):
try:
embedding = get_vectorstore(memory_name).embedding_function(s)
        scores, indices = vectorstores[memory_name].index.search(np.array([embedding], dtype=np.float32), int(current_setting.count))
docs = []
for j, i in enumerate(indices[0]):
if i == -1:
continue
if scores[0][j]>260:continue
docs.append(get_doc(i,scores[0][j],step,memory_name))
return docs
except Exception as e:
print(e)
return []
model_path = current_setting.model_path
try:
if model_path.startswith("http"):#"http://127.0.0.1:3000/"
from langchain.embeddings import OpenAIEmbeddings
import os
os.environ["OPENAI_API_TYPE"] = "open_ai"
os.environ["OPENAI_API_BASE"] = model_path
os.environ["OPENAI_API_KEY"] = "your OpenAI key"
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings(
deployment="text-embedding-ada-002",
model="text-embedding-ada-002"
)
else:
from langchain.embeddings import HuggingFaceEmbeddings
embeddings = HuggingFaceEmbeddings(model_name='')
embeddings.client = sentence_transformers.SentenceTransformer(
model_path, device="cuda")
except Exception as e:
error_helper("embedding加载失败",
r"https://github.com/l15y/wenda")
raise e
vectorstores={}
def get_vectorstore(memory_name):
try:
return vectorstores[memory_name]
except Exception as e:
try:
vectorstores[memory_name] = Vectorstore.load_local(
'memory/'+memory_name, embeddings=embeddings)
return vectorstores[memory_name]
except Exception as e:
success_print("没有读取到RTST记忆区%s,将新建。"%memory_name)
return None
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from bottle import route, response, request, static_file, hook
import bottle
@route('/upload_rtst_zhishiku', method=("POST","OPTIONS"))
def upload_zhishiku():
allowCROS()
try:
data = request.json
title=data.get("title")
memory_name=data.get("memory_name")
data = data.get("txt")
# data = re.sub(r'!', "!\n", data)
# data = re.sub(r':', ":\n", data)
# data = re.sub(r'。', "。\n", data)
data = re.sub(r"\n\s*\n", "\n", data)
data = re.sub(r'\r', "\n", data)
data = re.sub(r'\n\n', "\n", data)
docs=[Document(page_content=data, metadata={"source":title })]
print(docs)
if hasattr(settings.librarys.rtst,"size") and hasattr(settings.librarys.rtst,"overlap"):
text_splitter = CharacterTextSplitter(
chunk_size=int(settings.librarys.rtst.size), chunk_overlap=int(settings.librarys.rtst.overlap), separator='\n')
else:
text_splitter = CharacterTextSplitter(
chunk_size=20, chunk_overlap=0, separator='\n')
doc_texts = text_splitter.split_documents(docs)
texts = [d.page_content for d in doc_texts]
metadatas = [d.metadata for d in doc_texts]
vectorstore_new = Vectorstore.from_texts(texts, embeddings, metadatas=metadatas)
vectorstore=get_vectorstore(memory_name)
if vectorstore is None:
vectorstores[memory_name]=vectorstore_new
else:
vectorstores[memory_name].merge_from(vectorstore_new)
return '成功'
except Exception as e:
return str(e)
@route('/save_rtst_zhishiku', method=("POST","OPTIONS"))
def save_zhishiku():
allowCROS()
try:
data = request.json
memory_name=data.get("memory_name")
vectorstores[memory_name].save_local('memory/'+memory_name)
return "保存成功"
except Exception as e:
return str(e)
import json
@route('/find_rtst_in_memory', method=("POST","OPTIONS"))
def api_find():
allowCROS()
try:
data = request.json
prompt = data.get('prompt')
step = data.get('step')
memory_name=data.get("memory_name")
if step is None:
step = int(settings.library.step)
return json.dumps(find(prompt,int(step),memory_name))
except Exception as e:
return str(e)
@route('/list_rtst_in_disk', method=("POST","OPTIONS"))
def api_list_rtst_in_disk():
allowCROS()
return json.dumps(os.listdir('memory'))
@route('/del_rtst_in_memory', method=("POST","OPTIONS"))
def api_del_rtst_in_memory():
allowCROS()
try:
data = request.json
memory_name=data.get("memory_name")
del vectorstores[memory_name]
except Exception as e:
return str(e)
@route('/save_news', method=("POST","OPTIONS"))
def save_news():
allowCROS()
try:
data = request.json
if not data:
return 'no data'
title = data.get('title')
txt = data.get('txt')
cut_file = f"txt/{title}.txt"
with open(cut_file, 'w', encoding='utf-8') as f:
f.write(txt)
f.close()
return 'success'
except Exception as e:
return(e)
| [
"\n",
"\\n+"
] |
2024-01-10 | Anonymousanoy/FOHE | gen_augdata.py | import json
import os.path
import time
from PIL import Image
import os
import openai
openai.api_key = ""
mp_imgid={}
list_res=os.listdir('cc_sbu_align/rewrite_caption')
'''with open('/annotations/captions_val2014.json','r',encoding='utf-8') as f:
data=json.load(f)
for img_data in data['images']:
mp_imgid[img_data['id']]=img_data['file_name']
#print(mp_imgid)
for anno_data in data['annotations']:
caption=anno_data['caption']
id=anno_data['image_id']
gen_text_name=mp_imgid[id].split('.')[0]+'.txt'
if gen_text_name in list_res:
continue
in_context_example = "content:The image shows a man fishing on a lawn next to a river with a bridge in the background. Trees can be seen on the other side of the river, and the sky is cloudy.\nQ:Extract verbs, nouns,and adjectives from sentences.\nA:\n<man>\n<fishing>\n<lawn>\n<river>\n<bridge>\n<sky>\n<cloudy>\n\ncontent:This image shows a kitchen with stainless steel appliances, including a refrigerator, oven, and dishwasher. The countertops are made of black granite, and there is a white backsplash behind the stove. The floor is made of beige tiles, and the walls are painted white. There is a door that leads to the outside.\nQ:Extract verbs, nouns,and adjectives from sentences.\nA:\n<kitchen>\n<stainless steel>\n<appliances>\n<refrigerator>\n<oven>\n<dishwasher>\n<countertops>\n<black>\n<granite>\n<white>\n<backsplash>\n<stove>\n<floor>\n<beige>\n<tiles>\n<walls>\n<painted>\n<door>\n<outside>\n\ncontent:"
que_str = caption + '\n' + 'Q:Extract verbs, nouns,and adjectives from sentences.\nA:'
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": in_context_example + que_str},
]
)
#print(id)
with open('/annotations/aug_text/{}'.format(gen_text_name), 'w', encoding='utf-8') as ft:
ft.write(response['choices'][0]["message"]["content"])
ft.close()
time.sleep(2)
'''
#############################################
'''savepath='/cc_sbu_align/rewrite_caption/{}'
dir=os.listdir('/cc_sbu_align/rewrite_caption')
with open('/cc_sbu_align/filter_cap.json','r',encoding='utf-8') as f:
data=json.load(f)
for i,da in enumerate(data['annotations']):
image=da['image_id']
if image in dir:
continue
caption=da['caption']
image_path=os.path.join('/cc_sbu_align/image',image+'.jpg')
que_str=caption+'rewrite this image caption'
response =openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": que_str},
],
n = 5,
)
res = ''
with open(savepath.format(image), 'w', encoding='utf-8') as fw:
for j in range(len(response['choices'])):
res_text = response['choices'][j]["message"]["content"]
res += res_text + '\n'
fw.write(res)
time.sleep(2)'''
#########################################################
'''root='/cc_sbu_align/aug_text'
tarroot='/cc_sbu_align/aug_caption'
savepath='/cc_sbu_align/aug_caption/{}'
prompt = "content:\n<man>\n<fishing>\n<lawn>\n<river>\n<bridge>\n<sky>\n<cloudy>\nQ:The task of generation is as follows, and the generated sentences are required to contain verbs, nouns and adjectives in the given content.\nA:The image shows a man fishing on a lawn next to a river with a bridge in the background. Trees can be seen on the other side of the river, and the sky is cloudy.\n\ncontent:\n"
question="\nQ:The task of generation is as follows, and the generated sentences are required to contain verbs, nouns and adjectives in the given content.\nA:"
datadir=os.listdir(root)
tardir=os.listdir(tarroot)
for d in datadir:
if d in tardir:
continue
data=os.path.join(root,d)
print(data)
with open(data,'r',encoding='utf-8') as f:
cur_data=f.read()
res_prompt=prompt+cur_data+question
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": res_prompt},
],
n=5,
)
res=''
with open(savepath.format(d), 'w', encoding='utf-8') as fw:
for j in range(len(response['choices'])):
res_text=response['choices'][j]["message"]["content"]
res+=res_text+'\n'
fw.write(res)
time.sleep(2)'''
'''aug_root='/cc_sbu_align/aug_caption'
aug_captions=os.listdir(aug_root)
annotations=[]
res={}
for aug_ in aug_captions:
annotation={}
image_id=aug_.split('.')[0]
aug_data=os.path.join(aug_root,aug_)
with open(aug_data,'r',encoding='utf-8') as fa:
aug_caption=fa.read().strip().split('\n')
for aug_c in aug_caption:
annotation['image_id']=image_id
annotation['caption']=aug_c
annotations.append(annotation)
res["annotations"]=annotations
with open('./filter_cap.json','w',encoding='utf-8') as fc:
json.dump(res,fc)'''
| [] |
2024-01-10 | iababio/LLamaGPT_Private | app~llamaGPT.py | #!/usr/bin/env python3
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All, LlamaCpp
from constants import CHROMA_SETTINGS
import os
import argparse
import time
load_dotenv()
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME")
persist_directory = os.environ.get('PERSIST_DIRECTORY')
model_type = os.environ.get('MODEL_TYPE')
model_path = os.environ.get('MODEL_PATH')
model_n_ctx = os.environ.get('MODEL_N_CTX')
model_n_batch = int(os.environ.get('MODEL_N_BATCH', 8))
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS', 4))
def main():
# Parse the command line arguments
args = parse_arguments()
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
# activate/deactivate the streaming StdOut callback for LLMs
callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]
# Prepare the LLM
match model_type:
case "LlamaCpp":
llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, n_batch=model_n_batch, callbacks=callbacks,
verbose=False)
case "GPT4All":
llm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', n_batch=model_n_batch,
callbacks=callbacks, verbose=False)
        case _:
            print(f"Model {model_type} not supported!")
            exit()
    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever,
                                     return_source_documents=not args.hide_source)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
if query.strip() == "":
continue
# Get the answer from the chain
start = time.time()
res = qa(query)
        answer, docs = res['result'], [] if args.hide_source else res['source_documents']
end = time.time()
# Print the result
print("\n\n> Question:")
print(query)
print(f"\n> Answer (took {round(end - start, 2)} s.):")
print(answer)
# Print the relevant sources used for the answer
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
def parse_arguments():
parser = argparse.ArgumentParser(
description='privateGPT: Ask questions to your documents without an internet connection, '
'using the power of LLMs.')
parser.add_argument("--hide-source", "-S", action='store_true',
help='Use this flag to disable printing of source documents used for answers.')
parser.add_argument("--mute-stream", "-M",
action='store_true',
help='Use this flag to disable the streaming StdOut callback for LLMs.')
return parser.parse_args()
if __name__ == "__main__":
main()
| [] |
2024-01-10 | truesoni/mindsdb | mindsdb~integrations~handlers~file_handler~file_handler.py | import codecs
import csv
import json
import os
import tempfile
import traceback
from io import BytesIO, StringIO
from pathlib import Path
from urllib.parse import urlparse
import magic
import pandas as pd
import requests
from charset_normalizer import from_bytes
from mindsdb_sql import parse_sql
from mindsdb_sql.parser.ast import DropTables, Select
from mindsdb_sql.parser.ast.base import ASTNode
from mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df
from mindsdb.integrations.libs.base import DatabaseHandler
from mindsdb.integrations.libs.response import RESPONSE_TYPE
from mindsdb.integrations.libs.response import HandlerResponse as Response
from mindsdb.integrations.libs.response import HandlerStatusResponse as StatusResponse
DEFAULT_CHUNK_SIZE = 200
DEFAULT_CHUNK_OVERLAP = 50
def clean_cell(val):
if str(val) in ["", " ", " ", "NaN", "nan", "NA"]:
return None
return val
class FileHandler(DatabaseHandler):
"""
Handler for files
"""
name = "files"
def __init__(
self,
name=None,
file_storage=None,
connection_data={},
file_controller=None,
**kwargs,
):
super().__init__(name)
self.parser = parse_sql
self.fs_store = file_storage
self.custom_parser = connection_data.get("custom_parser", None)
self.clean_rows = connection_data.get("clean_rows", True)
self.chunk_size = connection_data.get("chunk_size", DEFAULT_CHUNK_SIZE)
self.chunk_overlap = connection_data.get("chunk_overlap", DEFAULT_CHUNK_OVERLAP)
self.file_controller = file_controller
def connect(self, **kwargs):
return
def disconnect(self, **kwargs):
return
def check_connection(self) -> StatusResponse:
return StatusResponse(True)
def query(self, query: ASTNode) -> Response:
if type(query) == DropTables:
for table_identifier in query.tables:
if (
len(table_identifier.parts) == 2
and table_identifier.parts[0] != self.name
):
return Response(
RESPONSE_TYPE.ERROR,
error_message=f"Can't delete table from database '{table_identifier.parts[0]}'",
)
table_name = table_identifier.parts[-1]
try:
self.file_controller.delete_file(table_name)
except Exception as e:
return Response(
RESPONSE_TYPE.ERROR,
error_message=f"Can't delete table '{table_name}': {e}",
)
return Response(RESPONSE_TYPE.OK)
elif type(query) == Select:
table_name = query.from_table.parts[-1]
file_path = self.file_controller.get_file_path(table_name)
df, _columns = self._handle_source(
file_path,
self.clean_rows,
self.custom_parser,
self.chunk_size,
self.chunk_overlap,
)
result_df = query_df(df, query)
return Response(RESPONSE_TYPE.TABLE, data_frame=result_df)
else:
return Response(
RESPONSE_TYPE.ERROR,
error_message="Only 'select' and 'drop' queries allowed for files",
)
def native_query(self, query: str) -> Response:
ast = self.parser(query, dialect="mindsdb")
return self.query(ast)
@staticmethod
def _handle_source(
file_path,
clean_rows=True,
custom_parser=None,
chunk_size=DEFAULT_CHUNK_SIZE,
chunk_overlap=DEFAULT_CHUNK_OVERLAP,
):
"""
This function takes a file path and returns a pandas dataframe
"""
# get file data io, format and dialect
data, fmt, dialect = FileHandler._get_data_io(file_path)
data.seek(0) # make sure we are at 0 in file pointer
if custom_parser:
header, file_data = custom_parser(data, fmt)
df = pd.DataFrame(file_data, columns=header)
elif fmt == "parquet":
df = pd.read_parquet(data)
elif fmt == "csv":
df = pd.read_csv(data, sep=dialect.delimiter, index_col=False)
elif fmt in ["xlsx", "xls"]:
data.seek(0)
df = pd.read_excel(data)
elif fmt == "json":
data.seek(0)
json_doc = json.loads(data.read())
df = pd.json_normalize(json_doc, max_level=0)
elif fmt == "txt" or fmt == "pdf":
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
if fmt == "txt":
from langchain.document_loaders import TextLoader
loader = TextLoader(file_path, encoding="utf8")
docs = text_splitter.split_documents(loader.load())
df = pd.DataFrame([{"text": doc.page_content} for doc in docs])
elif fmt == "pdf":
from langchain.document_loaders import UnstructuredPDFLoader
loader = UnstructuredPDFLoader(file_path)
docs = text_splitter.split_documents(loader.load())
df = pd.DataFrame([{"text": doc.page_content} for doc in docs])
else:
            raise ValueError(
                "Could not load file into any format; supported formats are csv, json, xls, xlsx, parquet, txt, and pdf"
            )
header = df.columns.values.tolist()
df = df.rename(columns={key: key.strip() for key in header})
df = df.applymap(clean_cell)
header = [x.strip() for x in header]
col_map = dict((col, col) for col in header)
return df, col_map
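    # Illustrative note (not part of the original source): for a CSV with headers
    # "col_a,col_b" this returns (df, {'col_a': 'col_a', 'col_b': 'col_b'});
    # the second element maps each stripped header name to itself.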
@staticmethod
def is_it_parquet(data: BytesIO) -> bool:
# Check first and last 4 bytes equal to PAR1.
# Refer: https://parquet.apache.org/docs/file-format/
parquet_sig = b"PAR1"
data.seek(0, 0)
start_meta = data.read(4)
data.seek(-4, 2)
end_meta = data.read()
data.seek(0)
if start_meta == parquet_sig and end_meta == parquet_sig:
return True
return False
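    # Illustrative example (not part of the original source):
    #   FileHandler.is_it_parquet(BytesIO(b"PAR1" + b"payload" + b"PAR1")) -> True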
@staticmethod
def is_it_xlsx(file_path: str) -> bool:
file_type = magic.from_file(file_path, mime=True)
if file_type in [
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"application/vnd.ms-excel",
]:
return True
return False
@staticmethod
def is_it_json(data_str: StringIO) -> bool:
# see if its JSON
text = data_str.read(100).strip()
data_str.seek(0)
if len(text) > 0:
# it it looks like a json, then try to parse it
if text.startswith("{") or text.startswith("["):
try:
json.loads(data_str.read())
return True
except Exception:
return False
finally:
data_str.seek(0)
return False
@staticmethod
def is_it_csv(data_str: StringIO) -> bool:
sample = data_str.readline() # trying to get dialect from header
data_str.seek(0)
try:
csv.Sniffer().sniff(sample)
return True
except Exception:
return False
@staticmethod
def _get_data_io(file_path):
"""
@TODO: Use python-magic to simplify the function and detect the file types as the xlsx example
This gets a file either url or local file and defines what the format is as well as dialect
:param file: file path or url
:return: data_io, format, dialect
"""
data = BytesIO()
data_str = None
dialect = None
try:
with open(file_path, "rb") as fp:
data = BytesIO(fp.read())
except Exception as e:
error = "Could not load file, possible exception : {exception}".format(
exception=e
)
print(error)
raise ValueError(error)
suffix = Path(file_path).suffix.strip(".").lower()
if suffix not in ("csv", "json", "xlsx", "parquet"):
if FileHandler.is_it_parquet(data):
suffix = "parquet"
elif FileHandler.is_it_xlsx(file_path):
suffix = "xlsx"
if suffix == "parquet":
return data, "parquet", dialect
if suffix == "xlsx":
return data, "xlsx", dialect
if suffix == "txt":
return data, "txt", dialect
if suffix == "pdf":
return data, "pdf", dialect
byte_str = data.read()
# Move it to StringIO
try:
# Handle Microsoft's BOM "special" UTF-8 encoding
if byte_str.startswith(codecs.BOM_UTF8):
data_str = StringIO(byte_str.decode("utf-8-sig"))
else:
file_encoding_meta = from_bytes(
byte_str[: 32 * 1024],
                    steps=32,  # number of steps/blocks to extract from byte_str
                    chunk_size=1024,  # block size of each extraction
explain=False,
)
best_meta = file_encoding_meta.best()
errors = "strict"
if best_meta is not None:
encoding = file_encoding_meta.best().encoding
try:
data_str = StringIO(byte_str.decode(encoding, errors))
except UnicodeDecodeError:
encoding = "utf-8"
errors = "replace"
data_str = StringIO(byte_str.decode(encoding, errors))
else:
encoding = "utf-8"
errors = "replace"
data_str = StringIO(byte_str.decode(encoding, errors))
except Exception:
print(traceback.format_exc())
print("Could not load into string")
if suffix not in ("csv", "json"):
if FileHandler.is_it_json(data_str):
suffix = "json"
elif FileHandler.is_it_csv(data_str):
suffix = "csv"
if suffix == "json":
return data_str, suffix, dialect
if suffix == "csv":
try:
dialect = FileHandler._get_csv_dialect(data_str)
if dialect:
return data_str, "csv", dialect
except Exception:
print("Could not detect format for this file")
print(traceback.format_exc())
data_str.seek(0)
data.seek(0)
# No file type identified
return data, None, dialect
@staticmethod
def _get_file_path(path) -> str:
try:
is_url = urlparse(path).scheme in ("http", "https")
except Exception:
is_url = False
if is_url:
path = FileHandler._fetch_url(path)
return path
@staticmethod
def _get_csv_dialect(buffer) -> csv.Dialect:
sample = buffer.readline() # trying to get dialect from header
buffer.seek(0)
try:
if isinstance(sample, bytes):
sample = sample.decode()
accepted_csv_delimiters = [",", "\t", ";"]
try:
dialect = csv.Sniffer().sniff(
sample, delimiters=accepted_csv_delimiters
)
dialect.doublequote = (
True # assume that all csvs have " as string escape
)
except Exception:
dialect = csv.reader(sample).dialect
if dialect.delimiter not in accepted_csv_delimiters:
raise Exception(
f"CSV delimeter '{dialect.delimiter}' is not supported"
)
except csv.Error:
dialect = None
return dialect
@staticmethod
def _fetch_url(url: str) -> str:
temp_dir = tempfile.mkdtemp(prefix="mindsdb_file_url_")
try:
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(os.path.join(temp_dir, "file"), "wb") as f:
for chunk in r:
f.write(chunk)
else:
raise Exception(f"Response status code is {r.status_code}")
except Exception as e:
print(f"Error during getting {url}")
print(e)
raise
return os.path.join(temp_dir, "file")
def get_tables(self) -> Response:
"""
List all files
"""
files_meta = self.file_controller.get_files()
data = [
{
"TABLE_NAME": x["name"],
"TABLE_ROWS": x["row_count"],
"TABLE_TYPE": "BASE TABLE",
}
for x in files_meta
]
return Response(RESPONSE_TYPE.TABLE, data_frame=pd.DataFrame(data))
def get_columns(self, table_name) -> Response:
file_meta = self.file_controller.get_file_meta(table_name)
result = Response(
RESPONSE_TYPE.TABLE,
data_frame=pd.DataFrame(
[
{
"Field": x["name"].strip()
if isinstance(x, dict)
else x.strip(),
"Type": "str",
}
for x in file_meta["columns"]
]
),
)
return result
| [] |
2024-01-10 | juansensio/llms | twitter-bot~blog.py | from dotenv import load_dotenv
import tweepy
import os
import logging
import requests
from bs4 import BeautifulSoup
import random
from langchain import OpenAI
from langchain.prompts.prompt import PromptTemplate
from langchain import LLMChain
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
handler = logging.FileHandler(filename="twitter-bot-blog.log")
logger.addHandler(handler)
MAX_TRIES = 5
def main():
    # fetch the list of posts
url = "https://www.sensiocoders.com/blog/"
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
posts = soup.find_all('a', href=lambda href: href and "/blog/" in href)
    # set up langchain
model = 'gpt-3.5-turbo-16k'
llm = OpenAI(model_name=model, temperature=0, openai_api_key=os.environ['OPENAI_API_KEY'], max_tokens=256)
template = """Tu tarea es leer el siguiente post y generar un tweet educativo en castellano que haga crecer el número de seguidores de la cuenta.
Dado el siguiente post, genera un tweet de máximo 200 caracteres en castellano.
El tweet debe ser educativo y accesible para diferentes niveles de expertos.
Responde siempre en español.
Utiliza hashtags apropiados para llegar a más personas.
POST:
{post}
TWEET:"""
prompt = PromptTemplate(template=template, input_variables=['post'])
llm_chain = LLMChain(prompt=prompt, llm=llm)
    # Twitter client
client = tweepy.Client(
bearer_token=os.getenv("BEARER_TOKEN"),
access_token=os.getenv("ACCESS_TOKEN"),
access_token_secret=os.getenv("ACCESS_TOKEN_SECRET"),
consumer_key=os.getenv("API_KEY"),
consumer_secret=os.getenv("API_KEY_SECRET")
)
    # generate a tweet
i = 1
while i <= MAX_TRIES:
try:
            # pick a post at random
post = random.choice(posts)
            # read the post
post_url = post['href'].split('/')[-1]
post_response = requests.get(url + post_url)
post_soup = BeautifulSoup(post_response.content, 'html.parser')
content = post_soup.find('div', class_="post").text
# generar tweet
tweet = llm_chain.run(content)
tweet += " https://www.sensiocoders.com/blog/" + post_url
assert len(tweet) <= 280
print(tweet)
client.create_tweet(text=tweet)
return
        except Exception:
continue
finally:
i += 1
print("No se pudo generar un tweet :(")
return
if __name__ == "__main__":
load_dotenv()
main() | [
"Tu tarea es leer el siguiente post y generar un tweet educativo en castellano que haga crecer el número de seguidores de la cuenta.\nDado el siguiente post, genera un tweet de máximo 200 caracteres en castellano.\nEl tweet debe ser educativo y accesible para diferentes niveles de expertos.\nResponde siempre en español.\nUtiliza hashtags apropiados para llegar a más personas.\n\nPOST:\n\n{post} \n\nTWEET:"
] |
2024-01-10 | stochastictalk/dynaprompt | dynaprompt~inputs~_OpenAIChat.py | from multiprocessing.managers import ValueProxy
from multiprocessing.synchronize import Event
from time import sleep
from typing import Callable, Dict, List
from dotenv import dotenv_values
import openai
from dynaprompt.utils import random_hash
config = dotenv_values(".env")
openai.api_key = config["OPENAI_API_KEY"]
def map_message_log_to_openai_messages(message_log: List[Dict]):
return [{i: d[i] for i in d if i != "id"} for d in message_log if d["role"] not in ["error", "conversation_manager"]]
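# Illustrative example (not part of the original source): "error" and
# "conversation_manager" entries are dropped and the "id" key is stripped:
#   map_message_log_to_openai_messages(
#       [{"role": "user", "content": "hi", "id": "abc"},
#        {"role": "error", "content": "boom", "id": "def"}]
#   ) -> [{"role": "user", "content": "hi"}]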
class OpenAIChat:
def __init__(
self,
role: str = "assistant",
system_prompt: str = "You are a helpful AI assistant called PLEX.\
You always speak like a pirate. You also love cracking wise.",
temperature: float = 0.3,
presence_penalty: float = 0.0,
frequency_penalty: float = 0.0,
):
self.role = role
self.system_prompt = system_prompt
self.temperature = temperature
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
def __call__(self, message_log_proxy: ValueProxy, stop_event: Event):
if len(map_message_log_to_openai_messages(message_log_proxy.value)) == 0:
message_log_proxy.value = message_log_proxy.value + [
{"role": "system", "content": self.system_prompt, "id": random_hash()}
]
id_of_last_message_chatbot_sent = None
while not stop_event.is_set():
sleep(0.1)
try:
id_of_last_message_in_log = message_log_proxy.value[-1]["id"]
if id_of_last_message_in_log != id_of_last_message_chatbot_sent:
try:
stop = False
message = ""
while not stop:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=map_message_log_to_openai_messages(message_log_proxy.value),
temperature=self.temperature,
presence_penalty=self.presence_penalty,
frequency_penalty=self.frequency_penalty,
)
message += response["choices"][0]["message"]["content"].lstrip()
stop = "stop" == response["choices"][0]["finish_reason"]
id_of_last_message_chatbot_sent = random_hash("assistant")
role = self.role
except openai.error.APIError as exception:
message = f"OpenAI API returned an API Error: {exception}"
role = "error"
except openai.error.APIConnectionError:
message = "Failed to connect to OpenAI API."
role = "error"
except openai.error.RateLimitError:
message = "OpenAI API request exceeded rate limit."
role = "error"
except openai.error.AuthenticationError:
message = (
"No OpenAI API key provided. Please provide an OpenAI API key via the command arguments."
)
role = "error"
message_log_proxy.value = message_log_proxy.value + [
{"role": role, "content": message, "id": id_of_last_message_chatbot_sent}
]
except IndexError:
pass
| [] |
2024-01-10 | CNugteren/keras-onnx | applications~nightly_build~test_transformers.py | ###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import sys
import unittest
import keras2onnx
import json
import urllib.request
import pickle
from os.path import dirname, abspath
from keras2onnx.proto import keras
import numpy as np
import tensorflow as tf
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))
from test_utils import run_onnx_runtime
from keras2onnx.proto import is_tensorflow_older_than
enable_full_transformer_test = False
if os.environ.get('ENABLE_FULL_TRANSFORMER_TEST', '0') != '0':
    enable_full_transformer_test = True
@unittest.skipIf(is_tensorflow_older_than('2.1.0'),
"Transformers conversion need tensorflow 2.1.0+")
class TestTransformers(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
def _get_token_path(self, file_name):
return 'https://lotus.blob.core.windows.net/converter-models/transformer_tokenizer/' + file_name
def _get_tokenzier(self, tokenizer_file):
token_path = self._get_token_path(tokenizer_file)
if not os.path.exists(tokenizer_file):
urllib.request.urlretrieve(token_path, tokenizer_file)
with open(tokenizer_file, 'rb') as handle:
tokenizer = pickle.load(handle)
return tokenizer
def _prepare_inputs(self, tokenizer, batch_size=3):
raw_data = json.dumps({
'text': 'The quick brown fox jumps over lazy dog.'
})
text = json.loads(raw_data)['text']
# The tokenizers are generated using transformers 2.5.0, but model_max_length is introduced and needed in 2.9.0.
if not hasattr(tokenizer, 'model_max_length'):
tokenizer.model_max_length = 1024
inputs_raw = tokenizer.encode_plus(text, add_special_tokens=True)
inputs_onnx = {k_: np.repeat(np.expand_dims(v_, axis=0), batch_size, axis=0) for k_, v_ in inputs_raw.items()}
inputs = {k_: tf.constant(v_) for k_, v_ in inputs_onnx.items()}
return text, inputs, inputs_onnx
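    # Illustrative note (not part of the original source): with the default
    # batch_size=3, every value in inputs_onnx is a numpy array of shape
    # (3, sequence_length), i.e. the single encoded sentence repeated along a
    # new batch axis, while inputs holds the same arrays as tf constants.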
@unittest.skip("Output shape mismatch for tf model prediction.")
def test_3layer_gpt2(self):
from transformers import GPT2Config, TFGPT2Model, BertTokenizer
keras2onnx.proto.keras.backend.set_learning_phase(0)
config = GPT2Config(n_layer=3)
model = TFGPT2Model(config)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
inputs = tokenizer.encode_plus(text, add_special_tokens=True, return_tensors='tf')
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFBertModel(self):
from transformers import BertConfig, TFBertModel
keras.backend.clear_session()
# pretrained_weights = 'bert-base-uncased'
tokenizer_file = 'bert_bert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = BertConfig()
model = TFBertModel(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
@unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
def test_TFBertForPreTraining(self):
from transformers import BertConfig, TFBertForPreTraining
keras.backend.clear_session()
# pretrained_weights = 'bert-base-uncased'
tokenizer_file = 'bert_bert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = BertConfig()
model = TFBertForPreTraining(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
def test_TFBertForMaskedLM(self):
from transformers import BertConfig, TFBertForMaskedLM
keras.backend.clear_session()
# pretrained_weights = 'bert-base-uncased'
tokenizer_file = 'bert_bert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = BertConfig()
model = TFBertForMaskedLM(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
@unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
def test_TFBertForNextSentencePrediction(self):
from transformers import BertConfig, TFBertForNextSentencePrediction
keras.backend.clear_session()
# pretrained_weights = 'bert-base-uncased'
tokenizer_file = 'bert_bert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = BertConfig()
model = TFBertForNextSentencePrediction(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFBertForSequenceClassification(self):
from transformers import BertConfig, TFBertForSequenceClassification
keras.backend.clear_session()
# pretrained_weights = 'bert-base-uncased'
tokenizer_file = 'bert_bert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = BertConfig()
model = TFBertForSequenceClassification(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFBertForTokenClassification(self):
from transformers import BertConfig, TFBertForTokenClassification
keras.backend.clear_session()
# pretrained_weights = 'bert-base-uncased'
tokenizer_file = 'bert_bert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = BertConfig()
model = TFBertForTokenClassification(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFBertForQuestionAnswering(self):
from transformers import BertConfig, TFBertForQuestionAnswering
keras.backend.clear_session()
# pretrained_weights = 'bert-base-uncased'
tokenizer_file = 'bert_bert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = BertConfig()
model = TFBertForQuestionAnswering(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFGPT2(self):
if enable_full_transformer_test:
from transformers import GPT2Config, TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel
model_list = [TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel]
else:
from transformers import GPT2Config, TFGPT2Model
model_list = [TFGPT2Model]
# pretrained_weights = 'gpt2'
tokenizer_file = 'gpt2_gpt2.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = GPT2Config()
for model_instance_ in model_list:
keras.backend.clear_session()
model = model_instance_(config)
model._set_inputs(inputs)
predictions_original = model(inputs)
predictions = [predictions_original[0]] + list(v_.numpy() for v_ in predictions_original[1])
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
@unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
def test_TFOpenAIGPTModel(self):
from transformers import OpenAIGPTConfig, TFOpenAIGPTModel
keras.backend.clear_session()
# pretrained_weights = 'openai-gpt'
tokenizer_file = 'openai_openai-gpt.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = OpenAIGPTConfig()
model = TFOpenAIGPTModel(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFOpenAIGPTLMHeadModel(self):
from transformers import OpenAIGPTConfig, TFOpenAIGPTLMHeadModel
keras.backend.clear_session()
# pretrained_weights = 'openai-gpt'
tokenizer_file = 'openai_openai-gpt.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = OpenAIGPTConfig()
model = TFOpenAIGPTLMHeadModel(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
def test_TFOpenAIGPTDoubleHeadsModel(self):
from transformers import OpenAIGPTConfig, TFOpenAIGPTDoubleHeadsModel
keras.backend.clear_session()
# pretrained_weights = 'openai-gpt'
tokenizer_file = 'openai_openai-gpt.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
# tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2), batch_dims = 1 in this case
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer, batch_size=1)
config = OpenAIGPTConfig()
model = TFOpenAIGPTDoubleHeadsModel(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
@unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
def test_TFXLMModel(self):
from transformers import XLMConfig, TFXLMModel
keras.backend.clear_session()
# pretrained_weights = 'xlm-mlm-enfr-1024'
tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = XLMConfig()
model = TFXLMModel(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
@unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
def test_TFXLMWithLMHeadModel(self):
from transformers import XLMConfig, TFXLMWithLMHeadModel
keras.backend.clear_session()
# pretrained_weights = 'xlm-mlm-enfr-1024'
tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = XLMConfig()
model = TFXLMWithLMHeadModel(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
@unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
def test_TFXLMForSequenceClassification(self):
from transformers import XLMConfig, TFXLMForSequenceClassification
keras.backend.clear_session()
# pretrained_weights = 'xlm-mlm-enfr-1024'
tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = XLMConfig()
model = TFXLMForSequenceClassification(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
@unittest.skip('tensorflow.GraphDef exceeds maximum protobuf size of 2GB')
def test_TFXLMForQuestionAnsweringSimple(self):
from transformers import XLMConfig, TFXLMForQuestionAnsweringSimple
keras.backend.clear_session()
# pretrained_weights = 'xlm-mlm-enfr-1024'
tokenizer_file = 'xlm_xlm-mlm-enfr-1024.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = XLMConfig()
model = TFXLMForQuestionAnsweringSimple(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFDistilBertModel(self):
from transformers import DistilBertConfig, TFDistilBertModel
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertModel(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFDistilBertForMaskedLM(self):
from transformers import DistilBertConfig, TFDistilBertForMaskedLM
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertForMaskedLM(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
@unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
def test_TFDistilBertForSequenceClassification(self):
from transformers import DistilBertConfig, TFDistilBertForSequenceClassification
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertForSequenceClassification(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFDistilBertForTokenClassification(self):
from transformers import DistilBertConfig, TFDistilBertForTokenClassification
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertForTokenClassification(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFDistilBertForQuestionAnswering(self):
from transformers import DistilBertConfig, TFDistilBertForQuestionAnswering
keras.backend.clear_session()
# pretrained_weights = 'distilbert-base-uncased'
tokenizer_file = 'distilbert_distilbert-base-uncased.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = DistilBertConfig()
model = TFDistilBertForQuestionAnswering(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
@unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
def test_TFRobertaModel(self):
from transformers import RobertaConfig, TFRobertaModel
keras.backend.clear_session()
# pretrained_weights = 'roberta-base'
tokenizer_file = 'roberta_roberta-base.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = RobertaConfig()
model = TFRobertaModel(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
def test_TFRobertaForMaskedLM(self):
from transformers import RobertaConfig, TFRobertaForMaskedLM
keras.backend.clear_session()
# pretrained_weights = 'roberta-base'
tokenizer_file = 'roberta_roberta-base.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = RobertaConfig()
model = TFRobertaForMaskedLM(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(
run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
atol=1.e-4))
def test_TFRobertaForSequenceClassification(self):
from transformers import RobertaConfig, TFRobertaForSequenceClassification
keras.backend.clear_session()
# pretrained_weights = 'roberta-base'
tokenizer_file = 'roberta_roberta-base.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = RobertaConfig()
model = TFRobertaForSequenceClassification(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
@unittest.skipIf(not enable_full_transformer_test, "Full transformer test is not enabled")
def test_TFRobertaForTokenClassification(self):
from transformers import RobertaConfig, TFRobertaForTokenClassification
keras.backend.clear_session()
# pretrained_weights = 'roberta-base'
tokenizer_file = 'roberta_roberta-base.pickle'
tokenizer = self._get_tokenzier(tokenizer_file)
text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
config = RobertaConfig()
model = TFRobertaForTokenClassification(config)
predictions = model.predict(inputs)
onnx_model = keras2onnx.convert_keras(model, model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
if __name__ == "__main__":
unittest.main()
| [] |
2024-01-10 | uzh-rpg/high_mpc | high_mpc~common~logger.py | """
A logger file copied from OpenAI Baselines (https://github.com/openai/baselines/blob/master/baselines/logger.py)
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, '__float__'):
valstr = '%-8.3g' % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[:maxlen-3] + '...' if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(' ')
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=''):
os.makedirs(ev_dir, exist_ok=True)
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
elif format == 'json':
return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
elif format == 'csv':
return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
elif format == 'tensorboard':
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, the values are averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = 'wait_' + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)
self.name2cnt[key] = cnt + 1
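# incremental mean: new_mean = old_mean * n/(n+1) + val/(n+1), so no per-step
# history is stored; e.g. two calls with -22.5 and -44.4 yield -33.45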
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
from baselines.common import mpi_util
d = mpi_util.mpi_weighted_mean(self.comm,
{name : (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()})
if self.comm.rank != 0:
d['dummy'] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ['PMI_RANK', 'OMPI_COMM_WORLD_RANK']:
if varname in os.environ:
return int(os.environ[varname])
return 0
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
else:
format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log('Logging to %s'%dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
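# Minimal usage sketch (the directory is illustrative):
#   with scoped_configure(dir="/tmp/run1", format_strs=["stdout", "csv"]):
#       logkv("loss", 0.5)
#       dumpkvs()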
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv_mean("b", -22.5)
logkv_mean("b", -44.4)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see b = -33.3")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx,tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step-1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo() | [] |
2024-01-10 | PrabhuKiran8790/PDF-Assistant | Home.py | import streamlit as st
from components.sidebar.OpenAI_API import openai_api_insert_component
from components.body.file_uploader import file_uploader
from components.body.prompt import prompt_box
from components.body import langchain_PDF
from components.sidebar.Auth import authentication_comp, db
import pandas as pd
import os
st.set_page_config(page_title="PDF Assistant", page_icon="📖", layout="wide", initial_sidebar_state='expanded')
if 'logged_in' not in st.session_state:
st.session_state['logged_in'] = False
if 'username' not in st.session_state:
st.session_state['username'] = None
if 'login_btn_clicked' not in st.session_state:
st.session_state['login_btn_clicked'] = None
if 'uuid' not in st.session_state:
st.session_state['uuid'] = None
if 'login_failed' not in st.session_state:
st.session_state['login_failed'] = None
if 'response' not in st.session_state:
st.session_state['response'] = None
def main():
st.header(":red[PDF Assistant]: AI-Powered Q&A for _PDFs_")
if st.session_state['logged_in'] and st.session_state['username'] is not None:
st.sidebar.write(f"Welcome **:green[{st.session_state['username']}]** 👋")
# st.write(os.getenv("FIREBASE_API"))
openai_api_insert_component() # Insert OpenAI API component in sidebar
# if not logged in, show authentication component
if st.session_state['logged_in'] == False:
with st.sidebar:
authentication_comp()
# if logged in, show logout button
if st.session_state['logged_in'] == True:
with st.sidebar:
logout = st.button("Logout 🔒")
if logout:
st.session_state['logged_in'] = False
st.session_state['login_btn_clicked'] = None
st.session_state['username'] = None
st.session_state['uuid'] = None
st.session_state['signup_btn_clicked'] = None
st.button("dummy", on_click=st.experimental_rerun()) # dummy button to rerun the app. This is a hacky way to rerun the app. dummy btn is not shown to user.
file_uploader_col, prompt_col = st.columns([0.5, 1])
with file_uploader_col:
file_uploader()
with prompt_col:
prompt_box()
generate_answer_button = st.button("Generate Answer")
if generate_answer_button:
st.session_state['generate_answer_button'] = True
# check if all are empty
if st.session_state['OPENAI_API_KEY'] == "" and st.session_state['uploaded_file'] is None and st.session_state['prompt'] == "":
st.error("Please set your OpenAI API key in the sidebar, upload a PDF and enter a prompt")
st.session_state['cancel_btn_active'] = True
# st.stop()
# check if API key is empty
elif st.session_state['OPENAI_API_KEY'] == "" or st.session_state['OPENAI_API_KEY'] is None:
st.sidebar.error("Please set your OpenAI API key in the sidebar.")
st.session_state['cancel_btn_active'] = True
# st.stop()
# check if file is not uploaded and prompt is empty
elif st.session_state['uploaded_file'] is None and st.session_state['prompt'] == "":
st.error("Please upload a PDF and enter a prompt")
st.session_state['cancel_btn_active'] = True
# st.stop()
# check if file is not uploaded
elif st.session_state['uploaded_file'] is None:
st.error("Please upload a PDF")
st.session_state['cancel_btn_active'] = True
# st.stop()
# check if prompt is empty
elif st.session_state['prompt'] == "":
st.error("Please enter a prompt")
st.session_state['cancel_btn_active'] = True
# st.stop()
else: # if everything is fine
os.environ['OPENAI_API_KEY'] = st.session_state['OPENAI_API_KEY']
st.caption(f"Filename: :red[{st.session_state['uploaded_file'].name}]")
response = langchain_PDF.get_response_from_OpenAI_LangChain(st.session_state['uploaded_file'], st.session_state['prompt'])
st.session_state['response'] = response  # store the answer so the block below can render it
st.warning('⚠️ Please note that the response is dependent on the :red[Quality of the PDF] and the :red[Quality of the prompt] and it may not be accurate at times. Please use the response as a reference and not as a final answer.')
if st.session_state['response'] is not None:
st.write("")
st.write("###### :blue[🤖 **AI Response**]")
st.write(f"#### :green[{st.session_state['response']}]")
st.markdown("------------")
if st.session_state['logged_in'] == True and st.session_state['username'] is not None:
show_history = st.checkbox("Show History")
if show_history:
st.write("Your previous interactions are as follows:")
past_docs = db.child("users").child(st.session_state['uuid']).child('pdf_files').get().val()
if past_docs:
selected_doc = st.selectbox("Select a PDF file", options=list(past_docs.keys()))
df = pd.DataFrame.from_dict(past_docs[selected_doc]['Prompts'], orient='index', columns=['prompt', 'response'])
hide_table_row_index = """
<style>
thead tr th:first-child {display:none}
tbody th {display:none}
</style>
"""
st.markdown(hide_table_row_index, unsafe_allow_html=True)
st.table(df)
else:
st.write("##### 😔 :red[No history found.]")
if __name__ == "__main__":
main()
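# Local run sketch (assumes streamlit and the project dependencies are installed):
#   streamlit run Home.py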
| [] |
2024-01-10 | aws-samples/fsi-genai-bootcamp | 07_capstone~rag_chatbot_lib_claude.py | import os
from langchain.memory import ConversationBufferWindowMemory
from langchain.llms.bedrock import Bedrock
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import BedrockEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.text_splitter import CharacterTextSplitter
# from IPython.display import Markdown, display
import json
import sys
import boto3
import botocore
sys.path.append("/home/sagemaker-user/fsi-genai-bootcamp/")
from utils import bedrock, print_ww
def get_client():
#Since the lab accounts provisioned for this workshop don't have access to Bedrock models, the role "arn:aws:iam::067564772063:role/Crossaccountbedrock" is assumed below to inherit Bedrock model access from the parent account. If you are running in your own account, follow the Bedrock "model access" section and request access to the specific models instead.
os.environ["BEDROCK_ASSUME_ROLE"] = "arn:aws:iam::067564772063:role/Crossaccountbedrock" # E.g. "arn:aws:..."
boto3_bedrock = bedrock.get_bedrock_client(
assumed_role=os.environ.get("BEDROCK_ASSUME_ROLE", None),
region=os.environ.get("AWS_DEFAULT_REGION", None)
)
return boto3_bedrock
def get_llm():
# define the generation parameters once and pass the same dict to Bedrock
model_kwargs = {
"max_tokens_to_sample": 500,
"temperature": 0.9,
}
llm = Bedrock(
client=get_client(),
model_id="anthropic.claude-instant-v1",
model_kwargs=model_kwargs,
)
return llm
def get_index(file_name): #creates and returns an in-memory vector store to be used in the application
embeddings = BedrockEmbeddings(client=get_client(), model_id="amazon.titan-embed-text-v1")
loader = PyPDFLoader(file_path=file_name) #load the pdf file
text_splitter = RecursiveCharacterTextSplitter( #create a text splitter
separators=["\n\n", "\n", " ",""], #split chunks at (1) paragraph, (2) line, (3) sentence, or (4) word, in that order
chunk_size=300, #divide into 300-character chunks using the separators above
chunk_overlap=10 #number of characters that can overlap with previous chunk
)
'''
Experiment with different chunking strategies below
text_splitter = RecursiveCharacterTextSplitter(
# Set a really small chunk size, just to show.
chunk_size = 500,
chunk_overlap = 20,
length_function = len,
is_separator_regex = False,
)
text_splitter = CharacterTextSplitter(
separator = "\n\n",
chunk_size = 500,
chunk_overlap = 200,
length_function = len,
is_separator_regex = True,
)
'''
index_creator = VectorstoreIndexCreator( #create a vector store factory
vectorstore_cls=FAISS, #use an in-memory vector store for demo purposes
embedding=embeddings, #use Titan embeddings
text_splitter=text_splitter, #use the recursive text splitter
)
index_from_loader = index_creator.from_loaders([loader]) #create a vector store index from the loaded PDF
return index_from_loader #return the index to be cached by the client app
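# Usage sketch (the file name is illustrative):
#   index = get_index("sample.pdf")
#   hits = index.vectorstore.similarity_search("What is the coupon rate?")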
def get_memory(): #create memory for this chat session
memory = ConversationBufferWindowMemory(memory_key="chat_history", return_messages=True) #Maintains a history of previous messages
return memory
def get_rag_chat_response(input_text, memory, index): #chat client function
llm = get_llm()
conversation_with_retrieval = ConversationalRetrievalChain.from_llm(llm, index.vectorstore.as_retriever(), memory=memory)
input_text_updated = f"""Human: {input_text}
Assistant:
"""
chat_response = conversation_with_retrieval({"question": input_text_updated}) #pass the user message, history, and knowledge to the model
return chat_response['answer']
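# End-to-end sketch (the file name is illustrative):
#   memory, index = get_memory(), get_index("sample.pdf")
#   print(get_rag_chat_response("Summarize the document", memory, index))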
| [] |
2024-01-10 | kakao-aicoursework/lunarvel.vet | 03~callback.py | from dto import ChatbotRequest
from samples import list_card
import aiohttp
import time
import logging
import openai
import os
from os import environ
# from langchain import LLMChain
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.prompts.chat import HumanMessagePromptTemplate
from langchain.prompts.chat import ChatPromptTemplate
from langchain.schema import SystemMessage
from langchain.memory import ConversationBufferMemory
from langchain.memory import FileChatMessageHistory
from langchain.tools import Tool
from langchain.utilities import GoogleSearchAPIWrapper
import chromadb
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
openai.api_key = environ.get("API_KEY")
CUR_DIR = os.getcwd()
# BUG_STEP1_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "data/bug_analyze.txt")
# BUG_STEP2_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "data/bug_solution.txt")
# ENHANCE_STEP1_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "data/enhancement_say_thanks.txt")
KAKAO_CHANNEL_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "data/bug_analyze.txt")
DEFAULT_RESPONSE_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "data/default_response.txt")
INTENT_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "data/parse_intent.txt")
INTENT_LIST_TXT = os.path.join(CUR_DIR, "data/intent_list.txt")
SEARCH_VALUE_CHECK_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "data/search_value_check.txt")
SEARCH_COMPRESSION_PROMPT_TEMPLATE = os.path.join(CUR_DIR, "data/search_compress.txt")
HISTORY_DIR = os.path.join(CUR_DIR, "chat_histories")
CHROMA_DIR = os.path.join(CUR_DIR, "chromadb")
intent_list_content = ["""
kakao_sync: Questions about kakao sync. This service syncs messages, photos, and videos from KakaoTalk between your mobile and PC.
kakao_talkchannel: Questions about kakao channel. It's a brand marketing platform that facilitates communication between businesses and users.
kakao_social: Questions about kakao social. This is kakao's social media service where users can share information and communicate with each other.
""", """
kakao_sync: 카카오 싱크에 대한 질문. 이 서비스는 모바일과 PC 간의 카카오톡 메시지, 사진, 동영상 등을 동기화해주는 서비스입니다.
kakao_talkchannel: 카카오 채널에 대한 질문. 기업과 사용자 사이의 원활한 소통을 위해 제공되는 브랜드 마케팅 플랫폼입니다.
kakao_social: 카카오 소셜에 대한 질문. 사용자들이 정보를 공유하고 소통할 수 있는 카카오의 소셜 미디어 서비스.
""" ]
parse_intent_content = """
Your job is to select one intent from the <intent_list>.
<intent_list>
{intent_list}
</intent_list>
User: {user_message}
Intent:
"""
def read_file(path: str) -> str:
with open(path, "r", encoding="utf-8") as f:
return f.read()
def read_prompt_template(path: str) -> str:
return read_file(path)
def create_chain(llm, template_path, output_key):
return LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_template(
template=read_file(template_path)
),
output_key=output_key,
verbose=True,
)
from enum import Enum
g_intent_str = ["kakao_social", "kakao_sync", "kakao_talkchannel"]
class Intent:
NONE = -1
kakao_social = 0
kakao_sync = 1
kakao_talkchannel = 2
str = ["kakao_social", "kakao_sync", "kakao_talkchannel"]
def to_str(self) -> str:
return self.str[self.value]
def to_idx(self) -> int:
return self.value
def __init__(self, value: int = -1):
self.value = value
@classmethod
def init(cls, value_str: str):
value_str = value_str.lower()
ret = cls(cls.NONE)
if value_str == 'kakao_social':
ret = cls(cls.kakao_social)
elif value_str == 'kakao_sync':
ret = cls(cls.kakao_sync)
elif value_str == 'kakao_talkchannel':
ret = cls(cls.kakao_talkchannel)
return ret
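# e.g. Intent.init("KAKAO_SYNC").to_str() == "kakao_sync", while unknown
# strings fall back to Intent.NONE (-1)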
class ConversationHistory:
def __init__(self, conversation_id: str):
file_path = os.path.join(HISTORY_DIR, f"{conversation_id}.json")
self.conversation_id = conversation_id
self.history = FileChatMessageHistory(file_path)
@classmethod
def load(cls, conversation_id: str):
return ConversationHistory(conversation_id)
def save_history(self, user_message, bot_answer):
self.log_user_message(user_message)
self.log_bot_message(bot_answer)
def log_user_message(self, user_message: str):
self.history.add_user_message(user_message)
def log_bot_message(self, bot_message: str):
self.history.add_ai_message(bot_message)
def get_chat_history(self):
memory = ConversationBufferMemory(
memory_key="chat_history",
input_key="user_message",
chat_memory=self.history,
)
return memory.buffer
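# Each conversation persists to chat_histories/<conversation_id>.json through
# FileChatMessageHistory, so a restarted process reloads the same buffer.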
from chromadb.utils import embedding_functions
class IntentDB:
intent = Intent(Intent.NONE)
path: str = None
# db: chromadb.Collection = None
# retriever = None
def __init__(self, client: chromadb.PersistentClient, intent: Intent, txt_path: str):
self.db: Chroma = None
self.intent = intent
self.conversation_history = ConversationHistory(intent.to_str())
self.init_db(intent, txt_path)
def query(self, query: str, use_retriever: bool = False) -> list[str]:
if use_retriever:
retriever = self.db.as_retriever()
docs = retriever.get_relevant_documents(query)
else:
docs = self.db.similarity_search(query)
str_docs = [doc.page_content for doc in docs]
return str_docs
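# e.g. db.query("How do I enable Kakao Sync?", use_retriever=True) returns the
# page_content strings of the chunks most similar to the question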
# def add(self, documents, ids):
# self.db.add(documents=documents, ids=ids)
def init_db(self, intent: Intent, txt_path: str):
# read from txt_path into raw_text
self.intent = intent
self.path = txt_path
raw_text = read_file(txt_path)
# todo: this raw text still needs a lot of preprocessing.
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len,
)
texts = text_splitter.split_text(raw_text)
text_dict = {"id": [], "text": texts}
# todo: give the ids some meaning as well (they are just running indices for now)...
text_dict["id"] = [f"{i}" for i in range(len(texts))]
# create the open-source embedding function
# embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
if not os.path.exists(CHROMA_DIR):
# no persisted collection yet, so build one; the chunks are plain strings,
# which is what Chroma.from_texts (rather than from_documents) expects
self.db = Chroma.from_texts(
texts=text_dict["text"],
ids=text_dict["id"],
embedding=OpenAIEmbeddings(),
persist_directory=CHROMA_DIR,
collection_name=self.intent.to_str(),
# metadata={"hnsw:space": "cosine"},
)
else:
# load docs into Chroma DB
self.db = Chroma(
persist_directory=CHROMA_DIR,
collection_name=self.intent.to_str(),
embedding_function=OpenAIEmbeddings(),
# metadata={"hnsw:space": "cosine"},
)
self.db.get()
return
from langchain.utilities import DuckDuckGoSearchAPIWrapper
def create_search_tool(use_google: bool = False):
search = None
name = ""
description = ""
if not use_google:
# use DuckDuckGo
search = DuckDuckGoSearchAPIWrapper(region="ko-KR")
name = "DuckDuckGo Search"
else:
search = GoogleSearchAPIWrapper(
google_api_key=os.getenv("GOOGLE_API_KEY", ""),
google_cse_id=os.getenv("GOOGLE_CSE_ID", "")
)
name = "Google Search"
# todo: a clearer, more specific description helps the agent pick this tool correctly.
description = f"Search {name} for recent results."
search_tool = Tool(
name=name,
description=description,
func=search.run
)
return search_tool
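# Usage sketch: create_search_tool().run("kakao sync") returns a plain-text
# snippet of web results (DuckDuckGo by default, Google when use_google=True)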
def query_web_search(user_message: str) -> str:
context = {"user_message": user_message}
context["related_web_search_results"] = gChatBot.search_tool.run(user_message)
has_value = gChatBot.search_value_check_chain.run(context)
print(has_value)
if has_value == "Y":
return gChatBot.search_compression_chain.run(context)
else:
return ""
def intended_query(intent: Intent, query: str) -> str:
context = dict(
related_documents=[],
user_message=query
)
i = intent.to_idx()
if i == Intent.NONE:
# web search with query
# todo: do we really need to search every intent DB here?
for i in range(len(gChatBot.intent_info)):
info = gChatBot.intent_info[i]
db = info['db']
context["related_documents"].append(db.query(context["user_message"]))
context["compressed_web_search_results"] = query_web_search(context["user_message"])
answer = gChatBot.default_chain.run(context)
else:
info = gChatBot.intent_info[i]
db = info['db']
chain = info['chatbot_chain']
context["related_documents"] = db.query(context["user_message"])
answer = chain.run(context)
return answer
def generate_answer(user_message, conversation_id: str='fa1010') -> dict[str, str]:
hist = ConversationHistory.load(conversation_id)
context = dict( user_message=user_message )
context["input"] = context["user_message"]
context["intent_list"] = read_prompt_template(INTENT_LIST_TXT)
context["chat_history"] = hist.get_chat_history()
# intent = parse_intent_chain(context)["intent"]
intent_str = gChatBot.parse_intent_chain.run(context)
print(intent_str)
print("======================")
intent = Intent.init(intent_str)
answer = intended_query(intent, user_message)
# save history
hist.save_history(user_message, answer)
return {'answer': answer}
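# Example: generate_answer("What is Kakao Sync?") classifies the intent first,
# then answers from the matching Chroma collection (or web search for NONE)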
class ChatBot:
llm: ChatOpenAI = None
client: chromadb.PersistentClient = None
parse_intent_chain: LLMChain = None
# db: Datastore = []
# chatbot_chain: LLMChain = []
intent_info = []
search_tool: Tool = None
default_chain: LLMChain = None
search_value_check_chain: LLMChain = None
search_compression_chain: LLMChain = None
def init(self):
if os.path.exists(HISTORY_DIR) == False:
os.makedirs(HISTORY_DIR)
os.environ["OPENAI_API_KEY"] = os.getenv("API_KEY")
self.llm = llm = ChatOpenAI(
temperature=0.1,
max_tokens=200,
model="gpt-3.5-turbo"
)
# init DBs (reuse the same persist directory the Chroma collections use)
self.client = chromadb.PersistentClient(CHROMA_DIR)
# init intent_info
for i in range(len(Intent.str)):
intent_str = Intent.str[i]
prompt_template = os.path.join(CUR_DIR, f'data/prompt_template_{intent_str}.txt')
_tmp_ = {
'db': IntentDB(self.client, Intent(i), f'data/project_data_{intent_str}.txt'),
'chatbot_chain': create_chain(
llm=llm,
template_path=prompt_template,
output_key="output_" + intent_str
)
}
self.intent_info.append(_tmp_)
# self.chatbot_chain[i] = create_chain(
# llm=llm,
# template_path=prompt_template,
# output_key="output_"+intent_str,
# )
self.search_tool = create_search_tool()
self.parse_intent_chain = create_chain(
llm=llm,
template_path=INTENT_PROMPT_TEMPLATE,
output_key="intent",
)
self.default_chain = create_chain(
llm=llm,
template_path=DEFAULT_RESPONSE_PROMPT_TEMPLATE,
output_key="output"
)
self.search_value_check_chain = create_chain(
llm=llm,
template_path=SEARCH_VALUE_CHECK_PROMPT_TEMPLATE,
output_key="output",
)
self.search_compression_chain = create_chain(
llm=llm,
template_path=SEARCH_COMPRESSION_PROMPT_TEMPLATE,
output_key="output",
)
gChatBot = ChatBot()
gChatBot.init()
async def callback_handler(request: ChatbotRequest) -> dict:
# raw_data = read_file("data/project_data_kakaosync.txt")
#
# system_message = "assistant는 챗봇으로 동작한다. 챗봇은 '제품정보' 내용을 참고하여, user의 질문 혹은 요청에 따라 적절한 답변을 제공합니다."
# human_template = ("제품정보: {product_data}\n" +
# request.userRequest.utterance )
#
# system_message_prompt = SystemMessage(content=system_message)
# human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
#
# chat = ChatOpenAI(temperature=0.8)
#
# chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
#
# chain = LLMChain(llm=chat, prompt=chat_prompt)
# output_text = chain.run(product_data=raw_data)
ret = generate_answer(request.userRequest.utterance)
output_text = ret['answer']
payload = {
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": output_text[0] + "\n" + output_text[1]
}
}
]
}
}
# debug
print(output_text)
time.sleep(1.0)
url = request.userRequest.callbackUrl
if url:
async with aiohttp.ClientSession() as session:
async with session.post(url=url, json=payload) as resp:
await resp.json()
# async def callback_handler1(request: ChatbotRequest) -> dict:
# os.environ["OPENAI_API_KEY"] = os.getenv("API_KEY")
#
# raw_data = read_file("data/project_data_kakaosync.txt")
#
# system_message = "assistant는 챗봇으로 동작한다. 챗봇은 '제품정보' 내용을 참고하여, user의 질문 혹은 요청에 따라 적절한 답변을 제공합니다."
# human_template = ("제품정보: {product_data}\n" +
# request.userRequest.utterance )
#
# system_message_prompt = SystemMessage(content=system_message)
# human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
#
# chat = ChatOpenAI(temperature=0.8)
#
# chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
#
# chain = LLMChain(llm=chat, prompt=chat_prompt)
#
# output_text = chain.run(product_data=raw_data)
#
# payload = {
# "version": "2.0",
# "template": {
# "outputs": [
# {
# "simpleText": {
# "text": output_text
# }
# }
# ]
# }
# }
#
# # debug
# print(output_text)
#
# time.sleep(1.0)
#
# url = request.userRequest.callbackUrl
#
# if url:
# async with aiohttp.ClientSession() as session:
# async with session.post(url=url, json=payload) as resp:
# await resp.json()
# async def callback_handler2(request: ChatbotRequest) -> dict:
#
# # ===================== start =================================
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=[
# {"role": "system", "content": SYSTEM_MSG},
# {"role": "user", "content": request.userRequest.utterance},
# ],
# temperature=0,
# )
# # focus
# output_text = response.choices[0].message.content
#
# # 참고링크 통해 payload 구조 확인 가능
# payload = {
# "version": "2.0",
# "template": {
# "outputs": [
# {
# "simpleText": {
# "text": output_text
# }
# }
# ]
# }
# }
# # ===================== end =================================
# # 참고링크1 : https://kakaobusiness.gitbook.io/main/tool/chatbot/skill_guide/ai_chatbot_callback_guide
# # 참고링크1 : https://kakaobusiness.gitbook.io/main/tool/chatbot/skill_guide/answer_json_format
#
# time.sleep(1.0)
#
# url = request.userRequest.callbackUrl
#
# if url:
# async with aiohttp.ClientSession() as session:
# async with session.post(url=url, json=payload, ssl=False) as resp:
# await resp.json() | [
"data/parse_intent.txt",
"data/default_response.txt",
"data/search_value_check.txt",
"data/bug_analyze.txt",
"data/search_compress.txt",
"data/prompt_template_PLACEHOLDER.txt"
] |
2024-01-10 | kakao-aicoursework/lunarvel.vet | 02~callback.py | from dto import ChatbotRequest
from samples import list_card
import aiohttp
import time
import logging
import openai
import os
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
SystemMessage
)
def read_file(path: str) -> str:
with open(path, "r") as f:
return f.read()
# read the key from the environment instead of hard-coding it here
openai.api_key = os.getenv("API_KEY")
SYSTEM_MSG = "당신은 카카오 서비스 제공자입니다."
logger = logging.getLogger("Callback")
async def callback_handler(request: ChatbotRequest) -> dict:
os.environ["OPENAI_API_KEY"] = os.getenv("API_KEY")
raw_data = read_file("data/project_data_kakaosync.txt")
system_message = "assistant는 챗봇으로 동작한다. 챗봇은 '제품정보' 내용을 참고하여, user의 질문 혹은 요청에 따라 적절한 답변을 제공합니다."
human_template = ("제품정보: {product_data}\n" +
request.userRequest.utterance )
system_message_prompt = SystemMessage(content=system_message)
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat = ChatOpenAI(temperature=0.8)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chain = LLMChain(llm=chat, prompt=chat_prompt)
output_text = chain.run(product_data=raw_data)
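# Note: only {product_data} is a template variable here; the user's utterance
# was concatenated into human_template above, so chain.run fills just that slot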
payload = {
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": output_text
}
}
]
}
}
# debug
print(output_text)
time.sleep(1.0)
url = request.userRequest.callbackUrl
if url:
async with aiohttp.ClientSession() as session:
async with session.post(url=url, json=payload) as resp:
await resp.json()
async def callback_handler2(request: ChatbotRequest) -> dict:
# ===================== start =================================
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": SYSTEM_MSG},
{"role": "user", "content": request.userRequest.utterance},
],
temperature=0,
)
# focus
output_text = response.choices[0].message.content
# 참고링크 통해 payload 구조 확인 가능
payload = {
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": output_text
}
}
]
}
}
# ===================== end =================================
# 참고링크1 : https://kakaobusiness.gitbook.io/main/tool/chatbot/skill_guide/ai_chatbot_callback_guide
# 참고링크1 : https://kakaobusiness.gitbook.io/main/tool/chatbot/skill_guide/answer_json_format
time.sleep(1.0)
url = request.userRequest.callbackUrl
if url:
async with aiohttp.ClientSession() as session:
async with session.post(url=url, json=payload, ssl=False) as resp:
await resp.json() | [
"제품정보: {product_data}\n",
"[PLACEHOLDER, PLACEHOLDER]",
"assistant는 챗봇으로 동작한다. 챗봇은 '제품정보' 내용을 참고하여, user의 질문 혹은 요청에 따라 적절한 답변을 제공합니다.",
"당신은 카카오 서비스 제공자입니다."
] |
2024-01-10 | thesammiller/rime | server.py | import os
import re
import time
from flask import Flask, render_template, request, flash, redirect, url_for
from markupsafe import Markup
from openai import OpenAI
app = Flask(__name__, template_folder='./templates', static_folder='./static')
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
TALK_DIR = "conversations/"
CODE_DIR = "code/"
# make sure the output directories exist before any request tries to write to them
os.makedirs(TALK_DIR, exist_ok=True)
os.makedirs(CODE_DIR, exist_ok=True)
CHATGPT_MODEL = "gpt-4"
STARTING_MESSAGE = "You are an intelligent assistant."
CHARACTER_SIZE_LIMIT=18000
client = OpenAI(api_key=OPENAI_API_KEY)
messages = [{"role": "system", "content": STARTING_MESSAGE}]
# Check if the conversation history is too long
def messages_are_outside_token_limit():
return len("".join([m['content'] for m in messages])) > CHARACTER_SIZE_LIMIT
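# crude proxy for a token budget: ~18k characters is roughly 4.5k tokens at the
# common ~4 chars/token heuristic, leaving headroom in the model's context window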
def save_message_history():
filename = f"{time.time()}.txt"
with open(os.path.join(TALK_DIR, filename), "w") as f:
for m in messages:
f.write("{role} - {content}\n\n".format(**m))
return filename
def summarize_current_conversation(all_messages):
previous_conversation = "Summarize the following in one paragraph: " + all_messages
summary_messages = [{'role': 'system', 'content': previous_conversation}]
chat_completion = client.chat.completions.create(
messages=summary_messages,
model=CHATGPT_MODEL,
)
return "".join([i.message.content for i in chat_completion.choices])
def send_message_to_chatgpt(message):
global messages
if messages_are_outside_token_limit():
try:
save_message_history()
summary_response = summarize_current_conversation("".join([m['content'] for m in messages]))
messages = [{'role': 'system', 'content': summary_response},
{'role': 'user', 'content': message}]
except Exception as e:
flash(f"Error during summarization: {e}")
return f"Error during summarization: {e}"  # return a plain string so home() can render it as the reply
try:
chat_completion = client.chat.completions.create(
messages=messages,
model=CHATGPT_MODEL,
)
return "".join([i.message.content for i in chat_completion.choices])
except Exception as e:
flash(f"Error during sending message: {e}")
return f"Error during sending message: {e}"  # return a plain string so home() can render it as the reply
def format_text_with_code(reply):
return Markup(re.sub(r'```(\w+)?\s*(.*?)```', r'<pre><code>\2</code></pre>', reply, flags=re.DOTALL))
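# e.g. "```python\nprint(1)\n```" becomes "<pre><code>print(1)\n</code></pre>"
# so fenced snippets in the model's reply render as HTML code blocks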
def save_code(code):
if len(code) != 2:
return
language = code[0]
program = code[1]
file_ext = 'txt' if language not in ['python', 'coq'] else language
filename = f"{time.time()}.{file_ext}"
with open(os.path.join(CODE_DIR, filename), "w") as f:
f.write(program)
return filename
def strip_out_language_and_code(reply):
return re.findall(r'```(\w+)?\s*(.*?)```', reply, flags=re.DOTALL)
def check_reply_for_code_and_save(reply):
code_chunks = strip_out_language_and_code(reply)
files_created = []
if len(code_chunks) == 0:
return []
primary_language = code_chunks[0][0]
primary_chunk = ''.join([code[1] for code in code_chunks if code[0] == primary_language])
for code in code_chunks:
if code[0] != primary_language:
filename = save_code(code)
files_created.append(filename)
filename = save_code((primary_language, primary_chunk))
files_created.append(filename)
return files_created
@app.route('/', methods=['GET', 'POST'])
def home():
global messages
if request.method == 'POST':
message = request.form.get('messages', STARTING_MESSAGE)
messages.append({
"role": "user",
"content": message,
})
reply = send_message_to_chatgpt(message)
files = check_reply_for_code_and_save(reply)
formatted_text = format_text_with_code(reply)
messages.append({"role": "assistant", "content": formatted_text})
return render_template('form.html', messages=messages)
@app.route('/reset', methods=['GET', 'POST'])
def reset():
global messages
    messages = [{"role": "system", "content": STARTING_MESSAGE}]  # restore the initial system prompt
return redirect(url_for('home'))
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=8000)
| [] |
2024-01-10 | half-dreamer/CS61A-20fa | CS61A%202020%20FALL%20all-solution~ok~~client~protocols~unlock.py | """Implements the UnlockProtocol, which unlocks all specified tests
associated with an assignment.
The UnlockTestCase interface can be implemented by TestCases that are
compatible with the UnlockProtocol.
"""
from client.protocols.common import models
from client.utils import auth
from client.utils import format
from client.utils import guidance
from client.utils import locking
from datetime import datetime
import ast
import logging
import random
log = logging.getLogger(__name__)
try:
import readline
HAS_READLINE = True
except ImportError:
HAS_READLINE = False
class UnlockProtocol(models.Protocol):
"""Unlocking protocol that wraps that mechanism."""
PROMPT = '? ' # Prompt that is used for user input.
EXIT_INPUTS = ( # Valid user inputs for aborting the session.
'exit()',
'quit()',
)
SPECIAL_INPUTS = ( # Inputs that are not from the interpreter
'Error',
'Infinite Loop',
'Nothing',
)
def __init__(self, cmd_args, assignment):
super().__init__(cmd_args, assignment)
self.hash_key = assignment.name
self.analytics = []
self.guidance_util = guidance.Guidance("", assignment=assignment, suppress_warning_message=True)
def run(self, messages):
"""Responsible for unlocking each test.
The unlocking process can be aborted by raising a KeyboardInterrupt or
an EOFError.
RETURNS:
dict; mapping of test name (str) -> JSON-serializable object. It is up
to each test to determine what information is significant for analytics.
"""
if not self.args.unlock:
return
format.print_line('~')
print('Unlocking tests')
print()
print('At each "{}", type what you would expect the output to be.'.format(
self.PROMPT))
print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
print()
for test in self.assignment.specified_tests:
log.info('Unlocking test {}'.format(test.name))
self.current_test = test.name
# Reset guidance explanation probability for every question
self.guidance_util.prompt_probability = guidance.DEFAULT_PROMPT_PROBABILITY
try:
test.unlock(self.interact)
except (KeyboardInterrupt, EOFError):
try:
# TODO(albert): When you use Ctrl+C in Windows, it
# throws two exceptions, so you need to catch both
# of them. Find a cleaner fix for this.
print()
print('-- Exiting unlocker --')
except (KeyboardInterrupt, EOFError):
pass
print()
break
messages['unlock'] = self.analytics
def interact(self, unique_id, case_id, question_prompt, answer, choices=None, randomize=True, *, normalizer=lambda x: x):
"""Reads student input for unlocking tests until the student
answers correctly.
PARAMETERS:
unique_id -- str; the ID that is recorded with this unlocking
attempt.
case_id -- str; the ID that is recorded with this unlocking
attempt.
question_prompt -- str; the question prompt
answer -- list; a list of locked lines in a test case answer.
choices -- list or None; a list of choices. If None or an
empty list, signifies the question is not multiple
choice.
randomize -- bool; if True, randomizes the choices on first
invocation.
DESCRIPTION:
Continually prompt the student for an answer to an unlocking
        question until one of the following happens:
1. The student supplies the correct answer, in which case
the supplied answer is returned
2. The student aborts abnormally (either by typing 'exit()'
or using Ctrl-C/D. In this case, return None
Correctness is determined by the verify method.
RETURNS:
list; the correct solution (that the student supplied). Each element
in the list is a line of the correct output.
"""
if randomize and choices:
choices = random.sample(choices, len(choices))
correct = False
while not correct:
if choices:
assert len(answer) == 1, 'Choices must have 1 line of output'
choice_map = self._display_choices(choices)
question_timestamp = datetime.now()
input_lines = []
for line_number, line in enumerate(answer):
if len(answer) == 1:
prompt = self.PROMPT
else:
prompt = '(line {}){}'.format(line_number + 1, self.PROMPT)
student_input = format.normalize(self._input(prompt))
self._add_history(student_input)
if student_input in self.EXIT_INPUTS:
raise EOFError
if choices and student_input in choice_map:
student_input = choice_map[student_input]
correct_answer = self._verify_student_input(student_input, line, normalizer)
if correct_answer:
input_lines.append(correct_answer)
else:
input_lines.append(student_input)
break
else:
correct = True
tg_id = -1
misU_count_dict = {}
rationale = "Unknown - Default Value"
if not correct:
guidance_data = self.guidance_util.show_guidance_msg(unique_id, input_lines,
self.hash_key)
misU_count_dict, tg_id, printed_msg, rationale = guidance_data
else:
rationale = self.guidance_util.prompt_with_prob()
print("-- OK! --")
printed_msg = ["-- OK! --"]
self.analytics.append({
'id': unique_id,
'case_id': case_id,
'question timestamp': self.unix_time(question_timestamp),
'answer timestamp': self.unix_time(datetime.now()),
'prompt': question_prompt,
'answer': input_lines,
'correct': correct,
'treatment group id': tg_id,
'rationale': rationale,
'misU count': misU_count_dict,
'printed msg': printed_msg
})
print()
return input_lines
###################
# Private Methods #
###################
def _verify_student_input(self, student_input, locked, normalizer):
"""If the student's answer is correct, returns the normalized answer.
Otherwise, returns None.
normalizer: a function str -> str that 'normalizes' a student's output into a standardized form
"""
guesses = [student_input]
try:
guesses.append(normalizer(student_input))
except Exception:
pass
if student_input.title() in self.SPECIAL_INPUTS:
guesses.append(student_input.title())
for guess in guesses:
if self._verify(guess, locked):
return guess
def _verify(self, guess, locked):
return locking.lock(self.hash_key, guess) == locked
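    # Note: answers are stored only as keyed hashes, so a guess is validated by hashing
    # it with the assignment's key and comparing against the stored locked line.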
def _input(self, prompt):
"""Retrieves user input from stdin."""
return input(prompt)
def _display_choices(self, choices):
"""Prints a mapping of numbers to choices and returns the
mapping as a dictionary.
"""
print("Choose the number of the correct choice:")
choice_map = {}
for i, choice in enumerate(choices):
i = str(i)
print('{}) {}'.format(i, format.indent(choice,
' ' * (len(i) + 2)).strip()))
choice = format.normalize(choice)
choice_map[i] = choice
return choice_map
def _add_history(self, line):
"""Adds the given line to readline history, only if the line
is non-empty.
"""
if line and HAS_READLINE:
readline.add_history(line)
def unix_time(self, dt):
"""Returns the number of seconds since the UNIX epoch for the given
datetime (dt).
PARAMETERS:
dt -- datetime
"""
epoch = datetime.utcfromtimestamp(0)
delta = dt - epoch
return int(delta.total_seconds())
protocol = UnlockProtocol
| [
"(line {}){}",
"? "
] |
2024-01-10 | AI4FutureSE/Sapper-IDE | Sapper-IDE~Metaprompt_gpt3.py | import openai
def program_Classifier(prompt, max_tokens=256, stop=None, temperature=0):
response = None
while response is None:
try:
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
max_tokens=max_tokens,
stop=stop,
temperature=temperature
)
except Exception as e:
print(type(e), e)
# if "This model's maximum context length" in type(e):
# response = "null"
if str(type(e)) == "<class 'openai.error.InvalidRequestError'>":
response = "null"
    if response == "null":
        # The request was rejected (e.g. prompt exceeded the context window); nothing to read
        return []
    results = []
    for choice in response.choices:
text = choice.text.strip()
results.append(text)
return results
def gen_for_gpt3(input, query, OpenAIKey):
openai.api_key = OpenAIKey
input_m = []
for put in input:
input_m.append(put[0])
input_mes = ", ".join(input_m)
prompt = 'A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output.\n\n' + \
'Figure out what the intent of the following prompt is that the user submitted and suggest a better prompt for what they are trying to do. Use triangle brackets {{}} for templating parts of the prompt that could be substituted. The new prompt should be specific and detailed.\n\n' + \
'PROMPT: Write a short feature description for a website Input: Website_Name\n' + \
             'NEW PROMPT: Write a short description of {{Website_Name}} to be used on its homepage. Focus on features such as pricing, user experience, customer support, etc. Include a call-to-action linking to a signup page.\n\n' + \
'PROMPT:' + query.strip() + " Input: " + input_mes + '\n' + \
'NEW PROMPT:'
first = program_Classifier(prompt=prompt, max_tokens=256, temperature=0.5)
second = program_Classifier(prompt=prompt, max_tokens=256, temperature=0.7)
third = program_Classifier(prompt=prompt, max_tokens=256, temperature=1)
result = [first[0], second[0], third[0]]
print('First: ', first[0])
print('Second: ', second[0])
print('Third: ', third[0])
return result
# query = "According to the number of questions entered, generate math homework based on the math problem. "
# first, second, thrid = gen_for_gpt3(["Number"], query)
| [
"\n",
"PROMPT: Write a short feature description for a website Input: Website_Name\n",
"A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output.\\n\\n' + \\\n 'Figure out what the intent of the following prompt is that the user submitted and suggest a better prompt for what they are trying to do. Use triangle brackets {{}} for templating parts of the prompt that could be substituted. The new prompt should be specific and detailed.\\n\\n' + \\\n 'PROMPT: Write a short feature description for a website Input: Website_Name\\n' + \\\n 'NEW PROMPT: Write a short description of {{Website_Name}} to be used on its homepage. Focus on features such as pricing, user experience, customer suport, etc. Include a call-to-action linking to a signup page.\\n\\n' + \\\n 'PROMPT:' + query.strip() + \" Input: \" + input_mes + '\\n' + \\\n 'NEW PROMPT:",
"A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output.\n\n",
"NEW PROMPT:",
" Input: ",
"Figure out what the intent of the following prompt is that the user submitted and suggest a better prompt for what they are trying to do. Use triangle brackets {{}} for templating parts of the prompt that could be substituted. The new prompt should be specific and detailed.\n\n",
"NEW PROMPT: Write a short description of {{Website_Name}} to be used on its homepage. Focus on features such as pricing, user experience, customer suport, etc. Include a call-to-action linking to a signup page.\n\n"
] |
2024-01-10 | AI4FutureSE/Sapper-IDE | Sapper-IDE~decompose.py | import time
import openai
def gpt3(prompt, t, max_tokens):
    # Retry the completion call until it succeeds so callers never receive None
    while True:
        try:
            response = openai.Completion.create(
                engine="text-davinci-003",
                prompt=prompt,
                temperature=t,
                max_tokens=max_tokens,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0,
                stop=""
            )
            return response["choices"][0]["text"]
        except Exception as e:
            print(type(e), e)
            time.sleep(3)
decompose_template = """A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output.
In order for a large language model to better complete tasks or generate outputs, task requirements need to be decomposed.
The decomposed subtasks can interact with a large language model like a human thought chain, reflecting data flow and control flow.
<Task Description>
I want to build a chatbot that will stop the conversation until someone says GoodBye.
</Task Description>
<Decomposed Subtasks>
<Control While User_Input not equal to GoodBye>
(Subtask1 Input: Chat_History User_Input Bot_Response Output: Bot_Response Model LLM)
Combine chat history, user input, and bot response to get a new chat history.
(Subtask2 Input: Chat_History User_Input Output: Bot_Response Model LLM)
Keep the conversation going and prompt the user for more input
</Control>
</Decomposed Subtasks>
<Task Description>
I need to develop a function to obtain the weather conditions of the day according to the weather API and automatically draw 500x500 pixel RGB color paintings that meet the weather conditions, draw abstract paintings when the weather is rainy, and draw natural landscape paintings when the weather is sunny, so as to improve the user experience and entertainment.
</Task Description>
<Decomposed Subtasks >
(Subtask1 Input: None Output: Weather_Data Model OpenWeatherMap)
obtain weather conditions for the day
<Control If Weather equal to rainy>
(Subtask2 Input: Weather_Data Output: Painting_Description Model LLM)
generate descriptions of abstract paintings through weather information.
</Control>
<Control If Weather equal to sunny>
(Subtask3 Input: Weather_Data Output: Painting_Description Model LLM)
generate natural landscape descriptions of abstract paintings through weather information.
</Control>
(Subtask4 Input: Painting_Description Output: Painting; Model Image-generation-model)
Generate 500x500 pixel paintings according to Painting_Description.
</Decomposed Subtasks>
<Task Description>
{{Description}}
</Task Description>
Note: Output cannot have the same name
"""
decompose_template1 = """<Requirement Description>
I need to develop a function to obtain the weather conditions of the day according to the weather API and automatically draw 500x500 pixel RGB color paintings that meet the weather conditions, draw abstract paintings when the weather is rainy, and draw natural landscape paintings when the weather is sunny, so as to improve the user experience and entertainment.
</Requirement Description>
<Decomposed steps>
To achieve this function, you can follow the following steps to analyze and design the process:
(Step1 Input: None Output: API_Interface_Selection)
First, you need to choose an API interface used to obtain weather information. You can select some open weather APIs, such as OpenWeatherMap, AccuWeather, etc.
(Step2 Input: API_Interface_Selection Output: API_Key)
After selecting the API interface, you need to configure the API key to access the API. Generally, the API provider will provide a key, which can be found in the API documentation.
(Step3 Input: API_Key Output: Weather_Data)
Use the API key to access the API to get the weather data of the day. The data format is usually JSON or XML.
(Step4 Input: Weather_Data Output: Parsed_Data_Structure)
Parse the obtained JSON or XML format data into easy-to-handle data structures, such as dictionaries or objects.
(Step5 Input: Parsed_Data_Structure Output: Weather_Type)
Judge the weather type of the day according to the description information in the weather data. It can be classified according to weather conditions, such as sunny, cloudy and rainy days.
(Step6 Input: Weather_Type Output: RGB_Color_Value)
In order to generate paintings that meet weather conditions, you need to map the weather type to the corresponding RGB color value. You can map sunny days to blue tones, rainy days to gray tones, and snowy days to white tones.
(Step7 Input: Weather_Type&RGB_Color_Value Output: Painting)
Generate 500x500 pixel paintings according to weather type and corresponding RGB color values. For rainy days, you can generate abstract paintings, and for sunny days, you can generate natural landscape paintings.
(Step8 Input: Painting Output: Display_Painting)
Display the generated paintings to users to improve user experience and entertainment.
</Decomposed steps>
<Requirement Description>
{{Description}}
</Requirement Description>
<Decomposed steps>
"""
def decompose(query):
decomposed_steps = gpt3(decompose_template.replace("{{Description}}", query), 0, 2048)
return decomposed_steps
def Generasteps(query , OpenAIKey):
openai.api_key = OpenAIKey
# query = "I need to develop a function that allows users to search for nearby restaurants and hotels according to their current location and display the search results on the map."
steps = decompose(query).split("\n\n")[1:]
stepsJson = {}
for j,step in enumerate(steps):
if '(Subtask' in step:
temp = step.split("\n")
inp = temp[0].split(" ")[2].split("&")
newinp = [[i, 'no'] for i in inp if i != 'None']
# for i in inp:
# newinp.append([i,'no'])
oup = temp[0].split(" ")[4]
mod = temp[0].split(" ")[6][:-1]
content = temp[1]
js = {"content": content, "input": newinp, "output": [oup, 'no'], "model": mod}
name = 'step' + str(j)
stepsJson[name] = js
return stepsJson
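# Example of the subtask line format the parser above expects (a sketch; multiple
# inputs are "&"-joined, matching temp[0].split(" ")[2].split("&")):
#   (Subtask1 Input: Chat_History&User_Input Output: Bot_Response Model LLM)
# parses to input [["Chat_History", "no"], ["User_Input", "no"]],
# output ["Bot_Response", "no"] and model "LLM".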
query = "I need to develop a function that allows users to search for nearby restaurants and hotels according to their current location and display the search results on the map."
query1 = "I need an automatic problem maker that can generate multiple choice math questions based on the difficulty and number of questions entered by the user."
pat = {"content": "How are you", "input": ["history", "chatbot"],"prompt":["prompt1","prompt2"] ,"output": "human", "model": "LLM"}
| [
"How are you",
"<Requirement Description>\nI need to develop a function to obtain the weather conditions of the day according to the weather API and automatically draw 500x500 pixel RGB color paintings that meet the weather conditions, draw abstract paintings when the weather is rainy, and draw natural landscape paintings when the weather is sunny, so as to improve the user experience and entertainment.\n</Requirement Description>\n\n<Decomposed steps>\nTo achieve this function, you can follow the following steps to analyze and design the process:\n\n(Step1 Input: None Output: API_Interface_Selection)\nFirst, you need to choose an API interface used to obtain weather information. You can select some open weather APIs, such as OpenWeatherMap, AccuWeather, etc.\n\n(Step2 Input: API_Interface_Selection Output: API_Key)\nAfter selecting the API interface, you need to configure the API key to access the API. Generally, the API provider will provide a key, which can be found in the API documentation.\n\n(Step3 Input: API_Key Output: Weather_Data)\nUse the API key to access the API to get the weather data of the day. The data format is usually JSON or XML.\n\n(Step4 Input: Weather_Data Output: Parsed_Data_Structure)\nParse the obtained JSON or XML format data into easy-to-handle data structures, such as dictionaries or objects.\n\n(Step5 Input: Parsed_Data_Structure Output: Weather_Type)\nJudge the weather type of the day according to the description information in the weather data. It can be classified according to weather conditions, such as sunny, cloudy and rainy days.\n\n(Step6 Input: Weather_Type Output: RGB_Color_Value)\nIn order to generate paintings that meet weather conditions, you need to map the weather type to the corresponding RGB color value. You can map sunny days to blue tones, rainy days to gray tones, and snowy days to white tones.\n\n(Step7 Input: Weather_Type&RGB_Color_Value Output: Painting)\nGenerate 500x500 pixel paintings according to weather type and corresponding RGB color values. For rainy days, you can generate abstract paintings, and for sunny days, you can generate natural landscape paintings.\n\n(Step8 Input: Painting Output: Display_Painting)\nDisplay the generated paintings to users to improve user experience and entertainment.\n</Decomposed steps>\n\n<Requirement Description>\n{{Description}}\n</Requirement Description>\n\n<Decomposed steps>\n",
"A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output. \n \nIn order for a large language model to better complete tasks or generate outputs, task requirements need to be decomposed. \n \nThe decomposed subtasks can interact with a large language model like a human thought chain, reflecting data flow and control flow.\n \n \n<Task Description>\nI want to build a chatbot that will stop the conversation until someone says GoodBye.\n</Task Description>\n \n<Decomposed Subtasks>\n\n<Control While User_Input not equal to GoodBye>\n\n(Subtask1 Input: Chat_History User_Input Bot_Response Output: Bot_Response Model LLM)\nCombine chat history, user input, and bot response to get a new chat history.\n \n(Subtask2 Input: Chat_History User_Input Output: Bot_Response Model LLM)\nKeep the conversation going and prompt the user for more input\n\n</Control>\n</Decomposed Subtasks>\n \n \n<Task Description>\nI need to develop a function to obtain the weather conditions of the day according to the weather API and automatically draw 500x500 pixel RGB color paintings that meet the weather conditions, draw abstract paintings when the weather is rainy, and draw natural landscape paintings when the weather is sunny, so as to improve the user experience and entertainment.\n</Task Description>\n \n<Decomposed Subtasks >\n\n(Subtask1 Input: None Output: Weather_Data Model OpenWeatherMap)\nobtain weather conditions for the day\n \n<Control If Weather equal to rainy>\n\n(Subtask2 Input: Weather_Data Output: Painting_Description Model LLM)\ngenerate descriptions of abstract paintings through weather information.\n\n</Control>\n \n<Control If Weather equal to sunny>\n\n(Subtask3 Input: Weather_Data Output: Painting_Description Model LLM)\ngenerate natural landscape descriptions of abstract paintings through weather information.\n\n</Control>\n \n(Subtask4 Input: Painting_Description Output: Painting; Model Image-generation-model)\nGenerate 500x500 pixel paintings according to Painting_Description.\n\n</Decomposed Subtasks>\n \n \n<Task Description>\n{{Description}}\n</Task Description>\nNote: Output cannot have the same name\n"
] |
2024-01-10 | AI4FutureSE/Sapper-IDE | Sapper-IDE~deployproInfor.py | import openai
# openai.api_key = "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"
def program_Generate(prompt, num_candidates=1, max_tokens=256, stop=None, temperature=0):
    try:
        response = openai.Completion.create(
            prompt=prompt,
            model="text-davinci-003",
            max_tokens=max_tokens,
            stop=stop,
            temperature=temperature,
            n=num_candidates
        )
        return response["choices"][0]["text"]
    except Exception as e:
        # Return an empty string on failure so callers always receive text
        print(type(e), e)
        return ""
question_prompt = """A developer is crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output as an AI service.
Here's a Prompt for a small AI service.
Please understand the task the Prompt completes, and then write a pre-message for the task that reminds the user how to use the service.
This pre-information should include a description of the AI service and what the user needs to input the first time, and it should be written in the first person.
Prompts: {{Prompts}}
pre-information:
"""
def generate_deploypreInfor(query, OpenAIKey):
openai.api_key = OpenAIKey
# TODO figure out alternative stopping criterion for generating initial characters?
question_prompt1 = question_prompt.replace("{{Prompts}}", query)
if len(question_prompt1) >=2000 :
question_prompt1 = question_prompt1[0:2000]
expansion = program_Generate(prompt=question_prompt1, temperature=0.7, max_tokens=512, num_candidates=1)
return expansion
| [
"A developer is crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output as an AI service.\nHere's a Prompt about the small AI service. \nPlease understand the task completed by Prompt, and then write a pre-message to the task to remind the user to use the service. \nThis pre-information should include a description of the AI service and what the user needs to input for the first time, and written in the first person\nPrompts: {{Prompts}}\npre-information:\n",
"{{Prompts}}"
] |
2024-01-10 | AI4FutureSE/Sapper-IDE | Sapper-IDE~exploration_view.py | import openai
import json
class exploration():
def __init__(self, OpenaiKey):
openai.api_key = OpenaiKey
self.prompt = [{"role": "system", "content": "Are you ready?"}, {"role": "assistant", "content": "Yes, I am always ready to assist you to the best of my abilities. Just let me know how I can help you."},]
self.context = """
I would like you to act as a project manager. I have given you the conversation between user and assistant.
According to the conversation above, please summarize the key information.
        You may only refer to the given conversation and must not add extra information.
        Do not pay too much attention to how the user is trying out the system; instead, keep a high-level abstraction for the system design.
        You should summarize it from three aspects:
        1. What are the key requirements of the user?
        2. What are the user's preferences? For example, what the user likes and dislikes, what you have to do to satisfy the user's requirements, and what you must not do.
        3. Explicitly list the points that you have to pay attention to when implementing the system.
        You have to output the three aspects in the form of 1. Key Requirements:, 2. User Preference:, 3. Implementing Consideration:. You are expected to bullet-list the key points for each aspect.
"""
# self.conversation_file = conversation_file_name
def chatbot(self):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.prompt
)
return response["choices"][0]["message"]
def pre_design_view(self):
self.prompt = self.prompt + [
{"role": "system", "content": self.context},
]
# Summarize conversation and extract user requirements
return self.chatbot()["content"]
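# Illustrative usage (a sketch; the key and message below are placeholders):
#   view = exploration("sk-...")
#   view.prompt.append({"role": "user", "content": "I want a recipe recommender."})
#   requirements_summary = view.pre_design_view()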
| [
"Are you ready?",
"Yes, I am always ready to assist you to the best of my abilities. Just let me know how I can help you."
] |
2024-01-10 | AI4FutureSE/Sapper-IDE | Sapper-IDE~pychain~sapperchain.py | import openai
import LLMConfigurator
import re
import json
import os
from io import StringIO
import sys
from typing import Optional, Dict
def run_python_code(command: str, _globals: Optional[Dict] = None, _locals: Optional[Dict] = None) -> str:
_globals = _globals if _globals is not None else {}
_locals = _locals if _locals is not None else {}
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
try:
exec(command, _globals, _locals)
sys.stdout = old_stdout
output = mystdout.getvalue()
except Exception as e:
sys.stdout = old_stdout
output = str(e)
return output
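# e.g. run_python_code("print(1 + 1)") returns "2\n"; if the snippet raises, the
# exception's string form is returned instead.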
class sapperchain:
def __init__(self, OpenaiKey):
openai.api_key = OpenaiKey
def promptbase(self,prompt_template):
self.prompt_template = json.loads(prompt_template)
def worker(self, prompt, preunits, model):
if(model["engine"].replace(" ","") == "PythonREPL"):
return self.run_PythonREPL(prompt, preunits, model)
else:
return self.run_Function(prompt, preunits, model)
    def getPromptParams(self, prompt_template):
        # Extract placeholder names such as {{Text}} from a prompt template
        paraNames = []
        if re.search(r"{{\w+}}", prompt_template):
            paraNames = re.findall(r"{{(\w+)}}", prompt_template)
        return paraNames
def run_Function(self, promptvalue, prenunits ,model):
ready_prompt = ""
for value in self.prompt_template[promptvalue]:
ready_prompt += value[1] + "\n"
para_name = self.getPromptParams(ready_prompt)
for index, key in enumerate(para_name):
ready_prompt = ready_prompt.replace("{{%s}}" % key, prenunits[index])
Config = LLMConfigurator.Config()
Config.add_to_config("prompt", ready_prompt)
if (model["engine"].replace(" ", "") == "DALL-E"):
response = openai.Image.create(
prompt=ready_prompt,
n=1,
size="512x512",
)
image_url = response['data'][0]['url']
return image_url
if (model["engine"].replace(" ", "") == "gpt-3.5-turbo"):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": ready_prompt}
]
)
output = response.choices[0].message["content"]
return output
for key in model:
Config.add_to_config(key, model[key])
response = openai.Completion.create(
engine=Config.engine.replace(" ", ""),
prompt=Config.prompt,
temperature=float(Config.temperature),
max_tokens=int(Config.max_tokens),
top_p=float(Config.top_p),
frequency_penalty=float(Config.frequency_penalty),
presence_penalty=float(Config.presence_penalty),
stop=Config.stop_strs
)
output = response["choices"][0]["text"]
return output
def run_PythonREPL(self,promptvalue, prenunits, model):
ready_prompt = ""
for value in self.prompt_template[promptvalue]:
ready_prompt += value[1] + "\n"
para_name = self.getPromptParams(ready_prompt)
for index, key in enumerate(para_name):
ready_prompt = ready_prompt.replace("{{%s}}" % key, prenunits[index])
output = run_python_code(ready_prompt)
return output
| [
"PLACEHOLDER\n"
] |
2024-01-10 | AI4FutureSE/Sapper-IDE | Sapper-IDE~run_prompt.py | import openai
import LLMConfigurator
import re
import os
from io import StringIO
import sys
from typing import Optional, Dict
def run_python_code(command: str, _globals: Optional[Dict] = None, _locals: Optional[Dict] = None) -> str:
_globals = _globals if _globals is not None else {}
_locals = _locals if _locals is not None else {}
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
try:
exec(command, _globals, _locals)
sys.stdout = old_stdout
output = mystdout.getvalue()
except Exception as e:
sys.stdout = old_stdout
output = str(e)
return output
def getPromptParams(prompt_template):
    # Extract placeholder names such as {{Text}} from a prompt template
    paraNames = []
    if re.search(r"{{\w+}}", prompt_template):
        paraNames = re.findall(r"{{(\w+)}}", prompt_template)
    return paraNames
def run_Function(promptvalue, prenunits ,model,OpenAIKey,debugvalue):
# ready_prompt = open("../PromptTemplate/"+prompt_name+"_Prompt.txt", "r").read()
ready_prompt = promptvalue
para_name = getPromptParams(promptvalue)
for index, key in enumerate(para_name):
try:
ready_prompt = ready_prompt.replace("{{%s}}" % key, prenunits[index])
except Exception as e:
e = "The number of preworkers is different from the number of parameters in the prompt."
return {"error": e, 'type': 'text'}
Config = LLMConfigurator.Config()
openai.api_key = OpenAIKey
if debugvalue != "":
ready_prompt = debugvalue
Config.add_to_config("prompt", ready_prompt)
print(ready_prompt)
try:
if(model["engine"].replace(" ", "") == "DALL-E"):
response = openai.Image.create(
prompt=ready_prompt,
n=1,
size="512x512",
)
image_url = response['data'][0]['url']
return {'message': image_url, 'type': 'image'}
for key in model:
Config.add_to_config(key, model[key])
if(model["engine"].replace(" ","")=="gpt-3.5-turbo"):
# Note: you need to be using OpenAI Python v0.27.0 for the code below to work
response=openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": ready_prompt}
],
temperature=float(Config.temperature),
max_tokens=int(Config.max_tokens),
top_p=float(Config.top_p),
frequency_penalty=float(Config.frequency_penalty),
presence_penalty=float(Config.presence_penalty),
)
output = response.choices[0].message["content"]
return {'message': output, 'type': 'text'}
response = openai.Completion.create(
engine=Config.engine.replace(" ", ""),
prompt=Config.prompt,
temperature=float(Config.temperature),
max_tokens=int(Config.max_tokens),
top_p=float(Config.top_p),
frequency_penalty=float(Config.frequency_penalty),
presence_penalty=float(Config.presence_penalty),
stop=Config.stop_strs
)
output = response["choices"][0]["text"]
        # Keep only the first generated section
output = output.split("===================")[0]
        # Split the text into lines so blank entries can be removed
output = output.split("\n")
# delete the empty string in the list
output = [i for i in output if i != '']
output = "\n".join(output)
return {'message': output, 'type': 'text'}
except Exception as e:
return {"error": str(e), 'type': 'text'}
def run_PythonREPL(promptvalue, prenunits ,model,debugvalue):
ready_prompt = promptvalue
para_name = getPromptParams(promptvalue)
for index, key in enumerate(para_name):
ready_prompt = ready_prompt.replace("{{%s}}" % key, prenunits[index])
if debugvalue != "":
ready_prompt = debugvalue
print(ready_prompt)
try:
output = run_python_code(ready_prompt)
return {'message': output, 'type': 'text'}
except Exception as e:
return {'error': str(e), 'type': 'text'}
| [] |
2024-01-10 | AI4FutureSE/Sapper-IDE | Sapper-IDE~clarify.py | import openai
def program_Generate(prompt, num_candidates=1, max_tokens=256, stop=None, temperature=0):
results = []
try:
response = openai.Completion.create(
prompt=prompt,
model="text-davinci-003",
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
n=num_candidates
)
for choice in response.choices:
text = choice.text.strip()
print(text)
results.append(text.split(". You")[0])
except Exception as e:
print(type(e), e)
if str(type(e)) == "<class 'openai.error.InvalidRequestError'>":
response = "null"
return results
question_prompt = """
Context: A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output.
Instruction: In order for a large language model to better complete tasks or generate outputs, need to ask a question about the task and let users reply.
The questions asked can be combined with the previously obtained Key Requirements, User Preference, and Implementing Consideration to make users more clear about their needs.
{{User_Behaviour}}
The questions asked need to lead users to clarification of requirements and conform to strategies for interacting with LLM.
Functional requirement: I want to develop a service that automatically draws according to the weather.
Requirement guidance: You need to consider what goes into the design. For example, which colors to use for painting, canvas size, canvas type, etc.
Answer: Draw 500x500 pixel RGB color pictures.
Requirement guidance: You need to specify some conditions. For example, paint automatically only on rainy or sunny days.
Answer: Abstract pictures when the weather is rainy and nature landscapes when the weather is sunny.
Please give requirement guidance for the following functional requirement based on the above form.
"""
question_prompt2 = """
Context: A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output.
You are a product manager AI tasked with focusing on users' requirements and understanding them through deep communication.
You need to provide a question about the users' requirements and let users reply.
The question raised can be referred to the user's task notes:
{{User_Behaviour}}
The question should be well in line with user requirements and usage scenarios.
Functional requirement: I want to develop a service that automatically draws according to the weather.
Requirement guidance: You need to consider what goes into the design. For example, which colors to use for painting, canvas size, canvas type, etc.
Answer: Draw 500x500 pixel RGB color pictures.
Requirement guidance: You need to specify some conditions. For example, paint automatically only on rainy or sunny days.
Answer: Abstract pictures when the weather is rainy and nature landscapes when the weather is sunny.
Please give requirement guidance for the following functional requirement based on the above form.
"""
def generate_query_expansion(Behaviour, query, OpenAIKey):
openai.api_key = OpenAIKey
# TODO figure out alternative stopping criterion for generating initial characters?
question_prompt1 = question_prompt + query + 'Requirement guidance:'
question_prompt1 = question_prompt1.replace("{{User_Behaviour}}", Behaviour)
    candidates = program_Generate(prompt=question_prompt1, temperature=0.3, max_tokens=48, num_candidates=1, stop='\n')
    expansion = candidates[0] if candidates else ""  # guard against an empty result on API failure
if "Requirement guidance:" in query:
query = query + "\nWrite the Functional requirement in detail according to the Requirement guidance and Answer above\nFunctional Requirement:"
        candidates1 = program_Generate(prompt=query, num_candidates=1, max_tokens=256, temperature=0.3)
        expansion1 = candidates1[0] if candidates1 else ""  # guard against an empty result on API failure
else:
expansion1 = query.replace("Functional requirement:", "")
return expansion, expansion1
| [
"{{User_Behaviour}}",
"\nContext: A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output.\nInstruction: In order for a large language model to better complete tasks or generate outputs, need to ask a question about the task and let users reply.\nThe questions asked can be combined with the previously obtained Key Requirements, User Preference, and Implementing Consideration to make users more clear about their needs.\n{{User_Behaviour}}\nThe questions asked need to lead users to clarification of requirements and conform to strategies for interacting with LLM. \n\nFunctional requirement: I want to develop a service that automatically draws according to the weather.\nRequirement guidance: You need to consider what goes into the design. For example, which colors to use for painting, canvas size, canvas type, etc.\nAnswer: Draw 500x500 pixel RGB color pictures.\nRequirement guidance: You need to specify some conditions. For example, paint automatically only on rainy or sunny days.\nAnswer: Abstract pictures when the weather is rainy and nature landscapes when the weather is sunny.\nPlease give requirement guidance for the following functional requirement based on the above form.\nqueryaaaad8fc-9b63-4969-88bc-49663f6c3fd8\nWrite the Functional requirement in detail according to the Requirement guidance and Answer above\nFunctional Requirement:Requirement guidance:",
"\nContext: A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output.\nYou are a product manager AI tasked with focusing on users' requirements and understanding them through deep communication.\nYou need to provide a question about the users' requirements and let users reply.\nThe question raised can be referred to the user's task notes:\n{{User_Behaviour}}\nThe question should be well in line with user requirements and usage scenarios.\n\nFunctional requirement: I want to develop a service that automatically draws according to the weather.\nRequirement guidance: You need to consider what goes into the design. For example, which colors to use for painting, canvas size, canvas type, etc.\nAnswer: Draw 500x500 pixel RGB color pictures.\nRequirement guidance: You need to specify some conditions. For example, paint automatically only on rainy or sunny days.\nAnswer: Abstract pictures when the weather is rainy and nature landscapes when the weather is sunny.\nPlease give requirement guidance for the following functional requirement based on the above form.\n",
"\nContext: A user is interacting with a large language model. They are crafting prompts and giving them to the LLM in order to get the model to complete a task or generate output.\nInstruction: In order for a large language model to better complete tasks or generate outputs, need to ask a question about the task and let users reply.\nThe questions asked can be combined with the previously obtained Key Requirements, User Preference, and Implementing Consideration to make users more clear about their needs.\n{{User_Behaviour}}\nThe questions asked need to lead users to clarification of requirements and conform to strategies for interacting with LLM. \n\nFunctional requirement: I want to develop a service that automatically draws according to the weather.\nRequirement guidance: You need to consider what goes into the design. For example, which colors to use for painting, canvas size, canvas type, etc.\nAnswer: Draw 500x500 pixel RGB color pictures.\nRequirement guidance: You need to specify some conditions. For example, paint automatically only on rainy or sunny days.\nAnswer: Abstract pictures when the weather is rainy and nature landscapes when the weather is sunny.\nPlease give requirement guidance for the following functional requirement based on the above form.\n"
] |
2024-01-10 | Audio-AGI/WavJourney | pipeline.py | import datetime
import os
from string import Template
import openai
import re
import glob
import pickle
import time
import json5
from retrying import retry
from code_generator import check_json_script, collect_and_check_audio_data
import random
import string
import utils
import voice_presets
from code_generator import AudioCodeGenerator
# Enable this for debugging
USE_OPENAI_CACHE = False
openai_cache = []
if USE_OPENAI_CACHE:
os.makedirs('cache', exist_ok=True)
for cache_file in glob.glob('cache/*.pkl'):
with open(cache_file, 'rb') as file:
openai_cache.append(pickle.load(file))
def chat_with_gpt(prompt, api_key):
if USE_OPENAI_CACHE:
filtered_object = list(filter(lambda x: x['prompt'] == prompt, openai_cache))
if len(filtered_object) > 0:
response = filtered_object[0]['response']
return response
try:
openai.api_key = api_key
chat = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
model="gpt-4",
messages=[
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": prompt
}
]
)
finally:
openai.api_key = ''
if USE_OPENAI_CACHE:
cache_obj = {
'prompt': prompt,
'response': chat['choices'][0]['message']['content']
}
with open(f'cache/{time.time()}.pkl', 'wb') as _openai_cache:
pickle.dump(cache_obj, _openai_cache)
openai_cache.append(cache_obj)
return chat['choices'][0]['message']['content']
def get_file_content(filename):
with open(filename, 'r') as file:
return file.read().strip()
def write_to_file(filename, content):
with open(filename, 'w') as file:
file.write(content)
def extract_substring_with_quotes(input_string, quotes="'''"):
pattern = f"{quotes}(.*?){quotes}"
matches = re.findall(pattern, input_string, re.DOTALL)
return matches
def try_extract_content_from_quotes(content):
if "'''" in content:
return extract_substring_with_quotes(content)[0]
elif "```" in content:
return extract_substring_with_quotes(content, quotes="```")[0]
else:
return content
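# e.g. try_extract_content_from_quotes("text '''{\"a\": 1}''' tail") returns '{"a": 1}'.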
def maybe_get_content_from_file(content_or_filename):
if os.path.exists(content_or_filename):
with open(content_or_filename, 'r') as file:
return file.read().strip()
return content_or_filename
# Pipeline Interface Guidelines:
#
# Init calls:
# - Init calls must be called before running the actual steps
# - init_session() is called every time a gradio webpage is loaded
#
# Single Step:
# - takes input (file or content) and output path as input
# - most of time just returns output content
#
# Compositional Step:
# - takes session_id as input (you have session_id, you have all the paths)
# - run a series of steps
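#
# Illustrative end-to-end usage (a sketch; assumes a valid OpenAI API key and the
# prompt templates under prompts/):
#   session_id = init_session()
#   script = generate_json_file(session_id, 'A dog barks while rain falls.', api_key)
#   wav_path, char_voice_map = generate_audio(session_id, script, api_key)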
# This is called for every new gradio webpage
def init_session(session_id=''):
def uid8():
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=8))
if session_id == '':
session_id = f'{datetime.datetime.now().strftime("%Y%m%d%H%M%S")}_{uid8()}'
# create the paths
os.makedirs(utils.get_session_voice_preset_path(session_id))
os.makedirs(utils.get_session_audio_path(session_id))
print(f'New session created, session_id={session_id}')
return session_id
@retry(stop_max_attempt_number=3)
def input_text_to_json_script_with_retry(complete_prompt_path, api_key):
print(" trying ...")
complete_prompt = get_file_content(complete_prompt_path)
json_response = try_extract_content_from_quotes(chat_with_gpt(complete_prompt, api_key))
json_data = json5.loads(json_response)
try:
check_json_script(json_data)
collect_and_check_audio_data(json_data)
except Exception as err:
print(f'JSON ERROR: {err}')
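        # Self-repair loop: append the invalid JSON to the prompt, ask the model to fix
        # it, and let @retry re-run this function (up to 3 attempts) with the new prompt.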
retry_complete_prompt = f'{complete_prompt}\n```\n{json_response}```\nThe script above has format error(s). Return the fixed script.\n\nScript:\n'
write_to_file(complete_prompt_path, retry_complete_prompt)
raise err
return json_response
# Step 1: input_text to json
def input_text_to_json_script(input_text, output_path, api_key):
input_text = maybe_get_content_from_file(input_text)
text_to_audio_script_prompt = get_file_content('prompts/text_to_json.prompt')
prompt = f'{text_to_audio_script_prompt}\n\nInput text: {input_text}\n\nScript:\n'
complete_prompt_path = output_path / 'complete_input_text_to_audio_script.prompt'
write_to_file(complete_prompt_path, prompt)
audio_script_response = input_text_to_json_script_with_retry(complete_prompt_path, api_key)
generated_audio_script_filename = output_path / 'audio_script.json'
write_to_file(generated_audio_script_filename, audio_script_response)
return audio_script_response
# Step 2: json to char-voice map
def json_script_to_char_voice_map(json_script, voices, output_path, api_key):
json_script_content = maybe_get_content_from_file(json_script)
prompt = get_file_content('prompts/audio_script_to_character_voice_map.prompt')
presets_str = '\n'.join(f"{preset['id']}: {preset['desc']}" for preset in voices.values())
prompt = Template(prompt).substitute(voice_and_desc=presets_str)
prompt = f"{prompt}\n\nAudio script:\n'''\n{json_script_content}\n'''\n\noutput:\n"
write_to_file(output_path / 'complete_audio_script_to_char_voice_map.prompt', prompt)
char_voice_map_response = try_extract_content_from_quotes(chat_with_gpt(prompt, api_key))
char_voice_map = json5.loads(char_voice_map_response)
# enrich char_voice_map with voice preset metadata
complete_char_voice_map = {c: voices[char_voice_map[c]] for c in char_voice_map}
char_voice_map_filename = output_path / 'character_voice_map.json'
write_to_file(char_voice_map_filename, json5.dumps(complete_char_voice_map))
return complete_char_voice_map
# Step 3: json to py code
def json_script_and_char_voice_map_to_audio_gen_code(json_script_filename, char_voice_map_filename, output_path, result_filename):
audio_code_generator = AudioCodeGenerator()
code = audio_code_generator.parse_and_generate(
json_script_filename,
char_voice_map_filename,
output_path,
result_filename
)
write_to_file(output_path / 'audio_generation.py', code)
# Step 4: py code to final wav
def audio_code_gen_to_result(audio_gen_code_path):
audio_gen_code_filename = audio_gen_code_path / 'audio_generation.py'
os.system(f'PYTHONPATH=. python {audio_gen_code_filename}')
# Function call used by Gradio: input_text to json
def generate_json_file(session_id, input_text, api_key):
output_path = utils.get_session_path(session_id)
# Step 1
print(f'session_id={session_id}, Step 1: Writing audio script with LLM ...')
return input_text_to_json_script(input_text, output_path, api_key)
# Function call used by Gradio: json to result wav
def generate_audio(session_id, json_script, api_key):
def count_lines(content):
# Split the string using the newline character and count the non-empty lines
return sum(1 for line in content.split('\n') if line.strip())
max_lines = utils.get_max_script_lines()
if count_lines(json_script) > max_lines:
raise ValueError(f'The number of lines of the JSON script has exceeded {max_lines}!')
output_path = utils.get_session_path(session_id)
output_audio_path = utils.get_session_audio_path(session_id)
voices = voice_presets.get_merged_voice_presets(session_id)
# Step 2
print(f'session_id={session_id}, Step 2: Parsing character voice with LLM...')
char_voice_map = json_script_to_char_voice_map(json_script, voices, output_path, api_key)
# Step 3
json_script_filename = output_path / 'audio_script.json'
char_voice_map_filename = output_path / 'character_voice_map.json'
result_wav_basename = f'res_{session_id}'
print(f'session_id={session_id}, Step 3: Compiling audio script to Python program ...')
json_script_and_char_voice_map_to_audio_gen_code(json_script_filename, char_voice_map_filename, output_path, result_wav_basename)
# Step 4
print(f'session_id={session_id}, Step 4: Start running Python program ...')
audio_code_gen_to_result(output_path)
result_wav_filename = output_audio_path / f'{result_wav_basename}.wav'
print(f'Done all processes, result: {result_wav_filename}')
return result_wav_filename, char_voice_map
# Convenient function call used by wavjourney_cli
def full_steps(session_id, input_text, api_key):
json_script = generate_json_file(session_id, input_text, api_key)
return generate_audio(session_id, json_script, api_key)
| [
"PLACEHOLDER\n\nInput text: PLACEHOLDER\n\nScript:\n",
"PLACEHOLDER\n\nAudio script:\n'''\nPLACEHOLDER\n'''\n\noutput:\n",
"prompts/text_to_json.prompt",
"You are a helpful assistant.",
"prompts/audio_script_to_character_voice_map.prompt",
"complete_input_text_to_audio_script.prompt",
"PLACEHOLDER\n```\nPLACEHOLDER```\nThe script above has format error(s). Return the fixed script.\n\nScript:\n"
] |
2024-01-10 | rit-git/kankan | kankan~backend.py | import sys
sys.path.append('.')
from fastapi import FastAPI
from pydantic import BaseModel
from array import array
import uvicorn
import os
import heapq
import csv
import openai
from openai.embeddings_utils import get_embedding
from vecscan.vecscan import PyVectorScanner
from genre import genre_all, genre_food, STR_GENRE_ALL, STR_GENRE_NO_FOOD
class KankanRequest(BaseModel):
tdfk: str
query: str
genre: str
class KankanAPI(FastAPI):
def __init__(self):
super().__init__()
openai.api_key=os.environ["OPENAI_API_KEY"]
self.scanners = {}
self.raw_data = {}
self.genre_data = {}
self.vec_file_path_base = '/home/01052711/kankan/dataset/sentvecs.ada.kuchikomi_report.vec'
self.raw_file_path_base = '/home/01052711/kankan/dataset/jalan_kanko.csv'
self.genre_file_path_base = '/home/01052711/kankan/dataset/jalan_kanko.genre'
self.n_best_hotel = 10
self.n_best_kuchikomi = 10
self.post("/api/search")(self.search)
def search(self, req: KankanRequest):
if req.tdfk not in self.scanners:
self.scanners[req.tdfk] = PyVectorScanner(f'{self.vec_file_path_base}.{req.tdfk}', '')
self.raw_data[req.tdfk] = []
for line_idx, line in enumerate(open(f'{self.raw_file_path_base}.{req.tdfk}.csv')):
if line_idx > 0:
self.raw_data[req.tdfk].append(line.strip())
for row in csv.DictReader(open(f'{self.genre_file_path_base}.{req.tdfk}.csv')):
self.genre_data[row['odk_id']] = row['genre']
genre_accept = set()
if req.genre == STR_GENRE_ALL:
genre_accept = genre_all
elif req.genre == STR_GENRE_NO_FOOD:
genre_accept = genre_all - genre_food
else:
genre_accept.add(req.genre)
scan_results = self.scanners[req.tdfk].n_best_vecs(
query_vec=array('f', get_embedding(req.query, engine='text-embedding-ada-002')),
n_best=100000,
openmp=True,
debug=True,
threshold=0.50
)[0]
spot_result = {}
full_odk_set = set()
scan_results_len = scan_results.size()
for i in range(scan_results_len):
idx, score = scan_results.get(i)
fields = self.raw_data[req.tdfk][idx].strip().split(',')
kuchikomi_id, odk_id, other = fields[0], fields[1], ','.join(fields[2:])
if self.genre_data[odk_id] not in genre_accept:
continue
if odk_id not in spot_result:
spot_result[odk_id] = {'score': 0.0, 'kuchikomi': [], 'cnt': 0, 'genre': self.genre_data[odk_id]}
if spot_result[odk_id]['cnt'] < self.n_best_kuchikomi:
spot_result[odk_id]['kuchikomi'].append((score, other))
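                # Blend the new similarity score into a running total that is halved after
                # the first hit: total = (total + s) / 2, an exponentially weighted sum
                # rather than a plain accumulation.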
spot_result[odk_id]['score'] += score
if spot_result[odk_id]['cnt'] > 0:
spot_result[odk_id]['score'] /= 2
spot_result[odk_id]['cnt'] += 1
if spot_result[odk_id]['cnt'] == self.n_best_kuchikomi:
if True: # len(full_odk_set) < self.n_best_hotel:
full_odk_set.add(odk_id)
if len(full_odk_set) >= self.n_best_hotel:
result_list = [spot_result[k] for k in full_odk_set]
minor_list = [spot_result[k] for k in (set(spot_result.keys()) - full_odk_set)]
else:
result_list = spot_result.values()
minor_list = []
result_list = heapq.nlargest(self.n_best_hotel, result_list, key=lambda x: x['score'])
minor_list = heapq.nlargest(self.n_best_hotel, minor_list, key=lambda x: x['score'])
ret = {'hotel': [], 'hotel_minor': []}
for __hotel_key, __hotel_result in zip(['hotel', 'hotel_minor'], [result_list, minor_list]):
for spot_rank, kuchikomi_list in enumerate(__hotel_result):
ret[__hotel_key].append(
{
'rank': spot_rank + 1,
'score': kuchikomi_list['score'],
'genre': kuchikomi_list['genre'],
'kuchikomi': []
}
)
for kuchikomi in kuchikomi_list['kuchikomi']:
fields = kuchikomi[1].split(',')
ret[__hotel_key][-1]['kuchikomi'].append(
{
'score': kuchikomi[0],
'rate': fields[0],
'title': fields[1],
'text': ','.join(fields[2:-5]),
'date': fields[-5] + '/' + fields[-4],
'name': fields[-3],
'address': fields[-2],
'ybn': fields[-1]
}
)
return ret
def main():
app = KankanAPI()
uvicorn.run(
app,
port=21344,
root_path='/app/kankan'
)
if __name__ == '__main__':
main() | [] |
2024-01-10 | siva-nagendra/ai_toolkit | helpers~train_custom_dataset.py | import os
import time
import logging
from . import data_visualizer as dv
from concurrent.futures import ThreadPoolExecutor
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from typing import List, Tuple
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
DATASET_PATH = os.environ["DATASET_PATH"]
DB_FAISS_PATH = os.environ["DB_FAISS_PATH"]
sentence_transformer_model = "all-MiniLM-L6-v2"
start_time = time.time()
doc_count = []
times = []
# file types that you want to ingest, including all coding languages
extensions = ['.py', '.java', '.js', '.ts', '.md', '.cpp', '.c', '.cs', '.go', '.rs', '.php', '.html', '.css', '.xml', '.json', '.yaml', '.yml', '.sh', '.rst', '.sql', '.rb', '.pl', '.swift', '.m', '.mm', '.kt', '.gradle', '.groovy', '.scala', '.clj', '.cljs', '.cljc', '.edn', '.lua', '.coffee', 'pdf']
documents = []
def load_file(index: int, file: str) -> List[str]:
"""
Loads and splits the content of a file.
Args:
index (int): The index of the file being processed.
file (str): The path to the file.
Returns:
List[str]: A list of strings representing the split content of the file.
"""
try:
loader = TextLoader(file, encoding='utf-8')
return loader.load_and_split()
except RuntimeError as e:
if isinstance(e.__cause__, UnicodeDecodeError):
try:
loader = TextLoader(file, encoding='ISO-8859-1')
return loader.load_and_split()
except Exception as e_inner:
logging.exception(f"Failed to load {file} due to error: {e_inner}, Traceback: {e_inner.__traceback__}")
else:
logging.exception(f"Failed to load {file} due to an unexpected error: {e}")
if (index + 1) % 100 == 0:
logging.info(f"Processed {index + 1} documents.")
return []
def run_fast_scandir(dir: str, ext: List[str]) -> Tuple[List[str], List[str]]:
"""
Recursively scans a directory for files with specified extensions.
Args:
dir (str): The directory to scan.
ext (List[str]): A list of file extensions to look for.
Returns:
Tuple[List[str], List[str]]: A tuple containing a list of subfolders and a list of files found.
"""
subfolders, files = [], []
for f in os.scandir(dir):
if f.is_dir():
subfolders.append(f.path)
if f.is_file():
if os.path.splitext(f.name)[1].lower() in ext:
files.append(f.path)
for dir in list(subfolders):
sf, f = run_fast_scandir(dir, ext)
subfolders.extend(sf)
files.extend(f)
return subfolders, files
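# e.g. run_fast_scandir("/repo", ['.py', '.md']) returns (all subfolder paths,
# all *.py / *.md file paths), scanning recursively.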
def run(DATASET_PATH: str, DB_FAISS_PATH: str) -> None:
"""
Initiates the process to load and process documents from a dataset,
and then save the resultant FAISS database locally.
Args:
DATASET_PATH (str): The path to the dataset directory.
DB_FAISS_PATH (str): The path to save the FAISS database.
Returns:
None
"""
logging.info(f"Training started with\nDATASET_PATH: {DATASET_PATH}\nDB_FAISS_PATH: {DB_FAISS_PATH}")
subfolders, files = run_fast_scandir(DATASET_PATH, extensions)
with ThreadPoolExecutor() as executor:
document_lists = list(executor.map(lambda p: load_file(*p), enumerate(files)))
documents = [doc for doc_list in document_lists for doc in doc_list if doc_list]
doc_count.append(len(documents))
times.append(time.time() - start_time)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
texts = text_splitter.split_documents(documents)
logging.info(f"Total number of documents pre-processed: {len(documents)}")
visualizer = dv.DataVisualizer()
visualizer.start_timer()
embeddings = HuggingFaceEmbeddings(model_name=f'sentence-transformers/{sentence_transformer_model}',)
logging.info("Starting the creation of FAISS database from documents...")
db = FAISS.from_documents(texts, embeddings)
logging.info("FAISS database created successfully.")
logging.info(f"Saving the FAISS database locally at {DB_FAISS_PATH}...")
db.save_local(DB_FAISS_PATH)
logging.info("FAISS database saved successfully.")
visualizer.generate_plots(documents=files)
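# Minimal sketch of invoking the entry point; both paths come from the
# environment variables read at the top of this script.
#   run(DATASET_PATH, DB_FAISS_PATH)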
| [] |
2024-01-10 | siva-nagendra/ai_toolkit | multi_model_chatbot.py | import os
import logging
import chainlit as cl
from chainlit.input_widget import Select, Slider
from langchain import PromptTemplate
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import CTransformers, OpenAI
from langchain import LLMMathChain, SerpAPIWrapper
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.agents import initialize_agent, Tool, AgentExecutor
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
logging.basicConfig(
filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
llm_path = os.environ["LLM_PATH"]
local_models = [
"",
"codellama-7b.Q8_0.gguf",
"codellama-13b.Q4_K_M.gguf",
"codellama-34b.Q4_K_M.gguf",
"llama-2-13b-chat.ggmlv3.q4_K_M.bin",
"wizardcoder-python-34b-v1.0.Q3_K_M.gguf",
"wizardcoder-python-34b-v1.0.Q3_K_L.gguf",
]
sentence_transformer_model = "all-MiniLM-L6-v2"
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
DB_FAISS_PATH = os.environ["DB_FAISS_PATH"]
template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context: {context}
Question: {question}
Only return the helpful answer below and nothing else.
Helpful answer:
"""
def set_custom_prompt():
"""
This function sets up a custom prompt template for the language model.
Returns:
prompt: A custom prompt template object
Raises:
Exception: If any error occurs during the setup of the prompt template.
"""
try:
prompt = PromptTemplate(template=template, input_variables=['context', 'question'])
return prompt
except Exception as e:
logging.exception("Error occurred in set_custom_prompt function: ", exc_info=True)
raise
def load_llm(local_model, openai_model, temperature):
"""
This function loads the language model either from the local machine or from OpenAI's API.
Args:
local_model (str): The name of the local model to load.
openai_model (str): The name of the OpenAI model to load.
temperature (float): The temperature setting for the language model.
Returns:
llm: The loaded language model object.
Raises:
Exception: If any error occurs during the loading of the language model.
"""
    try:
        llm = None
        if local_model:
            llm = CTransformers(
                model=llm_path,
                model_name=local_model,
                n_batch=4096,
                n_ctx=4096,
                max_new_tokens=2048,
                temperature=temperature,
                callbacks=[StreamingStdOutCallbackHandler()],
                verbose=True,
                streaming=True,
            )
        if openai_model:
            # If both models are selected, the OpenAI model takes precedence.
            llm = OpenAI(model_name=openai_model, openai_api_key=OPENAI_API_KEY, temperature=temperature, max_tokens=2000)
        if llm is None:
            raise ValueError("No model selected: choose either a local model or an OpenAI model.")
        return llm
except Exception as e:
logging.exception("Error occurred in load_llm function: ", exc_info=True)
raise
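# Example (sketch; the model file must exist under LLM_PATH):
#   llm = load_llm("codellama-13b.Q4_K_M.gguf", "", temperature=0.5)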
def qa_bot(llm):
"""
This function sets up the question and answer bot with the loaded language model.
Args:
llm: The loaded language model object.
Returns:
qa: The initialized question and answer bot object.
"""
embeddings = HuggingFaceEmbeddings(model_name=f"sentence-transformers/{sentence_transformer_model}", model_kwargs={'device': 'cpu'})
db = FAISS.load_local(DB_FAISS_PATH, embeddings)
qa_prompt = set_custom_prompt()
qa = RetrievalQA.from_chain_type(
llm=llm, retriever=db.as_retriever(search_kwargs={'k': 2}), return_source_documents=True, chain_type_kwargs={'prompt': qa_prompt}
)
return qa
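# Example sketch of wiring the chain end to end; assumes a FAISS index already
# exists at DB_FAISS_PATH.
#   llm = load_llm("", "gpt-3.5-turbo", temperature=0.2)
#   qa = qa_bot(llm)
#   result = qa({"query": "How are documents chunked?"})
#   print(result["result"])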
@cl.on_chat_start
async def start():
"""
This asynchronous function initiates the chat start process, setting up the chat settings, loading the model, initializing the agent and sending the starting message.
Raises:
Exception: If any error occurs during the chat start process.
"""
try:
settings = await cl.ChatSettings(
[
Select(id="OpenAIModel", label="OpenAI - Model", values=["", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-32k"], initial_index=0),
Select(id="LocalModel", label="Local - Model", values=local_models, initial_index=6),
Slider(id="Temperature", label="Temperature", initial=0.5, min=0, max=2, step=0.1),
]
).send()
# setup_agent(settings)
local_model = settings["LocalModel"]
openai_model = settings["OpenAIModel"]
temperature = settings["Temperature"]
llm = load_llm(local_model, openai_model, temperature)
chain = qa_bot(llm)
search = SerpAPIWrapper()
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
Tool(name="Search", func=search.run, description="useful for when you need to answer questions about current events. You should ask targeted questions"),
Tool(name="Calculator", func=llm_math_chain.run, description="useful for when you need to answer questions about math"),
]
agent = initialize_agent(tools, llm, agent="chat-zero-shot-react-description", verbose=True)
cl.user_session.set("agent", agent)
msg = cl.Message(content="Starting the bot...")
await msg.send()
msg.content = "Hi, what would you like to ask?"
await msg.update()
cl.user_session.set("chain", chain)
except Exception as e:
logging.exception("Error occurred in start function: ", exc_info=True)
raise
@cl.on_settings_update
async def setup_agent(settings):
"""
This asynchronous function handles settings update during the chat session.
Args:
settings: The settings to update.
Raises:
Exception: If any error occurs during the settings update process.
"""
try:
print("on_settings_update", settings)
except Exception as e:
logging.exception("Error occurred in setup_agent function: ", exc_info=True)
raise
@cl.on_message
async def main(message):
"""
This is the main asynchronous function to handle incoming messages during the chat session, updating the agent and returning the response message.
Args:
message: The incoming message from the user.
Returns:
response: The response message from the bot.
Raises:
Exception: If any error occurs during the message handling process.
"""
try:
agent = cl.user_session.get("agent") # type: AgentExecutor
chain = cl.user_session.get("chain") # type: RetrievalQA
cb = cl.AsyncLangchainCallbackHandler(
stream_final_answer=True
)
cb.answer_reached = True
res = await chain.acall(message, callbacks=[cb])
# TODO: make the agents work
# res = await cl.make_async(agent.run)(message, callbacks=[cb])
# answer = res["result"]
sources = res["source_documents"]
main_message = await cl.Message(content="").send()
        if sources:
            sources_text = "\n\n# Sources: \n"
            for source in sources:
                sources_text += '\n' + str(source.metadata['source'])
                sources_text += '\n\n'
                sources_text += source.page_content
        else:
            # Assign (not append) here; sources_text is otherwise undefined.
            sources_text = "\n\nNo sources found"
        await cl.Message(content=sources_text, parent_id=main_message, author="Source").send()
except Exception as e:
logging.exception("Error occurred in main function: ", exc_info=True)
raise
| [
"question",
"context",
"Use the following pieces of information to answer the user's question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n\nContext: {context}\nQuestion: {question}\n\nOnly return the helpful answer below and nothing else.\nHelpful answer:\n"
] |
2024-01-10 | figelwump/gpt3-story-app | gpt~run_gpt_api.py | from http import HTTPStatus
import json
import os
import openai
from flask import Flask, request, Response
from flask_cors import CORS
def run_web_app():
app = Flask(__name__)
CORS(app) # allow CORS for all routes, all domains (NOTE: not production ready)
openai.api_key = os.getenv('OPENAI_KEY')
@app.route("/gpt", methods=["POST"])
def gpt():
print("in gpt route: ")
print(request)
prompt = request.json["prompt"]
print(prompt)
res = openai.Completion.create(
engine="davinci",
prompt=prompt,
max_tokens=200,
temperature=0.9,
top_p=1,
n=1,
frequency_penalty=0.8,
stop="\n"
)
print(res)
for choice in res['choices']:
            if choice['text'].strip() != "":
return {'gptResponse': choice['text']}
# API didn't return any non-empty completions
return {'gptResponse': ""}
app.run(port=3001)
#
# start it
#
run_web_app()
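# Example client call (sketch; assumes the server above is running locally):
#   import requests
#   resp = requests.post("http://localhost:3001/gpt", json={"prompt": "Once upon a time"})
#   print(resp.json()["gptResponse"])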
| [] |
2024-01-10 | Atsushi-Ishii/langchain_custom | tests~integration_tests~utilities~test_arxiv.py | """Integration test for Arxiv API Wrapper."""
import pytest
from langchain.utilities import ArxivAPIWrapper
@pytest.fixture
def api_client() -> ArxivAPIWrapper:
return ArxivAPIWrapper()
def test_call(api_client: ArxivAPIWrapper) -> None:
"""Test that ArxivAPIWrapper returns correct answer"""
output = api_client.run("1605.08386")
assert "Heat-bath random walks with Markov bases" in output
def test_several_docs(api_client: ArxivAPIWrapper) -> None:
"""Test that ArxivAPIWrapper returns several docs"""
output = api_client.run("Caprice Stanley")
assert "On Mixing Behavior of a Family of Random Walks" in output
def test_no_result_call(api_client: ArxivAPIWrapper) -> None:
"""Test that call gives no result."""
output = api_client.run("1605.08386WWW")
assert "No good Arxiv Result was found" == output
| [] |
2024-01-10 | Atsushi-Ishii/langchain_custom | langchain~retrievers~document_compressors~chain_extract.py | """DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import LLMChain, PromptTemplate
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
from langchain.schema import BaseLanguageModel, BaseOutputParser, Document
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
llm_chain: LLMChain
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output = self.llm_chain.predict_and_parse(**_input)
if len(output) == 0:
continue
compressed_docs.append(Document(page_content=output, metadata=doc.metadata))
return compressed_docs
async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
raise NotImplementedError
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
        get_input: Optional[Callable[[str, Document], dict]] = None,
) -> "LLMChainExtractor":
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
llm_chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=llm_chain, get_input=_get_input)
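# Example usage (sketch; retrieved_docs is an assumed list of Documents):
#   from langchain.llms import OpenAI
#   compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
#   compressed = compressor.compress_documents(retrieved_docs, "What is FAISS?")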
| [] |
2024-01-10 | Atsushi-Ishii/langchain_custom | langchain~document_loaders~youtube.py | """Loader that loads YouTube transcript."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Optional
from pydantic import root_validator
from pydantic.dataclasses import dataclass
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]
@dataclass
class GoogleApiClient:
"""A Generic Google Api Client.
To use, you should have the ``google_auth_oauthlib,youtube_transcript_api,google``
python package installed.
As the google api expects credentials you need to set up a google account and
register your Service. "https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
"""
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
service_account_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
def __post_init__(self) -> None:
self.creds = self._load_credentials()
@root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("credentials_path") and not values.get(
"service_account_path"
):
raise ValueError("Must specify either channel_name or video_ids")
return values
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib"
"youtube-transcript-api`"
"to use the Google Drive loader"
)
creds = None
if self.service_account_path.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_path)
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
class YoutubeLoader(BaseLoader):
"""Loader that loads Youtube transcripts."""
def __init__(
self, video_id: str, add_video_info: bool = False, language: str = "en"
):
"""Initialize with YouTube video ID."""
self.video_id = video_id
self.add_video_info = add_video_info
self.language = language
@classmethod
def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader:
"""Given youtube URL, load video."""
video_id = youtube_url.split("youtube.com/watch?v=")[-1]
return cls(video_id, **kwargs)
def load(self) -> List[Document]:
"""Load documents."""
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
YouTubeTranscriptApi,
)
except ImportError:
raise ImportError(
"Could not import youtube_transcript_api python package. "
"Please install it with `pip install youtube-transcript-api`."
)
metadata = {"source": self.video_id}
if self.add_video_info:
# Get more video meta info
# Such as title, description, thumbnail url, publish_date
video_info = self._get_video_info()
metadata.update(video_info)
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
except TranscriptsDisabled:
return []
try:
transcript = transcript_list.find_transcript([self.language])
except NoTranscriptFound:
en_transcript = transcript_list.find_transcript(["en"])
transcript = en_transcript.translate(self.language)
transcript_pieces = transcript.fetch()
transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces])
return [Document(page_content=transcript, metadata=metadata)]
def _get_video_info(self) -> dict:
"""Get important video information.
Components are:
- title
- description
- thumbnail url,
- publish_date
- channel_author
- and more.
"""
try:
from pytube import YouTube
except ImportError:
raise ImportError(
"Could not import pytube python package. "
"Please install it with `pip install pytube`."
)
yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}")
video_info = {
"title": yt.title,
"description": yt.description,
"view_count": yt.views,
"thumbnail_url": yt.thumbnail_url,
"publish_date": yt.publish_date,
"length": yt.length,
"author": yt.author,
}
return video_info
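# Example usage (sketch; the video URL is illustrative):
#   loader = YoutubeLoader.from_youtube_url(
#       "https://www.youtube.com/watch?v=dQw4w9WgXcQ", add_video_info=True
#   )
#   documents = loader.load()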
@dataclass
class GoogleApiYoutubeLoader(BaseLoader):
"""Loader that loads all Videos from a Channel
To use, you should have the ``googleapiclient,youtube_transcript_api``
python package installed.
As the service needs a google_api_client, you first have to initialize
the GoogleApiClient.
Additionally you have to either provide a channel name or a list of videoids
"https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
from langchain.document_loaders import GoogleApiYoutubeLoader
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
loader = GoogleApiYoutubeLoader(
google_api_client=google_api_client,
channel_name = "CodeAesthetic"
)
        loader.load()
"""
google_api_client: GoogleApiClient
channel_name: Optional[str] = None
video_ids: Optional[List[str]] = None
add_video_info: bool = True
captions_language: str = "en"
def __post_init__(self) -> None:
self.youtube_client = self._build_youtube_client(self.google_api_client.creds)
def _build_youtube_client(self, creds: Any) -> Any:
try:
from googleapiclient.discovery import build
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib"
"youtube-transcript-api`"
"to use the Google Drive loader"
)
return build("youtube", "v3", credentials=creds)
@root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("channel_name") and not values.get("video_ids"):
raise ValueError("Must specify either channel_name or video_ids")
return values
    def _get_transcript_for_video_id(self, video_id: str) -> str:
        from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi
        transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
try:
transcript = transcript_list.find_transcript([self.captions_language])
except NoTranscriptFound:
en_transcript = transcript_list.find_transcript(["en"])
transcript = en_transcript.translate(self.captions_language)
transcript_pieces = transcript.fetch()
return " ".join([t["text"].strip(" ") for t in transcript_pieces])
def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document:
        captions = self._get_transcript_for_video_id(video_id)
video_response = (
self.youtube_client.videos()
.list(
part="id,snippet",
id=video_id,
)
.execute()
)
return Document(
page_content=captions,
metadata=video_response.get("items")[0],
)
def _get_channel_id(self, channel_name: str) -> str:
request = self.youtube_client.search().list(
part="id",
q=channel_name,
type="channel",
maxResults=1, # we only need one result since channel names are unique
)
response = request.execute()
channel_id = response["items"][0]["id"]["channelId"]
return channel_id
def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]:
channel_id = self._get_channel_id(channel)
request = self.youtube_client.search().list(
part="id,snippet",
channelId=channel_id,
maxResults=50, # adjust this value to retrieve more or fewer videos
)
        video_documents = []
while request is not None:
response = request.execute()
            # Build a Document for each video in this page of results
for item in response["items"]:
if not item["id"].get("videoId"):
continue
meta_data = {"videoId": item["id"]["videoId"]}
if self.add_video_info:
item["snippet"].pop("thumbnails")
meta_data.update(item["snippet"])
                video_documents.append(
Document(
                        page_content=self._get_transcript_for_video_id(
item["id"]["videoId"]
),
metadata=meta_data,
)
)
request = self.youtube_client.search().list_next(request, response)
        return video_documents
def load(self) -> List[Document]:
"""Load documents."""
document_list = []
if self.channel_name:
document_list.extend(self._get_document_for_channel(self.channel_name))
elif self.video_ids:
document_list.extend(
[
self._get_document_for_video_id(video_id)
for video_id in self.video_ids
]
)
else:
raise ValueError("Must specify either channel_name or video_ids")
return document_list
| [] |
2024-01-10 | Atsushi-Ishii/langchain_custom | langchain~retrievers~document_compressors~chain_filter.py | """Filter that uses an LLM to drop documents that aren't relevant to the query."""
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import BasePromptTemplate, LLMChain, PromptTemplate
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
from langchain.schema import BaseLanguageModel, Document
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: LLMChain
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
for doc in documents:
_input = self.get_input(query, doc)
include_doc = self.llm_chain.predict_and_parse(**_input)
if include_doc:
filtered_docs.append(doc)
return filtered_docs
async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents."""
raise NotImplementedError
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any
) -> "LLMChainFilter":
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
llm_chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=llm_chain, **kwargs)
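# Example usage (sketch; candidate_docs is an assumed list of Documents):
#   from langchain.llms import OpenAI
#   doc_filter = LLMChainFilter.from_llm(OpenAI(temperature=0))
#   relevant = doc_filter.compress_documents(candidate_docs, "query text")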
| [] |
2024-01-10 | Atsushi-Ishii/langchain_custom | langchain~text_splitter.py | """Functionality for splitting text."""
from __future__ import annotations
import copy
import logging
from abc import ABC, abstractmethod
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Iterable,
List,
Literal,
Optional,
Sequence,
Union,
)
from langchain.docstore.document import Document
from langchain.schema import BaseDocumentTransformer
logger = logging.getLogger(__name__)
class TextSplitter(BaseDocumentTransformer, ABC):
"""Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
):
"""Create a new TextSplitter."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self._length_function = length_function
@abstractmethod
def split_text(self, text: str) -> List[str]:
"""Split text into multiple components."""
def create_documents(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> List[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
for chunk in self.split_text(text):
new_doc = Document(
page_content=chunk, metadata=copy.deepcopy(_metadatas[i])
)
documents.append(new_doc)
return documents
def split_documents(self, documents: List[Document]) -> List[Document]:
"""Split documents."""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return self.create_documents(texts, metadatas=metadatas)
def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
text = separator.join(docs)
text = text.strip()
if text == "":
return None
else:
return text
def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
separator_len = self._length_function(separator)
docs = []
current_doc: List[str] = []
total = 0
for d in splits:
_len = self._length_function(d)
if (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
):
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, "
f"which is longer than the specified {self._chunk_size}"
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep on popping if:
# - we have a larger chunk than in the chunk overlap
# - or if we still have any chunks and the length is long
while total > self._chunk_overlap or (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
and total > 0
):
total -= self._length_function(current_doc[0]) + (
separator_len if len(current_doc) > 1 else 0
)
current_doc = current_doc[1:]
current_doc.append(d)
total += _len + (separator_len if len(current_doc) > 1 else 0)
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
@classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
"""Text splitter that uses HuggingFace tokenizer to count length."""
try:
from transformers import PreTrainedTokenizerBase
if not isinstance(tokenizer, PreTrainedTokenizerBase):
raise ValueError(
"Tokenizer received was not an instance of PreTrainedTokenizerBase"
)
def _huggingface_tokenizer_length(text: str) -> int:
return len(tokenizer.encode(text))
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
@classmethod
def from_tiktoken_encoder(
cls,
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> TextSplitter:
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate max_tokens_for_prompt. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str, **kwargs: Any) -> int:
return len(
enc.encode(
text,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
**kwargs,
)
)
return cls(length_function=_tiktoken_encoder, **kwargs)
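    # Example (sketch): measuring chunk sizes in tokens rather than characters.
    #   splitter = CharacterTextSplitter.from_tiktoken_encoder(
    #       model_name="gpt-3.5-turbo", chunk_size=256, chunk_overlap=32
    #   )
    #   chunks = splitter.split_text(long_text)  # long_text is assumed defined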
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Transform sequence of documents by splitting them."""
return self.split_documents(list(documents))
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Asynchronously transform a sequence of documents by splitting them."""
raise NotImplementedError
class CharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters."""
def __init__(self, separator: str = "\n\n", **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._separator:
splits = text.split(self._separator)
else:
splits = list(text)
return self._merge_splits(splits, self._separator)
class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to for TokenTextSplitter. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
self._tokenizer = enc
self._allowed_special = allowed_special
self._disallowed_special = disallowed_special
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = []
input_ids = self._tokenizer.encode(
text,
allowed_special=self._allowed_special,
disallowed_special=self._disallowed_special,
)
start_idx = 0
cur_idx = min(start_idx + self._chunk_size, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
while start_idx < len(input_ids):
splits.append(self._tokenizer.decode(chunk_ids))
start_idx += self._chunk_size - self._chunk_overlap
cur_idx = min(start_idx + self._chunk_size, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
return splits
class RecursiveCharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters.
Recursively tries to split by different characters to find one
that works.
"""
def __init__(self, separators: Optional[List[str]] = None, **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separators = separators or ["\n\n", "\n", " ", ""]
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = self._separators[-1]
for _s in self._separators:
if _s == "":
separator = _s
break
if _s in text:
separator = _s
break
# Now that we have the separator, split the text
if separator:
splits = text.split(separator)
else:
splits = list(text)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, separator)
final_chunks.extend(merged_text)
_good_splits = []
other_info = self.split_text(s)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, separator)
final_chunks.extend(merged_text)
return final_chunks
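# Example (sketch): the recursive splitter falls back through "\n\n", "\n",
# " ", and "" until chunks fit within chunk_size.
#   splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
#   chunks = splitter.split_text(some_long_text)  # some_long_text is assumed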
class NLTKTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using NLTK."""
def __init__(self, separator: str = "\n\n", **kwargs: Any):
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
try:
from nltk.tokenize import sent_tokenize
self._tokenizer = sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = self._tokenizer(text)
return self._merge_splits(splits, self._separator)
class SpacyTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using Spacy."""
def __init__(
self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
):
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
self._tokenizer = spacy.load(pipeline)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (str(s) for s in self._tokenizer(text).sents)
return self._merge_splits(splits, self._separator)
class MarkdownTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Markdown-formatted headings."""
def __init__(self, **kwargs: Any):
"""Initialize a MarkdownTextSplitter."""
separators = [
# First, try to split along Markdown headings (starting with level 2)
"\n## ",
"\n### ",
"\n#### ",
"\n##### ",
"\n###### ",
# Note the alternative syntax for headings (below) is not handled here
# Heading level 2
# ---------------
# End of code block
"```\n\n",
# Horizontal lines
"\n\n***\n\n",
"\n\n---\n\n",
"\n\n___\n\n",
# Note that this splitter doesn't handle horizontal lines defined
# by *three or more* of ***, ---, or ___, but this is not handled
"\n\n",
"\n",
" ",
"",
]
super().__init__(separators=separators, **kwargs)
class LatexTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Latex-formatted layout elements."""
def __init__(self, **kwargs: Any):
"""Initialize a LatexTextSplitter."""
separators = [
# First, try to split along Latex sections
"\n\\chapter{",
"\n\\section{",
"\n\\subsection{",
"\n\\subsubsection{",
# Now split by environments
"\n\\begin{enumerate}",
"\n\\begin{itemize}",
"\n\\begin{description}",
"\n\\begin{list}",
"\n\\begin{quote}",
"\n\\begin{quotation}",
"\n\\begin{verse}",
"\n\\begin{verbatim}",
            # Now split by math environments
"\n\\begin{align}",
"$$",
"$",
# Now split by the normal type of lines
" ",
"",
]
super().__init__(separators=separators, **kwargs)
class PythonCodeTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Python syntax."""
def __init__(self, **kwargs: Any):
"""Initialize a MarkdownTextSplitter."""
separators = [
# First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
super().__init__(separators=separators, **kwargs)
| [] |
2024-01-10 | Atsushi-Ishii/langchain_custom | tests~integration_tests~vectorstores~test_opensearch.py | """Test OpenSearch functionality."""
import pytest
from langchain.docstore.document import Document
from langchain.vectorstores.opensearch_vector_search import (
PAINLESS_SCRIPTING_SEARCH,
SCRIPT_SCORING_SEARCH,
OpenSearchVectorSearch,
)
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
DEFAULT_OPENSEARCH_URL = "http://localhost:9200"
texts = ["foo", "bar", "baz"]
def test_opensearch() -> None:
"""Test end to end indexing and search using Approximate Search."""
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_opensearch_with_custom_field_name() -> None:
"""Test indexing and search using custom vector field and text field name."""
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
vector_field="my_vector",
text_field="custom_text",
)
output = docsearch.similarity_search(
"foo", k=1, vector_field="my_vector", text_field="custom_text"
)
assert output == [Document(page_content="foo")]
text_input = ["test", "add", "text", "method"]
OpenSearchVectorSearch.add_texts(
docsearch, text_input, vector_field="my_vector", text_field="custom_text"
)
output = docsearch.similarity_search(
"add", k=1, vector_field="my_vector", text_field="custom_text"
)
assert output == [Document(page_content="foo")]
def test_opensearch_with_metadatas() -> None:
"""Test end to end indexing and search with metadata."""
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
opensearch_url=DEFAULT_OPENSEARCH_URL,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_add_text() -> None:
"""Test adding additional text elements to existing index."""
text_input = ["test", "add", "text", "method"]
metadatas = [{"page": i} for i in range(len(text_input))]
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
docids = OpenSearchVectorSearch.add_texts(docsearch, text_input, metadatas)
assert len(docids) == len(text_input)
def test_opensearch_script_scoring() -> None:
"""Test end to end indexing and search using Script Scoring Search."""
pre_filter_val = {"bool": {"filter": {"term": {"text": "bar"}}}}
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
output = docsearch.similarity_search(
"foo", k=1, search_type=SCRIPT_SCORING_SEARCH, pre_filter=pre_filter_val
)
assert output == [Document(page_content="bar")]
def test_add_text_script_scoring() -> None:
"""Test adding additional text elements and validating using Script Scoring."""
text_input = ["test", "add", "text", "method"]
metadatas = [{"page": i} for i in range(len(text_input))]
docsearch = OpenSearchVectorSearch.from_texts(
text_input,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
OpenSearchVectorSearch.add_texts(docsearch, texts, metadatas)
output = docsearch.similarity_search(
"add", k=1, search_type=SCRIPT_SCORING_SEARCH, space_type="innerproduct"
)
assert output == [Document(page_content="test")]
def test_opensearch_painless_scripting() -> None:
"""Test end to end indexing and search using Painless Scripting Search."""
pre_filter_val = {"bool": {"filter": {"term": {"text": "baz"}}}}
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
output = docsearch.similarity_search(
"foo", k=1, search_type=PAINLESS_SCRIPTING_SEARCH, pre_filter=pre_filter_val
)
assert output == [Document(page_content="baz")]
def test_add_text_painless_scripting() -> None:
"""Test adding additional text elements and validating using Painless Scripting."""
text_input = ["test", "add", "text", "method"]
metadatas = [{"page": i} for i in range(len(text_input))]
docsearch = OpenSearchVectorSearch.from_texts(
text_input,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
is_appx_search=False,
)
OpenSearchVectorSearch.add_texts(docsearch, texts, metadatas)
output = docsearch.similarity_search(
"add", k=1, search_type=PAINLESS_SCRIPTING_SEARCH, space_type="cosineSimilarity"
)
assert output == [Document(page_content="test")]
def test_opensearch_invalid_search_type() -> None:
"""Test to validate similarity_search by providing invalid search_type."""
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
with pytest.raises(ValueError):
docsearch.similarity_search("foo", k=1, search_type="invalid_search_type")
def test_opensearch_embedding_size_zero() -> None:
"""Test to validate indexing when embedding size is zero."""
with pytest.raises(RuntimeError):
OpenSearchVectorSearch.from_texts(
[], FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
)
def test_appx_search_with_boolean_filter() -> None:
"""Test Approximate Search with Boolean Filter."""
boolean_filter_val = {"bool": {"must": [{"term": {"text": "bar"}}]}}
docsearch = OpenSearchVectorSearch.from_texts(
texts,
FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL,
)
output = docsearch.similarity_search(
"foo", k=3, boolean_filter=boolean_filter_val, subquery_clause="should"
)
assert output == [Document(page_content="bar")]
def test_appx_search_with_lucene_filter() -> None:
"""Test Approximate Search with Lucene Filter."""
lucene_filter_val = {"bool": {"must": [{"term": {"text": "bar"}}]}}
docsearch = OpenSearchVectorSearch.from_texts(
texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine="lucene"
)
output = docsearch.similarity_search("foo", k=3, lucene_filter=lucene_filter_val)
assert output == [Document(page_content="bar")]
| [] |
2024-01-10 | Atsushi-Ishii/langchain_custom | tests~unit_tests~test_math_utils.py | """Test math utility functions."""
from typing import List
import numpy as np
from langchain.math_utils import cosine_similarity
def test_cosine_similarity_zero() -> None:
X = np.zeros((3, 3))
Y = np.random.random((3, 3))
expected = np.zeros((3, 3))
actual = cosine_similarity(X, Y)
assert np.allclose(expected, actual)
def test_cosine_similarity_identity() -> None:
X = np.random.random((4, 4))
expected = np.ones(4)
actual = np.diag(cosine_similarity(X, X))
assert np.allclose(expected, actual)
def test_cosine_similarity_empty() -> None:
empty_list: List[List[float]] = []
assert len(cosine_similarity(empty_list, empty_list)) == 0
assert len(cosine_similarity(empty_list, np.random.random((3, 3)))) == 0
def test_cosine_similarity() -> None:
X = [[1.0, 2.0, 3.0], [0.0, 1.0, 0.0], [1.0, 2.0, 0.0]]
Y = [[0.5, 1.0, 1.5], [1.0, 0.0, 0.0], [2.0, 5.0, 2.0]]
expected = [
[1.0, 0.26726124, 0.83743579],
[0.53452248, 0.0, 0.87038828],
[0.5976143, 0.4472136, 0.93419873],
]
actual = cosine_similarity(X, Y)
assert np.allclose(expected, actual)
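# Worked check for the first entry above: Y[0] is X[0] scaled by 0.5, so the
# similarity is exactly 1.0:
#   dot(X[0], Y[0]) = 1*0.5 + 2*1.0 + 3*1.5 = 7.0
#   |X[0]| = sqrt(14), |Y[0]| = sqrt(3.5), and 7.0 / (sqrt(14) * sqrt(3.5)) = 1.0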
| [] |