date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | danial-amin/LLM-Insure | data_gen.py | import os
import openai
import pandas as pd
class SyntheticDataGenerator:
def __init__(self):
self.api_key = os.getenv('OPENAI_API_KEY')
if not self.api_key:
raise ValueError("API key not found. Please set the OPENAI_API_KEY environment variable.")
openai.api_key = self.api_key
def generate_synthetic_claim(self, category):
prompt = f"{category} insurance claim: "
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=100,
temperature=0.7
)
return prompt + response.choices[0].text.strip()
def generate_data(self, categories, num_samples_per_category):
synthetic_data = []
for category in categories:
for _ in range(num_samples_per_category):
synthetic_data.append({"category": category, "claim_text": self.generate_synthetic_claim(category)})
return pd.DataFrame(synthetic_data)
if __name__ == "__main__":
categories = ['Auto', 'Home', 'Life', 'Health']
data_generator = SyntheticDataGenerator()
df = data_generator.generate_data(categories, 1000)
df.to_csv('synthetic_claims.csv', index=False)
| [
"PLACEHOLDER insurance claim: "
] |
2024-01-10 | Ganryuu/repo | flask_app.py | from flask import Flask, render_template, request
import openai
import os
app = Flask(__name__)
openai.api_key = os.getenv("sk-")
@app.route("/")
def index():
return render_template("index.html")
@app.route("/answer", methods=["POST"])
def answer():
topic = request.form["topic"]
prompt = request.form["prompt"]
model = "text-davinci-003"
completions = openai.Completion.create(engine=model, prompt=prompt + " " + topic, max_tokens=1024, n=1, stop=None, temperature=0.7)
message = completions.choices[0].text
return render_template("answer.html", response=message)
@app.route("/download", methods=["POST"])
def download():
response = request.form["response"]
if response:
with open("output.md", "w") as f:
f.write(response)
return "Download complete"
else:
return "No response available for download"
if __name__ == "__main__":
app.run(debug=True)
| [
"PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | chuyishang/llm-video-understanding | collect_by_task2.py | import openai
import json
from tqdm import tqdm
import time
import argparse
import multiprocessing
f = open("/home/shang/openai-apikey.txt")
#print(f.readlines()[0])
openai.api_key = f.readlines()[0]
f.close()
def first_pass():
f = open("./COIN/base/coin_categories.json")
data = json.load(f)
category_steps = {}
with multiprocessing.Pool(10) as p:
for result in p.starmap(get_category, [(category, data) for category in data.keys()]):
category_steps[result[1]] = result[0]
with open("./COIN/base/category_base_steps.json", "w") as outfile:
json.dump(category_steps, outfile)
return category_steps
def get_category(category, data):
ids = data[category]["ids"].split(",")
steps = {}
prompt = "Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
for i in tqdm(range(len(ids))):
try:
f = open("/shared/medhini/COIN/coin_asr/" + ids[i] + ".txt")
transcript = " ".join(f.readlines())
#print(transcript)
input_text = prompt.replace("|||1", transcript)
tries, max_attempts = 0, 5
response = None
while tries < max_attempts:
time.sleep(0.5)
try:
response = openai.Completion.create(
engine="text-babbage-001",
prompt=input_text,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
break  # stop retrying once a response is returned
except Exception as exc:
print(f"OPENAI ERROR, try ({tries}), {exc}")
tries += 1
output = response["choices"][0]["text"].strip()
steps[ids[i]] = output
except:
print(ids[i])
pass
return steps, category
def second_pass(category_steps):
if args.no_fp:
with open("./COIN/base/category_base_steps.json") as r:
category_steps = json.load(r)
with open("./COIN/base/coin_categories.json") as file:
data = json.load(file)
for category in category_steps:
print(f"CATEGORY: {category}")
steps = []
for id in category_steps[category]:
steps.append(category_steps[category][id])
input_message=[
{"role": "system", "content": f"Extract a set of concise general steps for perfoming the task: {category} from the following recipes. Be as vague and general as possible. For your output, only include the steps without extra information."},
]
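# each per-video step list is sent as its own user message; the final user message asks for a single generalized set of steps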
for step in steps:
input_message.append({"role": "user", "content": step})
input_message.append({"role": "assistant", "content": "Recieved, waiting on next step list."})
input_message.append({"role": "user", "content": "I have inputted all recipes. Now, give me a general recipe like I instructed before."})
response = openai.ChatCompletion.create(
model="gpt-4",
messages=input_message
)
#print(response)
output = response["choices"][0]["message"]["content"].strip()
print(output)
print("============================")
data[category]["general steps"] = output
with open("./COIN/base/coin_gen_seps.json", "w") as outfile:
json.dump(data, outfile)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--no_fp", action="store_true")
args = parser.parse_args()
if not args.no_fp:
steps = first_pass()
else:
steps = None
second_pass(steps)
| [
"I have inputted all recipes. Now, give me a general recipe like I instructed before.",
"Recieved, waiting on next step list.",
"Extract a set of concise general steps for perfoming the task: PLACEHOLDER from the following recipes. Be as vague and general as possible. For your output, only include the steps without extra information.",
"Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
] |
2024-01-10 | chuyishang/llm-video-understanding | misc~steps.py | import os
import string
import json
import torch
import numpy as np
import openai
import random
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModel
#from nemo.collections.nlp.models import PunctuationCapitalizationModel
import argparse
from tqdm import tqdm
import spacy
from sentence_transformers import SentenceTransformer
import multiprocessing as mp
import _io
nlp = spacy.load('en_core_web_sm')
sent_tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")
f = open("/home/shang/self/openai-api.txt")
openai.api_key = f.readlines()[0]
def process_video(video_id, args, input_steps, transcripts, tokenizer, output_queue, punct_cap_model=None):
'''Main function that processes the video. Takes in arguments:
- video_id: id of input video
- args: parsed command-line arguments
- input_steps: optional pre-computed steps, indexed by video_id (loaded from --input_steps_path)
- transcripts: transcripts, indexed by video_id
- tokenizer: tokenizer used to bound the transcript length
- punct_cap_model: optional punctuation/capitalization model (unused in this variant)
- output_queue: open file handle or multiprocessing queue that receives one JSON line per video
'''
prompt = "Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
# Indexes into transcripts if argument is passed, else processes it
#print("TRANSCRIPTS:", transcripts)
if transcripts is not None:
try:
transcript = transcripts[video_id]
except:
return
# Creates the output path, adds capitalization and other formatting if specified
# Tokenizes transcript and saves it as `tokens`
tokens = tokenizer(transcript)
print(video_id, len(transcript), len(tokens["input_ids"]))
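# trim the transcript from the end until it is under 1600 GPT-2 tokens, presumably to keep the prompt within the completion model's context window while leaving room for the 256-token completion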
while len(tokens["input_ids"]) > 1600:
transcript = transcript[:-100]
tokens = tokenizer(transcript)
if args.input_steps_path is not None:
if video_id not in input_steps:
return
steps = input_steps[video_id]["steps"]
else:
if video_id in finished:
return
input_text = prompt.replace("|||1", transcript)
steps = []
num_attempts = 0
while len(steps) == 0:
response = openai.Completion.create(
engine="text-babbage-001",
prompt=input_text,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
output = response["choices"][0]["text"].strip()
num_attempts += 1
steps = output.split("\n")
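# the model returns a numbered list; strip the leading "N." from each step, otherwise retry (up to args.max_attempts)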
if all(["." in step for step in steps[1:]]):
steps = steps[:1]+[step[step.index(".")+1:].strip() for step in steps[1:]]
elif num_attempts < args.max_attempts:
steps = []
output_dict = {"video_id": video_id, "steps": steps, "transcript": transcript}
if not args.no_align:
#TODO: Compare similarities
pass
if isinstance(output_queue, _io.TextIOWrapper):
output_queue.write(json.dumps(output_dict)+'\n')
else:
output_queue.put(json.dumps(output_dict)+'\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--video_list_path")
parser.add_argument("--transcripts_path")
parser.add_argument("--formatted_transcripts_path")
parser.add_argument("--start_index", type=int, default=0)
parser.add_argument("--end_index", type=int, default=None)
parser.add_argument("--max_attempts", type=int, default=1)
parser.add_argument("--no_formatting", action="store_true")
parser.add_argument("--output_path")
parser.add_argument("--cpu", action="store_true")
parser.add_argument("--no_align", action="store_true")
parser.add_argument("--input_steps_path", type=str, default=None)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--no_dtw", action="store_true")
parser.add_argument("--dtw_window_size", type=int, default=1000000)
args = parser.parse_args()
'''
Specify device, CPU vs. GPU
'''
#print(args)
if not args.no_align:
if args.cpu:
sent_model = SentenceTransformer('sentence-transformers/paraphrase-mpnet-base-v2').cpu()
else:
sent_model = SentenceTransformer('sentence-transformers/paraphrase-mpnet-base-v2').cuda()
# sent_model = AutoModel.from_pretrained('sentence-transformers/paraphrase-mpnet-base-v2').cuda()
'''
Args no formatting - load pretrained punctuation capitalization model
if not args.no_formatting:
punct_cap_model = PunctuationCapitalizationModel.from_pretrained("punctuation_en_bert")
if args.cpu:
punct_cap_model = punct_cap_model.cpu()
'''
tokenizer = AutoTokenizer.from_pretrained("gpt2")
'''
Opens list of videos
'''
f = open(args.video_list_path)
content = f.read()
video_ids = content.split(",")
#print(video_ids)
'''
Loads transcripts
'''
transcripts = None
if args.transcripts_path[-5:] == ".json":
f = open(args.transcripts_path)
transcripts = json.load(f)
'''
Video End-index, can be used to truncate # videos read
'''
if args.end_index is not None:
video_ids = video_ids[:args.end_index]
'''
Video Start-index
'''
video_ids = video_ids[args.start_index:]
'''
Ending: output is read and the video id is added to set "finished"
'''
finished = set()
if os.path.exists(args.output_path):
fout = open(args.output_path)
written_lines = fout.readlines()
fout.close()
for line in written_lines:
try:
datum = json.loads(line)
finished.add(datum['video_id'])
except:
pass
fout = open(args.output_path, 'a')
else:
fout = open(args.output_path, 'w')
'''
Reads input_steps
'''
input_steps = None
if args.input_steps_path is not None:
f = open(args.input_steps_path)
lines = f.readlines()
input_steps = [json.loads(line) for line in lines]
input_steps = {datum["video_id"]: datum for datum in input_steps}
'''
Goes through list of all video_ids, if video is in set finished, skip and move to next unfinished video
'''
for video_id in tqdm(video_ids):
#print(video_id, finished)
if video_id in finished:
continue
# job = pool.apply_async(process_video, (video_id, args, input_steps, transcripts, tokenizer, punct_cap_model, q))
'''
Call process_video here
'''
process_video(video_id, args, input_steps, transcripts, tokenizer, fout)
# print('here', len(jobs))
# jobs.append(job)
fout.close()
| [
"Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
] |
2024-01-10 | chuyishang/llm-video-understanding | collect_category.py | import openai
import json
from tqdm import tqdm
import webvtt
f = open("/shared/medhini/COIN/COIN.json")
data = json.load(f)["database"]
dic = {}
category = []
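# group video ids by their COIN task class; the ids for each class are joined into a comma-separated string below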
for item in data:
if data[item]["class"] not in dic:
dic[data[item]["class"]] = {}
dic[data[item]["class"]]["ids"] = []
dic[data[item]["class"]]["ids"].append(item)
for category in dic:
dic[category]["ids"] = ",".join(dic[category]["ids"])
with open("./COIN/base/coin_categories.json","w") as f:
json.dump(dic, f) | [] |
2024-01-10 | chuyishang/llm-video-understanding | misc~gather_align_steps.py | import os
import string
import json
import torch
import numpy as np
import openai
import random
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModel
from nemo.collections.nlp.models import PunctuationCapitalizationModel
import argparse
from tqdm import tqdm
import spacy
from sentence_transformers import SentenceTransformer
import multiprocessing as mp
import _io
nlp = spacy.load('en_core_web_sm')
sent_tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")
def get_next_character(text_list, index1, index2):
'''
Gets next character from a text list. Index 1 -> rows (entry), Index 2 -> cols (word).
'''
if index1 == len(text_list):
return None, index1, index2
if index2 == len(text_list[index1]):
return get_next_character(text_list, index1+1, 0)
if text_list[index1][index2].isspace():
return get_next_character(text_list, index1, index2+1)
return text_list[index1][index2], index1, index2
def align_after_postprocess(postprocessed, original):
'''
Map each character position in the post-processed (punctuated/capitalized) transcript back to its
(segment index, character index) position in the original transcript segments.
'''
index_map = {}
speech_segment_index = 0
within_segment_index = 0
p_index = 0
postprocessed_l = postprocessed # .lower()
while p_index < len(postprocessed_l):
if postprocessed_l[p_index].isspace():
p_index += 1
continue
char, speech_segment_index, within_segment_index = get_next_character(original["text"], speech_segment_index, within_segment_index)
if char is not None:
_, next_speech_segment_index, next_within_segment_index = get_next_character(original["text"], speech_segment_index, within_segment_index+1)
if postprocessed_l[p_index].upper().lower() == char.upper().lower() or postprocessed_l[p_index:p_index+2].upper().lower() == char.upper().lower():
index_map[p_index] = (speech_segment_index, within_segment_index)
speech_segment_index = next_speech_segment_index
within_segment_index = next_within_segment_index
p_index += 1
return index_map
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
def encode_section(sent_model, sents, start, end):
section = ' '.join(sents[start:end])
return {(start, end): sent_model.encode([section])[0]}
def remove_punctuation(text):
new_text = text
for c in string.punctuation:
new_text = new_text.replace(c, '')
return new_text
def align_text(text, original_text, steps, sent_model, num_workers, dtw=True, dtw_window_size=10000000000, dtw_start_offset=False):
doc = nlp(text)
sents = [str(sent) for sent in list(doc.sents)]
steps = steps[:len(sents)]
step_embs = sent_model.encode(steps)
text = text.replace('ı', 'i')
if dtw:
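# dtw_matrix[i, start, end] is the best cumulative step-to-span similarity when step i is aligned to the sentence span [start, end);
# pointers records the start of the previous step's span (which ends at `start`) so the alignment can be backtracked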
dtw_matrix = np.zeros((len(steps)+1, len(sents)+1, len(sents)+1))
for i in range(len(steps)+1):
for start in range(len(sents)+1):
for end in range(len(sents)+1):
dtw_matrix[i,start,end] = -np.inf
dtw_matrix[0,0,0] = 0
pointers = -1*np.ones((len(steps)+1, len(sents)+1, len(sents)+1), dtype=np.int32)
pointer_scores = -np.inf*np.ones((len(steps)+1, len(sents)+1, len(sents)+1), dtype=np.float32)
start_sent_index = 0
if dtw_start_offset:
single_sent_emb = np.stack([sent_model.encode([sent])[0,:] for sent in sents])
start_scores = (step_embs[:1,:]*single_sent_emb).sum(1)
start_sent_index = min(max(0, start_scores.argmax()-1), len(sents)-len(steps))
dtw_matrix[0,start_sent_index,start_sent_index] = 0
section_emb = {}
if num_workers == 1:
batch = []
for start in range(start_sent_index, len(sents)):
for end in range(start+1, min(start+dtw_window_size+1, len(sents)+1)):
section = ' '.join(sents[start:end])
batch.append((start, end, section))
if len(batch) == 16 or (start == len(sents)-1 and end == len(sents)):
inputs = [item[-1] for item in batch]
outputs = sent_model.encode(inputs)
for item, output in zip(batch, outputs):
section_emb[item[:2]] = output
batch = []
if len(batch) > 0:
inputs = [item[-1] for item in batch]
outputs = sent_model.encode(inputs)
for item, output in zip(batch, outputs):
section_emb[item[:2]] = output
else:
with mp.Pool(num_workers) as pool:
section_emb_list = pool.starmap(encode_section, [(sent_model, sents, start, end) for start in range(0, len(sents)) for end in range(start+1, min(start+dtw_window_size+1, len(sents)+1))])
for emb_dict in section_emb_list:
section_emb.update(emb_dict)
for i in range(1, len(steps)+1):
for start in range(start_sent_index, len(sents)):
for end in range(start+1, min(start+dtw_window_size+1, len(sents)+1)):
section = ' '.join(sents[start:end])
sentence_emb = section_emb[(start,end)] # sent_model.encode([section])[0]
step_emb = step_embs[i-1] # sent_model.encode([steps[i-1]])[0]
similarity = (sentence_emb*step_emb).sum().item()
best_prev_segment = dtw_matrix[i-1,:,start].argmax().item()
prev_segment_score = dtw_matrix[i-1,:,start].max().item()
# if prev_segment_score > dtw_matrix[i-1,start,end].item():
# pointers[i,start,end] = best_prev_segment
# else:
# pointers[i,start,end] = start
pointers[i,start,end] = best_prev_segment
pointer_scores[i,start,end] = prev_segment_score
last_max = np.max([prev_segment_score]) # , dtw_matrix[i-1,start,end]])
dtw_matrix[i,start,end] = similarity+last_max
# print('good', i, [j for j in range(dtw_matrix.shape[1]) if dtw_matrix[i,j,:].max().item() > -np.inf])
end = dtw_matrix.shape[1]-1
index = dtw_matrix.shape[0]-1
start = dtw_matrix[index,:,end].argmax().item()
print(dtw_matrix[index,:,:end].max().item())
segments = {index: (start, end)}
index -= 1
while index > 0:
# print(index+1, start, end)
new_start = int(pointers[index+1,start,end])
print(pointer_scores[index+1,start,end])
if new_start != start:
end = start
start = new_start
# else:
# print('bad', pointers[index+1,start,end], pointer_scores[index+1,start,end])
segments[index] = (start, end)
index -= 1
print(start_sent_index, segments)
else:
sent_emb = sent_model.encode(sents)
scores = torch.matmul(torch.from_numpy(step_embs), torch.from_numpy(sent_emb).t())
matched_sentences = scores.argmax(dim=-1).tolist()
segments = {}
for i in range(1, len(steps)+1):
print(steps[i-1], '|||', sents[matched_sentences[i-1]])
segments[i] = (max(0, matched_sentences[i-1]-1), min(len(sents), matched_sentences[i-1]+2))
# text_sans_punct = remove_punctuation(text)
# assert text_sans_punct.lower() == ' '.join(original_text['text'])
postprocess_alignment = align_after_postprocess(text, original_text)
# print(segments)
# print(postprocess_alignment)
aligned_segments = {}
sents = list(doc.sents)
# print(text)
# print(original_text)
# print(' '.join(original_text['text']))
# print(max(list(postprocess_alignment.keys())), [sents[segments[index][0]].start_char for index in segments], [text[sents[segments[index][0]].start_char:sents[segments[index][1]-1].end_char] for index in segments])
for index in segments:
while str(sents[segments[index][0]]).isspace():
segments[index] = (segments[index][0]-1, segments[index][1])
start = sents[segments[index][0]].start_char
while start not in postprocess_alignment and start < len(text):
start += 1
if start not in postprocess_alignment:
print('A', sents[segments[index][0]])
print('B', text[sents[segments[index][0]].start_char:], sents[segments[index][0]].start_char)
print('C', text)
print('D', ' '.join(original_text['text']))
print(sents[segments[index][0]].start_char, sorted(list(postprocess_alignment.keys()))[-50:])
assert start in postprocess_alignment
end = sents[segments[index][1]-1].end_char-1
while end not in postprocess_alignment and end >= 0:
end -= 1
assert end in postprocess_alignment
aligned_segments[index] = postprocess_alignment[start]+postprocess_alignment[end]
print('aligned', ' '.join(original_text['text'][aligned_segments[index][0]:aligned_segments[index][2]+1]), sents[segments[index][0]:segments[index][1]])
return aligned_segments
def remove_repeat_ngrams(text_list, min_n=3, max_n=8, return_segment_ids=False):
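# collapse immediate repetitions of any n-gram (min_n to max_n tokens long), a common ASR artifact, while tracking which original segment each kept token came from (used when return_segment_ids is True)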
assert isinstance(text_list, list)
tokens = []
segment_ids = []
for segment_id, segment in enumerate(text_list):
segment_tokens = segment.split()
for token in segment_tokens:
if len(token) > 0:
tokens.append(token)
segment_ids.append(segment_id)
inside_segment = False
num_streak_tokens = 0
new_tokens = []
new_segment_ids = []
indices_added = set()
for i in range(len(tokens)):
redundant = False
for j in range(max_n, min_n-1, -1):
if i+1 >= j*2 and tokens[i+1-j:i+1] == tokens[i+1-j*2:i+1-j]:
# print('here', tokens[i+1-j*2:i+1])
inside_segment = True
num_streak_tokens = min_n
for k in range(1, j):
if i-k in indices_added:
new_tokens.pop()
new_segment_ids.pop()
indices_added.remove(i-k)
redundant = True
break
if not redundant:
new_tokens.append(tokens[i])
indices_added.add(i)
new_segment_ids.append(segment_ids[i])
if return_segment_ids:
return ' '.join(new_tokens), new_segment_ids
return ' '.join(new_tokens)
def process_video(video_id, args, input_steps, transcripts, tokenizer, punct_cap_model, output_queue):
'''
Main function that processes the video. Takes in arguments:
- video_id: id of the input video
- args: parsed command-line arguments
- input_steps: optional pre-computed steps, indexed by video_id (loaded from --input_steps_path)
- transcripts: transcripts, indexed by video_id
- tokenizer: tokenizer used to bound the transcript length
- punct_cap_model: punctuation/capitalization model applied when formatting is enabled
- output_queue: open file handle or multiprocessing queue that receives one JSON line per video
'''
prompt = "Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
print('here3')
# Indexes into transcripts if argument is passed, else processes it
if transcripts is not None:
original = transcripts[video_id]
else:
f = open(os.path.join(args.transcripts_path, video_id+".csv"))
lines = f.readlines()
original = {"text": [], "start": [], "end": []}
for line in lines[1:]:
parts = line.split(',')
original["start"].append(float(parts[0]))
original["end"].append(float(parts[1]))
original["text"].append(parts[-1].strip())
transcript = " ".join(original["text"])
# Removes repeated n-grams
deduplicated_text, new_segment_ids = remove_repeat_ngrams(original["text"], min_n=3, max_n=9, return_segment_ids=True)
deduplicated_tokens = deduplicated_text.split()
original["text"] = [[] for _ in range(len(original["text"]))]
for token, new_id in zip(deduplicated_tokens, new_segment_ids):
original["text"][new_id].append(token)
original["text"] = [" ".join(lst) for lst in original["text"]]
transcript = " ".join(original["text"])
# Creates the output path, adds capitalization and other formatting if specified
if not args.no_formatting:
if args.formatted_transcripts_path is not None:
fname = os.path.join(args.formatted_transcripts_path, video_id+".txt")
if args.formatted_transcripts_path is not None and os.path.exists(fname):
f = open(fname)
transcript = f.readlines()[0]
else:
transcript = punct_cap_model.add_punctuation_capitalization([transcript])[0]
# Tokenizes transcript and saves it as `tokens`
tokens = tokenizer(transcript)
print(video_id, len(transcript), len(tokens["input_ids"]))
while len(tokens["input_ids"]) > 1600:
transcript = transcript[:-100]
tokens = tokenizer(transcript)
if args.input_steps_path is not None:
if video_id not in input_steps:
return
steps = input_steps[video_id]["steps"]
else:
if video_id in finished:
return
input_text = prompt.replace("|||1", transcript)
steps = []
num_attempts = 0
while len(steps) == 0:
response = openai.Completion.create(
engine="text-babbage-001",
prompt=input_text,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
output = response["choices"][0]["text"].strip()
num_attempts += 1
steps = output.split("\n")
if all(["." in step for step in steps[1:]]):
steps = steps[:1]+[step[step.index(".")+1:].strip() for step in steps[1:]]
elif num_attempts < args.max_attempts:
steps = []
output_dict = {"video_id": video_id, "steps": steps, "transcript": transcript}
if not args.no_align:
segments = align_text(transcript, original, steps, sent_model, args.num_workers, not args.no_dtw, args.dtw_window_size)
print(segments)
output_dict["segments"] = segments
if isinstance(output_queue, _io.TextIOWrapper):
output_queue.write(json.dumps(output_dict)+'\n')
else:
output_queue.put(json.dumps(output_dict)+'\n')
def output_listener(output_queue, output_filename):
mode = 'a+' if os.path.exists(output_filename) else 'w'
with open(output_filename, mode) as fout:
while True:
output = output_queue.get()
if output == 'kill':
break
fout.write(output)
fout.flush()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--video_list_path")
parser.add_argument("--transcripts_path")
parser.add_argument("--formatted_transcripts_path")
parser.add_argument("--start_index", type=int, default=0)
parser.add_argument("--end_index", type=int, default=None)
parser.add_argument("--max_attempts", type=int, default=1)
parser.add_argument("--no_formatting", action="store_true")
parser.add_argument("--output_path")
parser.add_argument("--cpu", action="store_true")
parser.add_argument("--no_align", action="store_true")
parser.add_argument("--input_steps_path", type=str, default=None)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--no_dtw", action="store_true")
parser.add_argument("--dtw_window_size", type=int, default=1000000)
args = parser.parse_args()
'''
Specify device, CPU vs. GPU
'''
if not args.no_align:
if args.cpu:
sent_model = SentenceTransformer('sentence-transformers/paraphrase-mpnet-base-v2').cpu()
else:
sent_model = SentenceTransformer('sentence-transformers/paraphrase-mpnet-base-v2').cuda()
# sent_model = AutoModel.from_pretrained('sentence-transformers/paraphrase-mpnet-base-v2').cuda()
'''
Args no formatting - load pretrained punctuation capitalization model
'''
if not args.no_formatting:
punct_cap_model = PunctuationCapitalizationModel.from_pretrained("punctuation_en_bert")
if args.cpu:
punct_cap_model = punct_cap_model.cpu()
tokenizer = AutoTokenizer.from_pretrained("gpt2")
'''
Opens list of videos
'''
f = open(args.video_list_path)
lines = f.readlines()
video_ids = [line.strip().split()[0].split('.')[0] for line in lines]
'''
Loads transcripts
'''
transcripts = None
if args.transcripts_path[-5:] == ".json":
f = open(args.transcripts_path)
transcripts = json.load(f)
'''
Video End-index, can be used to truncate # videos read
'''
if args.end_index is not None:
video_ids = video_ids[:args.end_index]
'''
Video Start-index
'''
video_ids = video_ids[args.start_index:]
'''
Ending: output is read and the video id is added to set "finished"
'''
finished = set()
if os.path.exists(args.output_path):
fout = open(args.output_path)
written_lines = fout.readlines()
fout.close()
for line in written_lines:
try:
datum = json.loads(line)
finished.add(datum['video_id'])
except:
pass
fout = open(args.output_path, 'a')
else:
fout = open(args.output_path, 'w')
'''
Reads input_steps
'''
input_steps = None
if args.input_steps_path is not None:
f = open(args.input_steps_path)
lines = f.readlines()
input_steps = [json.loads(line) for line in lines]
input_steps = {datum["video_id"]: datum for datum in input_steps}
"""manager = mp.Manager()
q = manager.Queue()
pool = mp.Pool(args.num_workers+2)
watcher = pool.apply_async(output_listener, (q, args.output_path))
print('here1', pool._processes)
jobs = []"""
'''
Goes through list of all video_ids, if video is in set finished, skip and move to next unfinished video
'''
for video_id in tqdm(video_ids):
if video_id in finished:
continue
# job = pool.apply_async(process_video, (video_id, args, input_steps, transcripts, tokenizer, punct_cap_model, q))
'''
Call process_video here
'''
process_video(video_id, args, input_steps, transcripts, tokenizer, punct_cap_model, fout)
# print('here', len(jobs))
# jobs.append(job)
"""for job in jobs:
job.get()
q.put('kill')
pool.close()
pool.join()"""
fout.close()
| [
"Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
] |
2024-01-10 | chuyishang/llm-video-understanding | collect_by_task.py | import openai
import json
from tqdm import tqdm
import webvtt
f = open("/home/shang/openai-apikey.txt")
#print(f.readlines()[0])
openai.api_key = f.readlines()[0]
f.close()
f = open("/shared/medhini/COIN/COIN_annts.json")
data = json.load(f)
coffee = []
with open("./COIN/demo_coffee.txt","w") as f:
for id in data.keys():
if data[id] == "MakeCoffee":
f.write(id + ",")
coffee.append(id)
coffee = coffee[:-2]
steps = []
prompt = "Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
for i in tqdm(range(len(coffee))):
try:
f = open("/shared/medhini/COIN/coin_asr/" + coffee[i] + ".txt")
transcript = " ".join(f.readlines())
print(transcript)
input_text = prompt.replace("|||1", transcript)
response = openai.Completion.create(
engine="text-babbage-001",
prompt=input_text,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
output = response["choices"][0]["text"].strip()
steps.append(output)
except:
print(coffee[i])
pass
prompt2 = "You take on the role of a professional summarizer. You are given a list of different methods to make coffee. For each method, you are given a list of steps. Use the given steps to construct a generalized recipe for making coffee. Do not rely on one method too much - generalize across all different methods.\nSteps: |||1\nSteps:\n1."
input_text2 = prompt2.replace("|||1", "\nMethod: ".join(steps))
response = openai.Completion.create(
engine="text-babbage-001",
prompt=input_text2,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
output = response["choices"][0]["text"].strip()
with open("./COIN/coffee_vtt.json","w") as f:
json.dump(output, f) | [
"You take on the role of a professional summarizer. You are given a list of different methods to make coffee. For each method, you are given a list of steps. Use the given steps to construct a generalized recipe for making coffee. Do not rely on one method too much - generalize across all different methods.\nSteps: |||1\nSteps:\n1.",
"Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
] |
2024-01-10 | chuyishang/llm-video-understanding | localize.py | import os
import string
import json
import torch
import numpy as np
import openai
import random
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModel
from nemo.collections.nlp.models import PunctuationCapitalizationModel
import argparse
from tqdm import tqdm
#import en_core_web_sm
import spacy
from sentence_transformers import SentenceTransformer, util
import multiprocessing as mp
import _io
from get_times import *
f = open("/home/shang/openai-apikey.txt")
#print(f.readlines()[0])
openai.api_key = f.readlines()[0]
nlp = spacy.load('en_core_web_sm')
#nlp = en_core_web_sm.load()
sent_tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")
def get_next_character(text_list, index1, index2):
if index1 == len(text_list):
return None, index1, index2
if index2 == len(text_list[index1]):
return get_next_character(text_list, index1+1, 0)
if text_list[index1][index2].isspace():
return get_next_character(text_list, index1, index2+1)
return text_list[index1][index2], index1, index2
def align_after_postprocess(postprocessed, original):
index_map = {}
speech_segment_index = 0
within_segment_index = 0
p_index = 0
postprocessed_l = postprocessed # .lower()
while p_index < len(postprocessed_l):
if postprocessed_l[p_index].isspace():
p_index += 1
continue
char, speech_segment_index, within_segment_index = get_next_character(original["text"], speech_segment_index, within_segment_index)
if char is not None:
_, next_speech_segment_index, next_within_segment_index = get_next_character(original["text"], speech_segment_index, within_segment_index+1)
if postprocessed_l[p_index].upper().lower() == char.upper().lower() or postprocessed_l[p_index:p_index+2].upper().lower() == char.upper().lower():
index_map[p_index] = (speech_segment_index, within_segment_index)
speech_segment_index = next_speech_segment_index
within_segment_index = next_within_segment_index
p_index += 1
return index_map
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
def encode_section(sent_model, sents, start, end):
section = ' '.join(sents[start:end])
return {(start, end): sent_model.encode([section])[0]}
def remove_punctuation(text):
new_text = text
for c in string.punctuation:
new_text = new_text.replace(c, '')
return new_text
def align_text(text, original_text, steps, sent_model, num_workers, do_dtw=False, do_drop_dtw=True, dtw_window_size=10000000000, dtw_start_offset=False, id=None):
#print("===================")
doc = nlp(text)
#print("DOC:", doc)
#print("===================")
sents = [str(sent) for sent in list(doc.sents)]
if args.gen_steps:
steps = args.gen_steps.split("\\n")
else:
steps = steps[:len(sents)]
#("=========================")
#print("SENTS:", len(sents), sents)
#print("STEPS:", len(steps), steps)
#print("=========================")
step_embs = sent_model.encode(steps)
text = text.replace('ı', 'i')
if do_dtw:
dtw_matrix = np.zeros((len(steps)+1, len(sents)+1, len(sents)+1)) # dtw matrix size [steps+1, sent+1, sent+1]
#print("==========================")
#print(dtw_matrix.shape)
#print("==========================")
for i in range(len(steps)+1):
for start in range(len(sents)+1):
for end in range(len(sents)+1):
dtw_matrix[i,start,end] = -np.inf # sets everything to -inf
dtw_matrix[0,0,0] = 0 # sets start to 0
pointers = -1*np.ones((len(steps)+1, len(sents)+1, len(sents)+1), dtype=np.int32) # pointers -> pointer matrix (possibly for keeping track of prev values?)
pointer_scores = -np.inf*np.ones((len(steps)+1, len(sents)+1, len(sents)+1), dtype=np.float32) # pointer_scores -> same size matrix of -infs
start_sent_index = 0 # sentence start index
# if there is offset, can ignore for now
if dtw_start_offset:
single_sent_emb = np.stack([sent_model.encode([sent])[0,:] for sent in sents])
start_scores = (step_embs[:1,:]*single_sent_emb).sum(1)
start_sent_index = min(max(0, start_scores.argmax()-1), len(sents)-len(steps))
dtw_matrix[0,start_sent_index,start_sent_index] = 0
# section_emb -> empty dic, we append to it later
section_emb = {}
if num_workers == 1:
batch = [] # batch -> empty arr, we append to it later
for start in range(start_sent_index, len(sents)): # outer loop: sentences -> for each sentence
for end in range(start+1, min(start+dtw_window_size+1, len(sents)+1)): # for end in (start to smaller of window size or sentence length)
section = ' '.join(sents[start:end]) # section -> joined sentences from start index to end, represents 1 section
batch.append((start, end, section)) # append tuple (start index, end index, section) to batch list
if len(batch) == 16 or (start == len(sents)-1 and end == len(sents)): # when batch is full:
inputs = [item[-1] for item in batch] # inputs -> list of sections (combined sections)
outputs = sent_model.encode(inputs) # out -> encoded inputs (sections)
for item, output in zip(batch, outputs): # item -> batch, output -> outputs (encoded)
section_emb[item[:2]] = output # section_emb[batch[:2]] = --> key is (start, end), val is encoded output, 2 is so that it gets stored in the output section
batch = [] # resets batch if previous batch full
if len(batch) > 0: # if batch nonempty: (this is the TAIL CASE)
inputs = [item[-1] for item in batch] # inputs = section (-1 is last element)
outputs = sent_model.encode(inputs) # same process as before
for item, output in zip(batch, outputs):
section_emb[item[:2]] = output
else:
with mp.Pool(num_workers) as pool:
section_emb_list = pool.starmap(encode_section, [(sent_model, sents, start, end) for start in range(0, len(sents)) for end in range(start+1, min(start+dtw_window_size+1, len(sents)+1))])
for emb_dict in section_emb_list:
section_emb.update(emb_dict)
for i in range(1, len(steps)+1): # for step:
for start in range(start_sent_index, len(sents)): # for start index:
for end in range(start+1, min(start+dtw_window_size+1, len(sents)+1)): # for end index:
#print(f"({i, start, end})")
section = ' '.join(sents[start:end]) # section formed by joined sentences
sentence_emb = section_emb[(start,end)] # sent_model.encode([section])[0] sentence_emb -> encoded sentence embedding for [start to end]
step_emb = step_embs[i-1] # step_emb -> step embedding
similarity = (sentence_emb*step_emb).sum().item() # take dot product similarity
best_prev_segment = dtw_matrix[i-1,:,start].argmax().item() # [step -1, :, start sentence]
#print("BEST PREV SEG:", sha)
prev_segment_score = dtw_matrix[i-1,:,start].max().item() # [step -1, :, start sentence]
#print("PREV SEGMENT SCORE:", prev_segment_score) -> this is a single number
# if prev_segment_score > dtw_matrix[i-1,start,end].item():
# pointers[i,start,end] = best_prev_segment
# else:
# pointers[i,start,end] = start
pointers[i,start,end] = best_prev_segment
pointer_scores[i,start,end] = prev_segment_score
last_max = np.max([prev_segment_score]) # , dtw_matrix[i-1,start,end]])
dtw_matrix[i,start,end] = similarity+last_max
# print('good', i, [j for j in range(dtw_matrix.shape[1]) if dtw_matrix[i,j,:].max().item() > -np.inf])
# sentence - 1
end = dtw_matrix.shape[1]-1
# steps - 1
index = dtw_matrix.shape[0]-1
start = dtw_matrix[index,:,end].argmax().item()
#print("=====================")
#print("MAX:", dtw_matrix[index,:,:end].max().item(), "START", start, "END", end, "INDEX", index)
#print("=====================")
segments = {index: (start, end)}
index -= 1
while index > 0:
# print(index+1, start, end)
new_start = int(pointers[index+1,start,end])
#print(pointer_scores[index+1,start,end])
if new_start != start:
end = start
start = new_start
# else:
# print('bad', pointers[index+1,start,end], pointer_scores[index+1,start,end])
segments[index] = (start, end)
index -= 1
#print("PRINT!!:", start_sent_index, segments)
elif do_drop_dtw:
sent_emb = sent_model.encode(sents)
#scores = torch.matmul(torch.from_numpy(step_embs), torch.from_numpy(sent_emb).t())
scores = util.cos_sim(step_embs, sent_emb)
#print("SENT EMB:", sent_emb.shape, "STEP EMB:", step_embs.shape, "SCORES", scores.shape)
def drop_dtw(zx_costs, drop_costs, exclusive=True, contiguous=True, return_labels=False):
"""Drop-DTW algorithm that allows drop only from one (video) side. See Algorithm 1 in the paper.
Parameters
----------
zx_costs: np.ndarray [K, N]
pairwise match costs between K steps and N video clips
drop_costs: np.ndarray [N]
drop costs for each clip
exclusive: bool
If True any clip can be matched with only one step, not many.
contiguous: bool
if True, can only match a contiguous sequence of clips to a step
(i.e. no drops in between the clips)
return_labels: bool
if True, returns output directly useful for segmentation computation (made for convenience)
"""
K, N = zx_costs.shape
# initialize solution matrices
D = np.zeros([K + 1, N + 1, 2]) # the 2 last dimensions correspond to different states.
# State (dim) 0 - x is matched; State 1 - x is dropped
D[1:, 0, :] = np.inf # no drops in z in any state
D[0, 1:, 0] = np.inf # no drops in x in state 0, i.e. state where x is matched
D[0, 1:, 1] = np.cumsum(drop_costs) # drop costs initialization in state 1
# initialize path tracking info for each state
P = np.zeros([K + 1, N + 1, 2, 3], dtype=int)
for xi in range(1, N + 1):
P[0, xi, 1] = 0, xi - 1, 1
# filling in the dynamic tables
for zi in range(1, K + 1):
for xi in range(1, N + 1):
# define frequently met neighbors here
diag_neigh_states = [0, 1]
diag_neigh_coords = [(zi - 1, xi - 1) for _ in diag_neigh_states]
diag_neigh_costs = [D[zi - 1, xi - 1, s] for s in diag_neigh_states]
left_neigh_states = [0, 1]
left_neigh_coords = [(zi, xi - 1) for _ in left_neigh_states]
left_neigh_costs = [D[zi, xi - 1, s] for s in left_neigh_states]
left_pos_neigh_states = [0] if contiguous else left_neigh_states
left_pos_neigh_coords = [(zi, xi - 1) for _ in left_pos_neigh_states]
left_pos_neigh_costs = [D[zi, xi - 1, s] for s in left_pos_neigh_states]
top_pos_neigh_states = [0]
top_pos_neigh_coords = [(zi - 1, xi) for _ in left_pos_neigh_states]
top_pos_neigh_costs = [D[zi - 1, xi, s] for s in left_pos_neigh_states]
z_cost_ind, x_cost_ind = zi - 1, xi - 1 # indexing in costs is shifted by 1
# state 0: matching x to z
if exclusive:
neigh_states_pos = diag_neigh_states + left_pos_neigh_states
neigh_coords_pos = diag_neigh_coords + left_pos_neigh_coords
neigh_costs_pos = diag_neigh_costs + left_pos_neigh_costs
else:
neigh_states_pos = diag_neigh_states + left_pos_neigh_states + top_pos_neigh_states
neigh_coords_pos = diag_neigh_coords + left_pos_neigh_coords + top_pos_neigh_coords
neigh_costs_pos = diag_neigh_costs + left_pos_neigh_costs + top_pos_neigh_costs
costs_pos = np.array(neigh_costs_pos) + zx_costs[z_cost_ind, x_cost_ind]
opt_ind_pos = np.argmin(costs_pos)
P[zi, xi, 0] = *neigh_coords_pos[opt_ind_pos], neigh_states_pos[opt_ind_pos]
D[zi, xi, 0] = costs_pos[opt_ind_pos]
# state 1: x is dropped
costs_neg = np.array(left_neigh_costs) + drop_costs[x_cost_ind]
opt_ind_neg = np.argmin(costs_neg)
P[zi, xi, 1] = *left_neigh_coords[opt_ind_neg], left_neigh_states[opt_ind_neg]
D[zi, xi, 1] = costs_neg[opt_ind_neg]
cur_state = D[K, N, :].argmin()
min_cost = D[K, N, cur_state]
# backtracking the solution
zi, xi = K, N
path, labels = [], np.zeros(N)
x_dropped = [] if cur_state == 1 else [N]
while not (zi == 0 and xi == 0):
path.append((zi, xi))
zi_prev, xi_prev, prev_state = P[zi, xi, cur_state]
if xi > 0:
labels[xi - 1] = zi * (cur_state == 0) # either zi or 0
if prev_state == 1:
x_dropped.append(xi_prev)
zi, xi, cur_state = zi_prev, xi_prev, prev_state
return min_cost, path, x_dropped, labels
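# Illustrative (hypothetical) call with a toy 2-step x 3-clip cost matrix, where lower cost means a better match:
#   costs = np.array([[0.1, 0.9, 0.8], [0.7, 0.8, 0.2]])
#   drops = np.full(3, 0.5)
#   min_cost, path, dropped, labels = drop_dtw(costs, drops)
#   # labels[j] == k means clip j is matched to step k (1-based); 0 means the clip was dropped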
drop_cost = np.percentile(-scores.flatten(), float(args.drop_cost_pct))  # per-clip drop cost taken as a percentile of the negated similarities
drop_cost_array = np.ones(len(sents)) * drop_cost
ddtw_results = drop_dtw(-scores.numpy(), drop_cost_array, contiguous=True)
segs = {}
for s in np.unique(ddtw_results[3]):
if s==0:
continue
indexes = np.where(ddtw_results[3] == s)[0] + 1
segs[int(s)] = (min(indexes), max(indexes))
#print("SEGS", segs)
#print("=======================\n")
if args.hr_folder:
human_readable = {}
for i in segs.keys():
print(i)
print(steps)
step_sentences = []
for f in range(segs[i][0], segs[i][1] + 1):
step_sentences.append(sents[f-1])
human_readable[i] = step_sentences
with open(args.hr_folder + id + ".json", "w") as outfile:
json.dump(human_readable, outfile)
segments = dict(reversed(list(segs.items())))
#print("HUMAN READABLE:", human_readable)
else:
print("ERROR!")
return
if args.allow_drops:
sent_embs = sent_model.encode(sents)
sims_arr = [] # [step, sent, sim]
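# trim sentences from both ends of each matched segment whose similarity to the step falls well below the segment's best match (average relative score under the -0.6 heuristic)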
for index in segments.keys():
start, end = segments[index]
#print("INDEX:", index, "STARTEND", start, end)
#print("SENT EMB SHAPE:", sent_embs.shape)
step_emb = step_embs[index - 1]
#print("STEP EMB SHAPE:", step_emb.shape)
sims = []
for i in range(start, end):
#sims.append((index, i, sent_embs[i] @ step_emb))
sims.append(sent_embs[i] @ step_emb)
relative_scores = [(b - max(sims)) / max(sims) for b in sims]
#print(relative_scores)
heuristic = -0.6
start_counter, end_counter = 0, 0
for i in range(1, len(relative_scores)):
if (relative_scores[i] + relative_scores[i-1])/2 < heuristic:
start_counter += 1
else:
break
for j in range(len(relative_scores)-2, 0, -1):
if (relative_scores[j] + relative_scores[j+1])/2 < heuristic:
end_counter += 1
else:
break
#print("INDEX", index, "START,END COUNTER:", start_counter, end_counter)
segments[index] = (start + start_counter, end - end_counter)
print(f"PROCESSED SEGMENT {index}")
#sims_arr.append(sims)
#print(sims_arr)
postprocess_alignment = align_after_postprocess(text, original_text)
# print(segments)
# print(postprocess_alignment)
aligned_segments = {}
sents = list(doc.sents)
#print("====================")
#print("SEGMENTS:", segments)
#print("POSTPROC ALIGN:", postprocess_alignment)
#print("====================")
# print(text)
# print(original_text)
# print(' '.join(original_text['text']))
# print(max(list(postprocess_alignment.keys())), [sents[segments[index][0]].start_char for index in segments], [text[sents[segments[index][0]].start_char:sents[segments[index][1]-1].end_char] for index in segments])
for index in segments:
#print("DEBUG:", segments[index][0], len(sents), sents)
# TEMP FIX
TEMP_FIX_INDEX = segments[index][0]
if segments[index][0] == len(sents):
TEMP_FIX_INDEX -= 1
while str(sents[TEMP_FIX_INDEX]).isspace():
#print(f"===========\n{index}\n==============\n")
segments[index] = (segments[index][0]-1, segments[index][1])
# TEMP FIX:
start = sents[TEMP_FIX_INDEX].start_char
#print("================")
#print("START:", start)
#print("================")
while start not in postprocess_alignment and start < len(text):
start += 1
if start not in postprocess_alignment:
print('A', sents[segments[index][0]])
print('B', text[sents[segments[index][0]].start_char:], sents[segments[index][0]].start_char)
print('C', text)
print('D', ' '.join(original_text['text']))
print(sents[segments[index][0]].start_char, sorted(list(postprocess_alignment.keys()))[-50:])
assert start in postprocess_alignment
end = sents[segments[index][1]-1].end_char-1
while end not in postprocess_alignment and end >= 0:
end -= 1
assert end in postprocess_alignment
#print("=============")
#print("POSTPROC START", postprocess_alignment[start], "POSTPROC END", postprocess_alignment[end])
#print("=============")
aligned_segments[index] = postprocess_alignment[start]+postprocess_alignment[end]
#print("==================")
#print('ALIGNED:', ' '.join(original_text['text'][aligned_segments[index][0]:aligned_segments[index][2]+1]), sents[segments[index][0]:segments[index][1]])
#print("==================")
return aligned_segments
def remove_repeat_ngrams(text_list, min_n=3, max_n=8, return_segment_ids=False):
assert isinstance(text_list, list)
tokens = []
segment_ids = []
for segment_id, segment in enumerate(text_list):
segment_tokens = segment.split()
for token in segment_tokens:
if len(token) > 0:
tokens.append(token)
segment_ids.append(segment_id)
inside_segment = False
num_streak_tokens = 0
new_tokens = []
new_segment_ids = []
indices_added = set()
for i in range(len(tokens)):
redundant = False
for j in range(max_n, min_n-1, -1):
if i+1 >= j*2 and tokens[i+1-j:i+1] == tokens[i+1-j*2:i+1-j]:
# print('here', tokens[i+1-j*2:i+1])
inside_segment = True
num_streak_tokens = min_n
for k in range(1, j):
if i-k in indices_added:
new_tokens.pop()
new_segment_ids.pop()
indices_added.remove(i-k)
redundant = True
break
if not redundant:
new_tokens.append(tokens[i])
indices_added.add(i)
new_segment_ids.append(segment_ids[i])
if return_segment_ids:
return ' '.join(new_tokens), new_segment_ids
return ' '.join(new_tokens)
def process_video(video_id, args, input_steps, transcripts, tokenizer, punct_cap_model, output_queue):
prompt = "Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
#print('here3')
if transcripts is not None:
try:
original = transcripts[video_id]
except:
print(video_id)
return
else:
f = open(os.path.join(args.transcripts_path, video_id+".csv"))
lines = f.readlines()
original = {"text": [], "start": [], "end": []}
for line in lines[1:]:
parts = line.split(',')
original["start"].append(float(parts[0]))
original["end"].append(float(parts[1]))
original["text"].append(parts[-1].strip())
transcript = " ".join(original["text"])
deduplicated_text, new_segment_ids = remove_repeat_ngrams(original["text"], min_n=3, max_n=9, return_segment_ids=True)
deduplicated_tokens = deduplicated_text.split()
original["text"] = [[] for _ in range(len(original["text"]))]
for token, new_id in zip(deduplicated_tokens, new_segment_ids):
original["text"][new_id].append(token)
original["text"] = [" ".join(lst) for lst in original["text"]]
transcript = " ".join(original["text"])
if not args.no_formatting:
if args.formatted_transcripts_path is not None:
fname = os.path.join(args.formatted_transcripts_path, video_id+".txt")
if args.formatted_transcripts_path is not None and os.path.exists(fname):
f = open(fname)
transcript = f.readlines()[0]
else:
transcript = punct_cap_model.add_punctuation_capitalization([transcript])[0]
tokens = tokenizer(transcript)
#print(video_id, len(transcript), len(tokens["input_ids"]))
while len(tokens["input_ids"]) > 1600:
transcript = transcript[:-100]
tokens = tokenizer(transcript)
if args.gen_steps is not None:
steps = args.gen_steps.split("\\n")
elif args.input_steps_path is not None:
if video_id not in input_steps:
return
steps = input_steps[video_id]["steps"]
else:
if video_id in finished:
return
input_text = prompt.replace("|||1", transcript)
steps = []
num_attempts = 0
while len(steps) == 0:
response = openai.Completion.create(
engine="text-babbage-001",
prompt=input_text,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
output = response["choices"][0]["text"].strip()
num_attempts += 1
steps = output.split("\n")
if all(["." in step for step in steps[1:]]):
steps = steps[:1]+[step[step.index(".")+1:].strip() for step in steps[1:]]
elif num_attempts < args.max_attempts:
steps = []
output_dict = {"video_id": video_id, "steps": steps, "transcript": transcript}
if not args.no_align:
segments = align_text(transcript, original, steps, sent_model, args.num_workers, args.do_dtw, args.do_drop_dtw, args.dtw_window_size, id=video_id)
#print(segments)
output_dict["segments"] = segments
if isinstance(output_queue, _io.TextIOWrapper):
output_queue.write(json.dumps(output_dict)+'\n')
else:
output_queue.put(json.dumps(output_dict)+'\n')
def output_listener(output_queue, output_filename):
mode = 'a+' if os.path.exists(output_filename) else 'w'
with open(output_filename, mode) as fout:
while True:
output = output_queue.get()
if output == 'kill':
break
fout.write(output)
fout.flush()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--video_list_path")
parser.add_argument("--transcripts_path")
parser.add_argument("--formatted_transcripts_path")
parser.add_argument("--start_index", type=int, default=0)
parser.add_argument("--end_index", type=int, default=None)
parser.add_argument("--max_attempts", type=int, default=1)
parser.add_argument("--no_formatting", action="store_true")
parser.add_argument("--output_path")
parser.add_argument("--cpu", action="store_true")
parser.add_argument("--no_align", action="store_true")
parser.add_argument("--input_steps_path", type=str, default=None)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--do_dtw", action="store_true")
parser.add_argument("--dtw_window_size", type=int, default=1000000)
parser.add_argument("--allow_drops", action="store_true")
parser.add_argument("--do_drop_dtw", action="store_true")
parser.add_argument("--drop_cost_pct", default=25)
parser.add_argument("--hr_folder")
parser.add_argument("--gen_steps", type=str, default=None)
args = parser.parse_args()
if not args.no_align:
if args.cpu:
sent_model = SentenceTransformer('sentence-transformers/paraphrase-mpnet-base-v2').cpu()
else:
sent_model = SentenceTransformer('sentence-transformers/paraphrase-mpnet-base-v2').cuda()
# sent_model = AutoModel.from_pretrained('sentence-transformers/paraphrase-mpnet-base-v2').cuda()
if not args.no_formatting:
punct_cap_model = PunctuationCapitalizationModel.from_pretrained("punctuation_en_bert")
#punct_cap_model = None
if args.cpu:
punct_cap_model = punct_cap_model.cpu()
tokenizer = AutoTokenizer.from_pretrained("gpt2")
f = open(args.video_list_path)
lines = f.readlines()[0]
#print("LINES:", lines)
video_ids = lines.split(",")
#video_ids = [line.strip().split()[0].split('.')[0] for line in lines]
transcripts = None
if args.transcripts_path[-5:] == ".json":
f = open(args.transcripts_path)
transcripts = json.load(f)
if args.end_index is not None:
video_ids = video_ids[:args.end_index]
video_ids = video_ids[args.start_index:]
finished = set()
if os.path.exists(args.output_path):
fout = open(args.output_path)
written_lines = fout.readlines()
fout.close()
for line in written_lines:
try:
datum = json.loads(line)
finished.add(datum['video_id'])
except:
pass
fout = open(args.output_path, 'a')
else:
fout = open(args.output_path, 'w')
input_steps = None
if args.input_steps_path is not None:
f = open(args.input_steps_path)
lines = f.readlines()
input_steps = [json.loads(line) for line in lines]
input_steps = {datum["video_id"]: datum for datum in input_steps}
"""manager = mp.Manager()
q = manager.Queue()
pool = mp.Pool(args.num_workers+2)
watcher = pool.apply_async(output_listener, (q, args.output_path))
print('here1', pool._processes)
jobs = []"""
for video_id in tqdm(video_ids):
if video_id in finished:
continue
# job = pool.apply_async(process_video, (video_id, args, input_steps, transcripts, tokenizer, punct_cap_model, q))
process_video(video_id, args, input_steps, transcripts, tokenizer, punct_cap_model, fout)
# print('here', len(jobs))
# jobs.append(job)
"""for job in jobs:
job.get()
q.put('kill')
pool.close()
pool.join()"""
fout.close()
| [
"Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
] |
2024-01-10 | chuyishang/llm-video-understanding | misc~gather_align_steps_orig.py | import os
import string
import json
import torch
import numpy as np
import openai
import random
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModel
#from nemo.collections.nlp.models import PunctuationCapitalizationModel
import argparse
from tqdm import tqdm
import spacy
from sentence_transformers import SentenceTransformer
import multiprocessing as mp
import _io
nlp = spacy.load('en_core_web_sm')
sent_tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")
def get_next_character(text_list, index1, index2):
if index1 == len(text_list):
return None, index1, index2
if index2 == len(text_list[index1]):
return get_next_character(text_list, index1+1, 0)
if text_list[index1][index2].isspace():
return get_next_character(text_list, index1, index2+1)
return text_list[index1][index2], index1, index2
def align_after_postprocess(postprocessed, original):
index_map = {}
speech_segment_index = 0
within_segment_index = 0
p_index = 0
postprocessed_l = postprocessed # .lower()
while p_index < len(postprocessed_l):
if postprocessed_l[p_index].isspace():
p_index += 1
continue
char, speech_segment_index, within_segment_index = get_next_character(original["text"], speech_segment_index, within_segment_index)
if char is not None:
_, next_speech_segment_index, next_within_segment_index = get_next_character(original["text"], speech_segment_index, within_segment_index+1)
if postprocessed_l[p_index].upper().lower() == char.upper().lower() or postprocessed_l[p_index:p_index+2].upper().lower() == char.upper().lower():
index_map[p_index] = (speech_segment_index, within_segment_index)
speech_segment_index = next_speech_segment_index
within_segment_index = next_within_segment_index
p_index += 1
return index_map
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
def encode_section(sent_model, sents, start, end):
section = ' '.join(sents[start:end])
return {(start, end): sent_model.encode([section])[0]}
def remove_punctuation(text):
new_text = text
for c in string.punctuation:
new_text = new_text.replace(c, '')
return new_text
def align_text(text, original_text, steps, sent_model, num_workers, dtw=True, dtw_window_size=10000000000, dtw_start_offset=False):
doc = nlp(text)
sents = [str(sent) for sent in list(doc.sents)]
steps = steps[:len(sents)]
step_embs = sent_model.encode(steps)
text = text.replace('ı', 'i')
if dtw:
dtw_matrix = np.zeros((len(steps)+1, len(sents)+1, len(sents)+1))
for i in range(len(steps)+1):
for start in range(len(sents)+1):
for end in range(len(sents)+1):
dtw_matrix[i,start,end] = -np.inf
dtw_matrix[0,0,0] = 0
pointers = -1*np.ones((len(steps)+1, len(sents)+1, len(sents)+1), dtype=np.int32)
pointer_scores = -np.inf*np.ones((len(steps)+1, len(sents)+1, len(sents)+1), dtype=np.float32)
start_sent_index = 0
if dtw_start_offset:
single_sent_emb = np.stack([sent_model.encode([sent])[0,:] for sent in sents])
start_scores = (step_embs[:1,:]*single_sent_emb).sum(1)
start_sent_index = min(max(0, start_scores.argmax()-1), len(sents)-len(steps))
dtw_matrix[0,start_sent_index,start_sent_index] = 0
section_emb = {}
if num_workers == 1:
batch = []
for start in range(start_sent_index, len(sents)):
for end in range(start+1, min(start+dtw_window_size+1, len(sents)+1)):
section = ' '.join(sents[start:end])
batch.append((start, end, section))
if len(batch) == 16 or (start == len(sents)-1 and end == len(sents)):
inputs = [item[-1] for item in batch]
outputs = sent_model.encode(inputs)
for item, output in zip(batch, outputs):
section_emb[item[:2]] = output
batch = []
if len(batch) > 0:
inputs = [item[-1] for item in batch]
outputs = sent_model.encode(inputs)
for item, output in zip(batch, outputs):
section_emb[item[:2]] = output
else:
with mp.Pool(num_workers) as pool:
section_emb_list = pool.starmap(encode_section, [(sent_model, sents, start, end) for start in range(0, len(sents)) for end in range(start+1, min(start+dtw_window_size+1, len(sents)+1))])
for emb_dict in section_emb_list:
section_emb.update(emb_dict)
for i in range(1, len(steps)+1):
for start in range(start_sent_index, len(sents)):
for end in range(start+1, min(start+dtw_window_size+1, len(sents)+1)):
section = ' '.join(sents[start:end])
sentence_emb = section_emb[(start,end)] # sent_model.encode([section])[0]
step_emb = step_embs[i-1] # sent_model.encode([steps[i-1]])[0]
similarity = (sentence_emb*step_emb).sum().item()
best_prev_segment = dtw_matrix[i-1,:,start].argmax().item()
prev_segment_score = dtw_matrix[i-1,:,start].max().item()
# if prev_segment_score > dtw_matrix[i-1,start,end].item():
# pointers[i,start,end] = best_prev_segment
# else:
# pointers[i,start,end] = start
pointers[i,start,end] = best_prev_segment
pointer_scores[i,start,end] = prev_segment_score
last_max = np.max([prev_segment_score]) # , dtw_matrix[i-1,start,end]])
dtw_matrix[i,start,end] = similarity+last_max
# print('good', i, [j for j in range(dtw_matrix.shape[1]) if dtw_matrix[i,j,:].max().item() > -np.inf])
end = dtw_matrix.shape[1]-1
index = dtw_matrix.shape[0]-1
start = dtw_matrix[index,:,end].argmax().item()
print(dtw_matrix[index,:,:end].max().item())
segments = {index: (start, end)}
index -= 1
while index > 0:
# print(index+1, start, end)
new_start = int(pointers[index+1,start,end])
print(pointer_scores[index+1,start,end])
if new_start != start:
end = start
start = new_start
# else:
# print('bad', pointers[index+1,start,end], pointer_scores[index+1,start,end])
segments[index] = (start, end)
index -= 1
print(start_sent_index, segments)
else:
sent_emb = sent_model.encode(sents)
scores = torch.matmul(torch.from_numpy(step_embs), torch.from_numpy(sent_emb).t())
matched_sentences = scores.argmax(dim=-1).tolist()
segments = {}
for i in range(1, len(steps)+1):
print(steps[i-1], '|||', sents[matched_sentences[i-1]])
segments[i] = (max(0, matched_sentences[i-1]-1), min(len(sents), matched_sentences[i-1]+2))
# text_sans_punct = remove_punctuation(text)
# assert text_sans_punct.lower() == ' '.join(original_text['text'])
postprocess_alignment = align_after_postprocess(text, original_text)
# print(segments)
# print(postprocess_alignment)
aligned_segments = {}
sents = list(doc.sents)
# print(text)
# print(original_text)
# print(' '.join(original_text['text']))
# print(max(list(postprocess_alignment.keys())), [sents[segments[index][0]].start_char for index in segments], [text[sents[segments[index][0]].start_char:sents[segments[index][1]-1].end_char] for index in segments])
for index in segments:
while str(sents[segments[index][0]]).isspace():
segments[index] = (segments[index][0]-1, segments[index][1])
start = sents[segments[index][0]].start_char
while start not in postprocess_alignment and start < len(text):
start += 1
if start not in postprocess_alignment:
print('A', sents[segments[index][0]])
print('B', text[sents[segments[index][0]].start_char:], sents[segments[index][0]].start_char)
print('C', text)
print('D', ' '.join(original_text['text']))
print(sents[segments[index][0]].start_char, sorted(list(postprocess_alignment.keys()))[-50:])
assert start in postprocess_alignment
end = sents[segments[index][1]-1].end_char-1
while end not in postprocess_alignment and end >= 0:
end -= 1
assert end in postprocess_alignment
aligned_segments[index] = postprocess_alignment[start]+postprocess_alignment[end]
print('aligned', ' '.join(original_text['text'][aligned_segments[index][0]:aligned_segments[index][2]+1]), sents[segments[index][0]:segments[index][1]])
return aligned_segments
def remove_repeat_ngrams(text_list, min_n=3, max_n=8, return_segment_ids=False):
assert isinstance(text_list, list)
tokens = []
segment_ids = []
for segment_id, segment in enumerate(text_list):
segment_tokens = segment.split()
for token in segment_tokens:
if len(token) > 0:
tokens.append(token)
segment_ids.append(segment_id)
inside_segment = False
num_streak_tokens = 0
new_tokens = []
new_segment_ids = []
indices_added = set()
for i in range(len(tokens)):
redundant = False
for j in range(max_n, min_n-1, -1):
if i+1 >= j*2 and tokens[i+1-j:i+1] == tokens[i+1-j*2:i+1-j]:
# print('here', tokens[i+1-j*2:i+1])
inside_segment = True
num_streak_tokens = min_n
for k in range(1, j):
if i-k in indices_added:
new_tokens.pop()
new_segment_ids.pop()
indices_added.remove(i-k)
redundant = True
break
if not redundant:
new_tokens.append(tokens[i])
indices_added.add(i)
new_segment_ids.append(segment_ids[i])
if return_segment_ids:
return ' '.join(new_tokens), new_segment_ids
return ' '.join(new_tokens)
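# Illustrative example (not part of the original file): remove_repeat_ngrams collapses
# phrases that ASR captions often repeat across segment boundaries, while the returned
# segment ids still map every surviving token back to the caption it came from.
def _example_remove_repeat_ngrams():
    captions = ["add the flour add the flour", "add the flour then mix well"]
    text, segment_ids = remove_repeat_ngrams(captions, min_n=3, max_n=8, return_segment_ids=True)
    # text == "add the flour then mix well"; segment_ids == [0, 0, 0, 1, 1, 1]
    return text, segment_ids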
def process_video(video_id, args, input_steps, transcripts, tokenizer, output_queue, punct_cap_model=None):
prompt = "Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
print('here3')
# RUN IF TRANSCRIPTS HAS BEEN PASSED IN
if transcripts is not None:
original = transcripts[video_id]
else:
# OPEN TRANSCRIPTS PATH, QUERY USING video_id.csv
f = open(os.path.join(args.transcripts_path, video_id+".csv"))
# READ LINES OF TRANSCRIPT
lines = f.readlines()
# TEXT - ALL CAPTIONS OF VIDEO ID, START - ALL START TIMESTAMPS, END - ALL END TIMESTAMPS
original = {"text": [], "start": [], "end": []}
for line in lines[1:]:
parts = line.split(',')
original["start"].append(float(parts[0]))
original["end"].append(float(parts[1]))
original["text"].append(parts[-1].strip())
# TRANSCRIPT - JOINED TEXT OF ALL CAPTIONS
transcript = " ".join(original["text"])
# DEDUPLICATES THE TEXT FOR REPEATED CAPTIONS
deduplicated_text, new_segment_ids = remove_repeat_ngrams(original["text"], min_n=3, max_n=9, return_segment_ids=True)
deduplicated_tokens = deduplicated_text.split()
# RESETS 'TEXT' VARIABLES
original["text"] = [[] for _ in range(len(original["text"]))]
    # ADD SEGMENT IDS FOR DEDUPLICATED TEXT, APPENDS THEM
for token, new_id in zip(deduplicated_tokens, new_segment_ids):
original["text"][new_id].append(token)
# COMBINES DEDUPLICATED TEXT
original["text"] = [" ".join(lst) for lst in original["text"]]
transcript = " ".join(original["text"])
# DEALS WITH FORMATTING OPTIONS
if not args.no_formatting:
# ADDS CAPITALIZATION AND FORMATS TRANSCRIPT??
if args.formatted_transcripts_path is not None:
fname = os.path.join(args.formatted_transcripts_path, video_id+".txt")
if args.formatted_transcripts_path is not None and os.path.exists(fname):
f = open(fname)
transcript = f.readlines()[0]
else:
transcript = punct_cap_model.add_punctuation_capitalization([transcript])[0]
# TOKENIZES TRANSCRIPT
tokens = tokenizer(transcript)
print(video_id, len(transcript), len(tokens["input_ids"]))
# ENSURES TOKEN LENGTH IS LESS THAN MAX TOKEN LENGTH (1600)
while len(tokens["input_ids"]) > 1600:
transcript = transcript[:-100]
tokens = tokenizer(transcript)
# ARGS INPUT STEPS - TAKES IN INPUT STEPS?? NOT SURE IF NECESSARY
if args.input_steps_path is not None:
if video_id not in input_steps:
return
steps = input_steps[video_id]["steps"]
else:
# MAKES OPENAI API CALL
if video_id in finished:
return
input_text = prompt.replace("|||1", transcript)
steps = []
num_attempts = 0
while len(steps) == 0:
response = openai.Completion.create(
engine="text-babbage-001",
prompt=input_text,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
output = response["choices"][0]["text"].strip()
num_attempts += 1
steps = output.split("\n")
if all(["." in step for step in steps[1:]]):
steps = steps[:1]+[step[step.index(".")+1:].strip() for step in steps[1:]]
elif num_attempts < args.max_attempts:
steps = []
output_dict = {"video_id": video_id, "steps": steps, "transcript": transcript}
# DEALS WITH THE DROP DTW CODE
if not args.no_align:
segments = align_text(transcript, original, steps, sent_model, args.num_workers, not args.no_dtw, args.dtw_window_size)
print(segments)
output_dict["segments"] = segments
if isinstance(output_queue, _io.TextIOWrapper):
output_queue.write(json.dumps(output_dict)+'\n')
else:
output_queue.put(json.dumps(output_dict)+'\n')
def output_listener(output_queue, output_filename):
mode = 'a+' if os.path.exists(output_filename) else 'w'
with open(output_filename, 'a+') as fout:
while True:
output = output_queue.get()
if output == 'kill':
break
fout.write(output)
fout.flush()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--video_list_path")
parser.add_argument("--transcripts_path")
parser.add_argument("--formatted_transcripts_path")
parser.add_argument("--start_index", type=int, default=0)
parser.add_argument("--end_index", type=int, default=None)
parser.add_argument("--max_attempts", type=int, default=1)
parser.add_argument("--no_formatting", action="store_true")
parser.add_argument("--output_path")
parser.add_argument("--cpu", action="store_true")
parser.add_argument("--no_align", action="store_true")
parser.add_argument("--input_steps_path", type=str, default=None)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--no_dtw", action="store_true")
parser.add_argument("--dtw_window_size", type=int, default=1000000)
args = parser.parse_args()
if not args.no_align:
if args.cpu:
sent_model = SentenceTransformer('sentence-transformers/paraphrase-mpnet-base-v2').cpu()
else:
sent_model = SentenceTransformer('sentence-transformers/paraphrase-mpnet-base-v2').cuda()
# sent_model = AutoModel.from_pretrained('sentence-transformers/paraphrase-mpnet-base-v2').cuda()
'''
if not args.no_formatting:
punct_cap_model = PunctuationCapitalizationModel.from_pretrained("punctuation_en_bert")
if args.cpu:
punct_cap_model = punct_cap_model.cpu()
'''
tokenizer = AutoTokenizer.from_pretrained("gpt2")
f = open(args.video_list_path)
#video_ids = [line.strip().split()[0].split('.')[0] for line in lines]
content = f.read()
print("CONTENT:", content)
video_ids = content.split(",")
print("VIDEO_IDS:", video_ids)
transcripts = None
if args.transcripts_path[-5:] == ".json":
f = open(args.transcripts_path)
transcripts = json.load(f)
if args.end_index is not None:
video_ids = video_ids[:args.end_index]
video_ids = video_ids[args.start_index:]
finished = set()
if os.path.exists(args.output_path):
fout = open(args.output_path)
written_lines = fout.readlines()
fout.close()
for line in written_lines:
try:
datum = json.loads(line)
finished.add(datum['video_id'])
except:
pass
fout = open(args.output_path, 'a')
else:
fout = open(args.output_path, 'w')
input_steps = None
if args.input_steps_path is not None:
f = open(args.input_steps_path)
lines = f.readlines()
input_steps = [json.loads(line) for line in lines]
input_steps = {datum["video_id"]: datum for datum in input_steps}
"""manager = mp.Manager()
q = manager.Queue()
pool = mp.Pool(args.num_workers+2)
watcher = pool.apply_async(output_listener, (q, args.output_path))
print('here1', pool._processes)
jobs = []"""
for video_id in tqdm(video_ids):
if video_id in finished:
continue
# job = pool.apply_async(process_video, (video_id, args, input_steps, transcripts, tokenizer, punct_cap_model, q))
process_video(video_id, args, input_steps, transcripts, tokenizer, fout)
# print('here', len(jobs))
# jobs.append(job)
"""for job in jobs:
job.get()
q.put('kill')
pool.close()
pool.join()"""
fout.close()
| [
"Write the steps of the task that the person is demonstrating, based on the noisy transcript.\nTranscript: |||1\nSteps:\n1."
] |
2024-01-10 | modulabs-ctrl/pommerman-2018 | ctrl~cli~train_ppo_with_rnd.py | # -*- coding:utf-8 -*-
import warnings
warnings.filterwarnings("always")
warnings.filterwarnings("ignore")
"""Train an agent with TensorForce.
python ctrl/cli/train_ppo_with_rnd.py \
--agents=tensorforce::ppo,test::agents.SimpleAgent,test::agents.SimpleAgent,test::agents.SimpleAgent \
--num_of_episodes=1000 --max_timesteps=1000 --config=PommeFFAFast-v0 --render --simulation 2000
"""
import atexit
import functools
import os, sys
import argparse
import docker
from tensorforce.execution import Runner
from tensorforce.contrib.openai_gym import OpenAIGym
import gym
sys.path.append('.')
from pommerman import helpers, make
from pommerman.agents import TensorForceAgent
from ctrl.agents import TensorForcePpoAgent
from collections import deque
from copy import deepcopy
import gym
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
from torch.utils.data import DataLoader
SEED = 5
BATCH_SIZE = 255
LR = 0.0001
LE = 1e-5
SIZE = 372
EPS = np.finfo(np.float64).eps
INTRINSIC_CLIP=0.0001
# set device
use_cuda = torch.cuda.is_available()
print('cuda:', use_cuda)
device = torch.device('cuda' if use_cuda else 'cpu')
# random seed
np.random.seed(SEED)
torch.manual_seed(SEED)
if use_cuda:
torch.cuda.manual_seed_all(SEED)
# ppo settings
CLIENT = docker.from_env()
def clean_up_agents(agents):
"""Stops all agents"""
return [agent.shutdown() for agent in agents]
class RandomNet(nn.Module):
def __init__(self, obs_space):
super().__init__()
self.head = nn.Sequential(
nn.Linear(obs_space, SIZE),
nn.SELU()
)
self.fc = nn.Sequential(
nn.Linear(SIZE, SIZE*2)
)
def forward(self, x):
out = self.head(x)
obs_feature = self.fc(out).reshape(out.shape[0], -1)
return obs_feature
class PredictNet(nn.Module):
def __init__(self, obs_space):
super().__init__()
self.head = nn.Sequential(
nn.Linear(obs_space, SIZE),
nn.SELU()
)
self.fc = nn.Sequential(
nn.Linear(SIZE, SIZE),
nn.SELU(),
nn.Linear(SIZE, SIZE),
nn.SELU(),
nn.Linear(SIZE, SIZE*2)
)
def forward(self, x):
out = self.head(x)
obs_feature = self.fc(out).reshape(out.shape[0], -1)
return obs_feature
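# Illustrative sketch (not part of the original file): RandomNet and PredictNet form a
# Random Network Distillation pair. RandomNet is a fixed, randomly initialized target;
# PredictNet is trained to match it (see WrappedEnv.learn below), and the remaining
# prediction error is used as an intrinsic "curiosity" reward, so rarely seen states
# earn a larger bonus. The helper below mirrors calculate_reward_in with fresh networks.
def _example_rnd_bonus(norm_obs_vector):
    """Curiosity bonus for a single already-normalized observation vector."""
    obs_space = len(norm_obs_vector)
    rand_net = RandomNet(obs_space).to(device)
    pred_net = PredictNet(obs_space).to(device)
    state = torch.tensor([norm_obs_vector]).to(device).float()
    with torch.no_grad():
        bonus = (pred_net(state) - rand_net(state)).pow(2).sum()
    return bonus.item()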
class WrappedEnv(OpenAIGym):
'''An Env Wrapper used to make it easier to work
with multiple agents'''
def __init__(self, gym, visualize=False):
self.gym = gym
self.action_space = gym.action_space.n
self.visualize = visualize
self.timestep = 0
self.global_ts = 0
self.episode = 0
self.res_reward = 0.0
self.render = False
# curiosity properties
self.obs_space = gym.observation_space.shape[0]
self.threshold_of_simulation = -1
self.simulated = 0
self.obs_memory = []
self.max_timesteps = 0
self.rep_memory = None
self.mean = 0.0
self.std = 0.0
self.pred_net = PredictNet(self.obs_space).to(device)
self.rand_net = RandomNet(self.obs_space).to(device)
self.pred_optim = torch.optim.Adam(self.pred_net.parameters(), lr=LR, eps=LE)
def set_render(self, render):
self.render = render
def set_simulation(self, simulation):
self.threshold_of_simulation = int(simulation)
def set_max_timesteps(self, max_timesteps):
self.max_timesteps = max_timesteps
self.reset_replay_memory()
def reset_replay_memory(self):
self.rep_memory = deque(maxlen=self.max_timesteps)
def get_norm_params_old(self, obs_memory):
obses = [[] for _ in range(self.obs_space)]
for obs in obs_memory:
for j in range(self.obs_space):
obses[j].append(obs[j])
mean = np.zeros(self.obs_space, np.float32)
std = np.zeros(self.obs_space, np.float32)
for i, obs_ in enumerate(obses):
mean[i] = np.mean(obs_)
std[i] = np.std(obs_)
print("get_norm_params : {}, {}".format(mean, std))
return mean, std
def get_norm_params(self, obs_memory):
obses = [[] for _ in range(self.obs_space)]
for obs in obs_memory:
for j in range(self.obs_space):
obses[j].append(obs[j])
mean = np.zeros(self.obs_space, 'float')
std = np.zeros(self.obs_space, 'float')
for i, obs_ in enumerate(obses):
mean[i] = np.mean(obs_)
std[i] = np.std(obs_)
obs_memory.clear()
std = np.clip(std, a_min=EPS, a_max=None)
print("mean:'{}', std:'{}'".format(mean, std))
return mean, std
def normalize_obs(self, label, obs, mean, std):
means = [mean for _ in range(BATCH_SIZE)]
stds = [std for _ in range(BATCH_SIZE)]
mean = np.stack(means)
std = np.stack(stds)
norm_obs = (obs - mean) / std
return np.clip(norm_obs, -1, 1)
def calculate_reward_in(self, pred_net, rand_net, obs):
norm_obs = self.normalize_obs("calculate", obs, self.mean, self.std)
state = torch.tensor([norm_obs]).to(device).float()
with torch.no_grad():
pred_obs = pred_net(state)
rand_obs = rand_net(state)
reward = (pred_obs - rand_obs).pow(2).sum()
clipped_reward = torch.clamp(reward, -1.0 * INTRINSIC_CLIP, 1.0 * INTRINSIC_CLIP)
return clipped_reward.item()
def execute(self, action):
if self.visualize:
self.gym.render()
obs = self.gym.get_observations()
all_actions = self.gym.act(obs)
all_actions.insert(self.gym.training_agent, action)
state, reward, terminal, _ = self.gym.step(all_actions)
# state of t+1
agent_state = self.gym.featurize(state[self.gym.training_agent])
extrinsic_reward = reward[self.gym.training_agent]
intrinsic_reward = 0.0
# simulation
if self.simulated < self.threshold_of_simulation:
self.obs_memory.append(agent_state)
self.simulated += 1
elif self.simulated == self.threshold_of_simulation:
print("##### simulation has started #####")
self.mean, self.std = self.get_norm_params(self.obs_memory)
print("mean:{}, \n std:{} \n".format(self.mean, self.std))
self.simulated += 1
self.obs_memory = None
else:
intrinsic_reward = self.calculate_reward_in(self.pred_net, self.rand_net, agent_state)
if np.isnan(intrinsic_reward):
intrinsic_reward = 0.0
# else:
# print("##### intrinsic_reward {} #####".format(intrinsic_reward))
self.timestep += 1
self.rep_memory.append(agent_state)
agent_reward = extrinsic_reward + intrinsic_reward
if self.timestep % 100 == 0:
print("ts[{}/{}]: ext:'{}' + int:'{}' = rew:'{}'".format(self.timestep, self.global_ts, extrinsic_reward, intrinsic_reward, agent_reward))
self.res_reward += agent_reward
return agent_state, terminal, agent_reward
def learn(self):
self.pred_net.train()
self.rand_net.train()
dataloader = DataLoader(
self.rep_memory,
shuffle=True,
batch_size=BATCH_SIZE,
pin_memory=use_cuda
)
        # training pred_net -- with a batch size of 4, agent_state is a tuple of 4 items
for i, agent_state in enumerate(dataloader):
obs = agent_state.detach().cpu().numpy()
            if obs.shape[0] != BATCH_SIZE: # skip when the shape does not match
continue
norm_state = self.normalize_obs("training", obs, self.mean, self.std)
norm_batch = torch.tensor(norm_state).to(device).float()
if i == 0:
print("origi_norm: {}".format(obs[0]))
print("norm_state: {}".format(norm_state))
print("norm_batch: {}".format(norm_batch))
pred_features = self.pred_net(norm_batch)
rand_features = self.rand_net(norm_batch)
f_loss = (pred_features - rand_features).pow(2).sum(dim=1).mean()
if i == 0:
print("pred_features: {}".format(pred_features))
print("rand_features: {}".format(rand_features))
print("loss_features: {}".format(pred_features - rand_features))
self.pred_optim.zero_grad()
f_loss.backward()
self.pred_optim.step()
'''Reset method is called when every episode starts'''
def reset(self):
if self.simulated > self.threshold_of_simulation:
self.learn()
self.global_ts += self.timestep
self.reset_replay_memory()
print(f'Episode [{self.episode:03}], Timestep [{self.timestep:03}/{self.global_ts}] reward {round(self.res_reward,2)}')
obs = self.gym.reset()
agent_obs = self.gym.featurize(obs[3])
self.timestep = 0
self.episode += 1
return agent_obs
def create_ppo_agent(agent):
if type(agent) == TensorForceAgent:
print("create_ppo_agent({})".format(agent))
return TensorForcePpoAgent()
return agent
def main():
    '''CLI interface to bootstrap training'''
parser = argparse.ArgumentParser(description="Playground Flags.")
parser.add_argument("--game", default="pommerman", help="Game to choose.")
parser.add_argument(
"--config",
default="PommeFFACompetition-v0",
help="Configuration to execute. See env_ids in "
"configs.py for options.")
parser.add_argument(
"--agents",
default="tensorforce::ppo,test::agents.SimpleAgent,"
"test::agents.SimpleAgent,test::agents.SimpleAgent",
help="Comma delineated list of agent types and docker "
"locations to run the agents.")
parser.add_argument(
"--agent_env_vars",
help="Comma delineated list of agent environment vars "
"to pass to Docker. This is only for the Docker Agent."
" An example is '0:foo=bar:baz=lar,3:foo=lam', which "
"would send two arguments to Docker Agent 0 and one to"
" Docker Agent 3.",
default="")
parser.add_argument(
"--record_pngs_dir",
default=None,
help="Directory to record the PNGs of the game. "
"Doesn't record if None.")
parser.add_argument(
"--record_json_dir",
default=None,
help="Directory to record the JSON representations of "
"the game. Doesn't record if None.")
parser.add_argument(
"--render",
default=False,
action='store_true',
help="Whether to render or not. Defaults to False.")
parser.add_argument(
"--game_state_file",
default=None,
help="File from which to load game state. Defaults to "
"None.")
parser.add_argument(
"--checkpoint",
default="models/ppo",
help="Directory where checkpoint file stored to."
)
parser.add_argument(
"--num_of_episodes",
default="10",
help="Number of episodes"
)
parser.add_argument(
"--max_timesteps",
default="2000",
help="Number of steps"
)
parser.add_argument(
"--simulation",
default=False,
help="Number of simulations"
)
args = parser.parse_args()
config = args.config
game_state_file = args.game_state_file
checkpoint = args.checkpoint
num_of_episodes = int(args.num_of_episodes)
max_timesteps = int(args.max_timesteps)
simulation = args.simulation
# TODO: After https://github.com/MultiAgentLearning/playground/pull/40
# this is still missing the docker_env_dict parsing for the agents.
agents = [
create_ppo_agent(helpers.make_agent_from_string(agent_string, agent_id + 1000))
for agent_id, agent_string in enumerate(args.agents.split(","))
]
env = make(config, agents, game_state_file)
training_agent = None
training_agent_id = None
for agent in agents:
if type(agent) == TensorForcePpoAgent:
print("Ppo agent initiazlied : {}, {}".format(agent, type(agent)))
training_agent = agent
env.set_training_agent(agent.agent_id)
training_agent_id = agent.agent_id
break
print("[{}] : id[{}]".format(agent, agent.agent_id))
if args.record_pngs_dir:
assert not os.path.isdir(args.record_pngs_dir)
os.makedirs(args.record_pngs_dir)
if args.record_json_dir:
assert not os.path.isdir(args.record_json_dir)
os.makedirs(args.record_json_dir)
learning_agent = training_agent.initialize(env)
for agent in agents:
if type(agent) == TensorForcePpoAgent:
if agent.agent_id == training_agent_id:
learning_agent = training_agent.initialize(env)
else:
agent.initialize(env)
atexit.register(functools.partial(clean_up_agents, agents))
wrapped_env = WrappedEnv(env, visualize=args.render)
wrapped_env.set_render(args.render)
wrapped_env.set_simulation(simulation)
wrapped_env.set_max_timesteps(max_timesteps)
runner = Runner(agent=learning_agent, environment=wrapped_env)
runner.run(episodes=num_of_episodes, max_episode_timesteps=max_timesteps)
print("Stats: ",
runner.episode_rewards[-30:],
runner.episode_timesteps,
runner.episode_times)
learning_agent.save_model(checkpoint)
rewards = runner.episode_rewards
import numpy as np
mean = np.mean(rewards)
print('last 30 rewards {}'.format(rewards[-30:]))
print('mean of rewards {}'.format(mean))
try:
runner.close()
except AttributeError as e:
print(e)
pass
if __name__ == "__main__":
main() | [] |
2024-01-10 | modulabs-ctrl/pommerman-2018 | ctrl~cli~train_pommerman.py | # -*- coding:utf-8 -*-
import warnings
warnings.filterwarnings("always")
warnings.filterwarnings("ignore")
"""Train an agent with TensorForce.
Call this with a config, a game, and a list of agents, one of which should be a
tensorforce agent. The script will start separate threads to operate the agents
and then report back the result.
An example with all three simple agents running ffa:
python train_with_tensorforce.py \
--agents=tensorforce::ppo,test::agents.SimpleAgent,test::agents.SimpleAgent,test::agents.SimpleAgent \
--config=PommeFFACompetition-v0
"""
import atexit
import functools
import os, sys
import argparse
import docker
from tensorforce.execution import Runner
from tensorforce.contrib.openai_gym import OpenAIGym
import gym
sys.path.append('.')
from pommerman import helpers, make
from pommerman.agents import TensorForceAgent
from ctrl.agents import TensorForcePpoAgent
CLIENT = docker.from_env()
DEFAULT_REWARDS = [
    1.01, # 0. win
    -1.01, # 1. loss
    0.01, # 2. draw
    -0.01, # 3. every timestep in which no action is taken
    0.01, # 4. the timestep in which a bomb is placed
    0.01, # 5. every timestep in which the agent keeps moving
    0.01, # 6. the first time the blast-range item is picked up
    0.01, # 7. the first time the kick item is picked up
    0.01 # 8. the first time the ammo item is picked up
]
RES_WIN = 0
RES_LOSE = 1
RES_DRAW = 2
ACT_SLEEP = 3
ACT_BOMB = 4
ACT_OTHER = 5
ITEM_BLAST = 6
ITEM_KICK = 7
ITEM_AMMO = 8
STR_WINNER='Winner' # :thumbs_up_light_skin_tone:'
STR_LOSER='Loser' # :thumbs_down_light_skin_tone:'
STR_SLEEP='Sleep'
STR_STAY='Stay'
STR_UP='Up'
STR_LEFT='Left'
STR_DOWN='Down'
STR_RIGHT='Right'
STR_BOMBSET='BombSet' # :bomb:'
STR_BLAST='ItemBlast' # :cookie:'
STR_KICK='ItemKick' # :egg:'
STR_AMMO='ItemAmmo' # :rice:'
def clean_up_agents(agents):
"""Stops all agents"""
return [agent.shutdown() for agent in agents]
class WrappedEnv(OpenAIGym):
'''An Env Wrapper used to make it easier to work
with multiple agents'''
def __init__(self, gym, visualize=False):
self.gym = gym
self.visualize = visualize
self.old_position = None
self.prev_position = None
self.curr_position = None
self.timestep = 0
self.episode = 0
self.has_blast_strength = False
self.has_can_kick = False
self.has_ammo = False
self.tmp_reward = 0.0
self.res_reward = 0.0
self.accu_bombset = 1.0
self.act_history = []
self.render = False
self.rewards = DEFAULT_REWARDS
print(f'Episode [{self.episode:03}], Timestep [{self.timestep:03}] initialized.')
def set_render(self, render):
self.render = render
def set_rewards(self, custom_rewards):
self.rewards = [ float(reward.strip()) for reward in custom_rewards.split(',') ]
print(self.rewards)
def shaping_reward(self, agent_id, agent_obs, agent_reward, agent_action):
import emoji
import numpy as np
self.timestep += 1
self.agent_board = agent_obs['board']
self.curr_position = np.where(self.agent_board == agent_id)
self.tmp_reward = 0.0
actions = []
if agent_reward == 1:
actions.append(emoji.emojize(STR_WINNER))
self.tmp_reward += self.rewards[RES_WIN]
if agent_reward == -1:
actions.append(emoji.emojize(STR_LOSER))
self.tmp_reward += self.rewards[RES_LOSE]
if agent_reward == 0:
# actions.append("Draw")
self.tmp_reward += self.rewards[RES_DRAW]
if self.prev_position != None and self.prev_position == self.curr_position and self.old_position == self.prev_position:
actions.append(emoji.emojize(STR_SLEEP))
self.tmp_reward += self.rewards[ACT_SLEEP]
elif agent_action == 0:
actions.append(emoji.emojize(STR_STAY))
self.tmp_reward += self.rewards[ACT_OTHER]
elif agent_action == 1:
actions.append(emoji.emojize(STR_UP))
self.tmp_reward += self.rewards[ACT_OTHER]
elif agent_action == 2:
actions.append(emoji.emojize(STR_LEFT))
self.tmp_reward += self.rewards[ACT_OTHER]
elif agent_action == 3:
actions.append(emoji.emojize(STR_DOWN))
self.tmp_reward += self.rewards[ACT_OTHER]
elif agent_action == 4:
actions.append(emoji.emojize(STR_RIGHT))
self.tmp_reward += self.rewards[ACT_OTHER]
elif agent_action == 5:
actions.append(emoji.emojize(STR_BOMBSET))
self.tmp_reward += self.rewards[ACT_BOMB] * self.accu_bombset
self.accu_bombset += 0.2
if not self.has_blast_strength and int(agent_obs['blast_strength']) > 2:
actions.append(emoji.emojize(STR_BLAST))
self.tmp_reward += self.rewards[ITEM_BLAST]
self.has_blast_strength = True
if not self.has_can_kick and agent_obs['can_kick'] == True:
actions.append(emoji.emojize(STR_KICK))
self.tmp_reward += self.rewards[ITEM_KICK]
self.has_can_kick = True
if not self.has_ammo and int(agent_obs['ammo']) > 1:
actions.append(emoji.emojize(STR_AMMO))
self.tmp_reward += self.rewards[ITEM_AMMO]
self.has_ammo = True
self.res_reward += self.tmp_reward
self.act_history += actions
        # Print the detailed reward only when rendering.
if self.render:
print(f'Episode [{self.episode:03}], Timestep [{self.timestep:03}] got reward {round(self.res_reward, 2)} [{actions}]')
self.old_position = self.prev_position
self.prev_position = self.curr_position
        # feature network
return self.tmp_reward
def execute(self, action):
if self.visualize:
self.gym.render()
obs = self.gym.get_observations()
all_actions = self.gym.act(obs)
all_actions.insert(self.gym.training_agent, action)
state, reward, terminal, _ = self.gym.step(all_actions)
agent_state = self.gym.featurize(state[self.gym.training_agent])
agent_id = self.gym.training_agent + 10
agent_reward = reward[self.gym.training_agent]
agent_action = all_actions[self.gym.training_agent]
agent_obs = obs[self.gym.training_agent]
modified_reward = self.shaping_reward(agent_id, agent_obs, agent_reward, agent_action)
return agent_state, terminal, modified_reward
'''Reset method is called when every episode starts'''
def reset(self):
hist = self.act_history
item_count = hist.count(STR_AMMO) + hist.count(STR_BLAST) + hist.count(STR_KICK)
bomb_count = hist.count(STR_BOMBSET)
move_count = hist.count(STR_UP) + hist.count(STR_DOWN) + hist.count(STR_LEFT) + hist.count(STR_RIGHT)
stop_count = hist.count(STR_SLEEP) + hist.count(STR_STAY)
history = "BombSet({}), ItemGot({}), Move({}), Stay({})".format(bomb_count, item_count, move_count, stop_count)
print(f'Episode [{self.episode:03}], Timestep [{self.timestep:03}] reward {round(self.res_reward,2)} history {history}.')
obs = self.gym.reset()
agent_obs = self.gym.featurize(obs[3])
self.timestep = 0
self.episode += 1
self.tmp_reward = 0.0
self.res_reward = 0.0
self.accu_bombset = 1.0
self.act_history = []
self.has_blast_strength = False
self.has_can_kick = False
self.has_ammo = False
return agent_obs
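# Illustrative note (not part of the original file): main() below wires this wrapper up
# roughly as follows, with --rewards supplying the nine shaping values in the same order
# as DEFAULT_REWARDS:
#   wrapped_env = WrappedEnv(env, visualize=False)
#   wrapped_env.set_rewards("1.01,-1.01,0.01,-0.01,0.01,0.01,0.01,0.01,0.01")
#   runner = Runner(agent=learning_agent, environment=wrapped_env)
#   runner.run(episodes=10, max_episode_timesteps=2000)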
def create_ppo_agent(agent):
if type(agent) == TensorForceAgent:
print("create_ppo_agent({})".format(agent))
return TensorForcePpoAgent()
return agent
def main():
    '''CLI interface to bootstrap training'''
parser = argparse.ArgumentParser(description="Playground Flags.")
parser.add_argument("--game", default="pommerman", help="Game to choose.")
parser.add_argument(
"--config",
default="PommeFFACompetition-v0",
help="Configuration to execute. See env_ids in "
"configs.py for options.")
parser.add_argument(
"--agents",
default="tensorforce::ppo,test::agents.SimpleAgent,"
"test::agents.SimpleAgent,test::agents.SimpleAgent",
help="Comma delineated list of agent types and docker "
"locations to run the agents.")
parser.add_argument(
"--agent_env_vars",
help="Comma delineated list of agent environment vars "
"to pass to Docker. This is only for the Docker Agent."
" An example is '0:foo=bar:baz=lar,3:foo=lam', which "
"would send two arguments to Docker Agent 0 and one to"
" Docker Agent 3.",
default="")
parser.add_argument(
"--record_pngs_dir",
default=None,
help="Directory to record the PNGs of the game. "
"Doesn't record if None.")
parser.add_argument(
"--record_json_dir",
default=None,
help="Directory to record the JSON representations of "
"the game. Doesn't record if None.")
parser.add_argument(
"--render",
default=False,
action='store_true',
help="Whether to render or not. Defaults to False.")
parser.add_argument(
"--game_state_file",
default=None,
help="File from which to load game state. Defaults to "
"None.")
parser.add_argument(
"--checkpoint",
default="models/ppo",
help="Directory where checkpoint file stored to."
)
parser.add_argument(
"--num_of_episodes",
default="10",
help="Number of episodes"
)
parser.add_argument(
"--max_timesteps",
default="2000",
help="Number of steps"
)
parser.add_argument(
"--rewards",
default=DEFAULT_REWARDS,
help="Shaping of rewards"
)
args = parser.parse_args()
config = args.config
# record_pngs_dir = args.record_pngs_dir
# record_json_dir = args.record_json_dir
# agent_env_vars = args.agent_env_vars
game_state_file = args.game_state_file
checkpoint = args.checkpoint
num_of_episodes = int(args.num_of_episodes)
max_timesteps = int(args.max_timesteps)
custom_rewards = args.rewards
# TODO: After https://github.com/MultiAgentLearning/playground/pull/40
# this is still missing the docker_env_dict parsing for the agents.
agents = [
create_ppo_agent(helpers.make_agent_from_string(agent_string, agent_id + 1000))
for agent_id, agent_string in enumerate(args.agents.split(","))
]
env = make(config, agents, game_state_file)
training_agent = None
training_agent_id = None
for agent in agents:
if type(agent) == TensorForcePpoAgent:
print("Ppo agent initiazlied : {}, {}".format(agent, type(agent)))
training_agent = agent
env.set_training_agent(agent.agent_id)
training_agent_id = agent.agent_id
break
print("[{}] : id[{}]".format(agent, agent.agent_id))
if args.record_pngs_dir:
assert not os.path.isdir(args.record_pngs_dir)
os.makedirs(args.record_pngs_dir)
if args.record_json_dir:
assert not os.path.isdir(args.record_json_dir)
os.makedirs(args.record_json_dir)
learning_agent = training_agent.initialize(env)
for agent in agents:
if type(agent) == TensorForcePpoAgent:
if agent.agent_id == training_agent_id:
learning_agent = training_agent.initialize(env)
else:
agent.initialize(env)
atexit.register(functools.partial(clean_up_agents, agents))
wrapped_env = WrappedEnv(env, visualize=args.render)
wrapped_env.set_render(args.render)
wrapped_env.set_rewards(custom_rewards)
runner = Runner(agent=learning_agent, environment=wrapped_env)
runner.run(episodes=num_of_episodes, max_episode_timesteps=max_timesteps)
print("Stats: ",
runner.episode_rewards[-30:],
runner.episode_timesteps,
runner.episode_times)
learning_agent.save_model(checkpoint)
rewards = runner.episode_rewards
import numpy as np
mean = np.mean(rewards)
print('last 30 rewards {}'.format(rewards[-30:]))
print('mean of rewards {}'.format(mean))
try:
runner.close()
except AttributeError as e:
print(e)
pass
if __name__ == "__main__":
main() | [] |
2024-01-10 | ROBOBREIZH/robobreizh_dialog | NLP_Server~GPT-3~test_gpt3.py | import openai
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
| [] |
2024-01-10 | Rustemhak/nanozymes_ai | ai_talks~src~utils~helpers.py | import base64
import os
import random
from pathlib import Path
from typing import List, Tuple
import requests
import streamlit as st
import pandas as pd
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
from src.utils.YandexGPT import YandexLLM
import openai
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.document_loaders import PyPDFium2Loader
api_key = os.environ['api_key']
folder_id = "b1g6krtrd2vcbunvjpg6"
# openai.api_key = os.environ.get('OPENAI_API_KEY', None)
# llm_model = 'gpt-4'
# chat = ChatOpenAI(temperature=0.0, model=llm_model, max_tokens=1024)
# embeddings = OpenAIEmbeddings()
def render_svg(svg: Path) -> str:
"""Renders the given svg string."""
with open(svg) as file:
b64 = base64.b64encode(file.read().encode("utf-8")).decode("utf-8")
return f"<img src='data:image/svg+xml;base64,{b64}'/>"
def get_files_in_dir(path: Path) -> List[str]:
files = []
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
files.append(file)
return files
def get_random_img(img_names: list[str]) -> str:
return random.choice(img_names)
def input_fields(*argv):
match argv:
case ("K_m",):
K_m = st.text_input(r"Введите значение $$K_m$$ в диапазоне от 0 до 3820 mM")
return K_m, None
case ("V_max",):
V_max = st.text_input(r"Введите значение $$V_{max}$$ в диапазоне от 0 до 59885 $$\frac{mM}{s}$$")
return None, V_max
case ("K_m", "V_max"):
K_m = st.text_input(r"Введите значение $$K_m$$ в диапазоне от 0 до 3820 mM")
V_max = st.text_input(r"Введите значение $$V_{max}$$ в диапазоне от 0 до 59885 $$\frac{mM}{s}$$")
return K_m, V_max
case _:
return None, None
def get_search_data(df: pd.DataFrame, K_m: str, V_max: str):
"""Function for search data in csv file for given K_m and V_max
Args:
K_m (str): field K_m in csv file
V_max (str): field V_max in csv file
Returns:
str: search most similar strings from csv file
"""
distance = None
match K_m, V_max:
case (_, None):
# print("K_m", K_m)
distance = lambda value: abs(float(value['Km, mM']) - float(K_m))
case (None, _):
# print("V_max", V_max)
distance = lambda value: abs(float(value['Vmax, mM/s']) - float(V_max))
case _:
# print("K_m, V_max", K_m, V_max)
# distance = lambda value: abs(float(value['Km, mM']) - float(K_m))
distance = lambda value: np.sqrt((float(value['Vmax, mM/s']) - float(V_max)) ** 2 + (float(value['Km, mM']) - float(K_m)) ** 2)
# distance = lambda value: abs(float(value['Km, mM']) - float(K_m)) + abs(float(value['Vmax, mM/s']) - float(V_max))
    # Create a list to store the distances between the values and the target values
distances = []
    # Iterate over every value in the data
for _, value in df.iterrows():
        # Compute the distance between the K_m and V_max values
try:
# print("value", value)
# print("value['Vmax, mM/s']", value['Vmax, mM/s'])
_distance = distance(value)
            # Add the distance and the value to the distances list
distances.append((_distance, value))
except ValueError:
continue
    # Sort the distances list by distance in ascending order
distances.sort(key=lambda x: x[0])
    # Return the top N most similar values
return [(distance, value) for distance, value in distances[:5]]
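# Illustrative example (not part of the original file): the column names below are the
# ones this function reads; the rows themselves are made-up values.
def _example_get_search_data():
    df = pd.DataFrame({
        "Km, mM": [0.5, 12.0, 100.0],
        "Vmax, mM/s": [1.0, 3.5, 40.0],
    })
    # With only K_m given, rows are ranked by |Km - 10| and the closest ones returned.
    return get_search_data(df, K_m="10", V_max=None)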
# def get_search_data(df: pd.DataFrame, K_m: str, V_max: str) -> List[pd.DataFrame]:
#     # Convert K_m and V_max to numbers
# K_m = float(K_m) if K_m else None
# V_max = float(V_max) if V_max else None
#     # Extract the K_m and V_max values from the data
# data = df[['Km, mM', 'Vmax, mM/s']]
# data.columns = ['Km, mM', 'Vmax, mM/s']
#     # Drop the "no" values
# data = data[data['Km, mM'] != 'no']
# data = data[data['Vmax, mM/s'] != 'no']
# print("data: ", data)
# print("+" * 20)
#     # Normalize the data
# normalize_data = normalize(data, axis=0, norm='l2')
#     # Compute the dot product
# similarities = cosine_similarity(data)
# print("data['Km, mM']", data['Km, mM'])
# print("data['Km, mM'].mean()", data['Km, mM'].mean())
# print("data['Vmax, mM/s'].mean()", data['Vmax, mM/s'].mean())
#     # Normalize the query vector
# if K_m is not None and V_max is not None:
# finded_vec = normalize(pd.DataFrame([float(K_m), float(V_max)]), axis=0, norm='l2')
# elif K_m is not None:
# finded_vec = normalize(pd.DataFrame([float(K_m), data['Vmax, mM/s'].mean()]), axis=0, norm='l2')
# elif V_max is not None:
# finded_vec = normalize(pd.DataFrame([data['Km, mM'].mean(), float(V_max)]), axis=0, norm='l2')
#     # Find the top 5 most similar values
# print("finded_vec", finded_vec)
# finded_vec = np.array(finded_vec).reshape(1, -1)
# results = []
# print(pd.DataFrame(finded_vec).shape, pd.DataFrame(data).shape)
# distance = cosine_similarity(finded_vec, normalize_data)[0]
# print("distance", distance, len(distance))
# # distance.sort()
# top_k_indices = np.argpartition(distance, 5)[:5]
# print("top_k_indices", top_k_indices)
#     # Add the distance and the value to results
# for k in top_k_indices:
# print("k", k)
# results.append((distance[k], df.iloc[k]))
# # print("results", results)
# return results
# def get_chatgpt_pdf_syntes(df: pd.DataFrame) -> str:
# """Function for get chatgpt pdf syntes paragraph
# Args:
# df (pd.DataFrame): row from csv file that need to get syntes
# Returns:
# str: description of syntes from chatgpt
# """
# print("In get_chatgpt_pdf_syntes: ", df)
# print("+" * 20)
# print("type(df)", type(df))
# try:
# df.reset_index(inplace=True, drop=True)
# index = 0
# formula, size, link = df.loc[index, 'formula'], int(df.loc[index, 'length, nm']), df.loc[index, 'link']
# paper_filename=link.split('/')[-1]+'.pdf'
# print("paper_filename", paper_filename)
# try:
# loader = PyPDFium2Loader("ai_talks/assets/pdf/" + paper_filename) # mode="elements"
# except ValueError:
# data = 'We apologize, but our service could not find the information in the original article: ' + df.loc[index, 'link']
# raise ValueError
# else:
# print("NOT ERROR IN LOADER")
# data = loader.load()
# db = DocArrayInMemorySearch.from_documents(
# data,
# embeddings
# )
# retriever = db.as_retriever()
# qa_stuff = RetrievalQA.from_chain_type(
# llm=chat,
# chain_type = "stuff", # "stuff" "map_reduce" "refine" "map_rerank"
# retriever=retriever,
# verbose=False
# )
# # query = "How is synthesis carried out in this article?"
# # \n\nSynthesis искать это
# # какие реагенты и оборудование. попробовать агентов. если ты не нашёл слово синтез то попробуй поискать словосочетания с ключевыми словами ...
# query = f'What is needed for synthesis of {size} nm or other size {formula} NPs? NPs means nanoparticles. If the article does not describe how the synthesis was carried out, but a link is given to another article where it is said about it, then give a link to the article as an answer. If the article does not say anything about synthesis, then answer it. Answer as fully as possible, try to take the maximum of the original text from the article. Your answer should consist of several paragraphs and be quite voluminous, while preserving the source text as much as possible'
# response = qa_stuff.run(query)
# return response
# except BaseException:
# data = 'We apologize, but our service could not find the information in the original article: ' + df.loc[index, 'link']
# return data
# # return data
def get_chatgpt_pdf_syntes(df: pd.DataFrame) -> str:
"""Function for get chatgpt pdf syntes paragraph
Args:
df (pd.DataFrame): row from csv file that need to get syntes
Returns:
str: description of syntes from chatgpt
"""
data = ""
try:
df.reset_index(inplace=True, drop=True)
index = 0
formula, size, link = df.loc[index, 'formula'], int(df.loc[index, 'length, nm']), df.loc[index, 'link']
data = df.loc[index, 'synthesis']
except BaseException:
data = 'We apologize, but our service could not find the information in the original article: ' + df.loc[index, 'link']
return data
instructions = """Представь себе, что ты умный помощник для помощи химикам и биологам Nanozymes. Твоя задача - вежливо и по мере своих сил отвечать на все вопросы собеседника по статье."""
chat = YandexLLM(api_key=api_key, folder_id=folder_id,
instruction_text = instructions)
def chat_YGPT(query, path_file=""):
    # Prompt for formatting the retrieved documents
try:
document_prompt = langchain.prompts.PromptTemplate(
input_variables=["page_content"], template="{page_content}"
)
data = ""
try:
if path_file != "":
loader = PyPDFium2Loader("ai_talks/assets/pdf/" + path_file) # mode="elements"
data = loader.load()
except BaseException:
data = ""
        # Prompt for the language model
document_variable_name = "context"
stuff_prompt_override = """
Пожалуйста, посмотри на текст ниже и ответь на вопрос на русском языке, используя информацию из этого текста.
Текст:
-----
{context}
-----
Вопрос:
{query}"""
prompt = langchain.prompts.PromptTemplate(
template=stuff_prompt_override, input_variables=["context", "query"]
)
        # Build the chain
llm_chain = langchain.chains.LLMChain(llm=chat, prompt=prompt)
chain = langchain.chains.StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name,
)
        # Wrap plain text in a stub document object; documents loaded from a PDF
        # are already a list and are passed through to the chain unchanged.
        class A:
            pass
        if not isinstance(data, list):
            stub = A()
            stub.page_content = data
            stub.metadata = {"metadata": "metadata"}
            data = [stub]
response = chain.run(input_documents=data, query="How is synthesis carried out in this article?")
except BaseException as err:
response = "We apologize, but our service could not find the information in the original articles."
print("In chat_YGPT: ", err)
return response
| [
"\n Пожалуйста, посмотри на текст ниже и ответь на вопрос на русском языке, используя информацию из этого текста.\n Текст:\n -----\n {context}\n -----\n Вопрос:\n {query}",
"{page_content}",
"context",
"page_content"
] |
2024-01-10 | Rustemhak/nanozymes_ai | remote_repositories~API%20service~src~pdf2text.py | import os
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import (
CSVLoader,
EverNoteLoader,
PDFMinerLoader,
TextLoader,
UnstructuredEmailLoader,
UnstructuredEPubLoader,
UnstructuredHTMLLoader,
UnstructuredMarkdownLoader,
UnstructuredODTLoader,
UnstructuredPowerPointLoader,
UnstructuredWordDocumentLoader,
)
from src.logger import Logger
def process_text(text):
lines = text.split("\n")
lines = [line for line in lines if len(line.strip()) > 2]
text = "\n".join(lines).strip()
if len(text) < 10:
return None
return text
class PDF2text:
LOADER_MAPPING = {
# ".csv": (CSVLoader, {}),
# ".doc": (UnstructuredWordDocumentLoader, {}),
# ".docx": (UnstructuredWordDocumentLoader, {}),
# ".enex": (EverNoteLoader, {}),
# ".epub": (UnstructuredEPubLoader, {}),
# ".html": (UnstructuredHTMLLoader, {}),
# ".md": (UnstructuredMarkdownLoader, {}),
# ".odt": (UnstructuredODTLoader, {}),
".pdf": (PDFMinerLoader, {}),
# ".ppt": (UnstructuredPowerPointLoader, {}),
# ".pptx": (UnstructuredPowerPointLoader, {}),
# ".txt": (TextLoader, {"encoding": "utf8"}),
}
def __init__(self, file_paths, chunk_size=100, chunk_overlap=0):
self.file_paths = file_paths
self.chunk_size = chunk_size
self.chunk_overlap = chunk_overlap
def __load_single_document(self, file_path: str) -> Document:
ext = "." + file_path.rsplit(".", 1)[-1]
if ext not in self.LOADER_MAPPING:
for ext in self.LOADER_MAPPING:
Logger.info(file_path + ext)
if os.path.exists(file_path + ext):
file_path += ext
break
else:
raise ValueError(f"Unsupported file extension: {ext}")
loader_class, loader_args = self.LOADER_MAPPING[ext]
loader = loader_class(file_path, **loader_args)
return loader.load()[0]
def build_index(self):
documents = [self.__load_single_document(path) for path in self.file_paths]
# Logger.info(documents)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap)
documents = text_splitter.split_documents(documents)
# Logger.info(documents)
self.fixed_documents = []
for doc in documents:
doc.page_content = process_text(doc.page_content)
if not doc.page_content:
continue
self.fixed_documents.append(doc)
Logger.info(f"Загружено {len(self.fixed_documents)} фрагментов! Можно задавать вопросы.")
return self.fixed_documents
# Example usage of PDF2text
if __name__ == "__main__":
file_paths = ["data/C4RA15675G.pdf"]
chunk_size = 200
chunk_overlap = 10
pdf2text = PDF2text(file_paths, chunk_size, chunk_overlap)
fixed_documents = pdf2text.build_index()
| [] |
2024-01-10 | Rustemhak/nanozymes_ai | ai_talks~src~utils~YandexGPT.py | from typing import Any, List, Mapping, Optional
import time
import requests
import os
import langchain
import langchain.document_loaders
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.embeddings.base import Embeddings
class YandexLLM(langchain.llms.base.LLM):
api_key: str = None
iam_token: str = None
folder_id: str
max_tokens : int = 1500
temperature : float = 1
instruction_text : str = None
@property
def _llm_type(self) -> str:
return "yagpt"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
if stop is not None:
raise ValueError("stop kwargs are not permitted.")
headers = { "x-folder-id" : self.folder_id }
if self.iam_token:
headers["Authorization"] = f"Bearer {self.iam_token}"
if self.api_key:
headers["Authorization"] = f"Api-key {self.api_key}"
req = {
"model": "general",
"instruction_text": self.instruction_text,
"request_text": prompt,
"generation_options": {
"max_tokens": self.max_tokens,
"temperature": self.temperature
}
}
res = requests.post("https://llm.api.cloud.yandex.net/llm/v1alpha/instruct",
headers=headers, json=req).json()
return res['result']['alternatives'][0]['text']
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"max_tokens": self.max_tokens, "temperature" : self.temperature }
class YaGPTEmbeddings(Embeddings):
def __init__(self,folder_id,api_key,sleep_interval=1):
self.folder_id = folder_id
self.api_key = api_key
self.sleep_interval = sleep_interval
self.headers = {
"Authorization" : f"Api-key {api_key}",
"x-folder-id" : folder_id }
def embed_document(self, text):
j = {
"model" : "general:embedding",
"embedding_type" : "EMBEDDING_TYPE_DOCUMENT",
"text": text
}
res = requests.post("https://llm.api.cloud.yandex.net/llm/v1alpha/embedding",json=j,headers=self.headers)
vec = res.json()['embedding']
# vec = res.json()
# print(vec)
return vec
def embed_documents(self, texts, chunk_size = 0):
res = []
for x in texts:
res.append(self.embed_document(x))
time.sleep(self.sleep_interval)
return res
def embed_query(self, text):
j = {
"model" : "general:embedding",
"embedding_type" : "EMBEDDING_TYPE_QUERY",
"text": text
}
res = requests.post("https://llm.api.cloud.yandex.net/llm/v1alpha/embedding",json=j,headers=self.headers)
vec = res.json()['embedding']
# vec = res.json()
# print(vec)
return vec
| [] |
2024-01-10 | liziniu/HyperDQN | experiments~envs~atari_wrapper.py | # Borrow a lot from openai baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
import cv2
import gym
import numpy as np
from collections import deque
class StickyActionEnv(gym.Wrapper):
def __init__(self, env, p=0.25):
super(StickyActionEnv, self).__init__(env)
self.p = p
self.last_action = 0
def reset(self):
self.last_action = 0
return self.env.reset()
def step(self, action):
if self.unwrapped.np_random.uniform() < self.p:
action = self.last_action
self.last_action = action
obs, reward, done, info = self.env.step(action)
return obs, reward, done, info
class NoopResetEnv(gym.Wrapper):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
:param gym.Env env: the environment to wrap.
:param int noop_max: the maximum value of no-ops to run.
"""
def __init__(self, env, noop_max=30):
super().__init__(env)
self.noop_max = noop_max
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self):
self.env.reset()
noops = np.random.randint(1, self.noop_max + 1)
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset()
return obs
class MaxAndSkipEnv(gym.Wrapper):
"""Return only every `skip`-th frame (frameskipping) using most recent raw
observations (for max pooling across time steps)
:param gym.Env env: the environment to wrap.
:param int skip: number of `skip`-th frame.
"""
def __init__(self, env, skip=4):
super().__init__(env)
self._skip = skip
def step(self, action):
"""Step the environment with the given action. Repeat action, sum
reward, and max over last observations.
"""
obs_list, total_reward, done = [], 0., False
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
obs_list.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(obs_list[-2:], axis=0)
return max_frame, total_reward, done, info
class EpisodicLifeEnv(gym.Wrapper):
"""Make end-of-life == end-of-episode, but only reset on true game over. It
helps the value estimation.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal, then update lives to
# handle bonus lives
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
# for Qbert sometimes we stay in lives == 0 condition for a few
# frames, so its important to keep lives > 0, so that we only reset
# once the environment is actually done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self):
"""Calls the Gym environment reset, only when lives are exhausted. This
way all states are still reachable even though lives are episodic, and
the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
else:
# no-op step to advance from terminal/lost life state
obs = self.env.step(0)[0]
self.lives = self.env.unwrapped.ale.lives()
return obs
class FireResetEnv(gym.Wrapper):
"""Take action on reset for environments that are fixed until firing.
Related discussion: https://github.com/openai/baselines/issues/240
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self):
self.env.reset()
return self.env.step(1)[0]
class WarpFrame(gym.ObservationWrapper):
"""Warp frames to 84x84 as done in the Nature paper and later work.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.size = 84
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=(self.size, self.size), dtype=env.observation_space.dtype)
def observation(self, frame):
"""returns the current observation from a frame"""
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
return cv2.resize(frame, (self.size, self.size),
interpolation=cv2.INTER_AREA)
class ScaledFloatFrame(gym.ObservationWrapper):
"""Normalize observations to 0~1.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
low = np.min(env.observation_space.low)
high = np.max(env.observation_space.high)
self.bias = low
self.scale = high - low
self.observation_space = gym.spaces.Box(
low=0., high=1., shape=env.observation_space.shape,
dtype=np.float32)
def observation(self, observation):
return (observation - self.bias) / self.scale
class ClipRewardEnv(gym.RewardWrapper):
"""clips the reward to {+1, 0, -1} by its sign.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.reward_range = (-1, 1)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign. Note: np.sign(0) == 0."""
return np.sign(reward)
class FrameStack(gym.Wrapper):
"""Stack n_frames last frames.
:param gym.Env env: the environment to wrap.
:param int n_frames: the number of frames to stack.
"""
def __init__(self, env, n_frames):
super().__init__(env)
self.n_frames = n_frames
self.frames = deque([], maxlen=n_frames)
shape = (n_frames,) + env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=shape, dtype=env.observation_space.dtype)
def reset(self):
obs = self.env.reset()
for _ in range(self.n_frames):
self.frames.append(obs)
return self._get_ob()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.frames.append(obs)
return self._get_ob(), reward, done, info
def _get_ob(self):
# the original wrapper use `LazyFrames` but since we use np buffer,
# it has no effect
return np.stack(self.frames, axis=0)
def wrap_deepmind(env_id, episode_life=True, clip_rewards=True,
frame_stack=4, scale=False, warp_frame=True):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
assert 'NoFrameskip' in env_id
env = gym.make(env_id)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
if warp_frame:
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, frame_stack)
return env
def wrap_atari(env_id, episode_life=True, clip_rewards=True,
frame_stack=4, scale=False, warp_frame=True,
max_episode_steps=None, sticky_action=False):
assert 'NoFrameskip' in env_id
env = gym.make(env_id)
if env_id in ['SeaquestNoFrameskip-v4', 'PitfallNoFrameskip-v4', 'ChopperCommandNoFrameskip-v4',
'MontezumaRevengeNoFrameskip-v4', 'FrostbiteNoFrameskip-v4', 'BattleZoneNoFrameskip-v4']:
env._max_episode_steps = 4500 * 4
elif max_episode_steps is not None:
env._max_episode_steps = min(env._max_episode_steps, max_episode_steps * 4)
env = NoopResetEnv(env, noop_max=30)
if sticky_action:
env = StickyActionEnv(env) # add sticky actions
env = MaxAndSkipEnv(env, skip=4)
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
if warp_frame:
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, frame_stack)
return env
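# Illustrative usage sketch (not part of the original module); it assumes gym and the Atari
# ROMs are installed and that WarpFrame resizes to its default square size:
#
#   env = wrap_deepmind('PongNoFrameskip-v4', frame_stack=4)
#   obs = env.reset()                                  # channel-first stack: (4, H, W)
#   obs, reward, done, info = env.step(env.action_space.sample())
#   assert reward in (-1.0, 0.0, 1.0)                  # ClipRewardEnv keeps only the sign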
| [] |
2024-01-10 | TranNhiem/Autonomous_Driving_Visual_Instruction_DataEngine | llm_gpt.py | '''
@TranRick 2023/06/30
This code features the following:
1. Using GPT-3.5 or GPT-4 to create instruction inputs for BLIP2 or InstructBLIP
2. Using BLIP2 or InstructBLIP to generate an answer (abstract visual information summary)
'''
import os
import yaml
from tqdm import tqdm
import torch
import openai
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
)
import backoff # for exponential backoff
# ## Efficient OpenAI Request
# from gptcache import cache
# from gptcache.adapter import openai
# cache.init()
# cache.set_openai_key()
#*************** Section 1 GPTs model to Create Prompt *****************#
def set_openai_key():
openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"
openai.api_base ="https://sslgroupservice.openai.azure.com/" # "https://agentgpt.openai.azure.com/" #
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_instructions(input_INSTRUCTION,sub_INSTRUCTION, solution_INSTRUCTION,ANSWER_INSTRUCTION, SUB_ANSWER_INSTRUCTION, FIRST_instruction):
instructions_dict = {
'question': input_INSTRUCTION,
'sub_question': sub_INSTRUCTION,
'summary': solution_INSTRUCTION,
'answer': ANSWER_INSTRUCTION,
'sub_answer': SUB_ANSWER_INSTRUCTION,
'first_question': FIRST_instruction
}
return instructions_dict
def prepare_gpt_prompt(task_prompt, questions, answers, sub_prompt):
gpt_prompt = '\n'.join([task_prompt,
get_chat_log(questions, answers),
sub_prompt])
return gpt_prompt
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def call_gpt3(gpt3_prompt, max_tokens=40, model="text-davinci-003"): # 'text-curie-001' does not work at all for asking questions
response = openai.Completion.create(engine=model, prompt=gpt3_prompt, max_tokens=max_tokens) # temperature=0.6,
reply = response['choices'][0]['text']
total_tokens = response['usage']['total_tokens']
return reply, total_tokens
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def call_chatgpt(chatgpt_messages, max_tokens=40, model="gpt-35-turbo"):
response = openai.ChatCompletion.create(engine=model, messages=chatgpt_messages,#[chatgpt_messages],
temperature=0.7,
max_tokens=max_tokens,
top_p=0.95,
frequency_penalty=1.2,
presence_penalty=0,
stop=None)
reply = response['choices'][0]['message']['content']
total_tokens = response['usage']['total_tokens']
return reply, total_tokens
## Building Multiple Input Prompt to Maximizing the Information
def prepare_chatgpt_message(task_prompt, questions, answers, sub_prompt):
messages = [{"role": "system", "content": task_prompt}]
assert len(questions) == len(answers)
for q, a in zip(questions, answers):
messages.append({'role': 'assistant', 'content': 'Question: {}'.format(q)})
messages.append({'role': 'user', 'content': 'Answer: {}'.format(a)})
messages.append({"role": "system", "content": sub_prompt})
return messages
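# For reference (illustrative values), prepare_chatgpt_message(task, ["What is in the image?"],
# ["A busy intersection."], sub) builds an alternating message list of the form:
#   [{"role": "system", "content": task},
#    {"role": "assistant", "content": "Question: What is in the image?"},
#    {"role": "user", "content": "Answer: A busy intersection."},
#    {"role": "system", "content": sub}]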
def get_chat_log(questions, answers, last_n=-1):
n_addition_q = len(questions) - len(answers)
assert (n_addition_q) in [0, 1]
template = 'Question: {} \nAnswer: {} \n'
chat_log = ''
if last_n > 0:
answers = answers[-last_n:]
questions = questions[-(last_n+n_addition_q):]
elif last_n == 0:
answers = []
questions = questions[-1:] if n_addition_q else []
for i in range(len(answers)):
chat_log = chat_log + template.format(questions[i], answers[i])
if n_addition_q:
chat_log = chat_log + 'Question: {}'.format(questions[-1])
else:
chat_log = chat_log[:-2] # remove the trailing ' \n'
return chat_log
class Generate_instruction_Input_output():
def __init__(self, img, blip2, GPT_model,
FIRST_instruction, input_INSTRUCTION,
sub_INSTRUCTION,
VALID_CHATGPT_MODELS, VALID_GPT3_MODELS,
ANSWER_INSTRUCTION, SUB_ANSWER_INSTRUCTION,
max_gpt_token=100, n_blip2_context=-1,debug=False):
self.img = img
self.blip2 = blip2
self.model = GPT_model
self.max_gpt_token = max_gpt_token
self.n_blip2_context = n_blip2_context
## Initial Model and Instruction input
self.FIRST_instruction = FIRST_instruction
self.input_INSTRUCTION=input_INSTRUCTION
self.sub_INSTRUCTION = sub_INSTRUCTION
self.VALID_CHATGPT_MODELS = VALID_CHATGPT_MODELS
self.VALID_GPT3_MODELS = VALID_GPT3_MODELS
## Initialize the Answer Instruction format for BLIP2 & InstructBLIP \
self.ANSWER_INSTRUCTION = ANSWER_INSTRUCTION
self.SUB_ANSWER_INSTRUCTION = SUB_ANSWER_INSTRUCTION
self.questions = []
self.answers = []
self.total_tokens = 0
self.debug = debug
def reset(self, img):
"""
Resets the state of the generator.
"""
self.img = img
self.questions = []
self.answers = []
self.total_tokens = 0
## Type 1 Instruction input
def ask_question(self):
if len(self.questions) == 0:
# first question is given by human to request a general discription
question = self.FIRST_instruction
else:
if self.model in self.VALID_CHATGPT_MODELS:
chatgpt_messages = prepare_chatgpt_message(
self.input_INSTRUCTION,
self.questions, self.answers,
self.sub_INSTRUCTION
)
question, n_tokens = call_chatgpt(chatgpt_messages, model=self.model, max_tokens=self.max_gpt_token)
elif self.model in self.VALID_GPT3_MODELS:
# prepare the context for GPT3
gpt3_prompt = prepare_gpt_prompt(
self.input_INSTRUCTION,
self.questions, self.answers,
self.sub_INSTRUCTION
)
question, n_tokens = call_gpt3(gpt3_prompt, model=self.model, max_tokens=self.max_gpt_token)
else:
raise ValueError('{} is not a valid question model'.format(self.model))
self.total_tokens = self.total_tokens + n_tokens
if self.debug:
print(question)
return question
def question_trim(self, question):
question = question.split('Question: ')[-1].replace('\n', ' ').strip()
if 'Answer:' in question: # Some models make up an answer after asking. remove it
q, a = question.split('Answer:')[:2]
if len(q) == 0: # some not so clever models will put the question after 'Answer:'.
question = a.strip()
else:
question = q.strip()
return question
def answer_question(self, decoding_strategy="nucleus", max_length=100, min_length=50):
# prepare the context for blip2
blip2_prompt = '\n'.join([self.ANSWER_INSTRUCTION,
get_chat_log(self.questions, self.answers, last_n=self.n_blip2_context),
self.SUB_ANSWER_INSTRUCTION])
answer = self.blip2.abstract_visual_output(self.img, blip2_prompt,
llm_decoding_strategy=decoding_strategy,
max_length=max_length, min_length=min_length)
if self.debug:
print("Answer:", answer)
return answer
def answer_trim(self, answer):
answer = answer.split('Question:')[0].replace('\n', ' ').strip()
return answer
def chatting(self, n_rounds, print_mode, BLIP_llm_decoding_strategy="nucleus", BLIP_max_length_token_output=100, BLIP_min_length_token_output=50):
if print_mode == 'chat':
print('-------- Instruction Input & Response ----------')
for i in tqdm(range(n_rounds), desc='Chat Rounds', disable=print_mode != 'bar'):
question = self.ask_question()
# print('Raw: {}'.format(question))
question = self.question_trim(question)
self.questions.append(question)
if print_mode == 'chat':
#print('GPT: {}'.format(question))
print(f"Model: {self.model}, question: {question}")
answer = self.answer_question(decoding_strategy=BLIP_llm_decoding_strategy, max_length=BLIP_max_length_token_output, min_length=BLIP_min_length_token_output)
answer = self.answer_trim(answer)
self.answers.append(answer)
if print_mode == 'chat':
#print('BLIP-2: {}'.format(answer))
print(f"BLIP_Model: {self.blip2.visual_understand}, answer: {answer}")
if print_mode == 'chat':
print('-------- Ends ----------')
return self.questions, self.answers, self.total_tokens
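# Minimal usage sketch (illustrative; `blip2_wrapper` is a stand-in for whatever object
# exposes abstract_visual_output(), and the instruction strings are the ones returned by
# get_instructions()):
#
#   chat = Generate_instruction_Input_output(
#       img, blip2_wrapper, "gpt-35-turbo", FIRST_instruction, input_INSTRUCTION,
#       sub_INSTRUCTION, VALID_CHATGPT_MODELS, VALID_GPT3_MODELS,
#       ANSWER_INSTRUCTION, SUB_ANSWER_INSTRUCTION)
#   questions, answers, tokens_used = chat.chatting(n_rounds=3, print_mode='chat')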
| [
"Answer: PLACEHOLDER",
"Question: PLACEHOLDER",
"\n",
"Question: {} \nAnswer: {} \n"
] |
2024-01-10 | inteligenciamilgrau/enem_solver | resolver_prova.py | import os
import json
import re
import openai # pip install openai
import google.generativeai as genai
import time
import datetime
import requests
import base64
import PIL.Image
openai.api_key = "SUA_API_KEY_OPENAI"
GEMINI_API = "SUA_API_KEY_GEMINI"
'''
Questions 01 to 45 cover Languages, Codes and their Technologies;
questions 46 to 90 cover Human Sciences and their Technologies;
questions 91 to 135 cover Natural Sciences and their Technologies;
questions 136 to 180 cover Mathematics and its Technologies;
Questions 01 to 05 are the foreign-language block (English or Spanish option).
'''
questoes = 180
selecionar_questoes = []
gabarito_filename = "./gabaritos/gabarito_unificado_azul.json"
modelo_gpt = "gpt-3.5-turbo-1106"
#modelo_gpt = "gpt-4-1106-preview"
modelo_visao_gpt = "gpt-4-vision-preview"
modelo_gemini = "gemini-pro"
modelo_visao_gemini = "gemini-pro-vision"
modelo = modelo_gemini
modelo_visao = modelo_visao_gemini
temperatura = 1.0
instrucoes = """
Explique passo a passo a questão e depois diga a alternativa correta
da questão em formato json como no exemplo { "questao 03": "B" }:"""
comecou = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M')
if "gemini" in modelo:
genai.configure(api_key=GEMINI_API)
modelo_gemini = genai.GenerativeModel('gemini-pro')
modelo_gemini_vision = genai.GenerativeModel('gemini-pro-vision')
# Record the start time
start_time = time.time()
falhas = 0
falhas_visao = 0
sem_resposta = 0
if selecionar_questoes == []:
number_list = [i for i in range(1, questoes + 1)]
else:
number_list = selecionar_questoes
questoes = len(number_list)
print("Respondendo", len(number_list), "questões.")
def perguntar_ao_chat(messages, model, temperature):
global falhas
if "gemini" in model:
try:
# for mes in messages:
# print("messzz", messages)
response = modelo_gemini.generate_content(messages + " " + instrucoes)
return response.text
except Exception as e:
print("Erro no Gemini", e)
# generate_content() may raise before `response` is assigned, so guard the feedback
# print to avoid an UnboundLocalError inside this error handler.
if "response" in locals():
    print("Feedback", response.prompt_feedback, response.prompt_feedback.block_reason,
        response.prompt_feedback.safety_ratings)
falhas += 1
return "{ 'questao 00': 'Erro: " + str(e) + "' }"
elif "gpt" in model:
try:
response = openai.chat.completions.create(
model=model,
messages=[{"role": "system", "content": instrucoes},
{"role": "user", "content": messages}],
temperature=temperature,
)
return response.choices[0].message.content
except Exception as e:
print("Erro", e)
return e
def encode_image(image_path):
if image_path.startswith("http:"):
return base64.b64encode(requests.get(image_path).content).decode('utf-8')
else:
image_path = image_path.replace("\\", "\\\\")
directory, filename = os.path.split(image_path)
file = rf"{directory}\{filename}"
with open(file, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
def perguntar_ao_chat_com_imagem(question_content_img, image_file, model_vision):
global falhas_visao
pergunta = """
Com o auxilio da imagem explique passo a passo a questão e depois diga a alternativa correta
da questão em formato json como no exemplo {'questao 03': 'B'}.\n"""
pergunta = pergunta + question_content_img
if "gemini" in model_vision:
try:
img = PIL.Image.open(image_file)
response = modelo_gemini_vision.generate_content([pergunta, img])
response.resolve()
return response.text
except Exception as e:
print("Erro no Gemini Vision: ", e)
#print("Feedback", response.prompt_feedback, response.prompt_feedback.block_reason,
# response.prompt_feedback.safety_ratings)
falhas_visao += 1
return "{ 'questao 00': 'Erro: " + str(e) + "' }"
else:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {openai.api_key}"
}
# Getting the base64 string
base64_image = encode_image(image_file)
payload = {
"model": model_vision,
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": pergunta
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}"
}
}
]
}
],
"max_tokens": 4_000
}
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
resposta = response.json()["choices"][0]['message']['content']
return resposta
with open(gabarito_filename, 'r', encoding='utf-8') as file:
gabarito = json.load(file)
acertos = 0
parciais = {}
assistente_intro = '# Assistente IMG para Resolução da Prova do ENEM 2023 com ChatGPT #'
print('#' * len(assistente_intro))
print(assistente_intro)
print('#' * len(assistente_intro))
print("Modelo de Linguagem:", modelo)
print("Modelo de Visão:", modelo_visao)
# Iterate through 45 questions
# for question_number in range(1, questoes + 1):
questoes_respondidas = 0
for question_number in number_list:
questoes_respondidas += 1
dia = "D1" if question_number <= 90 else "D2"
# Generate the filename
filename = f"questoes/{dia}_questao_{question_number:02d}.txt"
# Check if a corresponding JPG file exists
image_filename = f"questoes/questao_{question_number:02d}.PNG"
image_exists = os.path.exists(image_filename)
# Read and print the content of the file
with open(filename, 'r', encoding='utf-8') as file:
question_content = file.read()
print(60 * "#")
if image_exists:
print("Enviando Questão", question_number, "- Com Imagem")
resp_chat = perguntar_ao_chat_com_imagem(question_content, image_filename, modelo_visao)
else:
print("Enviando Questão", question_number, "- Sem Imagem")
resp_chat = perguntar_ao_chat(question_content, modelo, temperatura)
resp = resp_chat
# print(f"Pergunta atual: {filename}:\n{question_content}")
print(60 * "#")
print(f"Pergunta atual: \n{question_content}")
print("")
print(60 * "#")
print("Resposta do Chat:")
print(resp)
print(40 * "=")
# matches = re.findall(r'["\'](quest\S+ \d+)["\']: ["\'](.*?)["\']', resp, re.IGNORECASE | re.UNICODE)
# matches = re.findall(r'["\']((?:questao|questão)\s*\d+)["\']: ["\'](.*?)["\']', resp,
# re.IGNORECASE | re.UNICODE)
#matches = re.findall(r'["\']((?:questao|questão)_?\s*\d+)["\']: ["\'](.*?)["\']', resp,
# re.IGNORECASE | re.UNICODE)
matches = re.findall(r'["\']((?:questao|questão)_?\s*\d+)["\']\s*:\s*["\'](.*?)["\']', resp,
re.IGNORECASE | re.UNICODE)
#pattern = re.compile(r'A alternativa correta é a ([A-E])\.', re.IGNORECASE)
#matches_fora_do_padrao = pattern.findall(resp)
pattern = re.compile(r'lternativa correta é a ([A-E])\.|lternativa correta: ([A-E])\.|lternativa correta é ([A-E])\.|lternativa correta é a letra ([A-E])\.', re.IGNORECASE)
matches_fora_do_padrao = pattern.findall(resp)
# Iterate over matches
if matches:
for match in matches:
question_num = match[0]
answer = match[1]
print(f"Resposta detectada: {answer}")
elif matches_fora_do_padrao:
#answer = matches_fora_do_padrao[0]
answer = next((group for match in matches_fora_do_padrao for group in match if group), None)
print(f"Resposta detectada: {answer}")
else:
answer = "Sem resposta"
sem_resposta += 1
print("Nula")
print('=' * 40)
correta = gabarito[f"questao {question_number:02d}"]
parcial = "Questao", question_number, "Gabarito", correta, "Respondida", answer, "Sem Resposta", sem_resposta, \
"Falhas", falhas, "Falhas Visâo", falhas_visao
print(parcial)
if correta == answer:
print("!!!!! ACERTOU !!!!!")
acertos += 1
else:
print(">>>>>> ERROU :(((((")
acertos_parciais = int(acertos / questoes_respondidas * 100)
avaliacao = "Acertos: " + str(acertos) + " De: " + str(questoes_respondidas) + \
" questoes - Percentual de Acertos: " + str(acertos_parciais) + "%"
print(avaliacao)
erros_e_falhas = sem_resposta + falhas + falhas_visao
total_de_falhas = erros_e_falhas if erros_e_falhas >= 0 else 0
acertos_ponderados_parciais = int(acertos / (questoes_respondidas - total_de_falhas) * 100) if questoes_respondidas > total_de_falhas else 0
avaliacao_ponderada = "Acertos Ponderados: " + str(acertos) + " De: " + \
str(questoes_respondidas - erros_e_falhas) + " questões - "\
" falhas: " + str(total_de_falhas) + " questoes - Ponderado: " + \
str(acertos_ponderados_parciais) + "%"
print(avaliacao_ponderada)
save_txt = False
if save_txt:
# Append text to a text file
output_text_file = f"resolucao_{comecou}_{modelo}.txt"
with open(output_text_file, 'a', encoding='utf-8') as text_file:
text_file.write(f"Questao: {question_content}\n")
text_file.write(f"Resposta: {resp_chat}\n")
text_file.write(f"Avaliacao: {avaliacao}\n")
parciais[f"questao {question_number:02d}"] = [{"Pergunta": question_content},
{"Resposta": resp},
{"Gabarito": correta},
{"Respondida": answer},
{"Avaliacao Parcial": [{"acertos": acertos,
"questoes": question_number,
"acertos ponderados": acertos_ponderados_parciais,
"percentual de acertos": acertos_parciais}]}]
print("")
# Record the end time
end_time = time.time()
elapsed_time = end_time - start_time
# Calculate hours, minutes, and seconds
hours, remainder = divmod(elapsed_time, 3600)
minutes, seconds = divmod(remainder, 60)
erros_e_falhas = sem_resposta + falhas + falhas_visao
total_de_falhas = erros_e_falhas if erros_e_falhas >= 0 else 0
# print("questoes", questoes, total_de_falhas, erros_e_falhas)
acertos_ponderados = int(acertos / (questoes - total_de_falhas) * 100) if questoes > total_de_falhas else 0
output_text_file = f"resolucao_{modelo}_pts_{acertos_ponderados:03d}_{comecou}_tot_{questoes}.json"
with open(output_text_file, 'a', encoding='utf-8') as text_file:
json.dump({"avaliacao": [
{"tempo decorrido": [{"horas": int(hours), "minutos": int(minutes), "segundos": int(seconds)}],
"total de questoes": questoes,
"acertos": acertos,
"erros": questoes - acertos,
"acertos ponderados": acertos_ponderados,
"percentual de acertos": int(acertos / questoes * 100),
"modelo": modelo,
"modelo visao": modelo_visao,
"temperatura": temperatura,
"falhas": falhas,
"falhas_visao": falhas_visao,
"sem_resposta": sem_resposta,
"nome_arquivo": output_text_file
}],
"prova": parciais
},
text_file, ensure_ascii=False, indent=4)
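# For reference: the answer key (gabarito_unificado_azul.json) is expected to map question
# ids to letters, e.g. (illustrative) {"questao 01": "C", "questao 02": "E", ...}, and the
# regexes above look for a fragment such as {"questao 03": "B"} (or a sentence like
# "A alternativa correta é a B.") in the model's reply before comparing it to that key.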
| [
"[{'type': 'text', 'text': PLACEHOLDER}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}]"
] |
2024-01-10 | passioneffort/ResumeScroing | resume_parser~application~JD_parser.py | from PyPDF2 import PdfReader
# creating a pdf file object
# pdfObject = open('./data/obama-worlds-matter.pdf', 'rb')
# # creating a pdf reader object
# pdfReader = PdfFileReader(pdfObject)
# # Extract and concatenate each page's content
# text=''
# for i in range(0,pdfReader.numPages):
# # creating a page object
# pageObject = pdfReader.getPage(i)
# # extracting text from page
# text += pageObject.extractText()
# print(text)
import openai
import re
import logging
import json
class resume_to_structured():
def __init__(self, OPENAI_API_KEY) :
openai.api_key = OPENAI_API_KEY
self.prompt_questions = \
"""Summarize the text below into a JSON with exactly the following structure {basic_info: {first_name, last_name, full_name, email, phone_number, location, portfolio_website_url, linkedin_url, github_main_page_url, university, education_level (BS, MS, or PhD), graduation_year, graduation_month, majors, GPA}, work_experience: [{job_title, company, location, duration, job_summary}], project_experience:[{project_name, project_discription}]}
"""
logging.basicConfig(filename = 'logs/parser.log', level = logging.DEBUG)
self.logger = logging.getLogger()
# def pdf2txt(self: object, pdf_path: str) -> str :
# # creating a pdf file object
# pdfObject = open(pdf_path, 'rb')
# # creating a pdf reader object
# pdfReader = PdfReader(pdfObject)
# # Extract and concatenate each page's content
# text=''
# for i in range(0, len(pdfReader.pages)):
# # creating a page object
# pageObject = pdfReader.pages[i]
# # extracting text from page
# text += pageObject.extract_text()
# print(len(text))
# print(type(text))
# info = (text[:10000] + '..') if len(text) > 75 else text
# # Get PDF and return string of it.
# # with open(pdf_path, "rb") as f:
# # pdf = PdfFileReader(f)
# # pdf_str = "\n\n".join(pdf)
# pdf_str = re.sub('\s[,.]', ',', info)
# pdf_str = re.sub('[\n]+', '\n', pdf_str)
# pdf_str = re.sub('[\s]+', ' ', pdf_str)
# pdf_str = re.sub('http[s]?(://)?', '', pdf_str)
# return info
def convertion(self: object,
prompt: str,
engine: str = 'text-davinci-003',
temperature: float = 0.0,
max_tokens: int = 100,
top_p: int = 1,
frequency_penalty: int = 0,
presence_penalty: int = 0) -> object :
self.logger.info(f'convertion: using {engine}')
estimated_prompt_tokens = int(len(prompt.split()) * 1.6)
self.logger.info(f'estimated prompt tokens: {estimated_prompt_tokens}')
estimated_answer_tokens = 2049 - estimated_prompt_tokens
if estimated_answer_tokens < max_tokens:
self.logger.warning('estimated_answer_tokens lower than max_tokens, changing max_tokens to %s', estimated_answer_tokens)
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temperature,
max_tokens=min(4096-estimated_prompt_tokens, max_tokens),
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty
)
return response
def ending_process(self: object, JD) -> dict :
# Get PDF resume and return JSON FILE for resume
resume = {}
# str_resume = self.pdf2txt(resume_path)
prompt = self.prompt_questions + '\n' + JD
max_tokens = 1500
engine = 'text-davinci-002'
response = self.convertion(prompt, engine = engine, max_tokens = max_tokens)
response_text = response['choices'][0]['text'].strip()
print("============================")
print(response)
resume = json.loads(response_text)
return resume
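# Minimal usage sketch (illustrative; the API key and job-description text are placeholders):
#
#   parser = resume_to_structured(OPENAI_API_KEY="sk-...")
#   structured = parser.ending_process("We are hiring a data engineer with 3+ years of ...")
#   print(structured["basic_info"])          # dict following the schema in prompt_questions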
| [
"8",
"\n",
"self.prompt_questions + '\\n' + JD"
] |
2024-01-10 | nenomigami/PromptCompressor | pcrl~algorithms~common~policies.py | from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
)
from stable_baselines3.common.type_aliases import Schedule
from torch import nn
from pcrl.algorithms.common.distributions import MaskableDistribution, make_masked_proba_distribution
class MaskableActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super().__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=False,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(
self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
# Action distribution
self.action_dist = make_masked_proba_distribution(action_space)
self._build(lr_schedule)
def forward(
self,
obs: th.Tensor,
deterministic: bool = False,
action_masks: Optional[np.ndarray] = None,
) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:param action_masks: Action masks to apply to the action distribution
:return: action, value and log probability of the action
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi)
if action_masks is not None:
distribution.apply_masking(action_masks)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
# dummy lr schedule, not needed for loading policy alone
lr_schedule=self._dummy_schedule,
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
self.action_net = self.action_dist.proba_distribution_net(
latent_dim=self.mlp_extractor.latent_dim_pi)
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(
self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> MaskableDistribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:return: Action distribution
"""
action_logits = self.action_net(latent_pi)
return self.action_dist.proba_distribution(action_logits=action_logits)
def _predict(
self,
observation: th.Tensor,
deterministic: bool = False,
action_masks: Optional[np.ndarray] = None,
) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:param action_masks: Action masks to apply to the action distribution
:return: Taken action according to the policy
"""
return self.get_distribution(observation, action_masks).get_actions(deterministic=deterministic)
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[Tuple[np.ndarray, ...]] = None,
episode_start: Optional[np.ndarray] = None,
deterministic: bool = False,
action_masks: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
"""
Get the policy action and state from an observation (and optional state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param episode_start: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:param action_masks: Action masks to apply to the action distribution
:return: the model's action and the next state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if mask is None:
# mask = [False for _ in range(self.n_envs)]
# Switch to eval mode (this affects batch norm / dropout)
self.set_training_mode(False)
observation, vectorized_env = self.obs_to_tensor(observation)
with th.no_grad():
actions = self._predict(
observation, deterministic=deterministic, action_masks=action_masks)
# Convert to numpy
actions = actions.cpu().numpy()
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(
actions, self.action_space.low, self.action_space.high)
if not vectorized_env:
if state is not None:
raise ValueError(
"Error: The environment must be vectorized when using recurrent policies.")
actions = actions[0]
return actions, state
def evaluate_actions(
self,
obs: th.Tensor,
actions: th.Tensor,
action_masks: Optional[np.ndarray] = None,
) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
distribution = self._get_action_dist_from_latent(latent_pi)
if action_masks is not None:
distribution.apply_masking(action_masks)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
def get_distribution(self, obs: th.Tensor, action_masks: Optional[np.ndarray] = None) -> MaskableDistribution:
"""
Get the current policy distribution given the observations.
:param obs:
:param action_masks:
:return: the action distribution.
"""
features = self.extract_features(obs)
latent_pi = self.mlp_extractor.forward_actor(features)
distribution = self._get_action_dist_from_latent(latent_pi)
if action_masks is not None:
distribution.apply_masking(action_masks)
return distribution
def predict_values(self, obs: th.Tensor) -> th.Tensor:
"""
Get the estimated values according to the current policy given the observations.
:param obs:
:return: the estimated values.
"""
features = self.extract_features(obs)
latent_vf = self.mlp_extractor.forward_critic(features)
return self.value_net(latent_vf)
class MaskableActorCriticCnnPolicy(MaskableActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class MaskableMultiInputActorCriticPolicy(MaskableActorCriticPolicy):
"""
MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space (Tuple)
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param features_extractor_class: Uses the CombinedExtractor
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Dict,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
| [] |
2024-01-10 | ryuukkk/chatopotamus | src~nlp.py | import pickle
import random
import threading
from openai import OpenAI
from src.training_and_prediction import predict, models
from src import audio_mgmt
print('LOADING NER...')
with open('resources/bert/saved/ner_tokenizer.pkl', 'rb') as tn:
bert_ner_tokenizer = pickle.load(tn)
with open('resources/bert/data/label_map.pkl', 'rb') as lm:
label_map = pickle.load(lm)
bert_ner_model = models.trained_entity_classifier()
bert_ner_model.load_weights('resources/bert/saved/ner_trained_weights.h5')
print('LOADED NER')
print('\b' * 18)
print('LOADING INTENT...')
with open('resources/bert/saved/intent_tokenizer.pkl', 'rb') as ti:
bert_intent_tokenizer = pickle.load(ti)
bert_intent_model = models.trained_intent_classifier()
bert_intent_model.load_weights('resources/bert/saved/ir_trained_weights.h5')
print('LOADING GPT...')
with open('resources/API_KEY.txt', 'r') as f:
OPENAI_API_KEY = f.readline().strip()  # drop the trailing newline so the key is passed to OpenAI intact
OPENAI_JOB = "ftjob-Zp11kb3ucXYxFopbsLaHWasg"
GPT3_MODEL = "ft:gpt-3.5-turbo-0613:personal::8PpZSxCF"
client = OpenAI(api_key=OPENAI_API_KEY)
# completion = client.fine_tuning.jobs.retrieve(OPENAI_JOB)
menu = {
"prices": {"coffee": 1.50, "cappuccino": 2.50, "iced coffee": 2, "iced capp": 2.25, "latte": 2, "tea": 1.50,
"hot chocolate": 2.25, "french vanilla": 2.25, "white chocolate": 2.25,
"mocha": 2.25, "espresso": 1, "americano": 2.25, "extra shot": 0.25, "soy milk": 0.3,
"whipped topping": 1, "dark roast": 0.20, "Turkey Bacon Club": 3, "BLT": 2.90,
"grilled cheese": 4, "chicken wrap": 3.50, "soup": 2.80, "donut": 1.5, "double double": 1.50,
"triple triple": 1.50, "muffin": 2.40, "bagel": 3, "timbits": 3, "panini": 2.40, "croissant": 3},
"price multiplier": {"small": 1, "medium": 1.2, "large": 1.4, "extra large": 1.6}
}
def get_all_info(request):
intent = predict.predict_intent(request, model=bert_intent_model, tokenizer=bert_intent_tokenizer)
entities = predict.predict_entities(request, model=bert_ner_model, tokenizer=bert_ner_tokenizer,
label_map=label_map, max_seq_length=26)
response, message = predict.chat_with_assistant(request, client=client, model=GPT3_MODEL, fresh=False)
return intent, entities, (response, message)
def regular_customer(opening, accent):
messages = []
intents = []
entity_tags = []
response = None
total_price = 0
audio_mgmt.speak(opening, accent=accent)
while True:
request = audio_mgmt.speech_to_text()
if not request or len(request) < 4:
audio_mgmt.speak('Visit again, Bye!', accent=accent)
break
response, messages = predict.chat_with_assistant(request, messages=messages, client=client, model=GPT3_MODEL)
intent = predict.predict_intent(request, model=bert_intent_model, tokenizer=bert_intent_tokenizer)
entities = predict.predict_entities(request, model=bert_ner_model, tokenizer=bert_ner_tokenizer,
label_map=label_map,
max_seq_length=26)
order_price = get_price(entities, 0)
total_price += order_price
response = response.replace("<price>", str(total_price))
audio_mgmt.speak(response, accent=accent)
intents.append(intent)
print(f'\nCustomer wants to {intent} : '.upper())
entity_tags.append(entities)
print_formatted_entities(entities)
if 'order' in map(str.lower, intents):
print('Total: $' + str(total_price))
return intents, entity_tags, (response, messages)
def new_customer(opening, face_encoding, accent, cursor):
intents, entity_tags, (response, messages) = regular_customer(opening, accent=accent)
r = random.choice(['um..', 'ugh..'])
audio_mgmt.speak(str(r) + 'One last thing before we see you again, would you like to tell me your name if you want '
'me to remember you when you visit next time?', accent=accent)
response_2 = audio_mgmt.speech_to_text().lower()
audio_mgmt.speak('alright, visit again, bye')
return intents, entity_tags, (response, messages), ''
def get_price(entities, current_price):
global menu # Assuming the menu dictionary is globally available
total_price = current_price
last_beverage_price = 0
size_multiplier = 1
for entity in entities:
entity_value, entity_type = entity
# Handling beverage items
if 'beverage' in entity_type:
# Apply previous multiplier to the last beverage and reset it
total_price += last_beverage_price * size_multiplier
last_beverage_price = menu["prices"].get(entity_value, 0)
size_multiplier = 1 # Reset multiplier for new beverage
# Check if the entity is a beverage size for multiplier
elif entity_type == 'beverage_size':
size_multiplier = menu["price multiplier"].get(entity_value, 1)
# Handling non-beverage items
else:
item_price = menu["prices"].get(entity_value, 0)
total_price += item_price
# Apply multiplier to the last beverage in the list
total_price += last_beverage_price * size_multiplier
return total_price
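# Worked example of the pricing above (illustrative entity tags; the exact tag strings come
# from the NER model): for [("latte", "beverage"), ("large", "beverage_size"),
# ("donut", "food")] the total is 2.00 * 1.4 + 1.50 = 4.30, because the size multiplier is
# applied to the most recent beverage and food items are added at face value.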
def print_formatted_entities(entities):
if not entities:
return
beverage_line = ""
food_line = ""
for entity_value, entity_type in entities:
if 'beverage' in entity_type:
beverage_line += f" {entity_value} " if beverage_line else entity_value
elif 'food' in entity_type:
food_line += f" {entity_value} " if food_line else entity_value
# Print formatted lines
if beverage_line:
print("Beverage: " + beverage_line.title())
if food_line:
print("Food: " + food_line.title())
| [] |
2024-01-10 | Gabesarch/HELPER | prompt~run_gpt.py | import os
import openai
import tiktoken
import ipdb
st = ipdb.set_trace
from tqdm import tqdm
import glob
import json
import numpy as np
import copy
from arguments import args
import time
azure = not args.use_openai
if azure:
openai.api_type = "azure"
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_version = "2023-05-15"
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
else:
try:
home_directory = os.path.expanduser( '~' )
with open(os.path.join(home_directory,".openai/openai.key"), 'r') as f:
org_key = f.readlines()
openai.api_key = org_key[1].strip()
openai.organization = org_key[0].strip()
msft_interal_key = org_key[2].strip()
except:
openai.api_key = os.getenv("OPENAI_API_KEY")
from .api_primitives import InteractionObject
class LLMPlanner:
'''
LLM Planner for going from teach dialogue to executable program
'''
def __init__(
self,
gpt_embedding_dir='',
nn_examples=True,
fillable_classes=[],
openable_classes=[],
include_classes=[],
clean_classes=[],
example_mode='',
):
self.nn_examples = nn_examples
self.fillable_classes = fillable_classes
self.openable_classes = openable_classes
self.include_classes = include_classes
self.clean_classes = clean_classes
self.examples = ''
self.example_mode = example_mode
with open('prompt/api_primitives.py') as f:
self.api = f.read()
with open('prompt/api_corrective.py') as f:
self.api_corrective = f.read()
with open('prompt/prompt_plan.txt') as f:
self.prompt_plan = f.read()
with open('prompt/prompt_replan.txt') as f:
self.prompt_replan = f.read()
if nn_examples:
if example_mode in ["teach_eval_tfd", "teach_eval_continual"]:
# initial planning
self.embeddings = np.load(os.path.join(gpt_embedding_dir, 'embeddings.npy'))
with open(os.path.join(gpt_embedding_dir, 'file_order.txt')) as f:
self.file_order = f.readlines()
self.file_order = [f.replace('\n', '') for f in self.file_order]
elif example_mode=="teach_eval_custom":
# initial planning
self.embeddings = np.load(os.path.join(gpt_embedding_dir, 'embeddings_custom.npy'))
with open(os.path.join(gpt_embedding_dir, 'file_order_custom.txt')) as f:
self.file_order = f.readlines()
self.file_order = [f.replace('\n', '') for f in self.file_order]
else:
assert False # what example mode is this?
# error correction
self.embeddings_replanning = np.load(os.path.join(gpt_embedding_dir, 'embeddings_replanning.npy'))
with open(os.path.join(gpt_embedding_dir, 'file_order_replanning.txt')) as f:
self.file_order_replanning = f.readlines()
self.file_order_replanning = [f.replace('\n', '') for f in self.file_order_replanning]
self.topk = 3
else:
assert NotImplementedError
with open('prompt/examples.txt') as f:
self.examples = f.read()
self.azure = azure
self.model = args.gpt_model
if self.model=="gpt-4-32k":
self.topk = len(self.file_order_replanning)
print(f"TOPK={self.topk}")
def get_prompt_plan(self, task_dict):
dialogue_history = task_dict['dialog_history_cleaned']
command = ''
for dialogue in dialogue_history:
command += f'<{dialogue[0]}> {dialogue[1]}'
if command[-1] not in ['.', '!', '?']:
command += '. '
else:
command += ' '
self.command = command
if self.nn_examples:
self.get_examples_planning(self.topk)
prompt = self.prompt_plan
prompt = prompt.replace('{API}', f'{self.api}')
prompt = prompt.replace('{RETRIEVED_EXAMPLES}', f'{self.examples}')
prompt = prompt.replace('{command}', f'{command}')
prompt = self.check_prompt_length(prompt)
print(prompt)
print(f"command is {command}")
return prompt
def get_examples_planning(self, topk):
if args.ablate_example_retrieval:
print("Fixing examples!")
distance_argsort_topk = np.arange(topk)
else:
if self.azure:
embedding = openai.Embedding.create(
engine="text-embedding-ada-002",
input=self.command,
)['data'][0]['embedding']
else:
embedding = openai.Embedding.create(
model="text-embedding-ada-002",
input=self.command,
)['data'][0]['embedding']
embedding = np.asarray(embedding)
# nearest neighbor
distance = np.linalg.norm(self.embeddings - embedding[None,:], axis=1)
distance_argsort_topk = np.argsort(distance)[:topk]
example_text = "Here are a few examples of typical inputs and outputs (only for in-context reference):\n"
example_number = 1
for idx in list(distance_argsort_topk):
example_text += f'Example #{example_number}:\n'
with open(f'{self.file_order[idx]}') as f:
example = f.read()
example_text += example
example_text += '\n\n'
example_number += 1
print(f"most relevant examples are: {[self.file_order[idx] for idx in list(distance_argsort_topk)]}")
self.examples = example_text
def check_prompt_length(self, prompt):
# if prompt too long (> 85% of context length), reduce the number of retrieved examples
for subtract in range(1, self.topk-1):
if self.model=="gpt-3.5-turbo":
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/4097
elif self.model=="gpt-3.5-turbo-16k":
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/16384
elif self.model=="gpt-4":
enc = tiktoken.encoding_for_model("gpt-4")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/8192
elif self.model=="gpt-4-32k":
enc = tiktoken.encoding_for_model("gpt-4")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/32768
elif self.model=="text-davinci-003":
enc = tiktoken.encoding_for_model("text-davinci-003")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/4097
elif self.model=="text-davinci-002":
enc = tiktoken.encoding_for_model("text-davinci-002")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/4097
elif self.model=="code-davinci-002":
enc = tiktoken.encoding_for_model("code-davinci-002")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/4097
else:
assert(False) # what model is this?
print(f"Prompt percent: {prompt_len_percent}")
if prompt_len_percent>0.85 and self.nn_examples:
self.get_examples_planning(self.topk-subtract)
prompt = self.prompt_plan
prompt = prompt.replace('{API}', f'{self.api}')
prompt = prompt.replace('{RETRIEVED_EXAMPLES}', f'{self.examples}')
prompt = prompt.replace('{command}', f'{self.command}')
else:
break
return prompt
def get_prompt_replan(self, completed_subgoals, future_subgoals, failed_subgoal, execution_error, state_text):
if self.nn_examples:
examples_input_prompt = f'Failed subgoal:\n{failed_subgoal}\nExecution error: {execution_error}'
self.get_examples_replanning(examples_input_prompt, self.topk)
prompt = self.prompt_replan
prompt = prompt.replace('{API}', f'{self.api}')
prompt = prompt.replace('{API_CORRECTIVE}', f'{self.api_corrective}')
prompt = prompt.replace('Failed subgoal: ...', f'Failed subgoal:\n{failed_subgoal}')
prompt = prompt.replace('Execution error: ...', f'Execution error: {execution_error}')
prompt = prompt.replace('Input dialogue: ...', f'Input dialogue: {self.command}')
prompt = prompt.replace('{retrieved_plans}', f'{self.examples_replanning}')
prompt = self.check_prompt_length_replan(prompt, failed_subgoal, execution_error)
print(prompt)
self.populated_replan_prompt = prompt
return prompt
def get_examples_replanning(self, prompt, topk):
if args.ablate_example_retrieval:
print("Fixing examples!")
distance_argsort_topk = np.arange(topk)
else:
if self.azure:
embedding = openai.Embedding.create(
engine="text-embedding-ada-002",
input=prompt,  # embed the replanning query (failed subgoal + error), matching the non-Azure branch below
)['data'][0]['embedding']
else:
embedding = openai.Embedding.create(
model="text-embedding-ada-002",
input=prompt,
)['data'][0]['embedding']
embedding = np.asarray(embedding)
# nearest neighbor
distance = np.linalg.norm(self.embeddings_replanning - embedding[None,:], axis=1)
distance_argsort_topk = np.argsort(distance)[:topk]
example_text = "Here are a few examples of typical inputs and outputs:\n"
example_number = 1
for idx in list(distance_argsort_topk):
example_text += f'Example #{example_number}:\n"""\n'
with open(f'prompt/examples/examples_errors/{self.file_order_replanning[idx]}') as f:
example = f.read()
example_text += example
example_text += '\n"""\n'
example_number += 1
print(f"most relevant examples are: {[self.file_order_replanning[idx] for idx in list(distance_argsort_topk)]}")
self.examples_replanning = example_text
def check_prompt_length_replan(self, prompt, failed_subgoal, execution_error):
# if prompt too long (> 3/4 of context length), reduce examples
for subtract in range(1, self.topk):
if self.model=="gpt-3.5-turbo":
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/4096
elif self.model=="gpt-3.5-turbo-16k":
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/16384
elif self.model=="gpt-4":
enc = tiktoken.encoding_for_model("gpt-4")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/8192
elif self.model=="gpt-4-32k":
enc = tiktoken.encoding_for_model("gpt-4")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/32768
elif self.model=="text-davinci-003":
enc = tiktoken.encoding_for_model("text-davinci-003")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/4097
elif self.model=="text-davinci-002":
enc = tiktoken.encoding_for_model("text-davinci-002")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/4097
elif self.model=="code-davinci-002":
enc = tiktoken.encoding_for_model("code-davinci-002")
prompt_token_length = len(enc.encode(prompt))
prompt_len_percent = prompt_token_length/4097
else:
assert(False) # what model is this?
if prompt_len_percent>0.75 and self.nn_examples:
examples_input_prompt = f'Failed subgoal:\n{failed_subgoal}\nExecution error: {execution_error}'
self.get_examples_replanning(examples_input_prompt, self.topk-subtract)
prompt = self.prompt_replan
prompt = prompt.replace('{API}', f'{self.api}')
prompt = prompt.replace('{API_CORRECTIVE}', f'{self.api_corrective}')
prompt = prompt.replace('Failed subgoal: ...', f'Failed subgoal:\n{failed_subgoal}')
prompt = prompt.replace('Execution error: ...', f'Execution error: {execution_error}')
prompt = prompt.replace('Input dialogue: ...', f'Input dialogue: {self.command}')
prompt = prompt.replace('{retrieved_plans}', f'{self.examples_replanning}')
else:
break
return prompt
def run_gpt(self, prompt, log_plan=True):
'''
prompt (str): Prompt to feed to GPT
'''
if self.azure:
if self.model=="gpt-3.5-turbo":
for _ in range(5):
try:
print("RUNNING GPT 3.5")
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
engine = "gpt-35-turbo",
messages=messages,
temperature=0,
)
response = response["choices"][0]["message"]["content"]
break
except:
time.sleep(1)
elif self.model=="gpt-4":
for _ in range(5):
try:
print("RUNNING GPT 4")
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
engine = "gpt-4",
messages=messages,
temperature=0,
)
response = response["choices"][0]["message"]["content"]
break
except:
time.sleep(1)
elif self.model=="gpt-4-32k":
for _ in range(5):
try:
print("RUNNING GPT 4 32k")
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
engine = "gpt-4-32k",
messages=messages,
temperature=0,
)
response = response["choices"][0]["message"]["content"]
break
except:
time.sleep(1)
elif self.model=="text-davinci-003":
for _ in range(5):
try:
print("RUNNING text-davinci-003")
enc = tiktoken.encoding_for_model("text-davinci-003")
prompt_token_length = len(enc.encode(prompt))
print(f"Max tokens = {4097 - prompt_token_length}")
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0,
max_tokens=4097 - prompt_token_length,
)
response = response['choices'][0]['text']
break
except:
time.sleep(1)
elif self.model=="text-davinci-002":
enc = tiktoken.encoding_for_model("text-davinci-002")
prompt_token_length = len(enc.encode(prompt))
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
temperature=0,
max_tokens=4097 - prompt_token_length,
)
response = response['choices'][0]['text']
print(f"Max tokens = {4097 - prompt_token_length}")
elif self.model=="code-davinci-002":
enc = tiktoken.encoding_for_model("code-davinci-002")
prompt_token_length = len(enc.encode(prompt))
response = openai.Completion.create(
engine="katefcodedavinci002",
prompt=prompt,
temperature=0,
max_tokens=8001 - prompt_token_length,
)
response = response['choices'][0]['text']
print(f"Max tokens = {8001 - prompt_token_length}")
else:
assert(False) # what model is this?
else:
if self.model=="gpt-3.5-turbo":
messages = [
{"role": "system", "content": prompt},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0,
)["choices"][0]["message"]["content"]
elif self.model=="gpt-3.5-turbo-16k":
messages = [
{"role": "system", "content": prompt},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
temperature=0,
)["choices"][0]["message"]["content"]
elif self.model=="gpt-4":
messages = [
{"role": "system", "content": prompt},
]
while True:
try:
response = openai.ChatCompletion.create(
model="gpt-4-0613",
messages=messages,
temperature=0,
)["choices"][0]["message"]["content"]
break
except:
time.sleep(0.1)
elif self.model=="text-davinci-003":
enc = tiktoken.encoding_for_model("text-davinci-003")
prompt_token_length = len(enc.encode(prompt))
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0,
max_tokens=4097 - prompt_token_length,
)
response = response['choices'][0]['text']
print(f"Max tokens = {4097 - prompt_token_length}")
else:
assert(False) # what model is this?
print(f"\n\ncommand is {self.command}\n\n")
print(response)
if log_plan:
self.plan = response
return response
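# Typical call pattern for the planner above (illustrative; task_dict is a TEACh-style
# episode dict with a 'dialog_history_cleaned' field):
#
#   prompt = planner.get_prompt_plan(task_dict)
#   program = planner.run_gpt(prompt)                 # Python-like plan, also stored in planner.plan
#   subgoals, objects, search_dict = planner.response_to_subgoals(program)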
def parse_object(self, line, subgoals, objects, object_mapping, search_dict):
exec(line)
var_name = line.split(' = InteractionObject')[0]
object_cat = eval(f'{var_name}.object_class')
search_obj = eval(f'{var_name}.landmark')
attributes = eval(f'{var_name}.attributes').copy()
map_ = var_name
if object_cat not in self.include_classes:
object_cat = self.get_closest_category_to_word(object_cat)
if object_cat in self.clean_classes:
attributes.append("clean")
object_mapping[map_] = object_cat
if search_obj is not None:
if search_obj not in self.include_classes:
search_obj = self.get_closest_category_to_word(search_obj)
if search_obj in self.include_classes:
if object_cat not in search_dict.keys():
search_dict[object_cat] = []
search_dict[object_cat].append(search_obj)
for attribute in attributes:
if attribute=="toasted":
subgoals.append("Toast")
objects.append(object_cat)
elif attribute=="clean":
subgoals.append("Clean")
objects.append(object_cat)
elif attribute=="cooked":
subgoals.append("Cook")
objects.append(object_cat)
return subgoals, objects, object_mapping, search_dict
def response_to_subgoals(self, response, remove_objects=True):
'''
Parse output code to Teach subgoals
'''
subgoals = []
objects = []
search_dict = {}
code_lines = response.split('\n')
comment_block = False
object_mapping = {}
agent_mapping = {}
subgoal_mapping = {
"go_to": "Navigate",
"pickup_and_place":"pickup_and_place",
"pickup":"Pickup",
"place":"Place",
"slice":"Slice",
"toggle_on":"ToggleOn",
"toggle_off":"ToggleOff",
"open":"Open",
"close":"Close",
"clean":"Clean",
"put_down":"PutDown",
"pour":"Pour",
"fill_up":"FillUp",
"empty":"Empty",
"toast":"Toast",
"cook":"Cook",
"move_back":"MoveBack",
"move_closer":"MoveCloser",
"move_alternate_viewpoint":"MoveAlternate"
}
for line_i in range(len(code_lines)):
line = code_lines[line_i]
if line[:4]=='    ':
    line = line[4:] # remove one level of indentation (four spaces)
if comment_block:
if line[:3]=='"""':
comment_block = False # out of comment block
continue
elif len(line)==0:
continue # nothing
elif line[0]=="#":
continue # comment
elif line[:5]=='print':
continue # print
elif line[:3]=='"""':
comment_block = True
continue # comment block
elif line[:4]=='def ':
continue # function statement
elif line[:2]=='- ':
continue # bullet
elif line[:5]=="Plan:":
continue # start of plan
elif 'InteractionObject' in line:
try:
subgoals, objects, object_mapping, search_dict = self.parse_object(line, subgoals, objects, object_mapping, search_dict)
except:
continue
elif ' = AgentCorrective' in line:
map_ = line.split(' = ')[0]
agent_mapping[map_] = "agent"
else:
# log subgoal
map_ = line.split('.')[0]
if map_ in agent_mapping.keys():
sub_ = line.split('.')[1].split('(')[0]
if sub_ not in subgoal_mapping.keys():
continue
subgoals.append(subgoal_mapping[sub_])
objects.append("Agent")
continue
# check for bad output by LLM
if map_ not in object_mapping.keys():
try:
if '"' not in map_:
line_ = f'{map_} = InteractionObject("{map_}")'
else:
line_ = f'{map_} = InteractionObject({map_})'
subgoals, objects, object_mapping, search_dict = self.parse_object(line_, subgoals, objects, object_mapping, search_dict)
except:
continue
if map_ not in object_mapping.keys():
continue
object_cat = object_mapping[map_]
if object_cat not in self.include_classes:
continue
try:
sub_ = line.split('.')[1].split('(')[0]
except:
continue
if sub_ not in subgoal_mapping.keys():
continue
subgoal = subgoal_mapping[sub_]
if subgoal in ["Place", "Pour","pickup_and_place"]:
# get placing category
if 'InteractionObject' in line:
object_cat_ = line.split('InteractionObject("')[-1].split('"')[0]
else:
if subgoal in ["Place", "pickup_and_place"]:
map2_ = line.split('place(')[-1].split(')')[0]
if map2_ not in object_mapping.keys():
line_ = f'{map2_} = InteractionObject("{map2_}")'
subgoals, objects, object_mapping, search_dict = self.parse_object(line_, subgoals, objects, object_mapping, search_dict)
if map2_ not in object_mapping.keys():
continue
object_cat_ = object_mapping[map2_]
elif subgoal=="Pour":
map2_ = line.split('pour(')[-1].split(')')[0]
if map2_ not in object_mapping.keys():
line_ = f'{map2_} = InteractionObject("{map2_}")'
subgoals, objects, object_mapping, search_dict = self.parse_object(line_, subgoals, objects, object_mapping, search_dict)
if map2_ not in object_mapping.keys():
continue
object_cat_ = object_mapping[map2_]
if object_cat_ not in self.include_classes:
continue
if subgoal=="pickup_and_place":
subgoals.extend(["Navigate", "Pickup", "Navigate", "Place"])
objects.extend([object_cat, object_cat, object_cat_, object_cat_])
else:
if len(subgoals)<2 or (subgoal=="Place" and subgoals[-2]!="Pickup"):
# need to pickup before placing
subgoals.append("Navigate")
objects.append(object_cat)
subgoals.append("Pickup")
objects.append(object_cat)
subgoals.append("Navigate")
objects.append(object_cat_)
subgoals.append(subgoal)
objects.append(object_cat_)
if remove_objects:
# check if object is used after this, and if not, remove from list of interactable objects
# necessary for "do X with all Y"
object_used_after = False
for line_ in code_lines[line_i+1:]:
if map_ in line_:
object_used_after = True
break
if not object_used_after and (object_cat not in ["Knife"]) and (subgoal not in ["Pour"]):
subgoals.append("ObjectDone")
objects.append(object_cat)
elif subgoal=="Clean":
subgoals.append("Clean")
objects.append(object_cat)
elif subgoal=="FillUp":
subgoals.extend(["Navigate", "Place", "ToggleOn", "ToggleOff", "Pickup"])
objects.extend(["Sink", "Sink", "Faucet", "Faucet", object_cat])
elif subgoal=="PutDown":
subgoals.extend(["PutDown"])
objects.extend(["PutDown"])
elif subgoal=="Toast":
subgoals.append("Toast")
objects.append(object_cat)
elif subgoal=="Cook":
subgoals.append("Cook")
objects.append(object_cat)
elif subgoal in ["Open", "Close"]:
if object_cat in self.openable_classes:
subgoals.append(subgoal)
objects.append(object_cat)
else:
subgoals.append(subgoal)
objects.append(object_cat)
self.object_mapping = object_mapping
self.search_dict = search_dict
return subgoals, objects, search_dict
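    # Illustrative example (not in the original file): a generated plan such as
    #   target_mug = InteractionObject("Mug", landmark = "CounterTop")
    #   target_sink = InteractionObject("Sink")
    #   target_mug.pickup_and_place(target_sink)
    # is parsed into the subgoal sequence Navigate/Pickup/Navigate/Place over the
    # objects Mug/Mug/Sink/Sink (plus an ObjectDone marker once the mug is no
    # longer referenced), with CounterTop recorded in the search dictionary as a
    # likely landmark for finding the Mug.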
def get_closest_category_to_word(self, word):
'''
Function for getting closest teach category by querying LLM
'''
with open('prompt/prompt_closest_category.txt') as f:
prompt = f.read()
prompt = prompt.replace('{word}', f'{word}')
response = self.run_gpt(prompt, log_plan=False)
response = response.replace('output:', '')
response = response.replace('Output:', '')
response = response.replace(' ', '')
response = response.replace('\n', '')
response = response.replace('.', '')
response = response.replace(',', '')
return response
def get_get_search_categories(self, target_category):
'''
Function for getting most likely search objects from LLM
'''
with open('prompt/prompt_search.txt') as f:
prompt = f.read()
prompt = prompt.replace('{target}', f'{target_category}')
prompt = prompt.replace('{dialogue}', f'{self.command}')
response = self.run_gpt(prompt, log_plan=False)
response = response.replace('answer:', '')
response = response.replace(' ', '')
response = response.replace('\n', '')
response = response.split(',')
# make sure all are valid categories
response_ = []
for r in response:
if r in self.include_classes:
response_.append(r)
response = response_
return response
def subgoals_to_program(self, subgoals, held_obj=None):
'''
Convert a set of subgoals into a Python program
'''
subgoals = copy.deepcopy(subgoals)
subgoal_mapping = {
"go_to": "Navigate",
"pickup_and_place":"pickup_and_place",
"pickup":"Pickup",
"place":"Place",
"slice":"Slice",
"toggle_on":"ToggleOn",
"toggle_off":"ToggleOff",
"open":"Open",
"close":"Close",
"clean":"Clean",
"put_down":"PutDown",
"pour":"Pour",
"fill_up":"FillUp",
"clean":"Clean",
"empty":"Empty",
"toast":"Toast",
"cook":"Cook",
}
subgoal_mapping_r = {v:k for k,v in subgoal_mapping.items()}
objects = {}
objects_tmp = []
obj_count = {}
search_dict = copy.deepcopy(self.search_dict)
program = ''
first_subgoal=True
if held_obj is not None:
obj = held_obj
if obj not in objects_tmp:
objects[obj] = f'target_{obj.lower()}'
if obj in obj_count.keys():
objects[obj] = objects[obj] + str(obj_count[obj])
if obj in self.search_dict.keys():
program += f'{objects[obj]} = InteractionObject("{obj}", landmark = "{search_dict[obj][0]}")\n'
search_dict[obj].pop(0)
if len(search_dict[obj])==0:
del search_dict[obj]
else:
program += f'{objects[obj]} = InteractionObject("{obj}")\n'
objects_tmp.append(obj)
held_obj = objects[obj]
while len(subgoals)>0:
subgoal = subgoals.pop(0)
obj = subgoal[1]
sub = subgoal[0]
if sub in ["MoveBack", "MoveCloser", "MoveAlternate"]:
continue
if obj=="Sink" and sub=="Navigate" and [s[0] for s in subgoals[:4]]==["Place", "ToggleOn", "ToggleOff", "Pickup"]:
# clean subgoal
obj = subgoals[3][1]
if len(subgoals)>4 and subgoals[4][0]=="Pour":
sub = "Clean"
subgoals = subgoals[5:]
elif "Pour" in [s[0] for s in subgoals]:
# if pour in future, then likely fillup subgoal
sub = "FillUp"
subgoals = subgoals[4:]
else:
sub = "Clean"
subgoals = subgoals[4:]
elif sub=="Navigate" and [s[0] for s in subgoals[:3]]==["Pickup", "Navigate", "Place"]:
if (len(subgoals)>6 and [s[0] for s in subgoals[2:6]]==["Place", "ToggleOn", "ToggleOff", "Pickup"]):
# clean subgoals so skip
pass
else:
# pickup and place subgoal
obj = subgoals[0][1]
if obj not in objects_tmp:
objects[obj] = f'target_{obj.lower()}'
if obj in obj_count.keys():
objects[obj] = objects[obj] + str(obj_count[obj])
if obj in self.search_dict.keys():
program += f'{objects[obj]} = InteractionObject("{obj}", landmark = "{search_dict[obj][0]}")\n'
search_dict[obj].pop(0)
if len(search_dict[obj])==0:
del search_dict[obj]
else:
program += f'{objects[obj]} = InteractionObject("{obj}")\n'
objects_tmp.append(obj)
held_obj = objects[obj]
obj = subgoals[2][1]
sub = "pickup_and_place"
subgoals = subgoals[3:]
if obj not in objects_tmp and obj is not None:
objects[obj] = f'target_{obj.lower()}'
if obj in obj_count.keys():
objects[obj] = objects[obj] + str(obj_count[obj])
if obj in self.search_dict.keys():
program += f'{objects[obj]} = InteractionObject("{obj}", landmark = "{search_dict[obj][0]}"))\n'
search_dict[obj].pop(0)
if len(search_dict[obj])==0:
del search_dict[obj]
else:
program += f'{objects[obj]} = InteractionObject("{obj}")\n'
objects_tmp.append(obj)
if sub=='ObjectDone':
objects_tmp.remove(obj)
if obj not in obj_count.keys():
obj_count[obj] = 1
else:
obj_count[obj] += 1
continue
# add subgoal
subgoal_text = subgoal_mapping_r[sub]
if subgoal_text in ["go_to", "pickup", "slice", "toggle_on", "toggle_off", "open", "close", "clean", "fill_up", "empty", "cook", "toast"]:
program += f'{objects[obj]}.{subgoal_text}()\n'
if subgoal_text=="pickup":
held_obj = objects[obj]
elif subgoal_text in ["put_down"]:
program += f'{held_obj}.{subgoal_text}()\n'
elif subgoal_text in ["place", "pour", "pickup_and_place"]:
program += f'{held_obj}.{subgoal_text}({objects[obj]})\n'
else:
assert(False) # what subgoal is this?
if first_subgoal:
first_subgoal = False
print(program)
return program | [
"Failed subgoal:\nPLACEHOLDER\nExecution error: PLACEHOLDER",
"Failed subgoal: ...",
"{API_CORRECTIVE}",
"{RETRIEVED_EXAMPLES}",
"PLACEHOLDER",
"Failed subgoal:\nPLACEHOLDER",
"Execution error: ...",
"{command}",
"{dialogue}",
"{retrieved_plans}",
"Execution error: PLACEHOLDER",
"Input dialogue: ..."
] |
2024-01-10 | Gabesarch/HELPER | prompt~get_embeddings_examples.py | import os
import openai
import tiktoken
import ipdb
st = ipdb.set_trace
from tqdm import tqdm
import glob
import json
import numpy as np
import argparse
azure = True
if azure:
openai.api_type = "azure"
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_version = "2023-05-15"
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
else:
openai.api_key = os.getenv("OPENAI_API_KEY")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--root_dir", type=str, default=f"./dataset/gpt_embeddings", help="where to save output")
parser.add_argument('--embeddings_to_get', default=['planning', 'replanning'], type=str, nargs='+', help="planning, replanning, custom")
args = parser.parse_args()
if "planning" in args.embeddings_to_get: # embeddings for initial planning examples
files_iterate = glob.glob("examples/*.txt")
filenames = []
embeddings = np.zeros((len(files_iterate), 1536), dtype=np.float64)
for f_i in range(len(files_iterate)):
file = files_iterate[f_i]
with open(file) as f:
prompt = f.read()
            # just keep the dialogue line
prompt = prompt.split('\n')[0].split('dialogue: ')[-1]
print(prompt)
messages = [
{"role": "user", "content": prompt},
]
embedding = openai.Embedding.create(
engine="text-embedding-ada-002",
input=prompt,
)['data'][0]['embedding']
embedding = np.asarray(embedding)
embeddings[f_i] = embedding
# file_ = file.split('/')[-1]
file_ = os.path.join('prompt', file)
filenames.append(file_)
embedding_dir = os.path.join(args.root_dir, 'embeddings.npy')
np.save(embedding_dir, embeddings)
file_order = os.path.join(args.root_dir, 'file_order.txt')
with open(file_order, 'w') as fp:
fp.write("\n".join(str(item) for item in filenames))
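        # Downstream use (assumption, not shown in this script): at planning time the
        # new dialogue is presumably embedded with the same engine and compared against
        # these saved embeddings by cosine similarity, with file_order.txt mapping the
        # closest rows back to their prompt files.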
if "replanning" in args.embeddings_to_get: # embeddings for re-planning examples
files_iterate = glob.glob("examples/examples_errors/*.txt")
filenames = []
embeddings = np.zeros((len(files_iterate), 1536), dtype=np.float64)
for f_i in range(len(files_iterate)):
file = files_iterate[f_i]
with open(file) as f:
prompt = f.read()
prompt = prompt.split('\nInput dialogue:')[0]
print(prompt)
messages = [
{"role": "user", "content": prompt},
]
embedding = openai.Embedding.create(
engine="text-embedding-ada-002",
input=prompt,
)['data'][0]['embedding']
embedding = np.asarray(embedding)
embeddings[f_i] = embedding
file_ = file.split('/')[-1]
filenames.append(file_)
embedding_dir = os.path.join(args.root_dir, 'embeddings_replanning.npy')
np.save(embedding_dir, embeddings)
file_order = os.path.join(args.root_dir, 'file_order_replanning.txt')
with open(file_order, 'w') as fp:
fp.write("\n".join(str(item) for item in filenames))
if "custom" in args.embeddings_to_get: # embeddings for custom
files_iterate = glob.glob("examples/*.txt")
files_iterate += glob.glob("examples/examples_custom/*.txt")
filenames = []
embeddings = np.zeros((len(files_iterate), 1536), dtype=np.float64)
for f_i in range(len(files_iterate)):
file = files_iterate[f_i]
with open(file) as f:
prompt = f.read()
            # just keep the dialogue line
prompt = prompt.split('\n')[0].split('dialogue: ')[-1]
print(prompt)
messages = [
{"role": "user", "content": prompt},
]
embedding = openai.Embedding.create(
engine="text-embedding-ada-002",
input=prompt,
)['data'][0]['embedding']
embedding = np.asarray(embedding)
embeddings[f_i] = embedding
file_ = os.path.join('prompt', file)
filenames.append(file_)
embedding_dir = os.path.join(args.root_dir, 'embeddings_custom.npy')
np.save(embedding_dir, embeddings)
file_order = os.path.join(args.root_dir, 'file_order_custom.txt')
with open(file_order, 'w') as fp:
fp.write("\n".join(str(item) for item in filenames)) | [
"\n",
"\nInput dialogue:",
"dialogue: "
] |
2024-01-10 | GPTKing/modelscope | modelscope~models~cv~image_probing_model~backbone.py | # The implementation is adopted from OpenAI-CLIP,
# made publicly available under the MIT License at https://github.com/openai/CLIP
import math
import sys
from collections import OrderedDict
from functools import reduce
from operator import mul
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision import models
from .utils import convert_weights, load_pretrained
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed
# after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool,
# and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict([('-1', nn.AvgPool2d(stride)),
('0',
nn.Conv2d(
inplanes,
planes * self.expansion,
1,
stride=1,
bias=False)),
('1', nn.BatchNorm2d(planes * self.expansion))]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self,
spacial_dim: int,
embed_dim: int,
num_heads: int,
output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1],
x.shape[2] * x.shape[3]).permute(2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :].to(x.dtype)
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False)
return x[0]
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self,
d_model: int,
n_head: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)),
('gelu', QuickGELU()),
('c_proj', nn.Linear(d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(
dtype=x.dtype,
device=x.device) if self.attn_mask is not None else None
return self.attn(
x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor, idx):
features = {}
x_norm = self.ln_1(x)
features['layer_{}_pre_attn'.format(idx)] = x_norm.permute(1, 0, 2)
attn = self.attention(x_norm)
features['layer_{}_attn'.format(idx)] = attn.permute(1, 0, 2)
x = x + attn
mlp = self.mlp(self.ln_2(x))
features['layer_{}_mlp'.format(idx)] = mlp.permute(1, 0, 2)
x = x + mlp
return x, features
class Transformer(nn.Module):
def __init__(self,
width: int,
layers: int,
heads: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.ModuleList()
for i in range(layers):
block = ResidualAttentionBlock(width, heads, attn_mask)
self.resblocks.append(block)
def forward(self, x: torch.Tensor):
features = {}
for idx, block in enumerate(self.resblocks):
x, block_feats = block(x, idx)
features.update(block_feats)
return x, features
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int,
layers: int, heads: int, output_dim: int):
super().__init__()
print(input_resolution, patch_size, width, layers, heads, output_dim)
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False)
scale = width**-0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn(
(input_resolution // patch_size)**2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor, return_all=True):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1],
-1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
zeros = torch.zeros(
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device)
# shape = [*, grid ** 2 + 1, width]
x = torch.cat([self.class_embedding.to(x.dtype) + zeros, x], dim=1)
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x, features = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if return_all:
features['pre_logits'] = x
return features
if self.proj is not None:
x = x @ self.proj
return x
class CLIPNet(nn.Module):
def __init__(self, arch_name, pretrained, **kwargs):
super(CLIPNet, self).__init__()
if arch_name == 'CLIP_ViTB32':
self.clip = VisualTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTB16', 'CLIP_ViTB16_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTL14', 'CLIP_ViTL14_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768)
else:
raise KeyError(f'Unsupported arch_name for CLIP, {arch_name}')
def forward(self, input_data):
output = self.clip(input_data)
return output
def CLIP(arch_name='CLIP_RN50',
use_pretrain=False,
load_from='',
state_dict=None,
**kwargs):
model = CLIPNet(arch_name=arch_name, pretrained=None, **kwargs)
if use_pretrain:
if arch_name.endswith('FP16'):
convert_weights(model.clip)
load_pretrained(model.clip, state_dict, load_from)
return model
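# Minimal usage sketch (illustrative; weights stay randomly initialized unless
# use_pretrain=True and a compatible state_dict/checkpoint is supplied):
#   model = CLIP(arch_name='CLIP_ViTB32')
#   feats = model(torch.randn(1, 3, 224, 224))
#   # feats is a dict of intermediate features, e.g. feats['pre_logits']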
class ProbingModel(torch.nn.Module):
def __init__(self, feat_size, num_classes):
super(ProbingModel, self).__init__()
self.linear = torch.nn.Linear(feat_size, num_classes)
def forward(self, x):
return self.linear(x)
| [] |
2024-01-10 | Bilel-Azz/ProjComptAppli | testpdf.py | from PIL import Image
import pytesseract
import PyPDF2
import os
import fitz # PyMuPDF
from PIL import Image
def pdf_to_images(pdf_path, image_folder):
    # Open the PDF file
    pdf_document = fitz.open(pdf_path)
    # Create the folder that will hold the images
    os.makedirs(image_folder, exist_ok=True)
    # Iterate over every page of the PDF
    for page_number in range(pdf_document.page_count):
        # Extract the page
        page = pdf_document[page_number]
        # Render the page as an image
        image = page.get_pixmap()
        image_path = os.path.join(image_folder, f"page_{page_number + 1}.png")
        # Save the image
        image.save(image_path)
    # Close the PDF file
pdf_document.close()
from PIL import Image, ImageEnhance, ImageFilter
def preprocess_image(image_path):
    # Open the image with Pillow
    image = Image.open(image_path)
    # Apply filters to improve the image quality
    enhanced_image = ImageEnhance.Contrast(image).enhance(2.0)  # increase the contrast
    #enhanced_image = enhanced_image.filter(ImageFilter.MedianFilter())  # apply a median filter
    # Convert the image to grayscale for binarization
    grayscale_image = enhanced_image.convert('L')
    # Apply binarization to improve the contrast between the text and the background
    threshold = 150  # adjust this value for your image
    binary_image = grayscale_image.point(lambda p: p > threshold and 255)
return binary_image
# Example usage
pdf_path = 'FACTURE.pdf'
image_folder = './'
pdf_to_images(pdf_path, image_folder)
# Specify the path of the image you want to process
image_path = 'page_1.png'
# Open the image with Pillow
image = Image.open(image_path)
# Use Tesseract to extract the text
text = pytesseract.image_to_string(image)
# Display the extracted text
print(text)
# "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"
# import openai
# openai.api_key = "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"
# prompt = f"Mon texte est rempli d'erreur corrige le :"+text
# completion = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=[{"role": "user", "content": prompt}]
# )
# print(completion['choices'][0]['message']['content'])
image_path = 'page-001.jpg'
# Open the image with Pillow
image = Image.open(image_path)
image.show()
enhanced_image = ImageEnhance.Contrast(image).enhance(2.0)
# Use Tesseract to extract the text
text = pytesseract.image_to_string(image)
# Display the extracted text
print(text)
from pdf2image import convert_from_path
# Store Pdf with convert_from_path function
images = convert_from_path('FACTURE.pdf')
for i in range(len(images)):
# Save pages as images in the pdf
images[i].save('page'+ str(i) +'.jpg', 'JPEG') | [] |
2024-01-10 | rotysz/DocChat001 | search_docs.py | import os
import sys
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.memory import ChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import LLMChain
from langchain.vectorstores import Pinecone
import pinecone
def GetEmbeddings(_directory_path,_index_file_name, _splitter_class, _embeddings, _gen_emb=False, _chunk_size=1000, _chunk_overlap=0,_separators=None):
if _gen_emb:
# List all files in the directory
files = os.listdir(_directory_path)
# Iterate over each file in the directory
docs = []
for file_name in files:
if file_name.endswith('.txt'):
loader = TextLoader(os.path.join(_directory_path, file_name))
documents = loader.load()
text_splitter = _splitter_class(chunk_size= _chunk_size, chunk_overlap=_chunk_overlap, separators=_separators)
docs.extend(text_splitter.split_documents(documents))
db = FAISS.from_documents(docs, _embeddings)
db.save_local(_index_file_name)
else:
db = FAISS.load_local(_index_file_name, embeddings)
return db
def GetEmbeddingsPineCone(_directory_path,_index_name, _splitter_class, _embeddings, _gen_emb=False, _chunk_size=1000, _chunk_overlap=0,_separators=None):
pinecone.init(
api_key=os.environ["PINECONE_API_KEY"], # find at app.pinecone.io
environment="us-east4-gcp" # next to api key in console
)
if _gen_emb:
# List all files in the directory
files = os.listdir(_directory_path)
# Iterate over each file in the directory
docs = []
for file_name in files:
if file_name.endswith('.txt'):
loader = TextLoader(os.path.join(_directory_path, file_name))
documents = loader.load()
text_splitter = _splitter_class(chunk_size= _chunk_size, chunk_overlap=_chunk_overlap, separators=_separators)
docs.extend(text_splitter.split_documents(documents))
db = Pinecone.from_documents(docs, _embeddings,index_name=_index_name)
else:
db = Pinecone.from_existing_index(index_name=_index_name,embedding=_embeddings)
return db
def GetQuestion( _query, _memory, _temperature=0,_max_tokens=256):
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Question should be in Polish.
Do not repeat the question from the conversation.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question in Polish:"""
Q_PROMPT = PromptTemplate.from_template(_template)
chain = LLMChain(llm=ChatOpenAI(temperature=_temperature, max_tokens=_max_tokens), memory=_memory, prompt=Q_PROMPT)
output = chain.predict(question=_query)
return output
def GetAnswer(_query:str, vectorstore, _temperature=0,_max_tokens=256 ,_search_elements=4):
    docs = vectorstore.similarity_search(_query, k=_search_elements)
total_words = 0
for i in range(len(docs)):
total_words += len(docs[i].page_content.split())
if total_words > 1200:
docs = docs[:i]
break
prompt_template_p = """ Użyj poniższego kontekstu do wygenerowania wyczerpującej odpowiedzi na końcu. Po podaniu odpowiedzi zasugeruj zbliżone zagadnienia zgodne z kontakstem.
Jeżeli nie znasz odpowiedzi odpowiedz Nie wiem, nie staraj się wymyślić odpowiedzi.
{context}
Pytanie: {question}
Odpowiedź:"""
PROMPT = PromptTemplate(
template=prompt_template_p, input_variables=["context", "question"]
)
print(f'Pytanie -> {_query}\n')
chain = load_qa_chain(ChatOpenAI(temperature=_temperature, max_tokens=_max_tokens), chain_type="stuff", prompt=PROMPT,verbose=False)
output = chain({"input_documents": docs, "question": _query}, return_only_outputs=False)
return output
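# Note: GetAnswer caps the stuffed context at roughly 1200 words across the
# retrieved chunks, which keeps the "stuff" chain prompt within the chat model's
# context window given the chunk size used below.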
def PrintAnswer(output, _print_context=False):
print(f'Odpowiedź -> {output["output_text"]}\n')
print("Zrodła:")
for doc in output["input_documents"]:
print(f'[{len(doc.page_content.split())}, {doc.metadata}]')
if _print_context:
print('Konteksty:')
for doc in output["input_documents"]:
print(
f'Kontekst [{len(doc.page_content)},{len(doc.page_content.split())}, {doc.metadata}]-> {doc.page_content}\n')
print("")
return
GEN_EMBEDDINGS = False
print_context = False
if sys.argv[1].lower() == "gen":
GEN_EMBEDDINGS = True
if sys.argv[2].lower() == "trace":
print_context = True
if sys.argv[3].lower() == "pinecone":
    vestorstore = "PINECONE"
else:
    vestorstore = "FAISS"
print (f" ===== DocBot V .001 ====== [gen embeddings: {GEN_EMBEDDINGS} trace: {print_context}]")
embeddings = OpenAIEmbeddings()
history = ChatMessageHistory()
#memory = ConversationBufferMemory(return_messages=True,memory_key="chat_history")
memory = ConversationBufferWindowMemory(return_messages=True,memory_key="chat_history",k=4)
if vestorstore == "FAISS":
db = GetEmbeddings("input", "srch_idx", RecursiveCharacterTextSplitter, embeddings, GEN_EMBEDDINGS,
_chunk_size=3000, _chunk_overlap=0, _separators=[ "\n\n", "\n"," "])
elif vestorstore == "PINECONE":
db = GetEmbeddingsPineCone("input", "docchat", RecursiveCharacterTextSplitter, embeddings, GEN_EMBEDDINGS,
_chunk_size=3000, _chunk_overlap=0, _separators=[ "\n\n", "\n"," "])
while True:
#get query from user
query = input("Pytanie: ")
if query.lower() == 'q':
break
output_q = GetQuestion(query, memory)
query = output_q
#query = "Jakie kryteria wziąć pod uwagę wybierając księgowość dla małej spółki ?"
#query="Na jakie wspatrcie unijne może liczyć mała firma?"
output = GetAnswer(query, db,_temperature=0, _max_tokens=512 ,_search_elements=4)
memory.chat_memory.add_user_message(query)
memory.chat_memory.add_ai_message(output["output_text"])
PrintAnswer(output,print_context)
print ("Bot stopped.")
| [
" Użyj poniższego kontekstu do wygenerowania wyczerpującej odpowiedzi na końcu. Po podaniu odpowiedzi zasugeruj zbliżone zagadnienia zgodne z kontakstem.\n Jeżeli nie znasz odpowiedzi odpowiedz Nie wiem, nie staraj się wymyślić odpowiedzi.\n {context}\n\n Pytanie: {question}\n Odpowiedź:",
"question",
"context",
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Question should be in Polish. \n Do not repeat the question from the conversation.\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question in Polish:"
] |
2024-01-10 | privateai/deid-examples | python~LLM%20Examples~secure_prompt.py | import argparse
import json
import sys
import demo_config
from openai import OpenAI
from privateai_client import PAIClient, request_objects
# Initialize parser
parser = argparse.ArgumentParser(description="A Python script with a model parameter.")
parser.add_argument("-m", "--model", required=True, help="Specify the model to use.")
args = parser.parse_args()
# Initialize the openai client
openai_client = OpenAI(api_key=demo_config.openai["API_KEY"])
# initialize the privateai client
PRIVATEAI_SCHEME = "https"
PRIVATEAI_HOST = demo_config.privateai["PROD_URL"]
pai_client = PAIClient(PRIVATEAI_SCHEME, PRIVATEAI_HOST)
pai_client.add_api_key(demo_config.privateai["PROD_KEY"])
############ Vertex AI Config ##########
import vertexai
from vertexai.preview.language_models import ChatModel, InputOutputTextPair
vertexai.init(
project=demo_config.vertex["PROJECT"], location=demo_config.vertex["LOCATION"]
)
chat_model = ChatModel.from_pretrained("chat-bison@001")
parameters = {"temperature": 0.8, "max_output_tokens": 256, "top_p": 0.8, "top_k": 40}
############ Cohere config #############
import cohere
co = cohere.Client(demo_config.cohere["API_KEY"])
models = ["openai", "cohere", "vertexai"]
def prompt_chat_gpt(text):
completion = openai_client.chat.completions.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": text}]
)
return completion.choices[0].message.content
def prompt_vertex_ai(prompt, context):
# call vertexAI
chat = chat_model.start_chat(context=context, examples=[])
parameters = {
"temperature": 0.8,
"max_output_tokens": 256,
"top_p": 0.8,
"top_k": 40,
}
completion = chat.send_message(f"{prompt}", **parameters)
return completion.text
def prompt_cohere(prompt):
response = co.generate(
model="command",
prompt=prompt,
max_tokens=300,
temperature=0.9,
k=0,
stop_sequences=[],
return_likelihoods="NONE",
)
return response.generations[0].text
def private_prompt(prompt, raw_text, model):
completions = (
{}
) # a dict that maintains the history of the raw data, redaction, and completions
################################################
############ Identify and Redact ###############
################################################
completions["raw_text"] = raw_text
redaction_request_obj = request_objects.process_text_obj(text=[raw_text])
redaction_response_obj = pai_client.process_text(redaction_request_obj)
################################################
############ Store redactions #################
################################################
deidentified_text = redaction_response_obj.processed_text[0]
completions["redacted_text"] = deidentified_text
entity_list = redaction_response_obj.get_reidentify_entities()
################################################
############ Generate Completion ###############
################################################
completions["redacted_completion"] = [] # create empty list to hold completions
completions["reidentified_completion"] = [] # same thing for re-identifications
match model:
case "openai":
print("OPENAI SELECTED")
llm_response = prompt_chat_gpt(prompt + deidentified_text)
completions["redacted_completion"].append(
{"model": model, "completion": llm_response}
)
case "vertexai":
print("VertexAI/Bard selected")
llm_response = prompt_vertex_ai(prompt, deidentified_text)
completions["redacted_completion"].append(
{"model": model, "completion": llm_response}
)
case "cohere":
print("Cohere selected")
llm_response = prompt_cohere(prompt + deidentified_text)
completions["redacted_completion"].append(
{"model": model, "completion": llm_response}
)
case "all":
completions["redacted_completion"].append(
{
"model": "openai",
"completion": prompt_chat_gpt(prompt + deidentified_text),
}
)
completions["redacted_completion"].append(
{
"model": "vertexai",
"completion": prompt_vertex_ai(prompt, deidentified_text),
}
)
completions["redacted_completion"].append(
{
"model": "cohere",
"completion": prompt_cohere(prompt + deidentified_text),
}
)
case _:
print("No valid model selected, so using chatgpt")
llm_response = prompt_chat_gpt(prompt + deidentified_text)
completions["redacted_completion"].append(
{"model": "openai", "completion": llm_response}
)
################################################
############ Call the reidentify Route #########
################################################
for completion in completions["redacted_completion"]:
reidentification_request_obj = request_objects.reidentify_text_obj(
processed_text=[completion["completion"]], entities=entity_list
)
reidentification_response_obj = pai_client.reidentify_text(
reidentification_request_obj
)
completions["reidentified_completion"].append(
{
"model": completion["model"],
"re-identified": reidentification_response_obj.body[0],
}
)
return completions
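# Shape of the returned history dict (for reference):
#   {"raw_text": ..., "redacted_text": ...,
#    "redacted_completion": [{"model": ..., "completion": ...}, ...],
#    "reidentified_completion": [{"model": ..., "re-identified": ...}, ...]}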
raw_sample_text = """
On May 17, 2023, the U.S. District Court for the Southern District of New York entered a final consent judgment against Sam A. Antar, who the SEC previously charged with defrauding investors, many of whom were his friends and acquaintances in a Syrian Jewish community in New Jersey.
The SEC's s complaint, alleged that Antar, of New York, New York, engaged in a fraudulent scheme that deceived numerous investors out of more than $550,000 while claiming he would invest in shares of companies that were not yet public, and then sell those shares to already-identified buyers for a premium in a short period of time. In reality, according to the complaint, Antar never used investor funds to purchase shares of emerging companies, or to make any other investment. Instead, Antar spent investor funds gambling, making gifts to family members, paying for his daughter's wedding, and making Ponzi-like payments to some early investors.
The final judgment permanently enjoins Antar from violating the antifraud provisions of the federal securities laws, Section 17(a) of the Securities Act of 1933 and Section 10(b) of the Securities Exchange Act of 1934 and Rule 10b-5 thereunder. The judgment also orders Antar to pay disgorgement of $567,000 and prejudgment interest of $88,754, with offsets permitted for amounts Antar pays pursuant to a restitution order in a parallel criminal action.
In a parallel criminal action, the New Jersey Office of the Attorney General Division of Criminal Justice filed criminal charges against Antar. On April 22, 2022, Antar pled guilty to certain of the charges and on December 9, 2022, was sentenced to three years in prison and ordered to pay restitution of $15,000.
"""
completions = private_prompt("summarize this: ", raw_sample_text, args.model)
print(completions["redacted_completion"])
print("\n**************************\n")
print(completions["reidentified_completion"])
print(json.dumps(completions))
| [] |
2024-01-10 | privateai/deid-examples | python~LLM%20Examples~secure_prompt_from_file.py | import argparse
import base64
import json
import os
import sys
import demo_config
from openai import OpenAI
from privateai_client import PAIClient, request_objects
# Initialize parser
parser = argparse.ArgumentParser(description="Secure LLM prompting from a file")
parser.add_argument("-d", "--directory", required=True, help="directory")
parser.add_argument("-f", "--file", required=True, help="File to redact")
parser.add_argument("-t", "--filetype", required=True, help="file type")
parser.add_argument("-n", "--filename", required=True, help="file name")
args = parser.parse_args()
PRIVATEAI_API_KEY = demo_config.privateai["PROD_KEY"]
PRIVATEAI_URL = demo_config.privateai["PROD_URL"]
# Initialize the openai client
openai_client = OpenAI(api_key=demo_config.openai["API_KEY"])
file_dir = args.directory
file_name = args.filename
filepath = args.file
file_type = args.filetype.split("/")[1]
PRIVATEAI_SCHEME = "https"
client = PAIClient(PRIVATEAI_SCHEME, PRIVATEAI_URL)
client.add_api_key(PRIVATEAI_API_KEY)
def prompt_chat_gpt(text):
completion = openai_client.chat.completions.create(
model="gpt-4", messages=[{"role": "user", "content": text}]
)
return completion.choices[0].message.content
# Read from file
with open(filepath, "rb") as b64_file:
file_data = base64.b64encode(b64_file.read())
file_data = file_data.decode("ascii")
# Make the request
file_obj = request_objects.file_obj(data=file_data, content_type=args.filetype)
request_obj = request_objects.file_base64_obj(file=file_obj)
redaction_response = client.process_files_base64(request_object=request_obj)
entity_list = redaction_response.get_reidentify_entities()
# Write to file
with open(os.path.join(file_dir, f"{file_name}.{file_type}"), "wb") as redacted_file:
processed_file = redaction_response.processed_file.encode("ascii")
processed_file = base64.b64decode(processed_file, validate=True)
redacted_file.write(processed_file)
print("\n**************************************************\n")
print(f"redacted file contents: {redaction_response.processed_text}\n")
file_summary = prompt_chat_gpt(
f"summarize this file: {redaction_response.processed_text}"
)
print("\n SUMMARY \n")
print("********************** redacted summary **********************")
print(file_summary)
print("********************** REID summary **********************")
reid_req_obj = request_objects.reidentify_text_obj(
processed_text=[file_summary], entities=entity_list
)
reidentification_response_obj = client.reidentify_text(reid_req_obj)
print(reidentification_response_obj.body)
| [] |
2024-01-10 | davidwaldherr/UniversalSummarier | 1_Universal.py | import openai
openai.api_key ='INSERT OPENAI KEY HERE'
from openai.embeddings_utils import get_embedding
import pandas as pd
# You need to create a virtual environment
# pip3 install virtualenv
# . venv/bin/activate
# This program accepts a text file as input and divides it into chapters,
# sections, paragraphs, and sentences. It then uses that information to create a bookAuthorVoice.jsonl
# file, a bookSentences.csv file, and a Book.txt summary.
# Before launching this program, you must create a bookSentences.csv file
# Surround the table of contents and the content itself with "Start" and "End" to automatically
# parse the text.
# x is deadspace
# 0 is empty line
# 1 is paragraph
# 2 is table of contents item
# 3 is chapter or part
# 4 is section
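# Example (hypothetical input) of how lines end up labelled:
#   "start"              -> 2  (table-of-contents delimiter)
#   ""                   -> 0  (empty line)
#   "chapter one"        -> 3  (matches a table-of-contents entry)
#   "a short heading"    -> 4  (< 8 words, no terminal punctuation)
#   "A normal sentence." -> 1  (paragraph text)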
# prompt the user to enter a file name
def getFileName():
filename = input('Enter the name of the file: ')
return filename
# # prompt the user if they would like to embed all of the sentences in the book
def summarizeDecision():
summary = input('Would you like to summarize the book? (y/n): ')
return summary
def embedDecision():
summary = input('Would you like to embed each sentence in book? (y/n): ')
return summary
def sectionsDecision():
sections = input('Would you like to create sections? (y/n): ')
return sections
def embedSentences(filename):
df = pd.read_csv(filename, dtype=str)
    df = df[['chapter', 'section', 'paragraph', 'sentence']]
    # remove any duplicate rows
    df = df.drop_duplicates()
    # This will take ~many~ minutes
df['babbage_search'] = df.sentence.apply(lambda x: get_embedding(x, engine='text-search-babbage-doc-001'))
df.to_csv(filename)
# import the text file for a book, make it lowercase, and parse it into the text list
def readFile(filename):
text = []
with open(filename, 'r') as f:
for line in f:
# make each line lowercase
if line != '':
# strip the line of whitespace
line = line.strip()
line = line.lower()
text.append(line)
# print(line)
return text
def startTable(text):
table = []
start = 0
end = 0
for i in range(len(text)):
if text[i] == 'start':
if start == 0:
table.append('2')
start = 1
else:
table.append('1')
elif text[i] == 'end':
if end == 0:
table.append('2')
end = 1
else:
table.append('1')
elif text[i] == '':
table.append('0')
# elif text[i] == '• • •':
# table.append('0')
else:
table.append('1')
return table
# this function labels every line in the table of contents with 2.
# it also marks an x before the start of the table of contents.
def markContents(table):
start = 999
end = 999
for i in range(len(table)):
if table[i] == '2':
if start == 999:
start = i
else:
end = i
# between the start and end, if the line = 1, change it to 2
for i in range(start, end):
if table[i] == '1':
table[i] = '2'
if start == 0:
pass
else:
for i in range(0, start):
table[i] = 'x'
return table
# this function creates a list of the table of contents
def createContents(text, table):
contents = []
for i in range(len(table)):
if table[i] == '2':
contents.append(text[i])
return contents
def lastElement(contents):
last = contents[-1]
return last
def markEndDeadspace(text, contents, table):
# store the last element in contents in a variable
last = lastElement(contents)
# iterate through the text list and if the line is == last, save the index
for i in range(len(text)):
if text[i] == last:
end = i
# mark every line in table as an x after the end index
for i in range(end, len(table)):
table[i] = 'x'
return table
def extendContents(contents):
# remove one character at a time from each item in contents and append it to contents2
contents2 = []
for i in range(len(contents)):
for j in range(len(contents[i])):
contents2.append(contents[i][j:])
# remove leading and trailing whitespace from each item in contents2
for i in range(len(contents2)):
contents2[i] = contents2[i].strip()
contents3 = []
for i in range(len(contents2)):
if contents2[i] not in contents3:
contents3.append(contents2[i])
# remove any empty strings from contents2
contents3 = [x for x in contents3 if x != '']
return contents3
def findChapters(text, contents, table):
for i in range(len(table)):
if table[i] == '1':
for j in range(len(contents)):
if contents[j] == text[i]:
table[i] = '3'
break
return table
def findSections(text, table):
for i in range(len(table)):
if table[i] == '1':
text[i] = text[i].strip()
# if text[i] does not end with a period, question mark, exclamation point, astrix, comma
# it is under 15 words, and does not start with a '-', mark it in the table as a 4
if text[i][-1] not in ['.', '?', '!', '”', '*', ',', '†', ';', ':', '‡', '’']:
# if the line does not start with "-"
if text[i][0] != '—':
if len(text[i].split()) < 8: # set how many words are allowed in a section
table[i] = '4'
return table
# this function creates a JSONL file of all the paragraphs in the book
def createJSONL(text, table):
jsonl = []
for i in range(len(table)):
if table[i] == '1':
if len(text[i].split()) > 60:
par = text[i].replace('"', '')
jsonl.append('{"prompt":"", "completion":" ' + par + '"}')
return jsonl
# print the list to a jsonl file
def writeJSONL(filename, list):
with open(filename, 'w') as f:
for i in range(len(list)):
f.write(list[i] + '\n')
# this function creates a list that can be transformed into a csv file
# CSP - Chapter, Section, Paragraph
def getCSP(text, table):
sentenceCSP = []
for i in range(len(table)):
if table[i] == '1':
sentenceCSP.append("Paragraph: " + text[i])
elif table[i] == '3':
sentenceCSP.append("Chapter: " + text[i])
elif table[i] == '4':
sentenceCSP.append("Section: " + text[i])
return sentenceCSP
# this function creates a CSPS format to be read for CSV and Summary
# CSPS - Chapter, Section, Paragraph, Sentence
def createEmbed(sentenceCSP):
sentenceEmbed = []
for line in sentenceCSP:
if line.startswith('Paragraph: '):
sentenceEmbed.append(line)
line = line.replace('Paragraph: ', '')
line = line.split('.')
# remove outside whitespace
line = [x.strip() for x in line]
# remove empty strings from list
line = [x for x in line if x]
for sentence in line:
sentenceEmbed.append("Sentence: " + sentence)
else:
sentenceEmbed.append(line)
return sentenceEmbed
# this function creates a list that can be printed directly to the csv file
def createCSV(sentenceEmbed):
csv = []
chapter = ""
section = ""
paragraph = ""
sentence = ""
for i in range(len(sentenceEmbed)):
# remove all instances of " from the string
sentenceEmbed[i] = sentenceEmbed[i].replace('"', '')
if sentenceEmbed[i].startswith('Chapter: '):
# set the chapter variable to the line after "Chapter: "
chapter = sentenceEmbed[i].replace('Chapter: ', '')
chapter = '"' + chapter + '"'
elif sentenceEmbed[i].startswith('Section: '):
# set the section variable to the line after "Section: "
section = sentenceEmbed[i].replace('Section: ', '')
section = '"' + section + '"'
elif sentenceEmbed[i].startswith('Paragraph: '):
# set the paragraph variable to the line after "Paragraph: "
paragraph = sentenceEmbed[i].replace('Paragraph: ', '')
paragraph = '"' + paragraph + '"'
elif sentenceEmbed[i].startswith('Sentence: '):
# set the paragraph variable to the line after "Paragraph: "
sentence = sentenceEmbed[i].replace('Sentence: ', '')
sentence = '"' + sentence + '"'
csv.append("{},{},{},{}".format(chapter, section, paragraph, sentence))
return csv
# print the list to a csv file
def writeCSV(filename, csv):
with open(filename, 'w') as f:
f.write("chapter,section,paragraph,sentence\n")
for line in csv:
f.write(line + '\n')
f.close()
def prepareToSummarize(sentenceCSP):
# prepend and append any line that starts with "Chapter: ", or "Section: " with "==="
for i in range(len(sentenceCSP)):
if sentenceCSP[i].startswith('Chapter: ') or sentenceCSP[i].startswith('Section: '):
sentenceCSP[i] = '===' + sentenceCSP[i]
if sentenceCSP[i].startswith('==='):
sentenceCSP[i] = sentenceCSP[i] + '==='
# if there are over 5 lines in a row starting with "Paragraph: ", prepend and append every fourth line with "==="
for i in range(len(sentenceCSP)):
if sentenceCSP[i].startswith('Paragraph: '):
if i % 4 == 0:
sentenceCSP[i] = '===' + sentenceCSP[i]
if i % 4 == 3:
sentenceCSP[i] = sentenceCSP[i] + '==='
# combine the list of lines into a single string
sentenceCSP = ' '.join(sentenceCSP)
# split the string by '==='
sentenceCSP = sentenceCSP.split('===')
sentenceCSP = [line.strip() for line in sentenceCSP]
sentenceCSP = [x for x in sentenceCSP if x != '']
# for each line that starts with "Paragraph: ", remove every instance of it after the first one
for i in range(len(sentenceCSP)):
if sentenceCSP[i].startswith('Paragraph: '):
sentenceCSP[i] = sentenceCSP[i].replace('Paragraph: ', '')
sentenceCSP[i] = "Paragraph: " + sentenceCSP[i]
# remove any empty strings from the list
sentenceCSP = [x for x in sentenceCSP if x != '']
return sentenceCSP
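# Net effect (roughly): consecutive "Paragraph: " lines are regrouped into chunks
# of up to four paragraphs, while each "Chapter: "/"Section: " line stays a chunk
# of its own, so every chunk sent to the summarizer stays comfortably small.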
def summarize(text):
response = openai.Completion.create(
model="text-davinci-002",
prompt="Summarize this for a second-grade student:\n\n" + text + "\n\n",
temperature=0.69,
max_tokens=2048,
)
return response.choices[0].text
def summarizeTwo(text):
response = openai.Completion.create(
model="text-davinci-002",
prompt="Summarize the following:\n\n" + text + "\n\n",
temperature=0.69,
max_tokens=2048,
)
return response.choices[0].text
def get_completions(preparedToSummarize):
func = []
for paragraph in preparedToSummarize:
# if the line contains "Chapter", or "Section" skip it
if paragraph.startswith("Chapter") or paragraph.startswith("Section"):
func.append(paragraph)
elif paragraph.startswith("Paragraph: "):
completeMe = paragraph.replace("Paragraph: ", "")
completion = summarize(completeMe)
func.append(completion)
else:
completion = summarizeTwo(paragraph)
func.append(completion)
# remove any empty strings
func = [x for x in func if x]
return func
def processSummary(extendedSummary):
    # add spacing around the title, chapter, and section lines
    processed = []
    for line in extendedSummary:
        if line.startswith("Summary of "):
            line = line + '\n'
        elif line.startswith("Chapter: "):
            line = '\n\n' + line
        elif line.startswith("Section: "):
            line = '\n' + line
        processed.append(line)
    return processed
# print the list to a csv file
def writeSummary(filename, extendedSummary):
with open(filename, 'w') as f:
f.write("Summary of " + filename + "\n\n")
for line in extendedSummary:
f.write(line + '\n')
f.close()
# Get the file name and summarize/embed decisions from the user
myFile = getFileName()
summarizeDecision = summarizeDecision()
embedDecision = embedDecision()
sectionDecision = sectionsDecision()
# convert the file into a table that marks the significance of each line
text = readFile(myFile + '.txt')
table = startTable(text)
table = markContents(table)
contents = createContents(text, table)
table = markEndDeadspace(text, contents, table)
contents = extendContents(contents)
table = findChapters(text, contents, table)
if sectionDecision == "y":
table = findSections(text, table)
# Create the CSV file for the contents of the book
sentenceCSP = getCSP(text, table) # use this for Summary
sentenceEmbed = createEmbed(sentenceCSP) # use this for CSV
csv = createCSV(sentenceEmbed)
writeCSV(myFile + 'Sentences.csv', csv) # write to the csv file
# Summarize the book
preparedToSummarize = prepareToSummarize(sentenceCSP) # there are no token issues with this
if summarizeDecision == 'y':
firstSummary = get_completions(preparedToSummarize)
extendedSummary = get_completions(firstSummary)
writeSummary(myFile + '.txt', extendedSummary)
# Create BookAuthorVoice.jsonl
jsonl = createJSONL(text, table)
writeJSONL(myFile + 'AuthorVoice.jsonl', jsonl) # create a JSONL file of the Paragraphs
# Embed each sentence in the book
if embedDecision == 'y':
embedSentences(myFile + 'Sentences.csv')
##
# Testing functions and everything below not needed for program
##
# print an edited version of the book to a text file
def writeBook(filename, text, table):
with open(filename, 'w') as f:
section = []
paragraphs = []
for i in range(len(text)):
if table[i] == '4':
section.append(text[i])
elif table[i] == '1':
paragraphs.append(text[i])
for i in range(len(section)):
f.write('SECTIONS\n')
f.write(section[i])
f.write('\n')
for i in range(len(paragraphs)):
f.write('PARAGRAPHS\n')
f.write(paragraphs[i])
f.write('\n')
# print the list to a text file
def writeTable(filename, table):
with open(filename, 'w') as f:
for i in range(len(table)):
f.write(table[i])
# print the list to a text file
def writeContents(filename, contents):
with open(filename, 'w') as f:
for i in range(len(contents)):
f.write(contents[i] + '\n')
writeBook('bookSections_Paragraphs.txt', text, table)
writeTable('bookTable.txt', table)
writeContents('bookContents.txt', contents)
print(len(text))
print(len(table)) | [
"Summarize this for a second-grade student:\n\nPLACEHOLDER\n\n",
"Summarize the following:\n\nPLACEHOLDER\n\n"
] |
2024-01-10 | davidwaldherr/UniversalSummarier | 2_Universal.py | import openai
openai.api_key ='INSERT OPENAI KEY HERE'
from openai.embeddings_utils import get_embedding
import pandas as pd
# Before launching this program, you must create a bookSentences.csv file
# This program is a continuation of the 'hot' output from 1_bookToUniversal.py.
# It takes that boiling output (in the form of Book.txt) and converts it to
# an embedded BookSummary.csv and BookSummary.jsonl. It also reformats the
# contents of the Book.txt file to be more readable.
# The final step in the process is to create fine tunes from BookAuthorVoice.jsonl
# and BookSummary.jsonl, which will be utilized by the essay writing program.
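# Illustrative Book.txt layout expected by getSummary() below:
#   Summary of Book.txt
#   Chapter: a chapter title
#   Section: an optional section title
#   a summarized paragraph on its own line
#   another summarized paragraph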
# prompt the user to enter a file name
def getFileName():
filename = input('Enter the name of the file: ')
return filename
def embedDecision():
summary = input('Would you like to embed the book summary? (y/n): ')
return summary
def getSummary(filename):
summary = []
with open(filename, 'r') as f:
for line in f:
# make each line lowercase
if line != '\n':
# strip the line of whitespace
line = line.strip()
if filename in line:
summary.append(line)
elif "Chapter: " in line:
summary.append(line)
elif "Section: " in line:
summary.append(line)
else:
summary.append("Paragraph: " + line)
f.close()
return summary
def reformatBookTxt(filename, summary):
summaryFormat = []
for line in summary:
if filename in line:
summaryFormat.append(line + '\n-----------------------------------\n\n')
elif "Chapter: " in line:
summaryFormat.append('\n\n\n' + line)
elif "Section: " in line:
summaryFormat.append('\n\n' + line)
else:
text = line.replace("Paragraph: ", "")
summaryFormat.append('\n' + text)
with open(filename, 'w') as f:
for i in range(len(summaryFormat)):
f.write(summaryFormat[i])
f.close()
def createSummaryJSONL(summary):
summaryJSONL = []
for line in summary:
if line.startswith("Paragraph: "):
par = line.replace("Paragraph: ", "")
par = par.replace('"', '')
summaryJSONL.append('{"prompt":"", "completion":" ' + par + '"}')
return summaryJSONL
# print the list to a jsonl file
def writeJSONL(filename, summaryJSONL):
with open(filename, 'w') as f:
for i in range(len(summaryJSONL)):
f.write(summaryJSONL[i] + '\n')
# Create the list that contains the contents of the CSV file
def createCSV(mySummaryEmbed):
summaryCSV = []
chapter = ""
section = ""
paragraph = ""
for i in range(len(mySummaryEmbed)):
# remove all instances of " from the string
mySummaryEmbed[i] = mySummaryEmbed[i].replace('"', '')
if mySummaryEmbed[i].startswith('Chapter: '):
# set the chapter variable to the line after "Chapter: "
chapter = mySummaryEmbed[i].replace('Chapter: ', '')
chapter = '"' + chapter + '"'
elif mySummaryEmbed[i].startswith('Section: '):
# set the section variable to the line after "Section: "
section = mySummaryEmbed[i].replace('Section: ', '')
section = '"' + section + '"'
elif mySummaryEmbed[i].startswith('Paragraph: '):
# set the paragraph variable to the line after "Paragraph: "
paragraph = mySummaryEmbed[i].replace('Paragraph: ', '')
paragraph = '"' + paragraph + '"'
summaryCSV.append("{},{},{}".format(chapter, section, paragraph))
return summaryCSV
# print the list to a csv file
def writeSummaryCSV(filename, summaryCSV):
with open(filename, 'w') as f:
f.write("chapter,section,paragraph\n")
for line in summaryCSV:
f.write(line + '\n')
f.close()
def embedSummaryParagraphs(filename):
df = pd.read_csv(filename, dtype=str)
df = df[['chapter', 'section', 'paragraph']]
# remove any duplicate rows
df = df.drop_duplicates()
# This will take ~multiple~ minutes
df['babbage_search'] = df.paragraph.apply(lambda x: get_embedding(x, engine='text-search-babbage-doc-001'))
df.to_csv(filename)
# Gather the necessary components
filename = getFileName()
embedDecision = embedDecision()
summary = getSummary(filename + '.txt')
# reformat the book.txt file
reformatBookTxt(filename + '.txt', summary)
# Create the BookSummary.jsonl file
summaryJSONL = createSummaryJSONL(summary)
writeJSONL(filename + 'Summary.jsonl', summaryJSONL) # create a JSONL file of the Summary
# Create the bookSummary.csv file
summaryCSV = createCSV(summary)
writeSummaryCSV(filename + 'Summary.csv', summaryCSV) # write to the csv file
# Embed the book summary
if embedDecision == 'y':
embedSummaryParagraphs(filename + 'Summary.csv') | [] |
2024-01-10 | AdieLaine/Model-Sliding | src~model-sliding.py | import openai
import streamlit as st
import time
from datetime import datetime
import pandas as pd
# Set OpenAI API key
openai.api_key = "set-your-api-key-here"
# Set OpenAI API key using Streamlit secrets management
openai.api_key = st.secrets["OPENAI_API_KEY"]
# Streamlit configurations
st.set_page_config(
page_title="Model Sliding",
page_icon="🛝",
layout="wide",
menu_items={
'Get Help': 'https://github.com/AdieLaine/Model-Sliding/',
'Report a bug': 'https://github.com/AdieLaine/Model-Sliding/',
'About': """
# Model Sliding
This application demonstrates 'model sliding', a method of logic that selects the best OpenAI model for a given task based on the user's prompt. Each task type is associated with specific keywords that link to the most suitable model. If no keywords match, a default model is used.
https://github.com/AdieLaine/Model-Sliding/
"""
}
)
st.markdown('<h1 style="text-align: center; color: seaGreen; margin-top: -70px;">Model Sliding</h1>', unsafe_allow_html=True)
st.markdown('<h3 style="text-align: center;"><strong>AI Model Automation</strong></h3>', unsafe_allow_html=True)
st.markdown('<hr>', unsafe_allow_html=True)
def part_of_day(hour):
"""
Determines the part of the day (morning, afternoon, evening) based on the hour.
Args:
hour (int): The current hour.
Returns:
str: The part of the day.
"""
return (
"morning" if 5 <= hour <= 11
else
"afternoon" if 12 <= hour <= 17
else
"evening"
)
@st.cache_data
def meta_model(prompt, model_roles):
"""
Determines the most appropriate model to use based on the user's prompt.
Args:
prompt (str): The task description provided by the user.
model_roles (dict): A dictionary mapping keywords to models and roles.
Returns:
tuple: The selected model and the role of the assistant.
"""
# Convert the prompt to lower case for case insensitive matching
prompt = prompt.lower()
# Iterate over the dictionary to find the appropriate model and role
for keywords, (model, role) in model_roles.items():
if any(keyword in prompt for keyword in keywords):
return model, role
# If no keywords match, default to the base model and a general role
return "gpt-3.5-turbo", 'A helpful assistant.'
# Define the model_roles dictionary
model_roles = {
("code", "programming", "algorithm"): ("gpt-3.5-turbo", 'You are an programming assistant that logically weaves code together.'),
("essay", "paper", "report", "article"): ("gpt-4", 'You are an writing assistant that excels at creating amazing written material.'),
("story", "narrative", "tale", "fable"): ("gpt-3.5-turbo", 'You are an storyeller assistant that can weave intricate and compelling stories.'),
("social media", "post", "content", "engaging"): ("gpt-4", 'You are an social media assistant skilled in creating engaging and creative content for social media.')
}
def add_message(role, content):
"""
Generates a new message in the required format. If the message is a system message,
it generates a dynamic greeting based on the time of day with keywords.
Args:
role (str): The role of the message ("system", "user", or "assistant").
content (str): The content of the message.
Returns:
dict: A dictionary representing the message.
"""
if role == "assistant" and content == "greeting":
current_hour = datetime.now().hour
day_part = part_of_day(current_hour)
content = f'Good {day_part}! I\'m your AI assistant. I can help you with a variety of tasks.'
elif content.strip() == "!":
content = "helpme"
return {"role": role, "content": content}
def generate_response(model, messages, message_placeholder):
"""
Generates a response using the selected OpenAI model.
Args:
model (str): The model to use for generation.
messages (list): The list of message dicts for the conversation history.
message_placeholder (streamlit.delta_generator.DeltaGenerator): An empty Streamlit container for the message to be generated.
Returns:
str: The generated response.
"""
if model == "fine-tuned-model":
response = openai.Completion.create(
model=model,
prompt=messages[-1]["content"],
max_tokens=60
)
return response.choices[0].text.strip()
else:
accumulated_response = ""
# Use streaming approach for standard models
for response in openai.ChatCompletion.create(
model=model,
messages=messages,
stream=True,
):
chunk = response.choices[0].delta.get("content", "")
accumulated_response += chunk
for word in chunk.split():
message_placeholder.markdown(accumulated_response + " " + "▌", unsafe_allow_html=True)
time.sleep(0.01) # delay between words for realistic typing effect
message_placeholder.markdown(accumulated_response, unsafe_allow_html=True)
return accumulated_response
@st.cache_data
def display_model_table():
"""
Creates and displays a table of models and their associated keywords along with example usages using Streamlit's st.table function.
Returns:
None
"""
# Create a DataFrame for the table
model_table = pd.DataFrame({
"Model": ["gpt-3.5-turbo", "gpt-3.5-turbo", "gpt-4", "gpt-4"],
"First Keyword": ["code", "story", "essay", "social media"],
"Other Keywords": [
"programming, algorithm",
"narrative, tale, fable",
"paper, report, article",
"post, content, engaging"
],
"Role": [
"Assistant specialized in generating Python code",
"Assistant that can weave intricate and compelling stories",
"Assistant that excels at writing well-structured and grammatically correct text",
"Assistant skilled in creating engaging and creative content for social media"
],
"Example": [
"Create a Streamlit app with docstrings.",
"Tell me a story about AI.",
"Write an technical essay on LLM's.",
"Craft a social media post with exciting news."
]
})
# Remove the index
model_table.set_index("Model", inplace=True)
# Display the table
st.table(model_table)
# Initialize the session state if not already done
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
current_hour = datetime.now().hour
day_part = part_of_day(current_hour)
greeting_message = f"Good {day_part}! I\'m your AI assistant. I can help you with a variety of tasks."
st.session_state.messages = [add_message("assistant", "greeting")]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"], unsafe_allow_html=True)
# Capture the user's input and generate the AI's response
if prompt := st.chat_input("Enter your prompt or 'helpme' or '!' for commands."):
model, role = meta_model(prompt, model_roles)
if prompt.lower().strip() in ["helpme", "!"]:
st.markdown(
"""
<style>
.model-table {
width: 100%;
border-collapse: collapse;
}
.model-table th, .model-table td {
border: 1px solid #dddddd;
padding: 8px;
text-align: left;
}
.model-table tr:nth-child(even) {
background-color: #slategray;
}
.keyword {
font-weight: bold;
}
.model-name {
color: seaGreen;
font-weight: bold;
}
.example-word {
color: CornflowerBlue;
font-weight: bold;
}
</style>
<table class="model-table">
<tr>
<th>Model</th>
<th>First Keyword</th>
<th>Other Keywords</th>
<th>Role</th>
</tr>
<tr>
<td class="model-name">GPT-3.5-Turbo</td>
<td class="keyword">code</td>
<td>programming, algorithm</td>
<td>Assistant specialized in generating Python code</td>
</tr>
<tr>
<td colspan="4"><span class="example-word">Example:</span> Code a Streamlit app with docstrings.</td>
</tr>
<tr>
<td class="model-name">GPT-3.5-Turbo</td>
<td class="keyword">story</td>
<td>narrative, tale, fable</td>
<td>Assistant that can weave intricate and compelling stories</td>
</tr>
<tr>
<td colspan="4"><span class="example-word">Example:</span> Tell me a <span class="keyword">story</span> about AI.</td>
</tr>
<tr>
<td class="model-name">GPT-4</td>
<td class="keyword">essay</td>
<td>paper, report, article</td>
<td>Assistant that excels at writing well-structured and grammatically correct text</td>
</tr>
<tr>
<td colspan="4"><span class="example-word">Example:</span> Write an technical <span class="keyword">essay</span> on LLM's.</td>
</tr>
<tr>
<td class="model-name">GPT-4</td>
<td class="keyword">social media</td>
<td>post, content, engaging</td>
<td>Assistant skilled in creating engaging and creative content for social media</td>
</tr>
<tr>
<td colspan="4"><span class="example-word">Example:</span> Craft an <span class="keyword">engaging social media</span> post with exciting news.</td>
</tr>
</table>
""",
unsafe_allow_html=True
)
else:
st.session_state.messages.append(add_message("user", prompt))
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
# Model selection explanation
model_explanations = {
"gpt-3.5-turbo": "The 'GPT-3.5-Turbo' model was chosen because it is optimized for generating application code and storytelling.",
"gpt-4": "The 'GPT-4' model was chosen because it excels at writing well-structured and grammatically correct text, and creating engaging and creative content for social media."
}
full_response = generate_response(model, st.session_state.messages, message_placeholder)
message_placeholder.markdown(full_response)
# For demonstration purposes, we print the model explanation
st.info(model_explanations[model])
st.session_state.messages.append(add_message("assistant", full_response))
#iapjiw | [
"content"
] |
2024-01-10 | Subphase/rss-gpt | rss-gpt.py | import feedparser
import requests
import openai
import time
from os import getenv
from bs4 import BeautifulSoup
from typing import List, Dict
OPENAI_API_KEY = getenv("OPENAI_API_KEY")
class Article:
def __init__(self, title:str, url:str, summary:str) -> None:
self.title = title
self.url = url
self.summary = summary
self.text = self.scrape_website()
def __repr__(self) -> str:
return f"{self.title} - {self.url}"
# Scrape the self.url and return the text
def scrape_website(self) -> str:
# Send an HTTP GET request to the website
response = requests.get(self.url)
response.raise_for_status() # Raise an exception if request fails
# Create a Beautiful Soup object to parse the HTML content
soup = BeautifulSoup(response.content, 'html.parser')
# Find specific elements and extract the text
text_elements = soup.find_all('p')
# Clean up the extracted text, if necessary
cleaned_text = [element.get_text().strip() for element in text_elements]
return ' '.join(cleaned_text)
def write_to_file(self) -> None:
with open('articles.md', 'a') as f:
f.write(f'### {self.title} \n')
f.write(f'{self.text} \n\n')
# ChatGPT API query class
class Ai:
def __init__(self) -> None:
openai.api_key = OPENAI_API_KEY
self.model = "gpt-3.5-turbo"
self.sys_role = {"role": "system", "content": "You are a cybersecurity professional who is tasked with staying up to date on the latest news. You are reading the following article and want to know how important it is to your job. The most important articles have news about the latest cyber attacks, new vulnerabilities, and new tools."}
def chat_prompt(self, prompt):
response = openai.ChatCompletion.create(
model = self.model,
messages = [
self.sys_role,
{"role": "user", "content": prompt}
]
)
if 'choices' in response and len(response.choices) > 0:
return response.choices[0].message.content
else:
return ""
def rate_importance(self, article: Article) -> int:
prompt = f'''
Rate the following article on a scale of 1-10, with 10 being the most important and 1 being the least important.\n
``` {article.text} ```\n
The response should ONLY include a number between 1 and 10 and nothing else'''
response = self.chat_prompt(prompt)
return response
def main() -> None:
# Guard clause to ensure OPENAI_API_KEY is set
if not OPENAI_API_KEY: raise Exception("OPENAI_API_KEY not set")
websites = ['https://feeds.feedburner.com/TheHackersNews?format=xml']
rss_feed = collect_rss_feed(websites)
# limit rss feed to 6 articles for debugging
#rss_feed = rss_feed[:1]
articles = instantiate_articles_factory(rss_feed)
articles = rate_importance(articles)
    # articles are sorted by importance, so stop at the first article rated below 8
    for article in articles:
        if article.importance < 8: break
article.write_to_file()
def collect_rss_feed(feed_urls: List[str]) -> List[Dict[str, str]]:
latest_posts = []
for url in feed_urls:
feed = feedparser.parse(url)
for entry in feed.entries:
post = {
'title': entry.title,
'url': entry.link,
'summary': entry.summary
}
latest_posts.append(post)
return latest_posts
# function to instantiate articles from rss feed
def instantiate_articles_factory(rss_feed: List[str]) -> List[Article]:
return [Article(title = item['title'], url = item['url'], summary = item['summary']) for item in rss_feed]
# function to sort articles by importance
def sort_articles(articles: List[Article]) -> List[Article]:
return sorted(articles, key=lambda x: x.importance, reverse=True)
# function to rate the importance of each article (uses OpenAI API)
def rate_importance(articles: List[Article]) -> List[Article]:
ai = Ai()
for article in articles:
        attempt_count = 0
        while True:
            attempt_count += 1
            try:
                article.importance = int(ai.rate_importance(article))
                break
            except Exception:
                print(f"Error in rating article {article.title}. Retry attempt {attempt_count}")
                if attempt_count >= 3:
                    article.importance = 0  # give up on this article after 3 failed attempts
                    break
                time.sleep(10)  # wait for 10 secs and try again
        time.sleep(20) # sleep for 20 seconds to avoid OpenAI API rate limit
return sort_articles(articles)
if __name__ == '__main__':
main()
| [
"You are a cybersecurity professional who is tasked with staying up to date on the latest news. You are reading the following article and want to know how important it is to your job. The most important articles have news about the latest cyber attacks, new vulnerabilities, and new tools."
] |
2024-01-10 | Anjali-Anju7/Own-Chat-GPT | ChtGPT.py | import openai
import os
import sys
question = input("Q: what is your question/instruction\n")
while True:
try:
openai.api_key = os.environ['OPENAI_API_KEY']
except KeyError:
sys.stderr.write("""
You haven't set up your API key yet.
If you don't have an API key yet, visit:
https://platform.openai.com/signup
1. Make an account or sign in
2. Click "View API Keys" from the top right menu.
3. Click "Create new secret key"
Then, open the Secrets Tool and add OPENAI_API_KEY as a secret.
""")
exit(1)
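    # send the question to the chat model and print its answer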
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
#{"role": "user", "content": "Who won the world series in 2020?"},
#{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
{"role": "user", "content": question}
]
)
    output = response['choices'][0]['message']['content']
print("A:",output,"\n")
question = input("Q: what is your question/instruction\n") | [
"You are a helpful assistant."
] |
2024-01-10 | Shubodh/GibsonEnv | gibson~utils~fuse_policy2.py | import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm, lnlstm
from baselines.common.distributions import make_pdtype
import gym.spaces
import baselines.common.tf_util as U
## Fuse policy using PPO2 from OpenAI Baseline
class FusePolicy(object):
def __init__(self, sess, ob_space, sensor_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
if isinstance(ac_space, gym.spaces.Discrete):
self.is_discrete = True
else:
self.is_discrete = False
ob_shape = (nbatch,) + ob_space.shape
ob_sensor_shape = (nbatch,) + sensor_space.shape
if self.is_discrete:
actdim = ac_space.n
else:
actdim = ac_space.shape[0]
X_camera = tf.placeholder(tf.uint8, ob_shape, name='Ob_camera') #obs
X_sensor = tf.placeholder(tf.float32, ob_sensor_shape, name='Ob_sensor')
self.pdtype = make_pdtype(ac_space)
with tf.variable_scope("model", reuse=reuse):
h_camera = conv(tf.cast(X_camera, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2_camera = conv(h_camera, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3_camera = conv(h2_camera, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3_camera = conv_to_fc(h3_camera)
h4_camera = fc(h3_camera, 'fc1', nh=512, init_scale=np.sqrt(2))
pi_camera = fc(h4_camera, 'pi', actdim, init_scale=0.01)
vf_camera = fc(h4_camera, 'v', 1)[:,0]
self.pd = self.pdtype.pdfromflat(pi_camera)
with tf.variable_scope("model_sensor", reuse=reuse):
h1_sensor = fc(X_sensor, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2_sensor = fc(h1_sensor, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi_sensor = fc(h2_sensor, 'pi', actdim, init_scale=0.01)
h1_sensor = fc(X_sensor, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2_sensor = fc(h1_sensor, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf_sensor = fc(h2_sensor, 'vf', 1)[:,0]
with tf.variable_scope("model", reuse=reuse):
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
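            # combine the camera and sensor branch outputs, then apply the final pi / vf heads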
X = tf.concat([X_camera, X_sensor], 0)
pi_full = tf.concat([pi_camera, pi_sensor], 0)
pi = fc(pi_full, 'pi', actdim, init_scale=0.01)
vf_full = tf.concat([vf_camera, vf_sensor], 0)
vf = fc(vf_full, 'vf', 1)[:,0]
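            # concatenate the action mean with a state-independent log std to parameterize a diagonal Gaussian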
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, ob_sensor, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X_camera:ob, X_sensor: ob_sensor})
return a, v, self.initial_state, neglogp
def value(ob, ob_sensor, *_args, **_kwargs):
return sess.run(vf, {X_camera:ob, X_sensor: ob_sensor})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class CnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False, is_discrete=False): #pylint: disable=W0613
if isinstance(ac_space, gym.spaces.Discrete):
self.is_discrete = True
else:
self.is_discrete = False
print("nbatch%d" % (nbatch))
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc)
if self.is_discrete:
nact = ac_space.n
else:
nact = ac_space.shape[0]
X = tf.placeholder(tf.float32, ob_shape) #obs
with tf.variable_scope("model", reuse=reuse):
h = conv(X, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2 = conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3 = conv_to_fc(h3)
h4 = fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))
h5 = fc(h3, 'fc_vf', nh=512, init_scale=np.sqrt(2))
pi = fc(h4, 'pi', nact, init_scale=0.05)
vf = fc(h5, 'v', 1, act=lambda x: x)[:,0]
if not self.is_discrete:
logstd = tf.get_variable(name="logstd", shape=[1, nact],
initializer=tf.zeros_initializer())
self.pdtype = make_pdtype(ac_space)
if self.is_discrete:
self.pd = self.pdtype.pdfromflat(pi)
a0 = self.pd.sample()
else:
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob.astype(np.float32)/255.0})
assert(a.shape[0] == 1) # make sure a = a[0] don't throw away actions
a = a[0]
print(a,v, neglogp)
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class MlpPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
if isinstance(ac_space, gym.spaces.Discrete):
self.is_discrete = True
else:
self.is_discrete = False
ob_shape = (nbatch,) + ob_space.shape
if self.is_discrete:
actdim = ac_space.n
else:
actdim = ac_space.shape[0]
X = tf.placeholder(tf.float32, ob_shape, name='Ob') #obs
with tf.variable_scope("model", reuse=reuse):
h1 = fc(X, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi = fc(h2, 'pi', actdim, init_scale=0.01)
h1 = fc(X, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf = fc(h2, 'vf', 1)[:,0]
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pdtype = make_pdtype(ac_space)
if self.is_discrete:
self.pd = self.pdtype.pdfromflat(pi)
a0 = self.pd.sample()
else:
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
a = a[0]
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class MlpPolicy2(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
if isinstance(ac_space, gym.spaces.Discrete):
self.is_discrete = True
else:
self.is_discrete = False
ob_shape = (nbatch,) + ob_space.shape
if self.is_discrete:
actdim = ac_space.n
else:
actdim = ac_space.shape[0]
X = tf.placeholder(tf.float32, ob_shape, name='Ob') #obs
with tf.variable_scope("model", reuse=reuse):
X2 = conv_to_fc(X)
h1 = fc(X2, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi = fc(h2, 'pi', actdim, init_scale=0.01)
h1 = fc(X2, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf = fc(h2, 'vf', 1)[:,0]
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pdtype = make_pdtype(ac_space)
if self.is_discrete:
self.pd = self.pdtype.pdfromflat(pi)
a0 = self.pd.sample()
else:
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob.astype(np.float32) / 255.0})
a = a[0]
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class CnnPolicy2(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
if isinstance(ac_space, gym.spaces.Discrete):
self.is_discrete = True
else:
self.is_discrete = False
ob_shape = (nbatch,) + ob_space.shape
if self.is_discrete:
actdim = ac_space.n
else:
actdim = ac_space.shape[0]
X = tf.placeholder(tf.float32, ob_shape, name='Ob') #obs
with tf.variable_scope("model", reuse=reuse):
h_c = conv(X, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2_c = conv(h_c, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
            h2_c = conv_to_fc(h2_c)
h1 = fc(h2_c, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi = fc(h2, 'pi', actdim, init_scale=0.01)
h1 = fc(h2_c, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf = fc(h2, 'vf', 1)[:,0]
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pdtype = make_pdtype(ac_space)
if self.is_discrete:
self.pd = self.pdtype.pdfromflat(pi)
a0 = self.pd.sample()
else:
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob.astype(np.float32) / 255.0})
a = a[0]
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
| [] |
2024-01-10 | klppl/sopel-modules | aina.py | import sopel
import openai
import requests
import textwrap
import apikeys
@sopel.module.commands('aina')
def aina(bot, trigger):
# Set up the OpenAI API client - https://beta.openai.com/account/api-keys
openai.api_key = apikeys.OPENAI_API_KEY
model_engine = "text-davinci-003"
prompt = trigger
completion = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
response = completion.choices[0].text
response = textwrap.fill(response, width=100)
response = response.encode("utf-8")
response = requests.post("https://dumpinen.com", data=response)
aina_output = response.text
bot.say(f'AIna: {aina_output}')
| [] |
2024-01-10 | mithunpaul08/transformers | src~transformers~tokenization_auto.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Tokenizer class. """
import logging
from collections import OrderedDict
from transformers.configuration_mobilebert import MobileBertConfig
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
ElectraConfig,
FlaubertConfig,
GPT2Config,
LongformerConfig,
MBartConfig,
OpenAIGPTConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
)
from .configuration_marian import MarianConfig
from .configuration_utils import PretrainedConfig
from .tokenization_albert import AlbertTokenizer
from .tokenization_bart import BartTokenizer, MBartTokenizer
from .tokenization_bert import BertTokenizer, BertTokenizerFast
from .tokenization_bert_japanese import BertJapaneseTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_distilbert import DistilBertTokenizer, DistilBertTokenizerFast
from .tokenization_electra import ElectraTokenizer, ElectraTokenizerFast
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from .tokenization_longformer import LongformerTokenizer
from .tokenization_marian import MarianTokenizer
from .tokenization_mobilebert import MobileBertTokenizer, MobileBertTokenizerFast
from .tokenization_openai import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from .tokenization_reformer import ReformerTokenizer
from .tokenization_retribert import RetriBertTokenizer, RetriBertTokenizerFast
from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer, TransfoXLTokenizerFast
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import XLNetTokenizer
logger = logging.getLogger(__name__)
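# NOTE: the order of this mapping matters. from_pretrained() returns the first entry whose
# config class the model's config is an instance of, so config subclasses must be listed
# before their parent classes.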
TOKENIZER_MAPPING = OrderedDict(
[
(RetriBertConfig, (RetriBertTokenizer, RetriBertTokenizerFast)),
(T5Config, (T5Tokenizer, None)),
(MobileBertConfig, (MobileBertTokenizer, MobileBertTokenizerFast)),
(DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)),
(AlbertConfig, (AlbertTokenizer, None)),
(CamembertConfig, (CamembertTokenizer, None)),
(MBartConfig, (MBartTokenizer, None)),
(XLMRobertaConfig, (XLMRobertaTokenizer, None)),
(MarianConfig, (MarianTokenizer, None)),
(BartConfig, (BartTokenizer, None)),
(LongformerConfig, (LongformerTokenizer, None)),
(RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)),
(ReformerConfig, (ReformerTokenizer, None)),
(ElectraConfig, (ElectraTokenizer, ElectraTokenizerFast)),
(BertConfig, (BertTokenizer, BertTokenizerFast)),
(OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)),
(GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
(TransfoXLConfig, (TransfoXLTokenizer, TransfoXLTokenizerFast)),
(XLNetConfig, (XLNetTokenizer, None)),
(FlaubertConfig, (FlaubertTokenizer, None)),
(XLMConfig, (XLMTokenizer, None)),
(CTRLConfig, (CTRLTokenizer, None)),
]
)
class AutoTokenizer:
r""":class:`~transformers.AutoTokenizer` is a generic tokenizer class
that will be instantiated as one of the tokenizer classes of the library
when created with the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)`
class method.
The `from_pretrained()` method takes care of returning the correct tokenizer class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string:
- `t5`: T5Tokenizer (T5 model)
- `distilbert`: DistilBertTokenizer (DistilBert model)
- `albert`: AlbertTokenizer (ALBERT model)
- `camembert`: CamembertTokenizer (CamemBERT model)
- `xlm-roberta`: XLMRobertaTokenizer (XLM-RoBERTa model)
- `longformer`: LongformerTokenizer (AllenAI Longformer model)
- `roberta`: RobertaTokenizer (RoBERTa model)
- `bert`: BertTokenizer (Bert model)
- `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)
- `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)
- `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)
- `xlnet`: XLNetTokenizer (XLNet model)
- `xlm`: XLMTokenizer (XLM model)
- `ctrl`: CTRLTokenizer (Salesforce CTRL model)
- `electra`: ElectraTokenizer (Google ELECTRA model)
This class cannot be instantiated using `__init__()` (throw an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoTokenizer is designed to be instantiated "
"using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r""" Instantiate one of the tokenizer classes of the library
from a pre-trained model vocabulary.
The tokenizer class to instantiate is selected
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string:
- `t5`: T5Tokenizer (T5 model)
- `distilbert`: DistilBertTokenizer (DistilBert model)
- `albert`: AlbertTokenizer (ALBERT model)
- `camembert`: CamembertTokenizer (CamemBERT model)
- `xlm-roberta`: XLMRobertaTokenizer (XLM-RoBERTa model)
- `longformer`: LongformerTokenizer (AllenAI Longformer model)
- `roberta`: RobertaTokenizer (RoBERTa model)
- `bert-base-japanese`: BertJapaneseTokenizer (Bert model)
- `bert`: BertTokenizer (Bert model)
- `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)
- `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)
- `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)
- `xlnet`: XLNetTokenizer (XLNet model)
- `xlm`: XLMTokenizer (XLM model)
- `ctrl`: CTRLTokenizer (Salesforce CTRL model)
- `electra`: ElectraTokenizer (Google ELECTRA model)
Params:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
- (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the vocabulary files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
                Do not delete incompletely received files. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
use_fast: (`optional`) boolean, default False:
Indicate if transformers should try to load the fast version of the tokenizer (True) or use the Python one (False).
inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.
Examples::
# Download vocabulary from S3 and cache.
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
# Download vocabulary from S3 (user-uploaded) and cache.
tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/')
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
if "bert-base-japanese" in str(pretrained_model_name_or_path):
return BertJapaneseTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
use_fast = kwargs.pop("use_fast", False)
for config_class, (tokenizer_class_py, tokenizer_class_fast) in TOKENIZER_MAPPING.items():
if isinstance(config, config_class):
if tokenizer_class_fast and use_fast:
logger.info("tokenizer is of class tokenizer_class_fast")
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
logger.info(f"tokenizer is of class tokenizer_class_fast. going to return tokenizer whose name is {pretrained_model_name_or_path}")
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
raise ValueError(
"Unrecognized configuration class {} to build an AutoTokenizer.\n"
"Model type should be one of {}.".format(
config.__class__, ", ".join(c.__name__ for c in TOKENIZER_MAPPING.keys())
)
)
| [] |
2024-01-10 | pingsutw/ray | rllib~examples~env~cliff_walking_wall_env.py | import gym
from gym import spaces
ACTION_UP = 0
ACTION_RIGHT = 1
ACTION_DOWN = 2
ACTION_LEFT = 3
class CliffWalkingWallEnv(gym.Env):
"""Modified version of the CliffWalking environment from OpenAI Gym
with walls instead of a cliff.
### Description
The board is a 4x12 matrix, with (using NumPy matrix indexing):
- [3, 0] or obs==36 as the start at bottom-left
- [3, 11] or obs==47 as the goal at bottom-right
- [3, 1..10] or obs==37...46 as the cliff at bottom-center
An episode terminates when the agent reaches the goal.
### Actions
There are 4 discrete deterministic actions:
- 0: move up
- 1: move right
- 2: move down
- 3: move left
You can also use the constants ACTION_UP, ACTION_RIGHT, ... defined above.
### Observations
There are 3x12 + 2 possible states, not including the walls. If an action
would move an agent into one of the walls, it simply stays in the same position.
### Reward
Each time step incurs -1 reward, except reaching the goal which gives +10 reward.
"""
def __init__(self, seed=42) -> None:
self.observation_space = spaces.Discrete(48)
self.action_space = spaces.Discrete(4)
self.observation_space.seed(seed)
self.action_space.seed(seed)
def reset(self):
self.position = 36
return self.position
def step(self, action):
x = self.position // 12
y = self.position % 12
# UP
if action == ACTION_UP:
x = max(x - 1, 0)
# RIGHT
elif action == ACTION_RIGHT:
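            # blocked at the start square (36): stepping right would enter the wall at 37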
if self.position != 36:
y = min(y + 1, 11)
# DOWN
elif action == ACTION_DOWN:
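            # blocked from states 25-34 (the row directly above the wall cells 37-46)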
if self.position < 25 or self.position > 34:
x = min(x + 1, 3)
# LEFT
elif action == ACTION_LEFT:
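            # blocked at the goal square (47): stepping left would enter the wall at 46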
if self.position != 47:
y = max(y - 1, 0)
else:
raise ValueError(f"action {action} not in {self.action_space}")
self.position = x * 12 + y
done = self.position == 47
reward = -1 if not done else 10
return self.position, reward, done, {}
| [] |
2024-01-10 | zeroAska/tianshou | tianshou~env~venvs.py | from typing import Any, Callable, List, Optional, Tuple, Union
import gym
import numpy as np
from tianshou.env.worker import (
DummyEnvWorker,
EnvWorker,
RayEnvWorker,
SubprocEnvWorker,
)
from tianshou.utils import RunningMeanStd
class BaseVectorEnv(gym.Env):
"""Base class for vectorized environments wrapper.
Usage:
::
env_num = 8
envs = DummyVectorEnv([lambda: gym.make(task) for _ in range(env_num)])
assert len(envs) == env_num
It accepts a list of environment generators. In other words, an environment
generator ``efn`` of a specific task means that ``efn()`` returns the
environment of the given task, for example, ``gym.make(task)``.
All of the VectorEnv must inherit :class:`~tianshou.env.BaseVectorEnv`.
Here are some other usages:
::
envs.seed(2) # which is equal to the next line
envs.seed([2, 3, 4, 5, 6, 7, 8, 9]) # set specific seed for each env
obs = envs.reset() # reset all environments
obs = envs.reset([0, 5, 7]) # reset 3 specific environments
obs, rew, done, info = envs.step([1] * 8) # step synchronously
envs.render() # render all environments
envs.close() # close all environments
.. warning::
If you use your own environment, please make sure the ``seed`` method
is set up properly, e.g.,
::
def seed(self, seed):
np.random.seed(seed)
Otherwise, the outputs of these envs may be the same with each other.
:param env_fns: a list of callable envs, ``env_fns[i]()`` generates the i-th env.
:param worker_fn: a callable worker, ``worker_fn(env_fns[i])`` generates a
worker which contains the i-th env.
:param int wait_num: use in asynchronous simulation if the time cost of
``env.step`` varies with time and synchronously waiting for all
environments to finish a step is time-wasting. In that case, we can
return when ``wait_num`` environments finish a step and keep on
simulation in these environments. If ``None``, asynchronous simulation
is disabled; else, ``1 <= wait_num <= env_num``.
:param float timeout: use in asynchronous simulation same as above, in each
vectorized step it only deal with those environments spending time
within ``timeout`` seconds.
:param bool norm_obs: Whether to track mean/std of data and normalize observation
on return. For now, observation normalization only support observation of
type np.ndarray.
:param obs_rms: class to track mean&std of observation. If not given, it will
initialize a new one. Usually in envs that is used to evaluate algorithm,
obs_rms should be passed in. Default to None.
:param bool update_obs_rms: Whether to update obs_rms. Default to True.
"""
def __init__(
self,
env_fns: List[Callable[[], gym.Env]],
worker_fn: Callable[[Callable[[], gym.Env]], EnvWorker],
wait_num: Optional[int] = None,
timeout: Optional[float] = None,
norm_obs: bool = False,
obs_rms: Optional[RunningMeanStd] = None,
update_obs_rms: bool = True,
) -> None:
self._env_fns = env_fns
# A VectorEnv contains a pool of EnvWorkers, which corresponds to
# interact with the given envs (one worker <-> one env).
self.workers = [worker_fn(fn) for fn in env_fns]
self.worker_class = type(self.workers[0])
assert issubclass(self.worker_class, EnvWorker)
assert all([isinstance(w, self.worker_class) for w in self.workers])
self.env_num = len(env_fns)
self.wait_num = wait_num or len(env_fns)
assert 1 <= self.wait_num <= len(env_fns), \
f"wait_num should be in [1, {len(env_fns)}], but got {wait_num}"
self.timeout = timeout
assert self.timeout is None or self.timeout > 0, \
f"timeout is {timeout}, it should be positive if provided!"
self.is_async = self.wait_num != len(env_fns) or timeout is not None
self.waiting_conn: List[EnvWorker] = []
# environments in self.ready_id is actually ready
# but environments in self.waiting_id are just waiting when checked,
# and they may be ready now, but this is not known until we check it
# in the step() function
self.waiting_id: List[int] = []
# all environments are ready in the beginning
self.ready_id = list(range(self.env_num))
self.is_closed = False
# initialize observation running mean/std
self.norm_obs = norm_obs
self.update_obs_rms = update_obs_rms
self.obs_rms = RunningMeanStd() if obs_rms is None and norm_obs else obs_rms
self.__eps = np.finfo(np.float32).eps.item()
def _assert_is_not_closed(self) -> None:
assert not self.is_closed, \
f"Methods of {self.__class__.__name__} cannot be called after close."
def __len__(self) -> int:
"""Return len(self), which is the number of environments."""
return self.env_num
def __getattribute__(self, key: str) -> Any:
"""Switch the attribute getter depending on the key.
Any class who inherits ``gym.Env`` will inherit some attributes, like
``action_space``. However, we would like the attribute lookup to go straight
into the worker (in fact, this vector env's action_space is always None).
"""
if key in [
'metadata', 'reward_range', 'spec', 'action_space', 'observation_space'
]: # reserved keys in gym.Env
return self.get_env_attr(key)
else:
return super().__getattribute__(key)
def get_env_attr(
self,
key: str,
id: Optional[Union[int, List[int], np.ndarray]] = None
) -> List[Any]:
"""Get an attribute from the underlying environments.
If id is an int, retrieve the attribute denoted by key from the environment
underlying the worker at index id. The result is returned as a list with one
element. Otherwise, retrieve the attribute for all workers at indices id and
return a list that is ordered correspondingly to id.
:param str key: The key of the desired attribute.
:param id: Indice(s) of the desired worker(s). Default to None for all env_id.
:return list: The list of environment attributes.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
return [self.workers[j].get_env_attr(key) for j in id]
def set_env_attr(
self,
key: str,
value: Any,
id: Optional[Union[int, List[int], np.ndarray]] = None
) -> None:
"""Set an attribute in the underlying environments.
If id is an int, set the attribute denoted by key from the environment
underlying the worker at index id to value.
Otherwise, set the attribute for all workers at indices id.
:param str key: The key of the desired attribute.
:param Any value: The new value of the attribute.
:param id: Indice(s) of the desired worker(s). Default to None for all env_id.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
for j in id:
self.workers[j].set_env_attr(key, value)
def _wrap_id(
self,
id: Optional[Union[int, List[int], np.ndarray]] = None
) -> Union[List[int], np.ndarray]:
if id is None:
return list(range(self.env_num))
return [id] if np.isscalar(id) else id # type: ignore
def _assert_id(self, id: Union[List[int], np.ndarray]) -> None:
for i in id:
assert i not in self.waiting_id, \
f"Cannot interact with environment {i} which is stepping now."
assert i in self.ready_id, \
f"Can only interact with ready environments {self.ready_id}."
def reset(
self, id: Optional[Union[int, List[int], np.ndarray]] = None
) -> np.ndarray:
"""Reset the state of some envs and return initial observations.
If id is None, reset the state of all the environments and return
initial observations, otherwise reset the specific environments with
the given id, either an int or a list.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
# send(None) == reset() in worker
for i in id:
self.workers[i].send(None)
obs_list = [self.workers[i].recv() for i in id]
try:
obs = np.stack(obs_list)
except ValueError: # different len(obs)
obs = np.array(obs_list, dtype=object)
if self.obs_rms and self.update_obs_rms:
self.obs_rms.update(obs)
return self.normalize_obs(obs)
def step(
self,
action: np.ndarray,
id: Optional[Union[int, List[int], np.ndarray]] = None
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Run one timestep of some environments' dynamics.
If id is None, run one timestep of all the environments’ dynamics;
otherwise run one timestep for some environments with given id, either
an int or a list. When the end of episode is reached, you are
responsible for calling reset(id) to reset this environment’s state.
Accept a batch of action and return a tuple (batch_obs, batch_rew,
batch_done, batch_info) in numpy format.
:param numpy.ndarray action: a batch of action provided by the agent.
:return: A tuple including four items:
* ``obs`` a numpy.ndarray, the agent's observation of current environments
* ``rew`` a numpy.ndarray, the amount of rewards returned after \
previous actions
* ``done`` a numpy.ndarray, whether these episodes have ended, in \
which case further step() calls will return undefined results
* ``info`` a numpy.ndarray, contains auxiliary diagnostic \
information (helpful for debugging, and sometimes learning)
For the async simulation:
Provide the given action to the environments. The action sequence
should correspond to the ``id`` argument, and the ``id`` argument
should be a subset of the ``env_id`` in the last returned ``info``
(initially they are env_ids of all the environments). If action is
None, fetch unfinished step() calls instead.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if not self.is_async:
assert len(action) == len(id)
for i, j in enumerate(id):
self.workers[j].send(action[i])
result = []
for j in id:
obs, rew, done, info = self.workers[j].recv()
info["env_id"] = j
result.append((obs, rew, done, info))
else:
if action is not None:
self._assert_id(id)
assert len(action) == len(id)
for act, env_id in zip(action, id):
self.workers[env_id].send(act)
self.waiting_conn.append(self.workers[env_id])
self.waiting_id.append(env_id)
self.ready_id = [x for x in self.ready_id if x not in id]
ready_conns: List[EnvWorker] = []
while not ready_conns:
ready_conns = self.worker_class.wait(
self.waiting_conn, self.wait_num, self.timeout
)
result = []
for conn in ready_conns:
waiting_index = self.waiting_conn.index(conn)
self.waiting_conn.pop(waiting_index)
env_id = self.waiting_id.pop(waiting_index)
obs, rew, done, info = conn.recv()
info["env_id"] = env_id
result.append((obs, rew, done, info))
self.ready_id.append(env_id)
obs_list, rew_list, done_list, info_list = zip(*result)
try:
obs_stack = np.stack(obs_list)
except ValueError: # different len(obs)
obs_stack = np.array(obs_list, dtype=object)
rew_stack, done_stack, info_stack = map(
np.stack, [rew_list, done_list, info_list]
)
if self.obs_rms and self.update_obs_rms:
self.obs_rms.update(obs_stack)
return self.normalize_obs(obs_stack), rew_stack, done_stack, info_stack
def seed(
self,
seed: Optional[Union[int, List[int]]] = None
) -> List[Optional[List[int]]]:
"""Set the seed for all environments.
Accept ``None``, an int (which will extend ``i`` to
``[i, i + 1, i + 2, ...]``) or a list.
:return: The list of seeds used in this env's random number generators.
The first value in the list should be the "main" seed, or the value
which a reproducer pass to "seed".
"""
self._assert_is_not_closed()
seed_list: Union[List[None], List[int]]
if seed is None:
seed_list = [seed] * self.env_num
elif isinstance(seed, int):
seed_list = [seed + i for i in range(self.env_num)]
else:
seed_list = seed
return [w.seed(s) for w, s in zip(self.workers, seed_list)]
def render(self, **kwargs: Any) -> List[Any]:
"""Render all of the environments."""
self._assert_is_not_closed()
if self.is_async and len(self.waiting_id) > 0:
raise RuntimeError(
f"Environments {self.waiting_id} are still stepping, cannot "
"render them now."
)
return [w.render(**kwargs) for w in self.workers]
def close(self) -> None:
"""Close all of the environments.
This function will be called only once (if not, it will be called during
garbage collected). This way, ``close`` of all workers can be assured.
"""
self._assert_is_not_closed()
for w in self.workers:
w.close()
self.is_closed = True
def normalize_obs(self, obs: np.ndarray) -> np.ndarray:
"""Normalize observations by statistics in obs_rms."""
if self.obs_rms and self.norm_obs:
clip_max = 10.0 # this magic number is from openai baselines
# see baselines/common/vec_env/vec_normalize.py#L10
obs = (obs - self.obs_rms.mean) / np.sqrt(self.obs_rms.var + self.__eps)
obs = np.clip(obs, -clip_max, clip_max)
return obs
class DummyVectorEnv(BaseVectorEnv):
"""Dummy vectorized environment wrapper, implemented in for-loop.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
super().__init__(env_fns, DummyEnvWorker, **kwargs)
class SubprocVectorEnv(BaseVectorEnv):
"""Vectorized environment wrapper based on subprocess.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
def worker_fn(fn: Callable[[], gym.Env]) -> SubprocEnvWorker:
return SubprocEnvWorker(fn, share_memory=False)
super().__init__(env_fns, worker_fn, **kwargs)
class ShmemVectorEnv(BaseVectorEnv):
"""Optimized SubprocVectorEnv with shared buffers to exchange observations.
ShmemVectorEnv has exactly the same API as SubprocVectorEnv.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
def worker_fn(fn: Callable[[], gym.Env]) -> SubprocEnvWorker:
return SubprocEnvWorker(fn, share_memory=True)
super().__init__(env_fns, worker_fn, **kwargs)
class RayVectorEnv(BaseVectorEnv):
"""Vectorized environment wrapper based on ray.
This is a choice to run distributed environments in a cluster.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
try:
import ray
except ImportError as exception:
raise ImportError(
"Please install ray to support RayVectorEnv: pip install ray"
) from exception
if not ray.is_initialized():
ray.init()
super().__init__(env_fns, RayEnvWorker, **kwargs)
| [] |
2024-01-10 | davewotton85/dave | app6.py | import os
from flask import Flask, request, render_template
from flask_wtf import FlaskForm
from wtforms import FileField, SelectMultipleField, SubmitField
from werkzeug.utils import secure_filename
from docx import Document
import openai
from flask_wtf.file import FileRequired
from flask_bootstrap import Bootstrap
from docx.shared import RGBColor
app = Flask(__name__)
app.config['SECRET_KEY'] = 'ef1ed9ef3d738bb2c12bdc436828ac6b'
Bootstrap(app)
openai.api_key = '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'
# Define form
class UploadForm(FlaskForm):
file = FileField('Upload File', validators=[FileRequired()])
language = SelectMultipleField('Select Languages', choices=[
('Mandarin Chinese', 'Mandarin Chinese'),
('Spanish', 'Spanish'),
# ... add all the other languages here ...
('Simplified Chinese', 'Simplified Chinese'),
('Traditional Chinese', 'Traditional Chinese')
])
submit = SubmitField('Translate')
@app.route('/', methods=['GET', 'POST'])
def index():
form = UploadForm()
if form.validate_on_submit():
filename = secure_filename(form.file.data.filename)
upload_folder = 'uploads'
if not os.path.exists(upload_folder):
os.makedirs(upload_folder)
form.file.data.save(os.path.join(upload_folder, filename))
selected_languages = form.language.data # get selected languages
return process_file(os.path.join(upload_folder, filename), selected_languages)
return render_template('index.html', form=form)
def process_file(filepath, languages):
document = Document(filepath)
translation_doc = Document()
for language in languages:
translation_doc.add_heading(language, level=1)
for paragraph in document.paragraphs:
# Skip paragraph if it is empty or only contains whitespace
if not paragraph.text.strip():
continue
# Add the original English text in red
original_paragraph = translation_doc.add_paragraph()
original_paragraph.add_run(paragraph.text).font.color.rgb = RGBColor(255, 0, 0) # RGB values for red
translation = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": 'Translate the following English text to {}: "{}"'.format(language, paragraph.text)}
]
)
translation_doc.add_paragraph(translation['choices'][0]['message']['content'])
translation_doc.add_paragraph()
translation_doc.save('translated_doc.docx')
return 'Translation completed'
if __name__ == "__main__":
app.run(debug=True)
| [
"Translate the following English text to {}: \"{}\""
] |
2024-01-10 | Baichenjia/PBRL | d4rl~rlkit~torch~pytorch_util.py | import torch
import numpy as np
def soft_update_from_to(source, target, tau):
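    # Polyak averaging: target <- (1 - tau) * target + tau * source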
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau
)
def copy_model_params_from_to(source, target):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def fanin_init(tensor):
size = tensor.size()
if len(size) == 2:
fan_in = size[0]
elif len(size) > 2:
fan_in = np.prod(size[1:])
else:
raise Exception("Shape must be have dimension at least 2.")
bound = 1. / np.sqrt(fan_in)
return tensor.data.uniform_(-bound, bound)
def fanin_init_weights_like(tensor):
size = tensor.size()
if len(size) == 2:
fan_in = size[0]
elif len(size) > 2:
fan_in = np.prod(size[1:])
else:
raise Exception("Shape must be have dimension at least 2.")
bound = 1. / np.sqrt(fan_in)
new_tensor = FloatTensor(tensor.size())
new_tensor.uniform_(-bound, bound)
return new_tensor
"""
GPU wrappers
"""
_use_gpu = False
device = None
_gpu_id = 0
def set_gpu_mode(mode, gpu_id=0):
global _use_gpu
global device
global _gpu_id
_gpu_id = gpu_id
_use_gpu = mode
device = torch.device("cuda:" + str(gpu_id) if _use_gpu else "cpu")
def gpu_enabled():
return _use_gpu
def set_device(gpu_id):
torch.cuda.set_device(gpu_id)
# noinspection PyPep8Naming
def FloatTensor(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.FloatTensor(*args, **kwargs, device=torch_device)
def from_numpy(*args, **kwargs):
return torch.from_numpy(*args, **kwargs).float().to(device)
def get_numpy(tensor):
return tensor.to('cpu').detach().numpy()
def zeros(*sizes, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.zeros(*sizes, **kwargs, device=torch_device)
def ones(*sizes, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.ones(*sizes, **kwargs, device=torch_device)
def ones_like(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.ones_like(*args, **kwargs, device=torch_device)
def randn(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.randn(*args, **kwargs, device=torch_device)
def zeros_like(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.zeros_like(*args, **kwargs, device=torch_device)
def tensor(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.tensor(*args, **kwargs, device=torch_device)
def normal(*args, **kwargs):
return torch.normal(*args, **kwargs).to(device)
def linear_interpolation(l, r, alpha):
return l + alpha * (r - l)
class ConstantSchedule(object):
def __init__(self, value):
"""Value remains constant over time.
Parameters
----------
value: float
Constant value of the schedule
"""
self._v = value
def value(self, t):
"""See Schedule.value"""
return self._v
class PiecewiseSchedule(object):
def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
""" From OpenAI baselines
"""
idxes = [e[0] for e in endpoints]
assert idxes == sorted(idxes)
self._interpolation = interpolation
self._outside_value = outside_value
self._endpoints = endpoints
def value(self, t):
# See Schedule.value
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
# t does not belong to any of the pieces, so doom.
assert self._outside_value is not None
return self._outside_value
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
""" From OpenAI baselines
"""
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
# See Schedule.value
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p) | [] |
2024-01-10 | michel-aractingi/soloRL | agents~running_mean_std.py | import numpy as np
#from PyTorchAgents.envs import VecEnvWrapper
from abc import ABC, abstractmethod
##################
#class and function taken from openAI baselines repo:
#https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py
#################
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, np.float32)
self.var = np.ones(shape, np.float32)
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):
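    # Chan et al.'s parallel variance formula: merge two (mean, var, count) summaries
    # without access to the underlying samples.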
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
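def _example_running_norm():
    # Illustrative sketch (added for clarity; the shape and epsilon are
    # placeholder assumptions): keep running statistics over observation
    # batches and whiten a batch with them.
    rms = RunningMeanStd(shape=(4,))
    batch = np.random.randn(32, 4).astype(np.float32)
    rms.update(batch)
    return (batch - rms.mean) / np.sqrt(rms.var + 1e-8)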
class VecEnvWrapper(ABC):
"""
An environment wrapper that applies to an entire batch
of environments at once.
"""
def __init__(self, venv):
self.venv = venv
self.nenvs = venv.nenvs
def step_(self, actions):
try:
return self.venv.step(actions)
except AttributeError:
self.step_async(actions)
return self.step_wait()
@abstractmethod
def reset(self):
pass
def close(self):
return self.venv.close()
def render(self, mode='human'):
return self.venv.render(mode=mode)
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError("attempted to get missing private attribute '{}'".format(name))
return getattr(self.venv, name)
class VecNormalize(VecEnvWrapper):
"""
A vectorized wrapper that normalizes the observations
and returns from an environment.
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):
VecEnvWrapper.__init__(self, venv)
self.venv = venv
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self._ret_shape = (self.venv.nenvs,)
if isinstance(self.action_space, list):
self._ret_shape = (self.venv.nenvs, len(self.action_space))
self.ret = np.zeros(self._ret_shape)
self.gamma = gamma
self.epsilon = epsilon
self.training = True
def step(self, actions):
obs, rews, news, infos = self.step_(actions)
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
if self.training:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
self.ret[news] = 0.
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
if self.training:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
self.ret = np.zeros(self._ret_shape)
obs = self.venv.reset()
return self._obfilt(obs)
def eval(self):
self.training = False
def __len__(self):
return self.nenvs
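def _example_wrap_env(venv):
    # Illustrative sketch (added for clarity): wrap an already-constructed
    # vectorized env so observations and returns are normalized online. `venv`
    # is assumed to expose the `nenvs`, `observation_space`, `action_space`,
    # `step`, and `reset` interface used by VecEnvWrapper above.
    normalized = VecNormalize(venv, ob=True, ret=True, clipob=10., cliprew=10.)
    first_obs = normalized.reset()
    return normalized, first_obs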
| [] |
2024-01-10 | bartohanew/DMLLM | dmllm~common.py | from modularity import flatten_whitespace, indent
import json
from pymongo import MongoClient
from modularity import OpenAI
import traceback
import datetime as dt
from bson import ObjectId as oid
db = MongoClient()['DMLLM']
openai = OpenAI()
#DEFAULT_MODEL = "gpt-4-1106-preview"
DEFAULT_MODEL = "gpt-3.5-turbo"
def get_response(messages, model=DEFAULT_MODEL, **kwargs):
messages = [{'role': m['role'], 'content': m['content']} for m in messages]
kwargs = {
'max_tokens': None,
'temperature': 0.9,
'top_p': 1,
'frequency_penalty': 0.0,
'presence_penalty': 0.6,
**kwargs,
}
response = openai.chat.completions.create(
model=model,
messages = messages,
**kwargs,
)
return response.choices[0].message.content
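def _example_get_response():
    # Illustrative sketch (not part of the original module): a minimal call to
    # get_response; the message wording is a placeholder assumption.
    messages = [
        {"role": "system", "content": "You are a terse dungeon master."},
        {"role": "user", "content": "Describe the cave mouth in one sentence."},
    ]
    return get_response(messages, model=DEFAULT_MODEL, temperature=0.7)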
def json_retry_loop(messages, model=DEFAULT_MODEL, loop_i=0):
while True:
response = get_response(messages, model=model)
try:
return json.loads(response)
except json.decoder.JSONDecodeError:
messages.append({'role': 'system', 'content': "Invalid JSON. Please try again."})
loop_i += 1
if loop_i > 3:
raise
return json_retry_loop(messages, model=model, loop_i=loop_i)
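def _example_json_retry():
    # Illustrative sketch (not part of the original module): request JSON and
    # let json_retry_loop re-prompt up to three times if parsing fails. The
    # prompt wording is a placeholder assumption.
    messages = [
        {"role": "system", "content": "Reply with a JSON list of three goblin names."},
    ]
    return json_retry_loop(messages)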
import logging
import sys
def setup_logging():
# Create a handler that writes log messages to sys.stdout
stdout_handler = logging.StreamHandler(sys.stdout)
# Set the format for the handler
stdout_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'))
# Get the root logger and set its handler and level
root_logger = logging.getLogger()
root_logger.setLevel(logging.WARNING)
root_logger.addHandler(stdout_handler)
def enable_logging_for_submodules(submodules, level=logging.DEBUG):
for submodule in submodules:
logger = logging.getLogger(submodule)
logger.setLevel(level) | [
"content",
"Invalid JSON. Please try again."
] |
2024-01-10 | bartohanew/DMLLM | sequence%20v0.3%20combat.py | from common import *
from modularity import flatten_whitespace, indent
import json
from pymongo import MongoClient
db = MongoClient()['DMLLM']
adventure_name = "alec_first"
# find the last summary of the state the game was left in
#model = "gpt-3.5-turbo"
model = "gpt-4-1106-preview"
from modularity import OpenAI
import traceback
client = OpenAI()
class DM:
def __init__(self, story_name):
self.story_name = story_name
self.state = db['current_state'].find_one({'name': story_name})
if self.state is None:
self.state = {
'name': story_name,
'quests': ['lost-mines'],
}
db['current_state'].insert_one(self.state)
dialogue = self.get_txt("dialogue")
if dialogue is None:
self.M = []
else:
self.M = [{"role": "user", "content": 'PREVIOUS DIALOGUE:\n' + dialogue[-1500:] + "..."}]
self.briefly_summarize()
self.summary = []
self.characters = []
self.main_character = None
# ------------------
# GETTING GPT
# ------------------
def json_retry_loop(self, messages, model=model, loop_i=0):
while True:
response = get_response(messages, model=model)
try:
return json.loads(response)
except json.decoder.JSONDecodeError:
messages.append({'role': 'system', 'content': "Invalid JSON. Please try again."})
loop_i += 1
if loop_i > 3:
raise
return self.json_retry_loop(messages, model=model, loop_i=loop_i)
# ------------------
# SAYING STUFF
# ------------------
def humansay(self, content):
self.M.append({"role": "user", "content": content})
self.add_txt("dialogue", f"Player:\n{content}")
def computersay(self, content):
self.M.append({"role": "assistant", "content": content})
self.add_txt("dialogue", f"DM:\n{content}")
print("DM:", content)
def computersay_self(self, content):
self.M.append({"role": "system", "content": content})
self.add_txt("dialogue", f"DM (to themselves):\n{content}")
# ------------------
# Thinking, Acting, and Responding
# ------------------
def think(self):
prompt = f"""
You are an assistant to the DM.
Speak directly to the DM (not the player).
Give some thoughts or ideas to the DM to help them conduct their duties.
If you think everything is clear, type 'pass'.
Be concise, specific, and clear.
"""
messages = [
{"role": "system", "content": prompt},
*self.M,
{"role": "system", "content": "What do you think to yourself? Be brief."},
]
response = get_response(messages, model=model)
self.computersay_self("(thinking...) " + response)
def act(self):
story_part = self.state['story_part']
next_steps = "\n".join([f"\t{x}: {y}" for x, y in story_part['next_steps'].items()])
inventory = self.main_character.inventory()
actions = self._format_actions()
prompt = f"""
Your current inventory is:
{inventory}
Based on the dialogue so far, you are to decide what actions to take next.
Most of the time no action will need to be taken. In this case, simply type "pass".
Please do not act in a way not directly implied by the dialogue so far.
Although there is no rush to change the 'scene', you must eventually do so, in order to continue the story.
{actions}
ALWAYS USE DOUBLE QUOTES FOR JSON STRINGS
You can type a command on each line.
You CANNOT mix commands and statements.
Scenes available, their names and descriptions:
{next_steps}
"""
messages = [
{"role": "system", "content": prompt},
*self.M,
{"role": "system", "content": "What do you do? (type = change_scene, roll_hit_dice, or inventory). Use only JSON strings, one per line. If no action need be taken from the most recent message, simply type 'pass'."},
]
response = get_response(messages, model=model)
if response.strip() == "pass":
return
parts = response.split("}")
for part in parts:
if part == "":
continue
part += "}"
try:
part = json.loads(part)
self.act_on(part)
except json.decoder.JSONDecodeError:
print("Invalid JSON:", part)
def act_on(self, action):
print("Executing... ", json.dumps(action, indent=2))
act = dict(action)
typ = action.pop('type')
try:
fn = getattr(self, typ)
response = fn(**action)
self.computersay_self(response)
except Exception as e:
# first get the last line of the traceback
tb = traceback.format_exc().splitlines()[-1]
# then get the last line of the error
error = str(e).splitlines()[-1]
self.computersay_self(f"Error in command '{json.dumps(act, indent=2)}': {error} ({tb})")
self.computersay_self("Please rewrite this one.")
self.act()
def respond(self):
story_part = self.story_part
my_messages = []
inventory = self._format_inventory()
prompt = f"""
You are a DM.
You are currently in the scene "{story_part['name']}".
Your current inventory is:
{inventory}
The message you type will be sent to the player from the DM.
Description of the current scene:
{story_part['description']}
"""
my_messages.append({'role': 'system', 'content': prompt})
response = get_response(my_messages + self.M, model=model)
self.computersay(response)
def consolidate_messages(self):
# remember a summary of the messages
self.summary.append(self.summarize_plotline())
# (mostly) clear the messages
self.M = self.M[-2:]
# ------------------
# Running the Conversation
# ------------------
def run(self):
# human does its thing
query = input(">> ")
self.humansay(query)
# computer does its thing
self.act()
self.think()
self.respond()
self.act()
def loop(self):
while True:
self.run()
class Entity:
def __init__(self, name, description, stats):
self.name = name
self.description = description
self.stats = stats
def __repr__(self):
return f"Entity({self.name}, {self.description}, {self.stats})"
def __str__(self):
return f"Entity({self.name}, {self.description}, {self.stats})"
def inventory(self):
inventory = self.get_txt("inventory")
if inventory is None:
if self.story_part_name == 'start':
self.inventory("add", "10 gold pieces")
self.inventory("add", "a backpack")
self.inventory("add", "a bedroll")
self.inventory("add", "a mess kit")
self.inventory("add", "a tinderbox")
self.inventory("add", "10 torches")
self.inventory("add", "10 days of rations")
self.inventory("add", "a waterskin")
self.inventory("add", "50 feet of hempen rope")
return self._format_inventory()
else:
inventory = "The player has nothing."
return inventory
class Battle(DM):
def __init__(self, battle_description):
self.battle_description = battle_description
self.generate_enemies()
super().__init__(adventure_name)
def generate_enemies(self, model=model):
my_messages = []
prompt1 = flatten_whitespace(f"""
Your goal will be to set up for a tabletop RPG battle.
You are to interpret the following description of the battle, and generate enemies for the battle.
""")
prompt2 = flatten_whitespace(f"""
Your response should be a JSON list of dictionaries, each with the following keys:
- name
- description
- stats
For example:
[
{{"name": "Thwark", "description": "A goblin. A small, green creature.", "stats": {{"hp": 10, "ac": 15, "str": 10, "dex": 10, "con": 10, "int": 10, "wis": 10, "cha": 10}}}},
{{"name": "Mannard", "description": "A goblin. A small, green creature", "stats": {{"hp": 10, "ac": 15, "str": 10, "dex": 10, "con": 10, "int": 10, "wis": 10, "cha": 10}}}},
]
""")
prompt3 = flatten_whitespace(f"""
The battle description is:
{indent(self.battle_description, 2)}
""")
my_messages.append({'role': 'system', 'content': prompt1})
my_messages.append({'role': 'system', 'content': prompt2})
my_messages.append({'role': 'user', 'content': prompt3})
enemy_json = self.json_retry_loop(my_messages, model=model)
self.enemies = [
Entity(**enemy)
for enemy in enemy_json
]
c = DM(adventure_name)
c.loop() | [
"\n You are an assistant to the DM.\n Speak directly to the DM (not the player).\n Give some thoughts or ideas to the DM to help them conduct their duties.\n If you think everything is clear, type 'pass'.\n Be concise, specific, and clear.\n ",
"PREVIOUS DIALOGUE:\nPLACEHOLDER...",
"\n Your response should be a JSON list of dictionaries, each with the following keys:\n - name\n - description\n - stats\n \n For example:\n [\n {\"name\": \"Thwark\", \"description\": \"A goblin. A small, green creature.\", \"stats\": {\"hp\": 10, \"ac\": 15, \"str\": 10, \"dex\": 10, \"con\": 10, \"int\": 10, \"wis\": 10, \"cha\": 10}},\n {\"name\": \"Mannard\", \"description\": \"A goblin. A small, green creature\", \"stats\": {\"hp\": 10, \"ac\": 15, \"str\": 10, \"dex\": 10, \"con\": 10, \"int\": 10, \"wis\": 10, \"cha\": 10}},\n ]\n ",
"\n Your goal will be to set up for a tabletop RPG battle.\n You are to interpret the following description of the battle, and generate enemies for the battle.\n ",
"\n You are a DM. \n You are currently in the scene \"PLACEHOLDER\".\n\n Your current inventory is:\n PLACEHOLDER\n\n The message you type will be sent to the player from the DM.\n\n Description of the current scene:\n PLACEHOLDER\n ",
"Invalid JSON. Please try again.",
"What do you think to yourself? Be brief.",
"\n Your current inventory is:\n PLACEHOLDER\n \n Based on the dialogue so far, you are to decide what actions to take next.\n Most of the time no action will need to be taken. In this case, simply type \"pass\".\n Please do not act in a way not directly implied by the dialogue so far.\n Although there is no rush to change the 'scene', you must eventually do so, in order to continue the story.\n\n PLACEHOLDER\n\n ALWAYS USE DOUBLE QUOTES FOR JSON STRINGS\n\n You can type a command on each line.\n You CANNOT mix commands and statements.\n\n Scenes available, their names and descriptions:\n PLACEHOLDER\n "
] |
2024-01-10 | bartohanew/DMLLM | sequence%20copy.py | from common import *
import json
from pymongo import MongoClient
client = MongoClient()
storyline = list(client.lost_mines.story.find())
story_part_name = 'start'
from modularity import OpenAI
client = OpenAI()
# def summarize_plotline(messages):
# message_text = "\n".join([f"+ {x['role']}: {x['content']}" for x in messages])
# prompt = f"""
# Your goal is to summarize the plotpoints contained in the following conversation between a DM and a player.
# In each plot point, be as specific as possible.
# Keep note of any characters, locations, or items that are mentioned.
# Do not include any information not present in the following messages!
# Messages:
# {message_text}
# """
# print(prompt)
# messages = [
# {"role": "system", "content": prompt},
# ]
# response = get_response(messages)#, model="gpt-3.5-turbo")
# print('Summarized!')
# print(response)
# return response
M = []
summary = []
def respond():
global M, summary, story_part_name, storyline
story_part = [x for x in storyline if x['name'] == story_part_name][0]
next_steps = "\n".join([f"\t{x}: {y}" for x, y in story_part['next_steps'].items()])
my_messages = []
prompt = f"""
You are the Dungeon Master (DM), using the DnD 5E rules and content. Be clever, witty, and sometimes funny. But serious when the time comes
I am the player.
You are not the player.
I am not the DM.
You are currently in the scene "{story_part['name']}"
The player's character is in a four person party
When the player creates a character, you take on the role of the remaining three members of the player's party
Roleplay these characters. Give them personalities. Invent backgrounds for them that tie them to the world, but do not state this information to the player. Take actions you think they would make.
Be sure to have the player make skill checks throughout the adventure when it seems appropriate. But do not state the numbers you roll unless asked. It ruins the immersion. You roll for the player and state the outcome.
When combat starts, consult the 5E rules for combat.
During combat, you take the turn of the NPCs. Play out their turns in the initiative order and do not move on to the next character in initiative until you have completed the current character's turn
If you want to change the scene, type:
{{"type": "change_scene", "to": "scene name"}}
Description of the current scene:
{story_part['description']}
Scenes available, their names and descriptions:
{next_steps}
"""
#Otherwise, any (non-JSON) message you type will be sent to the player. (I REMOVED THIS TO TRY TO DEAL WITH THE RAMBLING MESSAGES)
#print(prompt)
my_messages.append({'role': 'system', 'content': prompt})
response = get_response(my_messages + M)#, model="gpt-3.5-turbo")
# determine if the response is an action
is_action = False
try:
response = json.loads(response)
is_action = response['type'] == 'change_scene'
except:
pass
# if not, just add it to the messages
if not is_action:
M.append({"role": "assistant", "content": response})
print("\n\n" + "DM: " + response + "\n\n")
# if so, change the scene
else:
story_part_name = response['to']
print(f"Changed to scene: {story_part_name}")
M.append({"role": "system", "content": f"Changed to scene: {story_part_name}"})
# since the computer used its turn on changing the scene, let it go again (to respond with text)
respond()
# consolidate things, if it's getting long
# if len(M) > 10:
# # remember a summary of the messages
# summary.append(summarize_plotline(M))
# # clear the messages
# M = M[-2:]
# this is actually the interactive loop!
while True:
# human does its thing
query = input("Player: ")
M.append({"role": "user", "content": query})
# computer does its thing
respond() | [
"\nYou are the Dungeon Master (DM), using the DnD 5E rules and content. Be clever, witty, and sometimes funny. But serious when the time comes\nI am the player.\nYou are not the player.\nI am not the DM.\nYou are currently in the scene \"PLACEHOLDER\"\nThe player's character is in a four person party\nWhen the player creates a character, you take on the role of the remaining three members of the player's party\nRoleplay these characters. Give them personalities. Invent backgrounds for them that tie them to the world, but do not state this information to the player. Take actions you think they would make. \n\nBe sure to have the make skill checks throughout the adventure when it seems appropriate. But do not state the numbers you roll unless asked. It ruins the immersion. You roll for the player and state the outcome.\n\nWhen combat starts, consult the 5E rules for combat.\n\nDuring combat, you take the turn of the NPCs. Play out their turns in the initiative order and do not move on to the next character in initiative until you have completed the current character's turn\n\nIf you want to change the scene, type:\n{\"type\": \"change_scene\", \"to\": \"scene name\"}\n\n\n\nDescription of the current scene:\n PLACEHOLDER\n\nScenes available, their names and descriptions:\nPLACEHOLDER\n\n\n ",
"Changed to scene: start"
] |
2024-01-10 | bartohanew/DMLLM | sequence.py | from common import *
import json
storyline = [
{
'name': "start",
'description': "In Neverwinter, Gundren Rockseeker, a dwarf, hires you to transport provisions to Phandalin. Gundren, with a secretive demeanor, speaks of a significant discovery he and his brothers made. He promises ten gold pieces for safe delivery to Barthen's Provisions in Phandalin. Accompanied by Sildar Haliwinter, he leaves ahead on horseback. Your journey begins on the High Road from Neverwinter, moving southeast. Danger lurks on this path, known for bandits and outlaws.",
'next_steps': {
'introduce characters': "A good first step",
'determine marching order': "Optional",
'driving the wagon': "When the journey really begins. Make sure they know some of the plot before beginning."
}
},
{
'name': "introduce characters",
'description': "Players take turns introducing their characters. They describe their appearance, background, and how they came to know Gundren Rockseeker. Encourage creativity in their backstories, like childhood friendships or past rescues by Gundren.",
'next_steps': {
'determine marching order': "At any time."
}
},
{
'name': "determine marching order",
'description': "The party discusses and decides their traveling formation. Who will drive the wagon, scout ahead, or guard the rear? This arrangement is crucial for upcoming encounters and navigating the terrain.",
'next_steps': {
'driving the wagon': "Whenever the party is ready."
}
},
{
'name': "driving the wagon",
'description': "The wagon, pulled by two oxen, contains various mining supplies and food worth 100 gp. The journey is uneventful initially, but the path is known for its dangers. The players must remain alert as they navigate the road.",
'next_steps': {
'finding horses': "At some point along the road, probably after some time has passed, the party encounters two dead horses blocking the path."
}
},
{
'name': "finding horses",
'description': "As the party nears Phandalin, they encounter two dead horses blocking the path, riddled with black-feathered arrows.",
'next_steps': {
'combat with goblins': "Investigating the horses triggers the ambush from the goblins hiding in the thicket.",
},
},
{
'name': "combat with goblins",
'description': "The party must quickly react to the goblin attack. Goblins, skilled in stealth and ambush tactics, launch their assault. The players must use their wits and combat skills to overcome this threat.",
'next_steps': {
'follow goblin trail': "If the party decides to track the goblins, they find a trail leading to the Cragmaw hideout."
}
},
{
'name': "follow goblin trail",
'description': "The path is treacherous, with potential traps and signs of frequent goblin activity. Stealth and caution are advised.",
'next_steps': {
'cragmaw hideout': "They eventually alight on the hideout itself."
}
},
{
'name': "cragmaw hideout",
'description': "The trail leads to a well-hidden cave, the Cragmaw hideout. The party must navigate through this dangerous lair, facing goblins and their traps. Their goal is to find Gundren, Sildar, or any information about their whereabouts.",
'next_steps': {
'rescue mission': "Attempt to rescue Gundren and Sildar, if they are found within the hideout.",
'return to Phandalin': "After exploring the hideout, decide whether to return to Phandalin."
}
},
{
'name': "rescue mission",
'description': "In the event Gundren or Sildar are found captive, the party must devise a plan to rescue them. This might involve combat, negotiation, or stealth. The fate of their mission and the lives of the captives hang in the balance.",
'next_steps': {
'return to Phandalin': "With or without the captives, make the journey back to Phandalin."
}
},
{
'name': "return to Phandalin",
'description': "The journey concludes as the party arrives in Phandalin. They must report the outcome of their mission, deliver the supplies, and seek out Elmar Barthen at Barthen's Provisions. The town may offer more clues or quests related to the Cragmaw goblins and the fate of Gundren and Sildar.",
'next_steps': {
'explore Phandalin': "Explore the town of Phandalin for further adventures and quests."
}
}
]
story_part_name = 'start'
from modularity import OpenAI
client = OpenAI()
def summarize_plotline(messages):
message_text = "\n".join([f"+ {x['role']}: {x['content']}" for x in messages])
prompt = f"""
Your goal is to summarize the plotpoints contained in the following conversation between a DM and a player.
In each plot point, be as specific as possible.
Keep note of any characters, locations, or items that are mentioned.
Do not include any information not present in the following messages!
Messages:
{message_text}
"""
print(prompt)
messages = [
{"role": "system", "content": prompt},
]
response = get_response(messages)#, model="gpt-3.5-turbo")
print('Summarized!')
print(response)
return response
M = []
summary = []
def respond():
global M, summary, story_part_name, storyline
story_part = [x for x in storyline if x['name'] == story_part_name][0]
next_steps = "\n".join([f"\t{x}: {y}" for x, y in story_part['next_steps'].items()])
my_messages = []
prompt = f"""
You are a DM.
You are currently in the scene "{story_part['name']}".
Although there is no rush to change the 'scene', you must eventually do so, in order to continue the story.
If you want to change the scene, type:
{{"type": "change_scene", "to": "scene name"}}
Otherwise, any (non-JSON) message you type will be sent to the player.
Description of the current scene:
{story_part['description']}
Scenes available, their names and descriptions:
{next_steps}
"""
#print(prompt)
my_messages.append({'role': 'system', 'content': prompt})
response = get_response(my_messages + M)#, model="gpt-3.5-turbo")
# determine if the response is an action
is_action = False
try:
response = json.loads(response)
is_action = response['type'] == 'change_scene'
except:
pass
# if not, just add it to the messages
if not is_action:
M.append({"role": "assistant", "content": response})
print(response)
# if so, change the scene
else:
story_part_name = response['to']
print(f"Changed to scene: {story_part_name}")
M.append({"role": "system", "content": f"Changed to scene: {story_part_name}"})
# since the computer used its turn on changing the scene, let it go again (to respond with text)
respond()
# consolidate things, if it's getting long
if len(M) > 10:
# remember a summary of the messages
summary.append(summarize_plotline(M))
# clear the messages
M = M[-2:]
# this is actually the interactive loop!
while True:
# human does its thing
query = input(">> ")
M.append({"role": "user", "content": query})
# computer does its thing
respond() | [
"\n Your goal is to summarize the plotpoints contained in the following conversation between a DM and a player.\n In each plot point, be as specific as possible.\n Keep note of any characters, locations, or items that are mentioned.\n Do not include any information not present in the following messages!\n\n Messages:\n PLACEHOLDER\n ",
"Changed to scene: PLACEHOLDER",
"\nYou are a DM. \nYou are currently in the scene \"PLACEHOLDER\".\nAlthough there is no rush to change the 'scene', you must eventually do so, in order to continue the story.\n\nIf you want to change the scene, type:\n{\"type\": \"change_scene\", \"to\": \"scene name\"}\n\nOtherwise, any (non-JSON) message you type will be sent to the player.\n\nDescription of the current scene:\n PLACEHOLDER\n\nScenes available, their names and descriptions:\nPLACEHOLDER\n "
] |
2024-01-10 | bartohanew/DMLLM | sequence%20v0.2%20copy.py | from common import *
import json
from pymongo import MongoClient
client = MongoClient()
storyline = list(client.lost_mines.story.find())
# NPCs = list(client.lost_mines.NPCs.find())
adventure_name = "alec_first"
#model = "gpt-3.5-turbo"
model = "gpt-4-1106-preview"
from modularity import OpenAI
import traceback
client = OpenAI()
class Convo:
def __init__(self, story_name):
self.story_name = story_name
self.story_dir = Path(f"stories/{story_name}")
self.story_dir.mkdir(exist_ok=True, parents=True)
self.story_part_name = self.get_txt("story_part")
if self.story_part_name is None:
self.story_part_name = 'start'
self.set_txt("story_part", self.story_part_name)
dialogue = self.get_txt("dialogue")
if dialogue is None:
self.M = []
else:
self.M = [{"role": "user", "content": 'PREVIOUS DIALOGUE:\n' + dialogue[-1500:] + "..."}]
self.briefly_summarize()
self.summary = []
self.story_part_name = 'start'
self.type_modifiers = {
'strength': 2,
'dexterity': 1,
'constitution': 0,
'intelligence': -1,
'wisdom': -2,
'charisma': -3,
}
@property
def story_part(self):
return [x for x in storyline if x['name'] == self.story_part_name][0]
def briefly_summarize(self):
self.computersay(f"(summarizing from last time...) " + self.summarize_plotline("Explain this to the player, bringing them up to speed on what just happened. Hopefully just one sentence will suffice."))
def get_txt(self, name):
story_part_file = self.story_dir / f"{name}.txt"
if story_part_file.exists():
return story_part_file.read_text().strip()
else:
return None
def set_txt(self, name, content):
f = self.story_dir / f"{name}.txt"
f.write_text(content)
def add_txt(self, name, content):
f = self.story_dir / f"{name}.txt"
if not f.exists():
f.write_text(content)
else:
f.write_text(f.read_text() + "\n" + content)
# ------------------
# Summarizing is probably useful!
# ------------------
def summarize_character(self):
message_text = "\n".join([f"+ {x['role']}: {x['content']}" for x in self.M])
prompt = f"""
Your goal is to extract full character sheets for the players involved in this adventure.
Messages:
{message_text}
"""
print(prompt)
messages = [
{"role": "system", "content": prompt},
]
response = get_response(messages, model=model)
return response
def summarize_plotline(self, prompt=None):
message_text = "\n".join([f"+ {x['role']}: {x['content']}" for x in self.M])
if prompt is None:
prompt = f"""
Your goal is to summarize the plotpoints contained in the following conversation between a DM and a player.
In each plot point, be as specific as possible.
Keep note of any characters, locations, or items that are mentioned.
Do not include any information not present in the following messages!
Please be extremely concise.
"""
prompt += f"""
Messages:
{message_text}
"""
#print(prompt)
messages = [
{"role": "system", "content": prompt},
]
response = get_response(messages, model=model)
#print('Summarized!')
#print(response)
return response
def inventory(self, action, object):
self.add_txt("inventory", f"{action}: {object}")
return f"Inventory {action}: {object}"
def change_scene(self, to):
self.story_part_name = to
self.set_txt("story_part", self.story_part_name)
return "Changed scene to " + to
    def roll_hit_dice(self, n_sides, n_dice, ability=None, **kwargs):
        # "ability" names the modifier to apply (e.g. "strength"); it is kept
        # separate from the command's "type" key, which act_on() pops before
        # dispatching here.
        import random
        rolls = [random.randint(1, n_sides) for _ in range(n_dice)]
        result = result_og = sum(rolls)
        mod = 0
        if ability is not None and ability in self.type_modifiers:
            mod += self.type_modifiers[ability]
        result += mod
        return f"Rolled {n_dice}d{n_sides} ({ability}) {result_og} + {mod} = {result}"
# ------------------
# SAYING STUFF
# ------------------
def humansay(self, content):
self.M.append({"role": "user", "content": content})
self.add_txt("dialogue", f"Player:\n{content}")
def computersay(self, content):
self.M.append({"role": "assistant", "content": content})
self.add_txt("dialogue", f"DM:\n{content}")
print("DM:", content)
def computersay_self(self, content):
self.M.append({"role": "system", "content": content})
self.add_txt("dialogue", f"DM (to themselves):\n{content}")
def _format_inventory(self):
inventory = self.get_txt("inventory")
if inventory is None:
if self.story_part_name == 'start':
self.inventory("add", "10 gold pieces")
self.inventory("add", "a backpack")
self.inventory("add", "a bedroll")
self.inventory("add", "a mess kit")
self.inventory("add", "a tinderbox")
self.inventory("add", "10 torches")
self.inventory("add", "10 days of rations")
self.inventory("add", "a waterskin")
self.inventory("add", "50 feet of hempen rope")
return self._format_inventory()
else:
inventory = "The player has nothing."
return inventory
# ------------------
# Thinking, Acting, and Responding
# ------------------
def think(self):
prompt = f"""
You are an assistant to the DM.
Speak directly to the DM (not the player).
Give some thoughts or ideas to the DM to help them conduct their duties.
If you think everything is clear, type 'pass'.
Be concise, specific, and clear.
"""
messages = [
{"role": "system", "content": prompt},
*self.M,
{"role": "system", "content": "What do you think to yourself? Be brief."},
]
response = get_response(messages, model=model)
self.computersay_self("(thinking...) " + response)
def act(self):
story_part = self.story_part
next_steps = "\n".join([f"\t{x}: {y}" for x, y in story_part['next_steps'].items()])
inventory = self._format_inventory()
prompt = f"""
Your current inventory is:
{inventory}
Based on the dialogue so far, you are to decide what actions to take next.
Most of the time no action will need to be taken. In this case, simply type "pass".
Please do not act in a way not directly implied by the dialogue so far.
Although there is no rush to change the 'scene', you must eventually do so, in order to continue the story.
If you want to change the scene, type:
{{"type": "change_scene", "to": "scene name"}}
To roll hit dice, type:
{{"type: "roll_hit_dice", "n_dice": 1, "n_sides": 6, "type": "strength"}}
To add or remove from inventory, type:
{{"type: "inventory", "action": "add|remove", "object": "object name, description, and/or stats"}}
ALWAYS USE DOUBLE QUOTES FOR JSON STRINGS
You can type a command on each line.
You CANNOT mix commands and statements.
Scenes available, their names and descriptions:
{next_steps}
"""
messages = [
{"role": "system", "content": prompt},
*self.M,
{"role": "system", "content": "What do you do? (type = change_scene, roll_hit_dice, or inventory). Use only JSON strings, one per line. If no action need be taken from the most recent message, simply type 'pass'."},
]
response = get_response(messages, model=model)
if response.strip() == "pass":
return
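        # Note (added for clarity): the reply is split on "}" and each piece is
        # re-terminated with "}", so this parse assumes flat, non-nested JSON
        # objects, one command per object.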
parts = response.split("}")
for part in parts:
if part == "":
continue
part += "}"
try:
part = json.loads(part)
self.act_on(part)
except json.decoder.JSONDecodeError:
print("Invalid JSON:", part)
def act_on(self, action):
print("Executing... ", json.dumps(action, indent=2))
act = dict(action)
typ = action.pop('type')
try:
fn = getattr(self, typ)
response = fn(**action)
self.computersay_self(response)
except Exception as e:
# first get the last line of the traceback
tb = traceback.format_exc().splitlines()[-1]
# then get the last line of the error
error = str(e).splitlines()[-1]
self.computersay_self(f"Error in command '{json.dumps(act, indent=2)}': {error} ({tb})")
self.computersay_self("Please rewrite this one.")
self.act()
def respond(self):
story_part = self.story_part
my_messages = []
inventory = self._format_inventory()
prompt = f"""
You are a DM.
You are currently in the scene "{story_part['name']}".
Your current inventory is:
{inventory}
The message you type will be sent to the player from the DM.
Description of the current scene:
{story_part['description']}
"""
my_messages.append({'role': 'system', 'content': prompt})
response = get_response(my_messages + self.M, model=model)
self.computersay(response)
# consolidate things, if it's getting long
if len(self.M) > 10:
# remember a summary of the messages
self.summary.append(self.summarize_plotline())
# (mostly) clear the messages
self.M = self.M[-2:]
# ------------------
# Running the Conversation
# ------------------
def run(self):
# human does its thing
query = input(">> ")
self.humansay(query)
# computer does its thing
self.act()
self.think()
self.respond()
self.act()
def loop(self):
while True:
self.run()
c = Convo(adventure_name)
c.loop() | [
"\n You are an assistant to the DM.\n Speak directly to the DM (not the player).\n Give some thoughts or ideas to the DM to help them conduct their duties.\n If you think everything is clear, type 'pass'.\n Be concise, specific, and clear.\n ",
"PREVIOUS DIALOGUE:\nPLACEHOLDER...",
"\n You are a DM. \n You are currently in the scene \"PLACEHOLDER\".\n\n Your current inventory is:\n PLACEHOLDER\n\n The message you type will be sent to the player from the DM.\n\n Description of the current scene:\n PLACEHOLDER\n ",
"\n Your goal is to summarize the plotpoints contained in the following conversation between a DM and a player.\n In each plot point, be as specific as possible.\n Keep note of any characters, locations, or items that are mentioned.\n Do not include any information not present in the following messages!\n Please be extremely concise.\n ",
"\n Your current inventory is:\n PLACEHOLDER\n \n Based on the dialogue so far, you are to decide what actions to take next.\n Most of the time no action will need to be taken. In this case, simply type \"pass\".\n Please do not act in a way not directly implied by the dialogue so far.\n Although there is no rush to change the 'scene', you must eventually do so, in order to continue the story.\n\n If you want to change the scene, type:\n {\"type\": \"change_scene\", \"to\": \"scene name\"}\n\n To roll hit dice, type:\n {\"type: \"roll_hit_dice\", \"n_dice\": 1, \"n_sides\": 6, \"type\": \"strength\"}\n\n To add or remove from inventory, type:\n {\"type: \"inventory\", \"action\": \"add|remove\", \"object\": \"object name, description, and/or stats\"}\n\n ALWAYS USE DOUBLE QUOTES FOR JSON STRINGS\n\n You can type a command on each line.\n You CANNOT mix commands and statements.\n\n Scenes available, their names and descriptions:\n PLACEHOLDER\n ",
"\n Your goal is to extract full character sheets for the players involved in this adventure.\n\n Messages:\n PLACEHOLDER\n ",
"What do you think to yourself? Be brief.",
"\n\n Messages:\n PLACEHOLDER\n "
] |
2024-01-10 | bartohanew/DMLLM | common.py | import openai
from openai import OpenAI
from pathlib import Path
import os
from dotenv import load_dotenv
load_dotenv()
open_ai_key = os.getenv("OPEN_AI_KEY")
# set API KEY
client = OpenAI(api_key = open_ai_key)
def get_response(messages, model="gpt-4-1106-preview"):
response = client.chat.completions.create(
model=model,
messages = messages,
max_tokens=None,
temperature=0.9,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.6,
)
return response.choices[0].message.content | [] |
2024-01-10 | bartohanew/DMLLM | pdf_extractor.py | import PyPDF2
import openai
from common import * # Importing the API key from the common module
def extract_text_from_pdf(pdf_file):
with open(pdf_file, 'rb') as file: # Open the PDF file in binary mode
reader = PyPDF2.PdfReader(file)
text = ''
for page_num in range(len(reader.pages)): # Iterate through each page
page = reader.pages[page_num]
text += page.extract_text() # Extract and accumulate text from each page
return text
def convert_to_json_with_gpt4(text):
openai.api_key = open_ai_key # Set the API key using the imported variable
response = openai.chat.completions.create(
model="gpt-4-1106-preview", # Specify your model here
messages=[
{"role": "user", "content": text}
],
max_tokens=4096 # Adjust the maximum token limit as needed
)
    return response.choices[0].message.content.strip()
# Replace the path with the actual path to your PDF file
pdf_path = r"C:\Users\thbar\OneDrive\Desktop\Document Cloud\StarterSet_Characters-part-1.pdf"
extracted_text = extract_text_from_pdf(pdf_path)
# Print the extracted text (optional, for verification)
print("Extracted Text:\n", extracted_text)
# Convert the extracted text to JSON using GPT-4
json_output = convert_to_json_with_gpt4(extracted_text)
print("\nJSON Output:\n", json_output)
| [] |
2024-01-10 | bartohanew/DMLLM | demo_guidance.py | import guidance
from guidance import assistant, system, user, gen
from dotenv import load_dotenv
import json
load_dotenv()
dm = guidance.models.OpenAIChat("gpt-4-1106-preview")
#dm = guidance.models.OpenAIChat("gpt-3.5-turbo")
commands_description = """
Here are the available commands:
+ ask_assistant: <your question>
    Whenever you are unsure of how to properly execute your duties, you may ask your assistant for help by responding with "ask_assistant: <your question>".
+ roll_die: <number of sides>, <number of dice>, <type>
When a player attempts an action that has a chance of failure.
Type can be any of the following: strength, dexterity, constitution, intelligence, wisdom, charisma, attack.
+ spell_lookup: <spell name>
When a player attempts to cast a spell, you may respond with "spell_lookup: <spell name>".
+ confusion: <problem with executing the action>
When you cannot fully understand what to do, use this command.
You may end the session without any outcomes as long as you state why you are confused.
+ objection: <player's action>
When a player attempts an action that is not allowed by the rules, you may respond with "objection: <player's action>".
+ outcome: <change in the world>
In order to specify a change in the world, you may respond with "outcome: <change in the world>".
+ END
When you are finished considering the implications, you may respond with "END".
"""
with system():
dm += "You are the rulemaster for a game of Dungeons and Dragons."
dm += """
The current state of the world is:
+ There are three players: Alice, Bob, and Charlie.
+ Alice
+ 10 hit points
+ human wizard
+ can cast fireball, magic missile, and shield
+ has a dagger
+ has a spellbook
+ Bob
+ 10 hit points
+ human fighter
+ has a sword
+ Charlie
+ 10 hit points
+ human cleric
+ can cast cure wounds
+ has a mace
+ There are three monsters: a goblin, an orc, and a troll.
+ goblin
+ 3 hit points
+ 30 ft from Alice, 100ft from Bob, 50ft from Charlie
+ orc
+ 6 hit points
+ 50 ft from Alice, 30ft from Bob, 100ft from Charlie
+ troll
+ 10 hit points
+ 100 ft from Alice, 50ft from Bob, 30ft from Charlie
"""
with user():
dm += "Alice: I cast fireball at the goblin."
modifiers = {
"strength": 4,
"dexterity": -1,
"constitution": -2,
"intelligence": 0,
"wisdom": 2,
"charisma": 0,
}
def spell_lookup(name):
name = name.strip().lower()
print('searching for spell', name)
import requests
nameq = name.strip().replace(' ', '+')
url = f"https://www.dnd5eapi.co/api/spells/?name={nameq}"
response = requests.get(url)
response = response.json()['results']
response = [r for r in response if r['name'].lower() == name.lower()]
full = []
for result in response:
url = result['url']
response = requests.get(f"https://www.dnd5eapi.co{url}")
full.append(response.json())
if not len(full):
return "No spells found."
result = []
for f in full:
result.append(f"{f['name']}\n\tDescription: {f['desc'][0]}\n\tRange: {f['range']}\n")
return "\n".join(result)
objections = []
outcomes = []
while True:
dmp = dm.copy()
with assistant():
dmp += "Let me describe very briefly (one sentence, ideally) what to do next...\n"
print(dmp)
with assistant():
dmp = dmp + gen("thought", max_tokens=200)
print('THOUGHT', dmp["thought"])
dm += "Thinking to myself... " + dmp["thought"]
with system():
dmp += "\n" + commands_description
dmp += "\nThe only available commands are 'roll_die', 'spell_lookup', 'objection', 'outcome', and 'END'."
dmp += "\nWhen this specific turn in D&D combat is over, respond with 'END' to move to the next player's turn."
dmp += "\nAlice is the only player acting now. Do not act for others."
with assistant():
dmp += "\nI will now specify my command in the format '<command name>: <arguments>'.\n"
dmp = dmp + gen("command")
print('COMMAND', dmp["command"])
c, *args = dmp["command"].split(":")
args = ":".join(args).strip()
with assistant():
if c == "roll_die":
sides, num, typ = args.split(",")
import random
n_sides = int(sides.strip())
n_dice = int(num.strip())
typ = typ.strip().lower()
result = [ random.randint(1, n_sides) for _ in range(n_dice) ]
result = sum(result)
mod = modifiers[typ] if typ in modifiers else 0
self_message = f"I rolled a {result} + {mod} = {result+mod} for {typ}."
elif c == "spell_lookup":
self_message = "I looked up the spell '%s', finding the following:\n"%args + spell_lookup(args)
elif c == "objection":
self_message = "I make the objection '%s'" % args
objections.append(args)
elif c == "outcome":
self_message = "I specify the outcome '%s'" % args
outcomes.append(args)
elif c == "END":
break
else:
self_message = "I gave an invalid command, '%s'" % dmp["command"]
print("ACTION RESULT:", self_message)
dm += "Action completed: " + self_message + "\n"
print("Objections:")
for o in objections:
print('+ ', o)
print("Outcomes:")
for o in outcomes:
print('+ ', o) | [] |
2024-01-10 | bartohanew/DMLLM | sequence%20v0.2.py | from common import *
import json
adventure_name = "alec_first"
model = "gpt-3.5-turbo"
#model = "gpt-4-1106-preview"
storyline = [
{
'name': "start",
'description': "In Neverwinter, Gundren Rockseeker, a dwarf, hires you to transport provisions to Phandalin. Gundren, with a secretive demeanor, speaks of a significant discovery he and his brothers made. He promises ten gold pieces for safe delivery to Barthen's Provisions in Phandalin. Accompanied by Sildar Haliwinter, he leaves ahead on horseback. Your journey begins on the High Road from Neverwinter, moving southeast. Danger lurks on this path, known for bandits and outlaws.",
'next_steps': {
'introduce characters': "A good first step",
'determine marching order': "Optional",
'driving the wagon': "When the journey really begins. Make sure they know some of the plot before beginning."
}
},
{
'name': "introduce characters",
'description': "Players take turns introducing their characters. They describe their appearance, background, and how they came to know Gundren Rockseeker. Encourage creativity in their backstories, like childhood friendships or past rescues by Gundren.",
'next_steps': {
'determine marching order': "At any time."
}
},
{
'name': "determine marching order",
'description': "The party discusses and decides their traveling formation. Who will drive the wagon, scout ahead, or guard the rear? This arrangement is crucial for upcoming encounters and navigating the terrain.",
'next_steps': {
'driving the wagon': "Whenever the party is ready."
}
},
{
'name': "driving the wagon",
'description': "The wagon, pulled by two oxen, contains various mining supplies and food worth 100 gp. The journey is uneventful initially, but the path is known for its dangers. The players must remain alert as they navigate the road.",
'next_steps': {
'finding horses': "At some point along the road, probably after some time has passed, the party encounters two dead horses blocking the path."
}
},
{
'name': "finding horses",
'description': "As the party nears Phandalin, they encounter two dead horses blocking the path, riddled with black-feathered arrows.",
'next_steps': {
'combat with goblins': "Investigating the horses triggers the ambush from the goblins hiding in the thicket.",
},
},
{
'name': "combat with goblins",
'description': "The party must quickly react to the goblin attack. Goblins, skilled in stealth and ambush tactics, launch their assault. The players must use their wits and combat skills to overcome this threat.",
'next_steps': {
'follow goblin trail': "If the party decides to track the goblins, they find a trail leading to the Cragmaw hideout."
}
},
{
'name': "follow goblin trail",
'description': "The path is treacherous, with potential traps and signs of frequent goblin activity. Stealth and caution are advised.",
'next_steps': {
'cave_1': "They eventually alight on the hideout itself."
}
},
{
'name': "cave_1",
'description': """
The trail leads to a well-hidden cave, the Cragmaw hideout.
"Following the goblins' trail, you come across a large cave in a hillside five miles from the scene of the ambush. A shallow stream flows out of the cave mouth, which is screened by dense briar thickets. A narrow dry path leads into the cave on the right-hand side of the stream."
The goblins in the dense (completely hidden and impenetrable) thicket on the other side of the river are supposed to be keeping watch on this area, but they are not paying attention. (Goblins can be lazy that way.)
However, if the characters make a lot of noise here-for example, loudly arguing about what to do next, setting up a camp, cutting down brush, and so on-the goblins in area 2 notice and attack them through the thicket, which provides the goblins with three-quarters cover (see the rule book for rules on cover).
""",
'next_steps': {
'approach cave': "If the party decides to enter the cave.",
'trigger goblin attack': "If the party decides to make a lot of noise outside the cave.",
}
},
{
'name': "approach cave",
'description': """
When the characters cross to the east side of the stream, they can see around the screening thickets to area 2.
This is a goblin guard post, though the goblins here are bored and inattentive.
On the east side of the stream flowing from the cave mouth, a small area in the briar thickets has been hollowed out to form a lookout post or blind.
Wooden planks flatten out the briars and provide room for guards to lie hidden and watch the area-including a pair of goblins lurking there right now!
Two goblins are stationed here. If the goblins notice intruders in area 1, they open fire with their bows, shooting through the thickets and probably catching the characters by surprise. If the goblins don't notice the adventurers in area 1, they spot them when they splash across the stream, and neither side is surprised.
Characters moving carefully or scouting ahead might be able to surprise the goblin lookouts. Have each character who moves ahead make a Dexterity (Stealth) check contested by the goblins' passive Wisdom (Perception)
Thickets. The thickets around the clearing are difficult terrain, but they aren't dangerous-just annoying. They provide half cover to creatures attacking through them. (See "Difficult Terrain" and "Cover" in the rulebook for more information.)
""",
'next_steps': {
'trigger goblin attack': "If the party alerts the goblins",
'cave_3': "If the party sneaks by successfully, they enter the cave.",
}
},
{
"name": "trigger goblin attack",
"description": """
"""
},
{
"name": "cave_3",
"description": """
The Cragmaws keep a kennel of foul-tempered wolves that they are training for battle.
        Just inside the cave mouth, a few uneven stone steps lead up to a small, dank chamber on the east side of the passage. The cave narrows to a steep fissure at the far end, and is filled with the stench of animals. Savage snarls and the sounds of rattling chains greet your ears where three wolves are chained up just inside the opening. Each wolf's chain leads to an iron rod driven into the base of a stalagmite.
Three wolves are confined here. They can't reach targets standing on the steps, but all three attack any creature except a goblin that moves into the room (see the "Developments" section).
Goblins in nearby caves ignore the sounds of fighting wolves, since they constantly snap and snarl at each other.
A character who tries-to calm the animals can attempt a DC 15 Wisdom (Animal Handling) check.
On a success, the wolves allow the character to move throughout the room. If the wolves are given food, the DC drops to 10.
Fissure. A narrow opening in the east wall leads to a natural chimney that climbs 30 feet to area 8.
At the base of the fissure is rubbish that's been discarded through the opening above.
A character attempting to ascend or descend the chimney shaft must make a DC 10 Strength (Athletics) check.
If the check succeeds, the character moves at half speed up or down the shaft, as desired.
On a check result of 6-9, the character neither gains nor loses ground;
on a result of 5 or less, the character falls and takes 1d6 bludgeoning damage per 10 feet fallen, landing prone at the base of the shaft.
DEVELOPMENTS
If the wolves are goaded by enemies beyond their reach, they are driven into a frenzy that allows them to yank the iron rod securing their chains out of the floor. Each round that any character remains in sight, the wolves attempt a single DC 15 Strength check.
On the first success, they loosen the rod and the DC drops to 10. On a second success, they yank the rod loose, bending it so that their chains are freed.
A goblin or bugbear can use its action to release one wolf from its chain.
""",
"next_steps": {
"cave_4": "If the party decides to continue into the cave.",
"cave_1": "If the party decides to leave the cave.",
"cave_8": "If the party successfully climbs the fissure.",
}
},
{
"name": "cave_4",
"description": """
From this point on, characters without darkvision will need light to see their surroundings.
The main passage from the cave mouth climbs steeply upward, the stream plunging and splashing down its west side.
In the shadows, a side passage leads west across the other side of the stream.
Characters using light or darkvision to look farther up the passage spot the bridge at area 5. Add:
In the shadows of the ceiling to the north, you can just make out the dim shape of a rickety bridge of wood and rope crossing over the passage ahead of you. Another passage intersects this one, twenty feet above the floor.
Any character who can see the bridge in area 5 might also notice the goblin guarding the bridge.
Doing so requires a Wisdom (Perception) check contested by the goblin's Dexterity (Stealth) check result.
The goblin notices the characters if they carry any light or don't use stealth as they approach the bridge.
The goblin does not attack. Instead, it attempts to sneak away to the east to inform its companions in area 7 to release a flood.
The goblin moves undetected if its Dexterity (Stealth) check exceeds the passive Wisdom (Perception) score of any character who might notice its movements.
Western Passage.
This passage is choked with rubble and has steep escarpments.
Treat the area as difficult terrain (see "Difficult Terrain" in the rulebook).
The ledge between the two escarpments is fragile.
Any weight in excess of 100 pounds loosens the whole mass and sends it tumbling down to the east.
Any creature on the ledge when it falls must make a DC 10 Dexterity saving throw, taking 2d6 bludgeoning damage on a failure, or half as much damage on a success.
The creature also falls prone on a failed save (see "Being Prone" in the rulebook).
""",
"next_steps": {
"cave_5": "If the party continues towards the bridge.",
"cave_6": "If the party successfully makes it to the other side of the ledge.",
"cave_3": "If the party decides to leave the cave."
}
},
{
"name": "cave_5_lower",
"description": """
Where a high tunnel passes through the larger tunnel cavern below, the goblins have set up a bridge guard post.
The stream passage continues up beyond another set of uneven steps ahead, bending eastward as it goes.
        A waterfall sounds out from a larger cavern somewhere ahead of you.
If the characters didn't spot the bridge while navigating area 4, they spot it now. Add:
A rickety bridge spans the passage, connecting two tunnels that are 20 feet above the stream.
One goblin stands watch on the bridge.
It is hiding, and characters can spot it by succeeding on a Wisdom (Perception) check contested by the goblin's Dexterity (Stealth) check.
This guard is lazy and inattentive. If no characters are using light sources, each character can attempt a Dexterity (Stealth) check against the goblin's passive Wisdom (Perception) score to creep by without being noticed.
If the goblin spots the adventurers, it signals the goblins in area 7 to release a flood, then throws javelins down at the characters.
Bridge. This bridge spans the passage 20 feet above the stream. It's possible to climb up the cavern walls from the lower passage to the bridge. The 20-foot-high walls are rough but slick with spray, requiring a successful DC 15 Strength (Athletics) check to climb.
The bridge has an Armor Class (AC) of 5 and 10 hit points. If the bridge is reduced to 0 hit points, it collapses.
Creatures on the collapsing bridge must succeed on a DC 10 Dexterity saving throw or fall, taking 2d6 bludgeoning damage and landing prone (see "Being Prone" in the rulebook).
Those who succeed hold onto the bridge and must climb it to safety.
The players are on the ground.
""",
"next_steps": {
"flood": "if the goblin signals to start the flood.",
"cave_7": "if the party continues under and beyond the bridge.",
"cave_7": "if the party is able to get to the top of the bridge, and heads west",
"cave_6": "if the party is able to get to the top of the bridge, and heads east",
}
},
{
"name": "cave_7",
"description": """
If the goblins have drained either pool to flood the passage, adjust the following boxed text accordingly.
This cavern is half filled with two large pools of water. A narrow waterfall high in the eastern wall feeds the pool, which drains out the western end of the chamber to form the stream that flows out of the cave mouth below. Low fieldstone walls serve as dams holding the water in. A wide exit stands to the south, while two smaller passages lead west. The sound of the waterfall echoes through the cavern, making it difficult
to hear.
Three goblins guard this cave. If the goblin in area 5 spotted the characters and warned the goblins here, they are ready for trouble. The noise of the waterfall means that the creatures in area 8 can't hear any fighting that takes place here, and vice versa. Therefore, as soon as a fight breaks out here, one goblin flees to area 8 to warn Klarg.
Rock Dams. The goblins built simple dams to control the flow of water through the heart of the complex. If the goblin sentry in area 5 has called for the goblins here to release a flood, one or both of the pools are mostly empty and the stream is flowing unimpeded.
""",
"next_steps": {
"cave_8": "if the party continues south.",
"cave_5_upper": "if the party continues east.",
}
},
{
"name": "cave_5_upper",
"description": """
Where a high tunnel passes through the larger tunnel cavern below, the goblins have set up a bridge guard post.
The stream passage continues up beyond another set of uneven steps ahead, bending eastward as it goes.
        A waterfall sounds out from a larger cavern somewhere ahead of you.
If the characters didn't spot the bridge while navigating area 4, they spot it now. Add:
A rickety bridge spans the passage, connecting two tunnels that are 20 feet above the stream.
One goblin stands watch on the bridge.
It is hiding, and characters can spot it by succeeding on a Wisdom (Perception) check contested by the goblin's Dexterity (Stealth) check.
This guard is lazy and inattentive. If no characters are using light sources, each character can attempt a Dexterity (Stealth) check against the goblin's passive Wisdom (Perception) score to creep by without being noticed.
If the goblin spots the adventurers, it signals the goblins in area 7 to release a flood, then throws javelins down at the characters.
The players are approaching the bridge from the west tunnel, and can now cross the bridge.
""",
"next_steps": {
"flood": "if the goblin signals to start the flood.",
"cave_6": "after crossing the bridge, there's a long passageway to the east, which leads to the goblin's den",
}
},
{
"name": "cave_6",
"description": """
The Cragmaw raiders stationed in the hideout use this area as a common room and barracks.
This large cave is divided in half by a ten-foot-high
escarpment. A steep natural staircase leads from the lower portion to the upper ledge. The air is hazy with the smoke of a cooking fire, and pungent from the smell of poorly cured hides and unwashed goblins.
Six goblins inhabit this den, and one of them is a leader with 12 hit points. The five ordinary goblins tend the cooking fire in the lower (northern) part of the cave near the entrance passage, while the leader rests in the upper (southern) part of the cave.
Sildar Hallwinter, a human warrior, is held prisoner in this chamber. He is securely bound on the southern ledge of the cavern. The goblins have been beating and tormenting him, so he is weak and at 1 hit point.
The goblin leader, Yeemik, is second-in-command
of the whole hideout. If he sees that the characters are getting the upper hand, he grabs Sildar and drags him over to the edge of the upper level. "Truce, or this human dies!" he shouts.
Yeemik wants to oust Klarg and become the new boss. If the adventurers agree to parley, Yeemik tries to convince them to kill Klarg in area 8, promising to release Sildar when they bring back the bugbear's head. Sildar groggily warns the characters that they shouldn't trust the goblin, and he's right. If the characters take the deal, Yeemik tries to force them to pay a rich ransom for Sildar even after
they complete their part of the bargain.
If the characters refuse to parley, Yeemik shoves Sildar over the edge and continues with the fight. Sildar takes
1d6 bludgeoning damage from the fall, which is enough to drop him to 0 hit points. Quick-acting characters can try to stabilize him before he dies (see "Damage, Healing, and Dying" in the rulebook).
ROLE PLAYING SILDAR
Sildar Hallwinter is a kindhearted human male of nearly fifty years who holds a place of honor in the famous
griffon cavalry of the great city of Waterdeep. He is an agent of the Lords' Alliance, a group of allied political powers concerned with mutual security and prosperity. Members of the order ensure the safety of cities and other settlements by proactively eliminating threats by any means, while bringing honor and glory to their leaders
and homelands.
Sildar met Gundren Rockseeker in Neverwinter and agreed to accompany him back to Phandalin. Sildar wants to investigate the fate of Iarno Albrek, a human wizard and fellow member of the Lords' Alliance who disappeared shortly after arriving in Phandalin. Sildar hopes to learn what happened to Iarno, assist Gundren in reopening the old mine, and help restore Phandalin to a civilized center of wealth and prosperity.
Sildar provides the characters with four pieces of useful information:
- The three Rockseeker brothers (Gundren, Tharden, and Nundro) recently located an entrance to the long-lost Wave Echo Cave, site of the mines of the Phandelver's Pact. (Share the information in the first two paragraphs of the "Background" section to the players at this time.) Klarg, the bugbear who leads this goblin band, had orders to waylay Gundren. Sildar heard from the goblins that the Black Spider sent word that the dwarf was to
be brought to him. Sildar doesn't know who or what the
Black Spider is.
- Gundren had a map showing the secret location of Wave
Echo Cave, but the goblins took it when they captured him. Sildar believes that Klarg sent the map and the dwarf to the chief of the Cragmaws at a place called Cragmaw Castle. Sildar doesn't know where that might be, but he suggests someone in Phandalin might know. (It doesn't occur to Sildar immediately, but a captured goblin might also be persuaded to divulge the castle's location. See the "What the Goblins Know" sidebar on page 8.)
- Sildar's contact in Phandalin is a human wizard named Iarno Albrek. The wizard traveled to the town two months ago to establish order there. After the Lords' Alliance received no word from Iarno, Sildar decided to investigate.
Sildar tells the characters that he intends to continue on to Phandalin, since it's the nearest settlement. He offers to pay the party 50 gp to provide escort. Although he has no money on him, Sildar can secure a loan to pay the characters within a day after arriving in Phandalin. First, he hopes they'll put a stop to the goblin raids by clearing out the caves.
DEVELOPMENTS
If he is rescued and healed, Sildar Hallwinter remains with the party but is anxious to reach Phandalin as quickly as possible. He doesn't have any weapons or armor, but
he can take a shortsword from a defeated goblin or use a weapon loaned to him by a character.
If Sildar joins the party, see the "NPC Party Members" sidebar for tips on how to run him.
TREASURE
Yeemik carries a pouch containing three gold teeth (1 gp each) and 15 sp. Sildar's gear, along with Gundren Rockseeker, was taken to Cragmaw Castle.
NPC PARTY MEMBERS
An NPC might join the party, if only for a short time. Here are some tips to help you run an NPC party member:
+ Let the characters make the important decisions. They are the protagonists of the adventure. If the characters ask an NPC party member for advice or direction, remember that NPCs make mistakes too.
+ An NPC won't deliberately put himself or herself in harm's
way unless there's a good reason to do so.
+ An NPC won't treat all party members the same way, which can create some fun friction. As an NPC gets to know the characters, think about which characters the NPC likes most and which ones the NPC likes least, and let those likes and dislikes affect how the NPC interacts with the party members.
+ In a combat encounter, keep the NPC's actions simple and straightforward. Also, look for things that the NPC can do besides fighting. For example, an NPC might stabilize a dying character, guard a prisoner, or help barricade a door.
+ If an NPC contributes greatly to the party's success in a
battle, the NPC should receive an equal share of the XP
earned for the encounter. (The characters receive less XP as a consequence.)
+ NPCs have their own lives and goals. Consequently, an NPC should remain with the party only as long as doing so makes sense for those goals.
""",
"next_steps": {
"cave_1": "if the party decides to leave the cave.",
}
},
{
"name": "flood",
"description": """
The large pools in area 7 have collapsible walls that can be yanked out of place to release a surge of water down the main passage of the lair.
In the round after the goblins in area 7 are signaled by the lookout in area 5, they start knocking away the supports.
In the following round on the goblins' initiative count, a water surge pours from area 7 down to area 1.
The passage is suddenly filled with a mighty roar, as a huge surge of rushing water pours down from above!
The flood threatens all creatures in the tunnel. (Creatures on the bridge at area 5 are out of danger, as are any characters successfully climbing the cavern walls.)
Any creature within 10 feet of the disused passage at area 4 or the steps leading up to area 3 can attempt a DC 10 Dexterity saving throw to avoid being swept away.
A creature that fails to get out of the way can attempt a DC 15 Strength saving throw to hold on.
On a failed save, the character is knocked prone and washed down to area 1, taking 1d6 bludgeoning damage along the way.
The goblins in area 7 can release a second flood by opening the second pool, but they don't do this unless the goblin on the bridge tells them to.
The goblin on the bridge waits to see if the first flood got rid of all the intruders before calling for the second to be released.
"""
},
{
"name": "cave_8",
"description": """
The leader of the goblinoids insists on keeping the bulk of the raiders' stolen goods in his den. The Cragmaws' plunder from the last month of raiding and ambushing caravans is here.
Sacks and crates of looted provisions are piled up in the south end of this large cave. To the west, the floor slopes toward a narrow opening that descends into darkness. A larger opening leads north down a set of natural stone steps, the roar of falling water echoing from beyond. In the middle of the cavern, the coals of a large fire smolder.
Klarg the bugbear shares this cave with his mangy pet wolf, Ripper, and two goblins. The bugbear is filled with delusions of grandeur and views himself as a mighty warlord just beginning his career of conquest. He is not entirely sane, referring to himself in the third person ("Who dares defy Klarg?" or "Klarg will build a throne
from your bones, puny
ones!"). The goblins under his command resent his bullying.
Fire Pit. The hot coals in the central fire pit deal 1 fire damage to any creature that enters the fire pit, or 1d6 fire damage to any creature that falls prone there. A creature can take each type of damage only once per round.
Natural Chimney. A niche
in the western wall forms the top of a shaft that descends 30 feet to area 3. See that area for information on climbing the natural chimney.
Supplies. The piles of sacks and crates can provide half cover to any creature fighting or hiding behind them. Most are marked with the image of a blue lion--the symbol of the Lionshield Coster, a merchant company with a warehouse
and trading post in Phandalin. Hidden among the supplies is an unlocked treasure chest
belonging to Klarg (see the "Treasure" section). Any character who searches the supplies finds the chest.
DEVELOPMENTS
If Klarg is warned by the goblins in area 7 that the hideout is under attack, he and his wolf hide behind stalagmites while the goblins take cover behind the piles of supplies, hoping to ambush the characters when they enter the cave.
If Klarg and company are not warned about possible attackers, the characters have a good chance to surprise them. The easiest way for the characters to achieve this
is to climb the chimney from area 3, since Klarg does not expect an attack from that direction.
If the wolf is killed, the bugbear attempts to climb down the chimney to area 3 and flee the cave complex.
TREASURE
The captured stores are bulky, and the characters will need a wagon to transport them. If they return the supplies to the Lionshield Coster in Phandalin (see part 2, "Phandalin"), they earn a reward of 50 gp and the friendship of Linene and her company.
In addition to the stolen provisions, Klarg has a treasure chest that contains 600 cp, 110 sp, two potions of healing, and a jade statuette of a frog with tiny golden orbs for eyes (40 gp). The frog statuette is small enough to fit in a pocket or pouch.
""",
"next_steps": {
"cave_7": "if the party heads north, towards the waterfall",
"cave_3": "if the party goes through the fissure",
}
}
]
from modularity import OpenAI
import traceback
client = OpenAI()
class Convo:
def __init__(self, story_name):
self.story_name = story_name
self.story_dir = Path(f"stories/{story_name}")
self.story_dir.mkdir(exist_ok=True, parents=True)
self.story_part_name = self.get_txt("story_part")
if self.story_part_name is None:
self.story_part_name = 'start'
self.set_txt("story_part", self.story_part_name)
print("Continuing from...", self.story_part_name)
dialogue = self.get_txt("dialogue")
if dialogue is None:
self.M = []
else:
self.M = [{"role": "user", "content": 'PREVIOUS DIALOGUE:\n' + dialogue[-1500:] + "..."}]
self.briefly_summarize()
self.summary = []
self.type_modifiers = {
'strength': 2,
'dexterity': 1,
'constitution': 0,
'intelligence': -1,
'wisdom': -2,
'charisma': -3,
}
@property
def story_part(self):
return [x for x in storyline if x['name'] == self.story_part_name][0]
def briefly_summarize(self):
self.computersay(f"(summarizing from last time...) " + self.summarize_plotline("Explain this to the player, bringing them up to speed on what just happened. Hopefully just one sentence will suffice."))
def get_txt(self, name):
story_part_file = self.story_dir / f"{name}.txt"
if story_part_file.exists():
return story_part_file.read_text().strip()
else:
return None
def set_txt(self, name, content):
f = self.story_dir / f"{name}.txt"
f.write_text(content)
def add_txt(self, name, content):
f = self.story_dir / f"{name}.txt"
if not f.exists():
f.write_text(content)
else:
f.write_text(f.read_text() + "\n" + content)
# ------------------
# Summarizing is probably useful!
# ------------------
def summarize_character(self):
message_text = "\n".join([f"+ {x['role']}: {x['content']}" for x in self.M])
prompt = f"""
Your goal is to extract full character sheets for the players involved in this adventure.
Messages:
{message_text}
"""
print(prompt)
messages = [
{"role": "system", "content": prompt},
]
response = get_response(messages, model=model)
return response
def summarize_plotline(self, prompt=None):
message_text = "\n".join([f"+ {x['role']}: {x['content']}" for x in self.M])
if prompt is None:
prompt = f"""
Your goal is to summarize the plotpoints contained in the following conversation between a DM and a player.
In each plot point, be as specific as possible.
Keep note of any characters, locations, or items that are mentioned.
Do not include any information not present in the following messages!
Please be extremely concise.
"""
prompt += f"""
Messages:
{message_text}
"""
#print(prompt)
messages = [
{"role": "system", "content": prompt},
]
response = get_response(messages, model=model)
#print('Summarized!')
#print(response)
return response
def inventory(self, action, object):
self.add_txt("inventory", f"{action}: {object}")
return f"Inventory {action}: {object}"
def change_scene(self, to):
self.story_part_name = to
self.set_txt("story_part", self.story_part_name)
return "Changed scene to " + to
def roll_hit_dice(self, n_sides, n_dice, kind=None, **kwargs):
import random
result = [ random.randint(1, n_sides) for i in range(n_dice) ]
result = result_og = sum(result)
mod = 0
if kind is not None and kind in self.type_modifiers:
mod += self.type_modifiers[kind]
result += mod
return f"Rolled {n_dice}d{n_sides} (kind={kind}) {result_og} + {mod} = {result}"
# ------------------
# SAYING STUFF
# ------------------
def humansay(self, content):
self.M.append({"role": "user", "content": content})
self.add_txt("dialogue", f"Player:\n{content}")
def computersay(self, content):
self.M.append({"role": "assistant", "content": content})
self.add_txt("dialogue", f"DM:\n{content}")
print("DM:", content)
def computersay_self(self, content):
self.M.append({"role": "system", "content": content})
self.add_txt("dialogue", f"DM (to themselves):\n{content}")
def _format_inventory(self):
inventory = self.get_txt("inventory")
if inventory is None:
if self.story_part_name == 'start':
self.inventory("add", "10 gold pieces")
self.inventory("add", "a backpack")
self.inventory("add", "a bedroll")
self.inventory("add", "a mess kit")
self.inventory("add", "a tinderbox")
self.inventory("add", "10 torches")
self.inventory("add", "10 days of rations")
self.inventory("add", "a waterskin")
self.inventory("add", "50 feet of hempen rope")
return self._format_inventory()
else:
inventory = "The player has nothing."
return inventory
# ------------------
# Thinking, Acting, and Responding
# ------------------
def think(self):
prompt = f"""
You are an assistant to the DM.
Speak directly to the DM (not the player).
Give some thoughts or ideas to the DM to help them conduct their duties.
If you think everything is clear, type 'pass'.
Be concise, specific, and clear.
"""
messages = [
{"role": "system", "content": prompt},
*self.M,
{"role": "system", "content": "What do you think to yourself? Be brief."},
]
response = get_response(messages, model=model)
self.computersay_self("(thinking...) " + response)
def act(self):
story_part = self.story_part
next_steps = "\n".join([f"\t{x}: {y}" for x, y in story_part['next_steps'].items()])
inventory = self._format_inventory()
prompt = f"""
Your current inventory is:
{inventory}
Based on the dialogue so far, you are to decide what actions to take next.
Most of the time no action will need to be taken. In this case, simply type "pass".
Please do not act in a way not directly implied by the dialogue so far.
Although there is no rush to change the 'scene', you must eventually do so, in order to continue the story.
If you want to change the scene, type:
{{"type": "change_scene", "to": "scene name"}}
To roll hit dice, type:
{{"type": "roll_hit_dice", "n_dice": 1, "n_sides": 6, "kind": "strength"}}
To add or remove from inventory, type:
{{"type: "inventory", "action": "add|remove", "object": "object name, description, and/or stats"}}
ALWAYS USE DOUBLE QUOTES FOR JSON STRINGS
You can type a command on each line.
You CANNOT mix commands and statements.
Scenes available, their names and descriptions:
{next_steps}
"""
messages = [
{"role": "system", "content": prompt},
*self.M,
{"role": "system", "content": "What do you do? (type = change_scene, roll_hit_dice, or inventory). Use only JSON strings, one per line. If no action need be taken from the most recent message, simply type 'pass'."},
]
response = get_response(messages, model=model)
if response.strip() == "pass":
return
parts = response.split("}")
for part in parts:
if part == "":
continue
part += "}"
try:
part = json.loads(part)
self.act_on(part)
except json.decoder.JSONDecodeError:
print("Invalid JSON:", part)
def act_on(self, action):
print("Executing... ", json.dumps(action, indent=2))
act = dict(action)
typ = action.pop('type')
try:
fn = getattr(self, typ)
response = fn(**action)
self.computersay_self(response)
except Exception as e:
# first get the last line of the traceback
tb = traceback.format_exc().splitlines()[-1]
# then get the last line of the error
error = str(e).splitlines()[-1]
self.computersay_self(f"Error in command '{json.dumps(act, indent=2)}': {error} ({tb})")
self.computersay_self("Please rewrite this one.")
self.act()
def respond(self):
story_part = self.story_part
my_messages = []
inventory = self._format_inventory()
prompt = f"""
You are a DM.
Speak directly to the player.
You should not reveal information about the scene they would not otherwise know.
Usually they can access otherwise unseen information if they roll a perception, history, investigation, nature, or insight check.
Be relatively brief in your responses.
You are currently in the scene "{story_part['name']}".
Your current inventory is:
{inventory}
The message you type will be sent to the player from the DM.
Description of the current scene:
{story_part['description']}
"""
my_messages.append({'role': 'system', 'content': prompt})
response = get_response(my_messages + self.M, model=model)
self.computersay(response)
# consolidate things, if it's getting long
if len(self.M) > 10:
# remember a summary of the messages
self.summary.append(self.summarize_plotline())
# (mostly) clear the messages
self.M = self.M[-2:]
# ------------------
# Running the Conversation
# ------------------
def run(self):
# human does its thing
query = input(">> ")
self.humansay(query)
# computer does its thing
self.act()
self.think()
self.respond()
self.act()
def loop(self):
while True:
self.run()
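
# Note (added comment): each adventure keeps its state as plain text files under
# stories/<adventure_name>/ -- story_part.txt, dialogue.txt and inventory.txt --
# managed through the get_txt/set_txt/add_txt helpers above.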
c = Convo(adventure_name)
c.loop() | [
"\n You are an assistant to the DM.\n Speak directly to the DM (not the player).\n Give some thoughts or ideas to the DM to help them conduct their duties.\n If you think everything is clear, type 'pass'.\n Be concise, specific, and clear.\n ",
"PREVIOUS DIALOGUE:\nPLACEHOLDER...",
"\n Your current inventory is:\n PLACEHOLDER\n \n Based on the dialogue so far, you are to decide what actions to take next.\n Most of the time no action will need to be taken. In this case, simply type \"pass\".\n Please do not act in a way not directly implied by the dialogue so far.\n Although there is no rush to change the 'scene', you must eventually do so, in order to continue the story.\n\n If you want to change the scene, type:\n {\"type\": \"change_scene\", \"to\": \"scene name\"}\n\n To roll hit dice, type:\n {\"type\": \"roll_hit_dice\", \"n_dice\": 1, \"n_sides\": 6, \"kind\": \"strength\"}\n\n To add or remove from inventory, type:\n {\"type: \"inventory\", \"action\": \"add|remove\", \"object\": \"object name, description, and/or stats\"}\n\n ALWAYS USE DOUBLE QUOTES FOR JSON STRINGS\n\n You can type a command on each line.\n You CANNOT mix commands and statements.\n\n Scenes available, their names and descriptions:\n PLACEHOLDER\n ",
"\n Your goal is to summarize the plotpoints contained in the following conversation between a DM and a player.\n In each plot point, be as specific as possible.\n Keep note of any characters, locations, or items that are mentioned.\n Do not include any information not present in the following messages!\n Please be extremely concise.\n ",
"\n Your goal is to extract full character sheets for the players involved in this adventure.\n\n Messages:\n PLACEHOLDER\n ",
"What do you think to yourself? Be brief.",
"\n\n Messages:\n PLACEHOLDER\n ",
"\n You are a DM.\n Speak directly to the player.\n You should not reveal information about the scene they would not otherwise know.\n Usually they can access otherwise unseen information if they roll a perception, history, investigation, nature, or insight check.\n Be relatively brief in your responses.\n\n You are currently in the scene \"PLACEHOLDER\".\n\n Your current inventory is:\n PLACEHOLDER\n\n The message you type will be sent to the player from the DM.\n\n Description of the current scene:\n PLACEHOLDER\n "
] |
2024-01-10 | janhavidoshi/crisp-journey | crisp~sentiment_analysis.py | from sqlalchemy import create_engine
import sqlite3
import pandas as pd
from transformers import BertTokenizer, BertForSequenceClassification
from torch.nn.functional import softmax
import torch
# Load pre-trained model and tokenizer
model = BertForSequenceClassification.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment')
tokenizer = BertTokenizer.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment')
# Connect to the SQLite database
conn = sqlite3.connect('data/stocks.db')
# Add a sentiment column to the stock_news table if it does not already exist
try:
    conn.execute("ALTER TABLE stock_news ADD COLUMN sentiment INTEGER")
except sqlite3.OperationalError:
    # Column already exists from a previous run; nothing to do
    pass
# Define a query to get the news headlines
query = """
SELECT datetime, Stock, summary
FROM stock_news;
"""
# Read the data into a pandas DataFrame
df = pd.read_sql_query(query, conn)
# Define a function to get the sentiment of a text
def get_sentiment(text):
if not text:
return None
inputs = tokenizer(text, return_tensors='pt', truncation=True, padding=True, max_length=512)
outputs = model(**inputs)
probs = softmax(outputs.logits, dim=-1)
sentiment = torch.argmax(probs, dim=-1).item()
return sentiment
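
# Added note (not in the original script): this nlptown checkpoint is a 1-5 star
# review-sentiment model, so get_sentiment returns a class index from 0 (most
# negative) to 4 (most positive); a clearly upbeat headline usually maps to 3 or 4.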
# Get the sentiment for each news headline
df['sentiment'] = df['summary'].apply(get_sentiment)
# Update the stock_news table with the sentiment scores
for index, row in df.iterrows():
conn.execute("UPDATE stock_news SET sentiment = ? WHERE datetime = ? AND Stock = ?", (row['sentiment'], row['datetime'], row['Stock']))
# Commit the changes and close the connection
conn.commit()
conn.close()
print('Sentiment analysis completed and updated in the stock_news table!')
# from sqlalchemy import create_engine
# import sqlite3
# import pandas as pd
# from transformers import BertTokenizer, BertForSequenceClassification
# from torch.nn.functional import softmax
# import torch
#
# # Load pre-trained model and tokenizer
# model = BertForSequenceClassification.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment')
# tokenizer = BertTokenizer.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment')
#
# # Connect to the SQLite database
# conn = sqlite3.connect('data/stocks.db')
#
# # Define a query to get the news headlines
# query = """
# SELECT datetime, Stock, summary
# FROM stock_news
# WHERE datetime BETWEEN '2023-08-01' AND '2023-08-31';
# """
#
# # Read the data into a pandas DataFrame
# df = pd.read_sql_query(query, conn)
#
# # Close the connection
# conn.close()
#
# # Define a function to get the sentiment of a text
# def get_sentiment(text):
# inputs = tokenizer(text, return_tensors='pt', truncation=True, padding=True, max_length=512)
# outputs = model(**inputs)
# probs = softmax(outputs.logits, dim=-1)
# sentiment = torch.argmax(probs, dim=-1).item()
# return sentiment
#
# # Get the sentiment for each news headline
# df['sentiment'] = df['headline'].apply(get_sentiment)
#
# # Store the results in a new SQLite table
# engine = create_engine('sqlite:///data/stocks.db')
# df.to_sql('stock_news_sentiment', engine, index=False, if_exists='replace')
# print('Sentiment analysis completed and stored in the SQLite database!')
# import openai
# import sqlite3
# import pandas as pd
# from sqlalchemy import create_engine
# # OpenAI API key
# openai.api_key = "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"
#
# # Connect to the SQLite database
# conn = sqlite3.connect('data/stocks.db')
#
# # Define a query to get the news headlines
# query = """
# SELECT datetime, Stock, headline
# FROM stock_news
# WHERE datetime BETWEEN '2023-08-01' AND '2023-08-31'
# AND Stock = "AAPL";
# """
#
# # Read the data into a pandas DataFrame
# df = pd.read_sql_query(query, conn)
#
# # Close the connection
# conn.close()
#
# # Define a function to get the sentiment of a text
# def get_sentiment(text):
# response = openai.Completion.create(
# engine="text-davinci-002",
# prompt=f"The sentiment of the following text is: {text}",
# max_tokens=5,
# )
# sentiment = response.choices[0].text.strip()
# return sentiment
#
# # Get the sentiment for each news headline
# df['sentiment'] = df['headline'].apply(get_sentiment)
#
# # Store the results in a new SQLite table
# engine = create_engine('sqlite:///data/stocks.db')
# df.to_sql('stock_news_sentiment', engine, index=False, if_exists='replace')
#
# print('Sentiment analysis completed and stored in the SQLite database!')
| [] |
2024-01-10 | vigneshwerv/Deep-Reinforcement-Learning-in-Video-Games | environments~AtariEnvironment.py | from BaseEnvironment import BaseEnvironment
import cv2
import gym
import numpy as np
class AtariEnvironment(BaseEnvironment):
"""Wrapper class for the OpenAI Gym Atari Environment."""
def __init__(self, render=False, **kwargs):
self.environment = gym.make('Pong-v0')
args = kwargs.get('args')
self.width = 84
self.height = 84
self.possible_actions = self.environment.action_space
if (type(self.possible_actions) != type(gym.spaces.discrete.Discrete(0))):
raise AssertionError("The sample action space does not consist of Discrete actions.")
self.previous_observation = None
self.recent_observation = None
self.total_score = 0
self.num_games = 0
self.should_render = args.test
self.done = False
self.reset()
def getReward(self):
return self.recent_reward
def getObservation(self):
return self.recent_observation
def getActionPerformed(self):
return self.recent_action
def resetStatistics(self):
self.total_score = 0
self.num_games = 0
self.reset()
def getStatistics(self):
return self.total_score, self.num_games
def performAction(self, action):
self.recent_action = action
observation, reward, done, _ = self.environment.step(self.recent_action)
self.done = done
self.recent_observation = self._preprocess_observation_(observation)
self.recent_reward = reward
self.total_score += self.recent_reward
if self.should_render:
self.environment.render()
return True
def isTerminalState(self):
return self.done
def getPossibleActions(self):
return self.possible_actions.n
def sampleRandomAction(self):
return self.possible_actions.sample()
def _preprocess_observation_(self, observation):
"""This method is to preprocess observation images.
The RGB images from OpenAI are converted CMYK images and the luminance (Y)
channel is extracted, downsampled to a width and height of 84x84 using
Anti-aliasing, and returned.
"""
return cv2.resize(cv2.cvtColor(observation, cv2.COLOR_RGB2GRAY), (self.width, self.height))
def reset(self):
self.recent_observation = self._preprocess_observation_(self.environment.reset())
self.num_games += 1
self.done = False
if self.should_render:
self.environment.render()
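
# Hypothetical usage sketch (not part of the original module): drive the wrapper
# with a few random actions. Assumes the gym "Pong-v0" ROM is available; the
# `args` object only needs a boolean `test` attribute here.
if __name__ == "__main__":
    import argparse

    demo_args = argparse.Namespace(test=False)
    env = AtariEnvironment(args=demo_args)
    for _ in range(100):
        if env.isTerminalState():
            break
        env.performAction(env.sampleRandomAction())
    score, games = env.getStatistics()
    print("score:", score, "games played:", games)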
| [] |
2024-01-10 | amitgupta4407/All_About_PDF | FileQueryHub.py | import os
import streamlit as st
import pandas as pd
from docx import Document
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
def process_multiple_files(files):
combined_text = ""
for uploaded_file in files:
file_extension = os.path.splitext(uploaded_file.name)[1].lower()
if file_extension == ".pdf":
pdf_reader = PdfReader(uploaded_file)
for page in pdf_reader.pages:
combined_text += page.extract_text()
elif file_extension == ".txt":
combined_text += uploaded_file.read().decode("utf-8")
elif file_extension == ".xlsx":
excel_data = pd.read_excel(uploaded_file)
combined_text += excel_data.to_string()
elif file_extension == ".sql":
combined_text += uploaded_file.read().decode("utf-8")
elif file_extension == ".docx":
doc = Document(uploaded_file)
for paragraph in doc.paragraphs:
combined_text += paragraph.text + "\n"
elif file_extension == ".csv":
csv_data = pd.read_csv(uploaded_file)
combined_text += csv_data.to_string()
# Add more file type handling here as needed
else:
st.warning(f"Unsupported file type: {file_extension}. Skipping.")
# print(combined_text)
return combined_text
def main():
st.set_page_config(page_title="FileQueryHub", page_icon="📄")
st.header("FileQueryHub 📂🤖")
files = st.file_uploader(
"Upload multiple files",
type=["pdf", "txt", "xlsx", "sql", "docx", "csv"],
accept_multiple_files=True
)
if files:
combined_text = process_multiple_files(files)
# with st.expander("See explanation"):
# st.write(combined_text)
OPENAI_API_KEY = st.text_input("OPENAI API KEY", type="password")
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(combined_text)
# st.write(chunks)
# creating embeddings
if OPENAI_API_KEY:
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
# st.write("Embedding Created")
# st.write(embeddings)
with st.spinner("Creating Knowledge Base..."):
knowledge_base = FAISS.from_texts(chunks, embeddings)
st.success("Knowledge Base created")
st.write("Chat with Multiple Files 🗣️📚")
def ask_question(i=0):
                user_question = st.text_input("Ask a question about your files?", key=i)
print(user_question)
if user_question:
with st.spinner("Searching for answers..."):
docs = knowledge_base.similarity_search(user_question)
with st.expander("See docs"):
st.write(docs)
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs, question=user_question)
print(cb)
st.write(response)
ask_question(i+1)
ask_question()
if __name__ == "__main__":
main()
| [] |
2024-01-10 | vmueller71/amazon-aiml-genai-streamlit-example | bedrock_util.py | import boto3
import botocore
import constants as const
from langchain.llms.bedrock import Bedrock
region = const.region_name
url = const.bedrock_ep_url
inference_modifier_titan = {
"maxTokenCount": 4096,
"stopSequences": [],
"temperature":0.1,
"topP":0.9
}
inference_modifier_claude = {
'max_tokens_to_sample':4096,
"temperature":0.5,
"top_k":250,
"top_p":1,
"stop_sequences": ["\n\nHuman"]
}
def bedrock_test():
bedrock= boto3.client(service_name='bedrock', region_name=region, endpoint_url=url)
output_text = bedrock.list_foundation_models()
return output_text
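
# Hypothetical companion helper (not in the original module): a Claude-backed
# variant of get_genAI_llm below that reuses inference_modifier_claude. The model
# id "anthropic.claude-v2" is an assumption about what is enabled in the account.
def get_claude_llm():
    session = boto3.Session()
    bedrock = session.client(service_name='bedrock', region_name=region, endpoint_url=url)
    claude_llm = Bedrock(model_id="anthropic.claude-v2",
                         client=bedrock,
                         model_kwargs=inference_modifier_claude)
    return claude_llm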
def get_genAI_llm():
session = boto3.Session()
bedrock= session.client(service_name='bedrock', region_name=region, endpoint_url=url)
genAI_llm = Bedrock(model_id = "amazon.titan-tg1-large",
client = bedrock,
model_kwargs = inference_modifier_titan)
return genAI_llm | [] |
2024-01-10 | yonjovi/trance_scrubber | trance_scriber.py | import whisper
import streamlit as st
import streamlit_ext as ste
import boto3
import pytube
import openai
import time
s3 = boto3.client('s3',
region_name=st.secrets['region_name'],
aws_access_key_id=st.secrets['aws_access_key_id'],
aws_secret_access_key=st.secrets['aws_secret_access_key'])
formatted_result_app = None
p_url = None
yt_video = None
# with st.sidebar:
# st.subheader("Open AI API Key (Optional)")
# OPEN_AI_API_KEY = st.text_input("Enter your Open AI API Key here:", type="password")
def upload_audio_to_s3(file, bucket, s3_file):
try:
s3.upload_fileobj(file, bucket, s3_file)
return True
except FileNotFoundError:
time.sleep(9)
st.error('File not found.')
return False
def summarise(text_input, yt_title):
try:
openai.api_key = OPEN_AI_API_KEY
yt_title = yt_title
response = openai.Completion.create(
model='text-davinci-002',
prompt=f'Summarize this transcribed text from the YouTube video titled "{yt_title}" in dot points:\n\n{text_input}',
temperature=0.7,
max_tokens=4096,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['text']
except:
return st.warning("Please enter Open AI API Key in the sidebar", icon="↖️")
st.header('Transcribe this!')
st.write('Upload an audio file in any language and let me transcribe it for you 🥳')
st.write('')
transcribe_choice = st.radio(
'Upload your audio or choose a Youtube video to transcribe:',
key='choices',
options=['Upload audio file', 'Transcribe Youtube video']
)
st.write(transcribe_choice)
with st.form('silent-whisper', clear_on_submit=False):
if transcribe_choice == 'Upload audio file':
uploaded_file = st.file_uploader('Choose an audio file', accept_multiple_files=False, type=['wav', 'aif', 'mp3',
'aiff', 'flac',
'aac', 'mp4',
'wma', 'ogg'])
submitted = st.form_submit_button('TRANSCRIBE!')
if uploaded_file is not None:
try:
with st.spinner('Getting ready...'):
upload_audio_to_s3(uploaded_file, st.secrets['bucket_name'], uploaded_file.name)
p_url = s3.generate_presigned_url(
ClientMethod='get_object',
Params={'Bucket': st.secrets['bucket_name'], 'Key': uploaded_file.name},
ExpiresIn=1800
)
if p_url is not None:
st.success('Let\'s go!', icon='🕺')
with st.spinner('Transcribing...'):
model = whisper.load_model('base')
result = model.transcribe(p_url, fp16=False)
result_app = result['text']
if result_app == '':
st.warning('I have no words...(to transcribe)', icon='🤖')
else:
formatted_result_app = f'Transcribed text from "{uploaded_file.name}": \n\n{result_app}'
st.subheader(f'Transcribed text from "{uploaded_file.name}":')
st.write(result_app)
st.write('')
st.write('')
except RuntimeError:
st.warning('Please upload an audio file or try again!', icon='🧐')
elif transcribe_choice == 'Transcribe Youtube video':
try:
yt_link = st.text_input("Enter a Youtube link:")
submitted = st.form_submit_button('TRANSCRIBE!')
if submitted:
with st.spinner('Fetching video'):
yt_video = pytube.YouTube(yt_link)
yt_video_filter = yt_video.streams.filter(file_extension='mp4')
yt_stream = yt_video.streams.get_by_itag(139)
yt_filename = f'{yt_video.title}.mp4'
yt_stream_dl = yt_stream.download('', yt_filename)
# time.sleep(1)
with st.spinner('Transcribing...'):
with open(yt_stream_dl, 'rb') as f:
s3.upload_fileobj(f, st.secrets['bucket_name'], yt_filename)
p_url = s3.generate_presigned_url(
ClientMethod='get_object',
Params={'Bucket': st.secrets['bucket_name'], 'Key': yt_filename},
ExpiresIn=1800
)
model = whisper.load_model('base')
result = model.transcribe(p_url, fp16=False)
result_app = result['text']
if result_app == '':
st.warning('I have no words...(to transcribe)', icon='🤖')
else:
formatted_result_app = f'Transcribed text from "{yt_video.title}": \n\n{result_app}'
st.subheader(f'Transcribed text from "{yt_video.title}":')
st.caption("results below the video")
st.video(str(yt_link))
st.write(result_app)
st.write('')
st.write('')
except:
st.error('Hmm that doesn\'t look like a Youtube link to me...🙄🙄🙄 Try again perhaps? 🤷')
if formatted_result_app is not None and yt_video is not None:
ste.download_button('Download', formatted_result_app,
f'{yt_video.title} transcribed.txt')
# sum_or_not = st.radio(
# "Would you like to summarise the text using Open AI? (Open AI API Key required)",
# ("Yes", "No")
# )
# if sum_or_not == "Yes":
# try:
# st.subheader(f'Summary of text transcribed from "{yt_video.title}":')
# with st.spinner('Summarising...🌞🌞🌞'):
# summary = summarise(text_input=result_app, yt_title=yt_video.title)
# st.write(summary)
# except:
# st.info("Open AI API fees are not covered by this app unfortunately", icon="ℹ️")
elif formatted_result_app is not None and uploaded_file is not None:
ste.download_button('Download', formatted_result_app,
f'{uploaded_file.name} transcribed.txt')
# sum_or_not = st.radio(
# "Would you like to summarise the text using Open AI? (Open AI API Key required)",
# ("Yes", "No")
# )
# if sum_or_not == "Yes":
# try:
# st.subheader(f'Summary of text transcribed from "{uploaded_file.name}":')
# with st.spinner('Summarising...🌞🌞🌞'):
# summary = summarise(text_input=result_app, yt_title=yt_video.title)
# st.write(summary)
# except:
# st.info("Open AI API fees are not covered by this app unfortunately", icon="ℹ️")
#
| [
"Summarize this transcribed text from the YouTube video titled \"PLACEHOLDER\" in dot points:\n\nPLACEHOLDER"
] |
2024-01-10 | rachittshah/promonitor | openai_logger.py | """Callback Handler that prints to std out."""
from typing import Any, Dict, List
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult
MODEL_COST_PER_1K_TOKENS = {
# GPT-4 input
"gpt-4": 0.03,
"gpt-4-0314": 0.03,
"gpt-4-0613": 0.03,
"gpt-4-32k": 0.06,
"gpt-4-32k-0314": 0.06,
"gpt-4-32k-0613": 0.06,
# GPT-4 output
"gpt-4-completion": 0.06,
"gpt-4-0314-completion": 0.06,
"gpt-4-0613-completion": 0.06,
"gpt-4-32k-completion": 0.12,
"gpt-4-32k-0314-completion": 0.12,
"gpt-4-32k-0613-completion": 0.12,
# GPT-3.5 input
"gpt-3.5-turbo": 0.0015,
"gpt-3.5-turbo-0301": 0.0015,
"gpt-3.5-turbo-0613": 0.0015,
"gpt-3.5-turbo-16k": 0.003,
"gpt-3.5-turbo-16k-0613": 0.003,
# GPT-3.5 output
"gpt-3.5-turbo-completion": 0.002,
"gpt-3.5-turbo-0301-completion": 0.002,
"gpt-3.5-turbo-0613-completion": 0.002,
"gpt-3.5-turbo-16k-completion": 0.004,
"gpt-3.5-turbo-16k-0613-completion": 0.004,
# Azure GPT-35 input
"gpt-35-turbo": 0.0015, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0301": 0.0015, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0613": 0.0015,
"gpt-35-turbo-16k": 0.003,
"gpt-35-turbo-16k-0613": 0.003,
# Azure GPT-35 output
"gpt-35-turbo-completion": 0.002, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0301-completion": 0.002, # Azure OpenAI version of ChatGPT
"gpt-35-turbo-0613-completion": 0.002,
"gpt-35-turbo-16k-completion": 0.004,
"gpt-35-turbo-16k-0613-completion": 0.004,
# Others
"text-ada-001": 0.0004,
"ada": 0.0004,
"text-babbage-001": 0.0005,
"babbage": 0.0005,
"text-curie-001": 0.002,
"curie": 0.002,
"text-davinci-003": 0.02,
"text-davinci-002": 0.02,
"code-davinci-002": 0.02,
"ada-finetuned": 0.0016,
"babbage-finetuned": 0.0024,
"curie-finetuned": 0.012,
"davinci-finetuned": 0.12,
}
def standardize_model_name(
model_name: str,
is_completion: bool = False,
) -> str:
"""
Standardize the model name to a format that can be used in the OpenAI API.
Args:
model_name: Model name to standardize.
is_completion: Whether the model is used for completion or not.
Defaults to False.
Returns:
Standardized model name.
"""
model_name = model_name.lower()
if "ft-" in model_name:
return model_name.split(":")[0] + "-finetuned"
elif is_completion and (
model_name.startswith("gpt-4")
or model_name.startswith("gpt-3.5")
or model_name.startswith("gpt-35")
):
return model_name + "-completion"
else:
return model_name
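
# Illustrative behaviour (added note; the values follow from the branches above):
#   standardize_model_name("gpt-4", is_completion=True)    -> "gpt-4-completion"
#   standardize_model_name("ada:ft-your-org-2023-01-01")   -> "ada-finetuned"
#   standardize_model_name("text-davinci-003")             -> "text-davinci-003"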
def get_openai_token_cost_for_model(
model_name: str, num_tokens: int, is_completion: bool = False
) -> float:
"""
Get the cost in USD for a given model and number of tokens.
Args:
model_name: Name of the model
num_tokens: Number of tokens.
is_completion: Whether the model is used for completion or not.
Defaults to False.
Returns:
Cost in USD.
"""
model_name = standardize_model_name(model_name, is_completion=is_completion)
if model_name not in MODEL_COST_PER_1K_TOKENS:
raise ValueError(
f"Unknown model: {model_name}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(MODEL_COST_PER_1K_TOKENS.keys())
)
return MODEL_COST_PER_1K_TOKENS[model_name] * (num_tokens / 1000) | [] |
2024-01-10 | mlc-ai/mlc-llm | examples~rest~python~sample_langchain.py | from langchain.chat_models import ChatOpenAI
from langchain import LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.document_loaders import TextLoader, UnstructuredRSTLoader, DirectoryLoader
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.vectorstores import Chroma
# Note that Langchain support for embedding documents using MLC is currently blocked on
# https://github.com/langchain-ai/langchain/pull/7815
# We have subclassed `OpenAIEmbeddings` in the meantime to get around this dependency.
from mlc_chat.embeddings.openai import MLCEmbeddings
# First set the following in your environment:
# export OPENAI_API_BASE=http://127.0.0.1:8000/v1
# export OPENAI_API_KEY=EMPTY
# Note that Langchain does not currently support Pydantic v2:
# https://github.com/langchain-ai/langchain/issues/6841
# Please ensure that your `pydantic` version is < 2.0
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def llm_chain_example():
template = """
{history}
USER: {human_input}
ASSISTANT:"""
prompt = PromptTemplate(
input_variables=["history", "human_input"],
template=template
)
llm_chain = LLMChain(
llm=ChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()]),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(human_prefix="USER", ai_prefix="ASSISTANT")
)
output = llm_chain.predict(human_input="Write a short poem about Pittsburgh.")
output = llm_chain.predict(human_input="What does the poem mean?")
def load_qa_chain_example():
loader = TextLoader('../resources/linux.txt')
documents = loader.load()
chain = load_qa_chain(llm=OpenAI(), chain_type="stuff", verbose=False)
query = "When was Linux released?"
print(f"{color.BOLD}Query:{color.END} {color.BLUE} {query}{color.END}")
print(f"{color.BOLD}Response:{color.END} {color.GREEN}{chain.run(input_documents=documents, question=query)}{color.END}")
def retrieval_qa_sotu_example():
prompt_template = """Use only the following pieces of context to answer the question at the end. Don't use any other knowledge.
{context}
USER: {question}
ASSISTANT:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
loader = TextLoader('../resources/state_of_the_union.txt')
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
# print(texts)
embeddings = MLCEmbeddings(deployment="text-embedding-ada-002", embedding_ctx_length=None)
db = Chroma.from_documents(documents=texts, embedding=embeddings)
retriever = db.as_retriever(search_type="similarity", search_kwargs={"k":2})
qa = RetrievalQA.from_chain_type(
llm=OpenAI(),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": PROMPT}
)
questions = [
"What is the American Rescue Plan?",
"What did the president say about Ketanji Brown Jackson?",
"Who is mentioned in the speech?",
"To whom is the speech addressed?",
"Tell me more about the Made in America campaign."
]
for qn in questions:
print(f"{color.BOLD}QUESTION:{color.END} {qn}")
res = qa({'query': qn})
print(f"{color.BOLD}RESPONSE:{color.END} {color.GREEN}{res['result']}{color.END}")
print(f"{color.BOLD}SOURCE:{color.END} {color.BLUE}{repr(res['source_documents'][0].page_content)}{color.END}")
print()
def retrieval_qa_mlc_docs_example():
prompt_template = """Use only the following pieces of context to answer the question at the end. Don't use any other knowledge.
{context}
USER: {question}
ASSISTANT:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
loader = DirectoryLoader("../../../docs", glob='*/*.rst', show_progress=True, loader_cls=UnstructuredRSTLoader, loader_kwargs={"mode": "single"})
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
embeddings = MLCEmbeddings(deployment="text-embedding-ada-002", embedding_ctx_length=None)
db = Chroma.from_documents(collection_name="abc", documents=texts, embedding=embeddings)
retriever = db.as_retriever(search_type="similarity", search_kwargs={"k":3})
qa = RetrievalQA.from_chain_type(
llm=OpenAI(),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": PROMPT}
)
while True:
qn = input(f"{color.BOLD}QUESTION:{color.END} ")
res = qa({'query': qn})
print(f"{color.BOLD}RESPONSE:{color.END} {color.GREEN}{res['result']}{color.END}")
print(f"{color.BOLD}SOURCE:{color.END} {color.BLUE}{repr(res['source_documents'][0].page_content)}{color.END}")
print()
# Some example questions:
# - What is the chat config?
# - What is temperature?
# - What are the REST API endpoints?
# - What are the available quantization options?
# Uncomment one of the following lines to try out the corresponding demo:
# llm_chain_example()
# load_qa_chain_example()
# retrieval_qa_sotu_example()
# retrieval_qa_mlc_docs_example()
| [
"\n {history}\n USER: {human_input}\n ASSISTANT:",
"question",
"Use only the following pieces of context to answer the question at the end. Don't use any other knowledge.\n\n {context}\n\n USER: {question}\n ASSISTANT:",
"human_input",
"context"
] |
2024-01-10 | mlc-ai/mlc-llm | examples~rest~python~sample_openai.py | import openai
openai.api_key = "None"
openai.api_base = "http://127.0.0.1:8000/v1"
model = "vicuna-v1-7b"
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Chat completion example without streaming
print(f"{color.BOLD}OpenAI chat completion example without streaming:{color.END}\n")
completion = openai.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": "Write a poem about OpenAI"}]
)
print(f"{color.GREEN}{completion.choices[0].message.content}{color.END}\n\n")
# Chat completion example with streaming
print(f"{color.BOLD}OpenAI chat completion example with streaming:{color.END}\n")
res = openai.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": "Write a poem about OpenAI"}],
stream=True
)
for chunk in res:
content = chunk["choices"][0]["delta"].get("content", "")
print(f"{color.GREEN}{content}{color.END}", end="", flush=True)
print("\n")
# Completion example
print(f"{color.BOLD}OpenAI completion example:{color.END}\n")
res = openai.Completion.create(prompt="Write a poem about OpenAI", model=model)
print(f"{color.GREEN}{res.choices[0].text}{color.END}\n\n")
| [
"Write a poem about OpenAI"
] |
2024-01-10 | TRohit20/Video-Summariser-App | backend~models~summariser.py | # Set your OpenAI API key
import openai
openai.api_key = "Use-your-own-API-Key"
# Function to generate a summary of the transcript
def generate_summary(transcript: str) -> str:
instructPrompt = """
Given below is the transcript of a video. Please provide a concise yet comprehensive summary that captures the main points, key discussions, and any notable insights or takeaways.
How to perform this task:
First, break the transcript into logical sections based on topic or theme. Then, generate a concise summary for each section. Finally, combine these section summaries into an overarching summary of the entire video. The combined summary is what you should return back to me.
Things to focus on and include in your final summary:
- Ensure to extract the key insights, theories, steps, revelations, opinions, etc discussed in the video. Ensure that the summary provides a clear roadmap for listeners who want to implement the advice or insights(if any) shared.
- Identify any controversial or heavily debated points in the video. Summarize the various perspectives presented, ensuring a balanced representation of the video or points in the video.
- Along with a content summary, describe the overall mood or tone of the video. Were there moments of tension, humor, or any other notable ambiance details?
Here is the video transcript:
"""
request = instructPrompt + transcript
resp = openai.ChatCompletion.create(
model="gpt-4-32k",
messages=[
{"role": "system", "content": "You are a helpful AI assistant that specialises in summarizing data."},
{"role": "user", "content": request},
]
)
return resp['choices'][0]['message']['content'] | [
"\n Given below is the transcript of a video. Please provide a concise yet comprehensive summary that captures the main points, key discussions, and any notable insights or takeaways.\n\n How to perform this task:\n First, break the transcript into logical sections based on topic or theme. Then, generate a concise summary for each section. Finally, combine these section summaries into an overarching summary of the entire video. The combined summary is what you should return back to me.\n Things to focus on and include in your final summary:\n - Ensure to extract the key insights, theories, steps, revelations, opinions, etc discussed in the video. Ensure that the summary provides a clear roadmap for listeners who want to implement the advice or insights(if any) shared.\n - Identify any controversial or heavily debated points in the video. Summarize the various perspectives presented, ensuring a balanced representation of the video or points in the video.\n - Along with a content summary, describe the overall mood or tone of the video. Were there moments of tension, humor, or any other notable ambiance details?\n \n Here is the video transcript:\n PLACEHOLDER",
"\n Given below is the transcript of a video. Please provide a concise yet comprehensive summary that captures the main points, key discussions, and any notable insights or takeaways.\n\n How to perform this task:\n First, break the transcript into logical sections based on topic or theme. Then, generate a concise summary for each section. Finally, combine these section summaries into an overarching summary of the entire video. The combined summary is what you should return back to me.\n Things to focus on and include in your final summary:\n - Ensure to extract the key insights, theories, steps, revelations, opinions, etc discussed in the video. Ensure that the summary provides a clear roadmap for listeners who want to implement the advice or insights(if any) shared.\n - Identify any controversial or heavily debated points in the video. Summarize the various perspectives presented, ensuring a balanced representation of the video or points in the video.\n - Along with a content summary, describe the overall mood or tone of the video. Were there moments of tension, humor, or any other notable ambiance details?\n \n Here is the video transcript:\n ",
"You are a helpful AI assistant that specialises in summarizing data."
] |
2024-01-10 | IslamAlam/PyPolSAR | pypolsar~polsar~processor.py | import re
from pathlib import Path
import numpy as np
from .. import io, polsar, utils
from ..io.project import stand_pol_rat_files
# from timer import Timer
from ..opertaions import coherence, phase_diff, power_ratio
from ..stats.timer import Timer
class StandardPolarimetric:
def __init__(
self,
DIR_campaign,
ID_campaign,
ID_flight,
band,
ID_Pass,
n_try="01",
save_path=None,
n_windows=(7, 7),
coh=None,
crop=None,
crop_az=None,
crop_rg=None,
*args,
**kwargs,
):
master_files = stand_pol_rat_files(
DIR_campaign, ID_campaign, ID_flight, band, ID_Pass, n_try="01"
)
pat = r"incidence_(.*?)_t.*"
patch_name = re.findall(pat, Path(master_files["AOI"]).name)[0]
with Timer(name="read_files"):
fscl = np.sqrt(np.sin(io.rat.loadrat(master_files["AOI"])))
if crop is None:
self.slc_hh = io.rat.loadrat(master_files["HH"]) * fscl
self.slc_hv = io.rat.loadrat(master_files["HV"]) * fscl
self.slc_vh = io.rat.loadrat(master_files["VH"]) * fscl
self.slc_vv = io.rat.loadrat(master_files["VV"]) * fscl
else:
self.slc_hh = (io.rat.loadrat(master_files["HH"]) * fscl)[
crop_rg[0] : crop_rg[1], crop_az[0] : crop_az[1]
]
self.slc_hv = (io.rat.loadrat(master_files["HV"]) * fscl)[
crop_rg[0] : crop_rg[1], crop_az[0] : crop_az[1]
]
self.slc_vh = (io.rat.loadrat(master_files["VH"]) * fscl)[
crop_rg[0] : crop_rg[1], crop_az[0] : crop_az[1]
]
self.slc_vv = (io.rat.loadrat(master_files["VV"]) * fscl)[
crop_rg[0] : crop_rg[1], crop_az[0] : crop_az[1]
]
print("Patch Name\t: {}".format(patch_name))
print(
"Range\t\t: {}\nAzimuth\t\t: {}".format(
self.slc_hh.shape[0], self.slc_hh.shape[1]
)
)
with Timer(name="scat_matrix"):
# Do something
scat_matrix = self.cal_scattering_matrix(
self.slc_hh, self.slc_hv, self.slc_vh, self.slc_vv
)
with Timer(name="slc_pauli"):
# Do something
self.slc_pauli = polsar.decomposition.operators.pauli_vec(
scat_matrix
)
with Timer(name="polarimetric_matrix_jit"):
# Do something
self.slc_t44 = polsar.decomposition.operators.polarimetric_matrix_jit(
self.slc_pauli
)
with Timer(name="t_pauli_t_mat"):
# Do something
self.pauli_t_mat = np.stack(
(
self.slc_t44[:, :, 0, 0],
self.slc_t44[:, :, 1, 1],
self.slc_t44[:, :, 2, 2],
),
axis=2,
)
with Timer(name="eigen_decomposition_jit"):
# Do something
(
self.slc_ew4,
self.slc_ev44,
) = polsar.decomposition.eigen.eigen_decomposition_jit(self.slc_t44)
with Timer(name="ent_ani_alp_nc"):
# Do something
self.slc_ent_ani_alp_44_nc = polsar.parameters.ent_ani_alp(
self.slc_ew4[:, :, 1:] - (self.slc_ew4[:, :, 0:1]),
self.slc_ev44[:, :, 0:1, 1:4],
)
if coh is not None:
with Timer(name="coh"):
self.coh_hhvv = coherence(
self.slc_hh, self.slc_vv, window_size=7
)
self.coh_snr = coherence(
self.slc_hv, self.slc_vh, window_size=7
)
self.coh_hhhv = coherence(
self.slc_hh, self.slc_hv, window_size=7
)
self.coh_vvvh = coherence(
self.slc_vv, self.slc_hv, window_size=7
)
self.coh_hhxx = coherence(
self.slc_hh, (self.slc_vh + self.slc_vh) / 2, window_size=7
)
self.coh_vvxx = coherence(
self.slc_vv, (self.slc_vh + self.slc_vh) / 2, window_size=7
)
with Timer(name="power_phase_diff"):
# HH-VV Phasedifference
self.ph_diff_hhvv = phase_diff(
self.slc_hh, self.slc_vv, window_size=7, deg=True
)
self.p_hhvv = power_ratio(self.slc_hh, self.slc_vv, window_size=7)
with Timer(name="write_h5f"):
# Do something
import h5py
if save_path is not None:
Path(save_path).mkdir(parents=True, exist_ok=True)
h5_filename = Path.joinpath(
Path(save_path), str(patch_name + "_processed.h5")
)
h5f = h5py.File(str(h5_filename), "w")
h5f.create_dataset("pauli_t_mat", data=self.pauli_t_mat)
# h5f.create_dataset('mlc_ew4', data=self.slc_ew4)
# h5f.create_dataset('mlc_ev44', data=self.slc_ev44)
h5f.create_dataset(
"entropy", data=self.slc_ent_ani_alp_44_nc[0, :, :]
)
h5f.create_dataset(
"anisotropy", data=self.slc_ent_ani_alp_44_nc[1, :, :]
)
h5f.create_dataset(
"alpha",
data=np.rad2deg(self.slc_ent_ani_alp_44_nc[2, :, :]),
)
h5f.create_dataset("ph_diff_hhvv", data=self.ph_diff_hhvv)
h5f.create_dataset("p_hhvv", data=self.p_hhvv)
if coh is not None:
h5f.create_dataset("coh_hhvv", data=self.coh_hhvv)
h5f.create_dataset("coh_snr", data=self.coh_snr)
h5f.create_dataset("coh_hhhv", data=self.coh_hhhv)
h5f.create_dataset("coh_vvvh", data=self.coh_vvvh)
h5f.create_dataset("coh_hhxx", data=self.coh_hhxx)
h5f.create_dataset("coh_vvxx", data=self.coh_vvxx)
h5f.close()
def cal_scattering_matrix(self, slc_hh, slc_hv, slc_vh, slc_vv):
"""
"""
scat_matrix = np.zeros(
(slc_hh.shape[0], slc_hh.shape[1], 4), dtype=np.complex_
)
scat_matrix[:, :, 0] = slc_hh
scat_matrix[:, :, 1] = slc_hv
scat_matrix[:, :, 2] = slc_vh
scat_matrix[:, :, 3] = slc_vv
return scat_matrix
# Processing Class for Standard Polarimetric
# (Each Coherence Combination, Entropy, Anisotropy, Alpha)
| [] |
2024-01-10 | VenSagi/BenefitU | FC2.py | from pymongo.mongo_client import MongoClient
import openai
from flask import Flask
from flask import jsonify
from flask_cors import CORS
app = Flask(__name__)
CORS(app) # allows flask app to not have network restrictions
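# NOTE: the Mongo collection, the company name, the company record and the ChatGPT completion are
# shared between the routes below through module-level globals (coll, companyIn, companyData, completion)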
# vv Example data set from Database vv
'''
data = {
"_id": "RingCentral",
"Benefit": "Description",
"Dental Insurance": "Offered by employer",
"Free Lunch": "5 days a week",
"Health Insurance": "Offered by employer",
"Life Insurance": "Offered by employer",
"Vision Insurance": "Offered by employer",
"Health Savings Account (HSA)": "Offered by employer",
"Maternity Leave": "Offered by employer",
"Sick Time": "Unlimited",
"Roth 401k": "Offered by employer",
"Employee Stock Purchase Program (ESPP)": "Allows contributions up to 15% of base salary. 10% discount on purchase price of stock",
"Donation Match": "100% match. Up to $1,000 matched",
"Flexible Spending Account (FSA)": "Offered by employer",
"Disability Insurance": "Offered by employer",
"401k": "50% match on the first 6% of base salary",
"Remote Work": "Depends on your manager, team, and needs.",
"Paternity Leave": "Offered by employer",
"PTO (Vacation / Personal Days)": "Unlimited",
"Employee Discount": "Offered by employer"
}
'''
# Post method of our custom python api (gets data previously stored in DataBase)
@app.route('/db-up/company', methods=['POST'])
def input_mongo():
# Connection details
uri = "mongodb+srv://VenkatSagi:[email protected]/?retryWrites=true&w=majority"
client = MongoClient(uri)
db = client.WorkerBenefitsDB
global coll
coll = db.WorkerBenefitsCo
# API Key
openai.api_key = "API_KEY"
# Get company data
global companyIn
companyIn = input("What Company do you work for: ")
# Post method of our custom python api (gets input and runs through chatGPT to get response)
@app.route('/gpt-up/user', methods=['POST'])
def input_chat():
user = input("How may I be of service: ") #initial user input/ querey question.
global completion
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": f"Provides any specifications the user has related to their employee benefits! "
f"The benefits are related to the company \"{companyIn}\". {companyData}"},
{"role": "user", "content": user}
]
) # open ai compltetions api
return jsonify(message="Chat Output Successful"), 200 # returns message client side
# Get method for our custom Python Api (Gets ChatGPT response from the completion)
@app.route('/gpt-down/user', methods=['GET'])
def get_chatResponse(): # method calls and returns transcribed text client side
return completion.choices[0].message.content # prints out Open Ai response
# Get method for our custom Python Api (Gets json response from MongoDB and checks for existence)
@app.route('/db-down/company', methods=['GET'])
def get_json(): # method calls and returns transcribed text client side
# Store the json data from company name (checks from MongoDB)
global companyData
companyData = coll.find_one({"_id": companyIn})
# Check if company exists
if companyData is None:
return("Company does not exist")
exit(1)
# Default return
return companyData
if __name__ == '__main__': # port this server is running on
app.run(port=5000)
| [
"Provides any specifications the user has related to their employee benefits! The benefits are related to the company \"PLACEHOLDER\". PLACEHOLDER"
] |
2024-01-10 | harshpp707/Chat_Gpt | Chat_gpt_app.py | # from flask import Flask, request, jsonify
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
import os
os.environ["OPENAI_API_KEY"] = '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'
def construct_index(directory_path):
max_input_size = 4096
num_outputs = 2000
max_chunk_overlap = 90
chunk_size_limit = 1200
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=num_outputs))  # temperature must be non-negative
documents = SimpleDirectoryReader(directory_path).load_data()
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index.save_to_disk('index.json')
return index
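# construct_index() only needs to run when the source documents change; chatbot() below reloads the
# persisted index.json from disk on every query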
def chatbot(input_text):
index = GPTSimpleVectorIndex.load_from_disk('index.json')
response = index.query(input_text, response_mode="compact")
return response.response
iface = gr.Interface(fn=chatbot,
inputs=gr.components.Textbox(lines=7, label="Enter your text"),
outputs="text",
title="Custom-trained AI Chatbot")
if __name__ == '__main__':
index = construct_index("C:\\CHAT_GPT_project\\files")
iface.launch(share=True)
| [] |
2024-01-10 | TomWhitwell/dad_poems | script.py | from openai import OpenAI
import os
import json
import random
import requests
import re
GUARDIAN_KEY = os.getenv('GUARDIAN_API_KEY') # Corrected variable access
api_url = f"https://content.guardianapis.com/search?section=-(football|sport|australia-news)&show-fields=body&page-size=30&api-key={GUARDIAN_KEY}"
def read_number(file_path):
try:
with open(file_path, 'r') as file:
return int(file.read().strip())
except FileNotFoundError:
return 0
def write_number(file_path, number):
with open(file_path, 'w') as file:
file.write(str(number))
def increment_number(file_path):
number = read_number(file_path)
number += 1
write_number(file_path, number)
return number
def trim_to_words(s, num_words):
words = s.split()
return ' '.join(words[:num_words])
def strip_html_tags(text):
"""Remove HTML tags from text."""
clean = re.compile('<.*?>')
return re.sub(clean, '', text)
def get_news_headlines(api_url):
try:
response = requests.get(api_url)
if response.status_code == 200:
data = response.json()
headlines = [item['webTitle'] for item in data['response']['results']]
return headlines
else:
return f"Error fetching data: HTTP {response.status_code}"
except Exception as e:
return f"Error: {str(e)}"
def get_news_articles_and_summaries(api_url):
try:
response = requests.get(api_url)
if response.status_code == 200:
data = response.json()
articles = []
for item in data['response']['results']:
title = item['webTitle']
body = item['fields']['body'] # Get just the body field
body = strip_html_tags(body) # Strip HTML tags from the body
full_content = f"{title} {body}"
articles.append({'content': full_content})
return articles
else:
return f"Error fetching data: HTTP {response.status_code}"
except Exception as e:
return f"Error: {str(e)}"
modes = [
"creative"
]
# Now, modes contains the extended list of moods
philosophical_concepts = [
"Existentialism", "Determinism", "Dualism", "Monism", "Nihilism",
"Realism", "Idealism", "Empiricism", "Rationalism", "Skepticism",
"Pragmatism", "Stoicism", "Humanism", "Absurdism", "Relativism",
"Solipsism", "Utilitarianism", "Hedonism", "Altruism", "Egoism",
"Materialism", "Phenomenology", "Deontology", "Aesthetics", "Objectivism",
"Subjectivism", "Empathy", "Ethnocentrism", "Holism", "Individualism",
"Collectivism", "Romanticism", "Enlightenment", "Metaphysics", "Epistemology",
"Ontology", "Teleology", "Theism", "Atheism", "Agnosticism",
"Pantheism", "Fatalism", "Anarchism", "Marxism", "Capitalism",
"Socialism", "Libertarianism", "Nationalism", "Globalism", "Pluralism",
"Secularism", "Dogmatism", "Relativism", "Absolutism", "Mysticism",
"Transcendentalism", "Pacifism", "Asceticism", "Autonomy", "Causality",
"Vitalism", "Pessimism", "Optimism", "Empiricism", "Rationality",
"Intuitionism", "Naturalism", "Essentialism", "Perfectionism", "Nativism",
"Progressivism", "Conservatism", "Skepticism", "Traditionalism", "Postmodernism",
"Structuralism", "Functionalism", "Behaviorism", "Positivism", "Constructivism",
"Ecofeminism", "Egalitarianism", "Meritocracy", "Totalitarianism", "Authoritarianism",
"Democracy", "Aristocracy", "Oligarchy", "Platonism", "Socratic",
"Nietzscheanism", "Kantianism", "Hegelianism", "Darwinism", "Freudianism",
"Confucianism", "Taoism", "Buddhism", "Stoicism", "Cynicism"
]
poets = [
"Billy Collins",
"RS Thomas",
"Simon Armitage",
"William Carlos Williams"
]
styles = [
"T.S. Eliot", "Robert Frost", "Sylvia Plath", "Langston Hughes", "Maya Angelou", "Pablo Neruda", "Seamus Heaney", "W.H. Auden", "Ezra Pound", "Ted Hughes", "Allen Ginsberg", "Philip Larkin", "Anne Sexton", "Elizabeth Bishop", "John Ashbery", "Billy Collins", "Carol Ann Duffy", "Charles Bukowski", "Octavio Paz", "Dylan Thomas", "Wallace Stevens", "Robert Hayden", "Gwendolyn Brooks", "Seamus Heaney", "E.E. Cummings", "Robert Lowell", "Simon Armitage", "Tracy K. Smith", "Louise Glück", "Ocean Vuong", "Yusef Komunyakaa", "Saeed Jones", "Dorianne Laux", "Natalie Diaz", "Modernism", "Postmodernism", "Surrealism", "Harlem Renaissance", "Beat Poetry", "Black Mountain Poetry", "Language Poetry", "Imagism", "Futurism", "Dadaism", "Symbolism", "Objectivism", "Digital Poetry", "Spoken Word", "Concrete Poetry", "Romanticism", "Expressionism", "Futurism", "Minimalism", "Dirty Realism", "Narrative Poetry", "Avant-Garde Poetry", "Free Verse", "Visual Poetry", "Cyberpoetry", "Fluxus", "Free Verse", "Haiku", "Sonnet", "Villanelle", "Sestina", "Ode", "Ghazal", "Tanka", "Ballad",
"Blank Verse", "Rondeau", "Pantoum", "Acrostic", "Cinquain",
"Epigram", "Concrete Poetry", "Elegy", "Narrative Poetry", "Lyric Poetry",
"Prose Poetry", "Terza Rima", "Spoken Word", "Visual Poetry"
]
poetic_structures = [
"Free Verse", "Haiku", "Sonnet", "Villanelle", "Sestina", "Ode", "Ghazal", "Tanka", "Ballad",
"Blank Verse", "Rondeau", "Pantoum", "Acrostic", "Cinquain",
"Epigram", "Concrete Poetry", "Elegy", "Narrative Poetry", "Lyric Poetry",
"Prose Poetry", "Terza Rima", "Spoken Word", "Visual Poetry"]
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
api_key=os.getenv('OPENAI_API_KEY'),
)
def straighten_quotes(text):
replacements = {
"\u2018": "'", # Left single quotation mark
"\u2019": "'", # Right single quotation mark
"\u201C": '"', # Left double quotation mark
"\u201D": '"', # Right double quotation mark
"\u2032": "'", # Prime (often used as apostrophe/single quote)
"\u2033": '"', # Double prime (often used as double quote)
}
for find, replace in replacements.items():
text = text.replace(find, replace)
return text
# save_response_to_json(response, prompt, selected_news, selected_mode, selected_poem)
def save_response_to_json(response, prompt, selected_news, selected_mode, selected_poet, number, filename='response_news.json', archive_filename='archive_news.json'):
if response and response.choices:
# Access the content attribute of the message object
response_content = response.choices[0].message.content.strip()
response_content = straighten_quotes(response_content)
# Save to the individual file
with open(filename, 'w') as json_file:
json.dump({"poem": response_content, "prompt": prompt, "news": selected_news, "poet": selected_poet, "mode": selected_mode, "number": number}, json_file)
# Update the archive file
try:
# Read existing poems from the archive
with open(archive_filename, 'r') as archive_file:
archive_data = json.load(archive_file)
except FileNotFoundError:
# If the archive file doesn't exist, start with an empty list
archive_data = []
# Append the new poem to the archive
archive_data.insert(0, {"poem": response_content, "prompt": prompt, "news": selected_news, "poet": selected_poet, "mode": selected_mode, "number": number})
# Save the updated archive
with open(archive_filename, 'w') as archive_file:
json.dump(archive_data, archive_file)
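# response_news.json always holds only the most recent poem, while archive_news.json accumulates every
# generated poem, newest first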
def fetch_chatgpt_response(prompt):
try:
chat_completion = client.chat.completions.create(
messages=[
{
"role": "user",
"content": prompt,
}
],
model="gpt-4",
)
return chat_completion
except Exception as e:
print(f"Error in fetching response: {e}")
return None
def main():
new_number = increment_number("num.txt")
print(f"The new number is: {new_number}")
articles_and_summaries = get_news_articles_and_summaries(api_url)
selected_concept = random.choice(philosophical_concepts)
selected_structure = random.choice(poetic_structures)
selected_style = random.choice(styles)
selected_poet = random.choice(poets)
selected_mode = random.choice(modes)
selected_news = trim_to_words(random.choice(articles_and_summaries)['content'],75)
# poem_prompt=["You are a successful and innovative poet. A few moments ago, you read this story in the newspaper: \"" + selected_news + "\". Inspired, you write a poem, no more than 60 words long, in the style of " + selected_style + ". You add a one line title at the top.","You are a successful and innovative poet. You are studying " + selected_concept + ". Inspired, you write a poem, no more than 60 words long, in the style of " + selected_style + ". You add a one line title at the top."]
#
# prompt = random.choice(poem_prompt)
prompt="You are the poet " + selected_poet + ". You woke up this morning feeling " + selected_mode + ". You have just read this story in the newspaper: \"" + selected_news + "\". Write a poem in YOUR OWN DISTINCTIVE STYLE, no more than 60 words long. You may add a one line title at the top if you like."
# prompt="Write a short poem about this news story: \"" + selected_news + "\". Write no more than 60 words. Adopt a strongly " + selected_mode + " tone. You may add a one line title at the top if you like."
print (prompt)
response = fetch_chatgpt_response(prompt)
if response:
save_response_to_json(response, prompt, selected_news, selected_mode, selected_poet, new_number)
if __name__ == "__main__":
main()
| [
"You are the poet PLACEHOLDER. You woke up this morning feeling PLACEHOLDER. You have just read this story in the newspaper: \"PLACEHOLDER\". Write a poem in YOUR OWN DISTINCTIVE STYLE, no more than 60 words long. You may add a one line title at the top if you like."
] |
2024-01-10 | hlzhang109/LMLP | src~clutrr_cot.py | from selectors import EpollSelector
import openai
import numpy as np
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers import util as st_utils
import json
import argparse
argparser = argparse.ArgumentParser('CLUTRR', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparser.add_argument('--device', action='store', type=int, default=1)
argparser.add_argument('--num_rule', action='store', type=int, default=1)
argparser.add_argument('--noisy_rate', action='store', type=float, default=0.0)
argparser.add_argument('--salient', type=bool, default=False)
argparser.add_argument('--train_id', type=int, default=0)
argparser.add_argument('--rule_path', action='store', type=str, default='src/clutrr/rules_all.json')
argparser.add_argument('--example_path', action='store', type=str, default='src/clutrr/example_clutrr_train_story.json')
argparser.add_argument('--test_path', action='store', type=str, default='src/clutrr/example_clutrr_test_story.json')
argparser.add_argument('--planning_lm_id', action='store', type=str, default='text-davinci-002')#gpt2-large
argparser.add_argument('--trans_lm_id', action='store', type=str, default='stsb-roberta-large')
args = argparser.parse_args()
print(args)
GPU = args.device
if torch.cuda.is_available():
torch.cuda.set_device(GPU)
OPENAI_KEY = None # replace this with your OpenAI API key, if you choose to use OpenAI API
source = 'openai' # select from ['openai', 'huggingface']
planning_lm_id = args.planning_lm_id # gpt2, gpt2-medium:, gpt2-large, gpt2-xl
encoder_pooling = 'sentence_embedding'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_STEPS = 1 # maximum number of steps to be generated
CUTOFF_THRESHOLD = 0.2 # early stopping threshold based on matching score and likelihood score
P = 0.5 # hyperparameter for early stopping heuristic to detect whether Planning LM believes the plan is finished
BETA = 0.1 # weighting coefficient used to rank generated samples
if source == 'openai':
openai.api_key = OPENAI_KEY
sampling_params = \
{
"max_tokens": 100,
"temperature": 0.6,
"top_p": 0.9,
"n": 10,
"logprobs": 1,
"presence_penalty": 0.5,
"frequency_penalty": 0.3,
"stop": '\n'
}
elif source == 'huggingface':
sampling_params = \
{
"max_tokens": 100,
"temperature": 0.1,
"top_p": 0.9,
"num_return_sequences": 10,
"repetition_penalty": 1.2,
'use_cache': True,
'output_scores': True,
'return_dict_in_generate': True,
'do_sample': True,
}
def lm_engine(source, planning_lm_id, device):
if source == 'huggingface':
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(planning_lm_id)
model = AutoModelForCausalLM.from_pretrained(planning_lm_id, pad_token_id=tokenizer.eos_token_id).to(device)
def _generate(prompt, start_entity, sampling_params):
if source == 'openai':
response = openai.Completion.create(engine=planning_lm_id, prompt=prompt, **sampling_params)
generated_samples = [response['choices'][i]['text'] for i in range(sampling_params['n'])]
# calculate mean log prob across tokens
mean_log_probs = [np.mean(response['choices'][i]['logprobs']['token_logprobs']) for i in range(sampling_params['n'])]
elif source == 'huggingface':
input_prompt_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
input_entity_ids = tokenizer(start_entity, return_tensors="pt").input_ids.to(device)
prompt_len = input_prompt_ids.shape[-1]; entity_len = input_entity_ids.shape[-1]
if start_entity != ' ':
input_ids = torch.cat([input_prompt_ids, input_entity_ids], dim=1)
else:
input_ids = input_prompt_ids; entity_len = 0
output_dict = model.generate(input_ids, max_length=prompt_len + sampling_params['max_tokens'], **sampling_params)
# discard the prompt (only take the generated text)
generated_samples = tokenizer.batch_decode(output_dict.sequences[:, prompt_len:])
# calculate per-token logprob
vocab_log_probs = torch.stack(output_dict.scores, dim=1).log_softmax(-1) # [n, length, vocab_size]
token_log_probs = torch.gather(vocab_log_probs, 2, output_dict.sequences[:, prompt_len + entity_len:, None]).squeeze(-1).tolist() # [n, length]
for i, sample in enumerate(generated_samples):
stop_idx = sample.index('\n') if '\n' in sample else None
generated_samples[i] = sample[:stop_idx]
token_log_probs[i] = token_log_probs[i][:stop_idx]
# calculate mean log prob across tokens
mean_log_probs = [np.mean(token_log_probs[i]) for i in range(sampling_params['num_return_sequences'])]
generated_samples = [sample.strip().lower() for sample in generated_samples]
return generated_samples, mean_log_probs
return _generate
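# lm_engine returns a closure that, for a given prompt, samples several candidate continuations and their
# mean per-token log-probabilities from either the OpenAI API or a local HuggingFace causal LM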
# create example task embeddings using Translated LM
with open(args.test_path, 'r') as f:
all_test_samples = json.load(f)
# create action embeddings using Translated LM
with open(args.rule_path, 'r') as f:
action_list_ = json.load(f)
# create example task embeddings using Translated LM
with open(args.example_path, 'r') as f:
available_examples_ = json.load(f)
generator = lm_engine(source, planning_lm_id, device)
def find_rule_prompt(available_examples, task = '', predicate='', nums=1, leng=0):
'''
predicate: the target predicate
nums: numbers of templates that needs to find
'''
num = 0
prompts = ''
available_examples_t = available_examples
np.random.shuffle(available_examples_t)
example_task_list_t = [example[1] for example in available_examples]
for i in range(len(available_examples_t)):
if predicate != '' and example_task_list_t[i] != predicate or task[:40] == available_examples_t[i][0][:40]:
continue
triplets = available_examples_t[i][0].split('\n')[-1].split(',')
if leng <= 4 and len(triplets) != leng:
continue
prompts += available_examples_t[i][0] + '\n\n'
num += 1
if num == nums:
break
return prompts
templates = [
"" for i in range(1)
]
# define query task
test_relation_lst = ["aunt", "brother", "daughter", "daughter-in-law", "father", "father-in-law", "granddaughter",
"grandfather", "grandmother", "grandson", "mother", "mother-in-law", "nephew", "niece",
"sister", "son", "son-in-law", "uncle", "husband", "wife"]
relation_value = [1, 0, -1, -1, 1, 1, -2, 2, 2, -2, 1, 1, -1, -1, 0, -1, -1, 1, 0, 0]
relation_sex = [0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0]
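# relation_value encodes each relation's generation offset (e.g. grandfather = +2, grandson = -2) and
# relation_sex its gender; the scoring below counts a prediction as correct when its generation offset
# matches the gold relation's offset up to sign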
all_success = np.zeros((len(all_test_samples), len(all_test_samples)))
relation_to_idx = {test_relation_lst[r]:r for r in range(len(test_relation_lst))}
for a in range(0, len(all_test_samples)):#len(available_examples_)
names = ['1.2', '1.3', '1.4', '1.5', '1.6', '1.7', '1.8', '1.9', '1.10']
idx = [0, 0, 0, 1, 2, 3, 4, 5, 6]
print('*'*40 + '\t Rule from' + str(names[a]) + '\t' + '*'*40)
available_examples = available_examples_[idx[a]]
for k in range(len(all_test_samples)):
print('*'*30 + '\t Clutrr' + str(names[k]) + '\t' + '*'*30)
test_samples = all_test_samples[k]
success_tasks = 0
for t in range(len(test_samples)):
task, relation = test_samples[t]
print('*'*20 + test_samples[t][0].split('.')[-1][:-10] + '*'*20)
for _ in range(len(templates)):
print('*'*10 + ' templates %d ' % (_) + '*'*10)
example = ''
start_entity = task.split("?")[0].split(' ')[-1]
end_entity = task.split()[-1]
log_entities = []
curr_prompt = f'{task}'
if example == '':
example = find_rule_prompt(available_examples.copy(), task=task, predicate='', nums=args.num_rule, leng=a+2);
curr_prompt = f'{example}{task}'
if not args.salient:
print(curr_prompt, end=' ')
for step in range(1, MAX_STEPS + 1):
# start_entity = ' '
samples, log_probs = generator(curr_prompt, start_entity=start_entity, sampling_params=sampling_params)
# best_id = np.array(log_probs).argmax()
# r = samples[best_id].split(' ')[-1][:-1]
# print(samples[best_id])
best_id = (-np.array(log_probs)).argsort()
for id in best_id:
if '' != samples[id]:
print(samples[id])
r = samples[id].split(' ')[-1][:-1]
break
else:
r = ''
print(f"task relation: {relation}")
if r.lower() in test_relation_lst and relation.lower() in test_relation_lst:
id_r = relation_to_idx[r.lower()]
id = relation_to_idx[relation.lower()]
if relation_value[id_r] == relation_value[id] or relation_value[id_r] == -relation_value[id]:
success_tasks += 1
print("there have %d tasks, and templates are succssful with " % (len(test_samples)), success_tasks)
all_success[a][k] = success_tasks
print(all_success)
print(all_success)
| [
"\n\n",
"['']",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | hlzhang109/LMLP | src~countries.py | import openai
import numpy as np
import torch
import time
from sentence_transformers import SentenceTransformer
from sentence_transformers import util as st_utils
import json
if __name__ == '__main__':
GPU = 0
if torch.cuda.is_available():
torch.cuda.set_device(GPU)
OPENAI_KEY = None # replace this with your OpenAI API key, if you choose to use OpenAI API
source = 'huggingface' # select from ['openai', 'huggingface']
planning_lm_id = 'gpt2-large' # see comments above for all options
translation_lm_id = 'stsb-roberta-large' # see comments above for all options
encoder_pooling = 'sentence_embedding'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_STEPS = 10 # maximum number of steps to be generated
CUTOFF_THRESHOLD = 0.2 # early stopping threshold based on matching score and likelihood score
P = 0.5 # hyperparameter for early stopping heuristic to detect whether Planning LM believes the plan is finished
BETA = 0.3 # weighting coefficient used to rank generated samples
with open('src/countries/test_samples.json', 'r') as f:
test_samples = json.load(f)
# create action embeddings using Translated LM
with open('src/countries/avaliable_rules_r2.json', 'r') as f:
action_list = json.load(f)
# create example task embeddings using Translated LM
with open('src/countries/avaliable_examples_r2.json', 'r') as f:
available_examples = json.load(f)
if source == 'openai':
openai.api_key = OPENAI_KEY
sampling_params = \
{
"max_tokens": 10,
"temperature": 0.6,
"top_p": 0.9,
"n": 10,
"logprobs": 1,
"presence_penalty": 0.5,
"frequency_penalty": 0.3,
"stop": '\n'
}
elif source == 'huggingface':
sampling_params = \
{
"max_tokens": 20,
"temperature": 0.1,
"top_p": 0.9,
"num_return_sequences": 10,
"repetition_penalty": 1.2,
'use_cache': True,
'output_scores': True,
'return_dict_in_generate': True,
'do_sample': True,
}
def lm_engine(source, planning_lm_id, device):
if source == 'huggingface':
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(planning_lm_id)
model = AutoModelForCausalLM.from_pretrained(planning_lm_id, pad_token_id=tokenizer.eos_token_id).to(device)
def _generate(prompt, start_entity, sampling_params):
if source == 'openai':
response = openai.Completion.create(engine=planning_lm_id, prompt=prompt, **sampling_params)
generated_samples = [response['choices'][i]['text'] for i in range(sampling_params['n'])]
# calculate mean log prob across tokens
mean_log_probs = [np.mean(response['choices'][i]['logprobs']['token_logprobs']) for i in range(sampling_params['n'])]
elif source == 'huggingface':
input_prompt_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
input_entity_ids = tokenizer(start_entity, return_tensors="pt").input_ids.to(device)
prompt_len = input_prompt_ids.shape[-1]; entity_len = input_entity_ids.shape[-1]
input_ids = torch.cat([input_prompt_ids, input_entity_ids], dim=1)
output_dict = model.generate(input_ids, max_length=prompt_len + sampling_params['max_tokens'], **sampling_params)
# discard the prompt (only take the generated text)
generated_samples = tokenizer.batch_decode(output_dict.sequences[:, prompt_len:])
# calculate per-token logprob
vocab_log_probs = torch.stack(output_dict.scores, dim=1).log_softmax(-1) # [n, length, vocab_size]
token_log_probs = torch.gather(vocab_log_probs, 2, output_dict.sequences[:, prompt_len + entity_len:, None]).squeeze(-1).tolist() # [n, length]
# truncate each sample if it contains '\n' (the current step is finished)
# e.g. 'open fridge\n<|endoftext|>' -> 'open fridge'
for i, sample in enumerate(generated_samples):
stop_idx = sample.index('\n') if '\n' in sample else None
generated_samples[i] = sample[:stop_idx]
token_log_probs[i] = token_log_probs[i][:stop_idx]
# calculate mean log prob across tokens
mean_log_probs = [np.mean(token_log_probs[i]) for i in range(sampling_params['num_return_sequences'])]
generated_samples = [sample.strip().lower() for sample in generated_samples]
return generated_samples, mean_log_probs
return _generate
# create example task embeddings using Translated LM
# with open('src/countries/test_samples.json', 'r') as f:
# test_samples = json.load(f)
generator = lm_engine(source, planning_lm_id, device)
# initialize Translation LM
translation_lm = SentenceTransformer(translation_lm_id).to(device)
for test in test_samples:
test = test[6:]
for example in available_examples:
if test in example:
available_examples.remove(example)
if test in action_list:
action_list.remove(test)
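    # remove every test query (minus its leading prefix) from the example pool and the action corpus so the
    # answer can neither be copied from the prompt examples nor retrieved as a single stored fact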
action_list_embedding = translation_lm.encode(action_list, batch_size=512, convert_to_tensor=True, device=device, output_value=encoder_pooling) # lower batch_size if limited by GPU memory
example_task_list = [example.split('\n')[0] for example in available_examples] # first line contains the task name
example_task_embedding = translation_lm.encode(example_task_list, batch_size=512, convert_to_tensor=True, device=device, output_value=encoder_pooling) # lower batch_size if limited by GPU memory
def find_most_similar_(query_str, corpus_embedding):
query_embedding = translation_lm.encode(query_str, convert_to_tensor=True, device=device, output_value=encoder_pooling)
# calculate cosine similarity against each candidate sentence in the corpus
cos_scores = st_utils.pytorch_cos_sim(query_embedding, corpus_embedding)[0].detach().cpu().numpy()
# retrieve high-ranked index and similarity score
most_similar_idx, matching_score = np.argmax(cos_scores), np.max(cos_scores)
return most_similar_idx, matching_score
def find_rule_prompt(predicate, nums, only_rule=False):
'''
predicate: the target predicate
nums: numbers of templates that needs to find
'''
num = 0
prompts = ''
available_examples_t = available_examples
np.random.shuffle(available_examples_t)
for i in range(len(available_examples_t)):
av_exp = available_examples_t[i]
example = ''
rules = ''
steps = av_exp.split('\n')[:-1]
entities_to_fake, entities = {}, set()
if len(steps) > 0 and steps[0].split(' ')[2] == predicate:
num += 1
for i in range(len(steps)):
step = steps[i]
if step == steps[0]:
_, s, p, o = step.split(' ')
example += f'{step}'
else:
_, _, s, p, o = step.split(' ')
example += f'\n {step}'
entities.add(s)
entities.add(o)
rules = example
for idx, k in enumerate(entities):
entities_to_fake[k] = chr(idx+ord('A'))
rules = rules.replace(k, entities_to_fake[k])
if only_rule:
prompts += rules + '\n\n'
else:
prompts += rules + '\n\n' + example + '\n\n'
if num == nums:
break
if nums > 1:
return rules + prompts +'\n\n'
else:
return prompts
def find_rule_prompt_entity(predicate, nums, only_rule=False):
'''
predicate: the target predicate
nums: numbers of templates that needs to find
'''
num = 0
prompts = ''
available_examples_t = available_examples
np.random.shuffle(available_examples_t)
for i in range(len(available_examples_t)):
av_exp = available_examples_t[i]
if av_exp == '':
continue
example = ''
rules = ''
steps = av_exp.split('\n')[:-1]
entities_to_fake, entities = {}, set()
entity = steps[0].split(' ')[1]
if len(steps) > 0 and entity == predicate:
num += 1
for i in range(len(steps)):
step = steps[i]
if step == steps[0]:
_, s, p, o = step.split(' ')
example += f'{step}'
else:
_, _, s, p, o = step.split(' ')
example += f'\n {step}'
entities.add(s)
entities.add(o)
rules = example
for idx, k in enumerate(entities):
entities_to_fake[k] = chr(idx+ord('A'))
rules = rules.replace(k, entities_to_fake[k])
if only_rule:
prompts += rules + '\n\n'
else:
prompts += rules + '\n\n' + example + '\n\n'
if num == nums:
break
if nums > 1:
return rules + prompts +'\n\n'
else:
return prompts
# helper function for finding similar sentence in a corpus given a query
def find_most_similar(query_str, start_entity, corpus_embedding, log_entities = None):
query_embedding = translation_lm.encode(query_str, convert_to_tensor=True, device=device, output_value=encoder_pooling)
# calculate cosine similarity against each candidate sentence in the corpus
cos_scores = st_utils.pytorch_cos_sim(query_embedding, corpus_embedding)[0].detach().cpu().numpy()
# retrieve high-ranked index and similarity score
most_similar_idx, matching_score = np.argmax(cos_scores), np.max(cos_scores)
return most_similar_idx, matching_score
templates = [
# "Task: X is located in Z\nStep 1: X is located in Y\nStep 2: Y is located in M\nStep 3: M is located in Z\n,
# "Task: X locatedIn Y\nStep 1: X neighborOf M\nStep 2: M neighborOf N\nStep 3: N locatedIn Y\n",
# "Task: X locatedIn Y\nStep 1: X neighborOf M\nStep 2: M locatedIn N\nStep 3: N locatedIn Y\n",
# "Task: X locatedIn Y\nStep 1: X neighborOf Z\nStep 2: Z locatedIn Y\n",
"",
# "Task: X is located in Z\nStep 1: X is the neighbor of Y\nStep 2: Y is located in M\nStep 3: M is located in Z\n",
# "Task: X is located in Z\nStep 1: X is located in Y\nStep 2: Y is the neighbor of M\nStep 3: M is located in Z\n",
]
# define query task
with open("result.txt","w") as f:
num_success = [0 for i in range(len(templates))]
num_steps = np.zeros((len(templates), MAX_STEPS))
success_tasks = np.zeros((len(test_samples), len(templates)))
for t in range(len(test_samples)):#
task = test_samples[t]
# find most relevant example
# f.write('*'*30 + ' Test EXAMPLE ' + '-*'*30 + '\n');
# print('*'*40 + ' Test EXAMPLE ' + '*'*40)
# print(f'{task}')
for k in range(len(templates)):
# print('*'*40 + ' templates %d ' % (k) + '*'*40)
example = templates[k]
start_entity = task.split()[0]
end_entity = task.split()[-1]
log_entities = []
if example == '':
example = find_rule_prompt(task.split()[1], 1); print(f'{example}Task: {task}')
curr_prompt = f'{example}Task: {task}'
# construct initial prompt
curr_prompt = f'{example}\n\n{task}'
# print example and query task
# print('-'*10 + ' GIVEN EXAMPLE ' + '-'*10);
print(curr_prompt)
# print('-'*10 + ' EXAMPLE END ' + '-'*10)
# f.write('-'*10 + ' GIVEN EXAMPLE ' + '-'*10 + '\n');
# f.write(example)
# f.write('-'*10 + ' EXAMPLE END ' + '-'*10 + '\n')
# f.write(f'{task}' + '\n')
for step in range(1, MAX_STEPS + 1):
best_overall_score = -np.inf
# query Planning LM for single-step action candidates
samples, log_probs = generator(curr_prompt + f'\nStep {step}: ', start_entity, sampling_params)
for sample, log_prob in zip(samples, log_probs):
most_similar_idx, matching_score = find_most_similar(sample, start_entity, action_list_embedding, log_entities)
if most_similar_idx == -1 and matching_score == -1:
print('[Terminating early no action begin with %s and end with encountered entity\n' % (start_entity))
f.write('[Terminating early no action begin with %s and end with encountered entity\n' % (start_entity))
break
# rank each sample by its similarity score and likelihood score
overall_score = matching_score + BETA * log_prob
translated_action = action_list[most_similar_idx]
if overall_score > best_overall_score:
best_overall_score = overall_score
best_action = translated_action
# terminate early when either the following is true:
# 1. top P*100% of samples are all 0-length (ranked by log prob)
# 2. overall score is below CUTOFF_THRESHOLD
# else: autoregressive generation based on previously translated action
top_samples_ids = np.argsort(log_probs)[-int(P * len(samples)):]
are_zero_length = all([len(samples[i]) == 0 for i in top_samples_ids])
below_threshold = best_overall_score < CUTOFF_THRESHOLD
if are_zero_length or most_similar_idx == -1:
print(f'\n[Terminating early because top {P*100}% of samples are all 0-length]')
f.write(f'\n[Terminating early because top {P*100}% of samples are all 0-length]\n')
break
else:
previous_action = best_action
log_entities.append(start_entity)
start_entity = previous_action.split()[-1]
formatted_action = (best_action[0].lower() + best_action[1:]).replace('_', ' ') # 'open_fridge' -> 'Open fridge'
curr_prompt += f'\nStep {step}: {formatted_action}'
print(f'Step {step}: {formatted_action}')
f.write(f'Step {step}: {formatted_action}\n')
if start_entity == end_entity:
num_success[k] += 1
num_steps[k, step-1] += 1
success_tasks[t, k] += 1
break
print("there have %d tasks, and templates are succssful with " % (len(test_samples)), num_success)
print('The hop counts of the template generation proof are respectively: ', num_steps)
print('the completion status of each task is', success_tasks)
print('1example, final success number of tasks is ', np.sum(np.max(success_tasks[:,0:1], axis=1))/len(test_samples))
print('3example, final success number of tasks is ', np.sum(np.max(success_tasks[:,:3], axis=1)))
print('5example, final success number of tasks is ', np.sum(np.max(success_tasks[:,:5], axis=1)))
print('10example, final success number of tasks is ', np.sum(np.max(success_tasks[:,:10], axis=1))) | [
"['']",
"PLACEHOLDER\n\n",
"PLACEHOLDERTask: PLACEHOLDER",
"PLACEHOLDER\n\nPLACEHOLDER",
"\nStep PLACEHOLDER: PLACEHOLDER",
"PLACEHOLDER\n\nPLACEHOLDER\n\n"
] |
2024-01-10 | hlzhang109/LMLP | src~clutrr.py | import openai
import numpy as np
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers import util as st_utils
import json
import argparse
argparser = argparse.ArgumentParser('CLUTRR', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparser.add_argument('--device', action='store', type=int, default=1)
argparser.add_argument('--num_rule', action='store', type=int, default=1)
argparser.add_argument('--noisy_rate', action='store', type=float, default=0.0)
argparser.add_argument('--salient', type=bool, default=False)
argparser.add_argument('--rule_path', action='store', type=str, default='src/clutrr/rules_all.json')
argparser.add_argument('--example_path', action='store', type=str, default='src/clutrr/example_train.json')
argparser.add_argument('--test_path', action='store', type=str, default='src/clutrr/example_test.json')
argparser.add_argument('--planning_lm_id', action='store', type=str, default='gpt2-large')#gpt2-large
argparser.add_argument('--trans_lm_id', action='store', type=str, default='stsb-roberta-large')
args = argparser.parse_args()
print(args)
GPU = args.device
if torch.cuda.is_available():
torch.cuda.set_device(GPU)
OPENAI_KEY = None # replace this with your OpenAI API key, if you choose to use OpenAI API
source = 'huggingface' # select from ['openai', 'huggingface']
planning_lm_id = args.planning_lm_id # gpt2, gpt2-medium:, gpt2-large, gpt2-xl
translation_lm_id = args.trans_lm_id # stsb-roberta-base/large, stsb-bert-base/large
encoder_pooling = 'sentence_embedding'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_STEPS = 10 # maximum number of steps to be generated
CUTOFF_THRESHOLD = 0.2 # early stopping threshold based on matching score and likelihood score
P = 0.5 # hyperparameter for early stopping heuristic to detect whether Planning LM believes the plan is finished
BETA = 0.1 # weighting coefficient used to rank generated samples
if source == 'openai':
openai.api_key = OPENAI_KEY
sampling_params = \
{
"max_tokens": 15,
"temperature": 0.6,
"top_p": 0.9,
"n": 10,
"logprobs": 1,
"presence_penalty": 0.5,
"frequency_penalty": 0.3,
"stop": '\n'
}
elif source == 'huggingface':
sampling_params = \
{
"max_tokens": 20,
"temperature": 0.1,
"top_p": 0.9,
"num_return_sequences": 10,
"repetition_penalty": 1.2,
'use_cache': True,
'output_scores': True,
'return_dict_in_generate': True,
'do_sample': True,
}
def lm_engine(source, planning_lm_id, device):
if source == 'huggingface':
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(planning_lm_id)
model = AutoModelForCausalLM.from_pretrained(planning_lm_id, pad_token_id=tokenizer.eos_token_id).to(device)
def _generate(prompt, start_entity, sampling_params):
if source == 'openai':
response = openai.Completion.create(engine=planning_lm_id, prompt=prompt, **sampling_params)
generated_samples = [response['choices'][i]['text'] for i in range(sampling_params['n'])]
# calculate mean log prob across tokens
mean_log_probs = [np.mean(response['choices'][i]['logprobs']['token_logprobs']) for i in range(sampling_params['n'])]
elif source == 'huggingface':
input_prompt_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
input_entity_ids = tokenizer(start_entity, return_tensors="pt").input_ids.to(device)
prompt_len = input_prompt_ids.shape[-1]; entity_len = input_entity_ids.shape[-1]
if start_entity != ' ':
input_ids = torch.cat([input_prompt_ids, input_entity_ids], dim=1)
else:
input_ids = input_prompt_ids; entity_len = 0
output_dict = model.generate(input_ids, max_length=prompt_len + sampling_params['max_tokens'], **sampling_params)
# discard the prompt (only take the generated text)
generated_samples = tokenizer.batch_decode(output_dict.sequences[:, prompt_len:])
# calculate per-token logprob
vocab_log_probs = torch.stack(output_dict.scores, dim=1).log_softmax(-1) # [n, length, vocab_size]
token_log_probs = torch.gather(vocab_log_probs, 2, output_dict.sequences[:, prompt_len + entity_len:, None]).squeeze(-1).tolist() # [n, length]
# truncate each sample if it contains '\n' (the current step is finished)
# e.g. 'open fridge\n<|endoftext|>' -> 'open fridge'
for i, sample in enumerate(generated_samples):
stop_idx = sample.index('\n') if '\n' in sample else None
generated_samples[i] = sample[:stop_idx]
token_log_probs[i] = token_log_probs[i][:stop_idx]
# calculate mean log prob across tokens
mean_log_probs = [np.mean(token_log_probs[i]) for i in range(sampling_params['num_return_sequences'])]
generated_samples = [sample.strip().lower() for sample in generated_samples]
return generated_samples, mean_log_probs
return _generate
# create example task embeddings using Translated LM
with open(args.test_path, 'r') as f:
all_test_samples = json.load(f)
# create action embeddings using Translated LM
with open(args.rule_path, 'r') as f:
action_list_ = json.load(f)
# create example task embeddings using Translated LM
with open(args.example_path, 'r') as f:
available_examples = json.load(f)
# initialize Translation LM
if args.noisy_rate > 0.:
with open('src/clutrr/rules_all.json', 'r') as f:
action_list_noise = json.load(f)
num_action_noise = len(action_list_noise)
action_list_noise = action_list_noise[:int(args.noisy_rate * num_action_noise)]
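# when --noisy_rate > 0, this slice of the full rule set is appended below to each test split's fact base
# as distractor facts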
generator = lm_engine(source, planning_lm_id, device)
translation_lm = SentenceTransformer(translation_lm_id).to(device)
if 'test' in args.rule_path:
action_list_embedding_ = []
entity_list, relation_list = [], []
for rules in action_list_:
if args.noisy_rate > 0.:
rules.extend(action_list_noise)
entity_set, relation_set = set(), set()
action_embedding = translation_lm.encode(rules, batch_size=512, convert_to_tensor=True, device=device, output_value=encoder_pooling) # lower batch_size if limited by GPU memory
action_list_embedding_.append(action_embedding)
for action in rules:
s, p, _, o = action.split(' ')
s = s[:-2]
entity_set.add(s); relation_set.add(p); entity_set.add(o)
entity_list.append(list(entity_set))
relation_list.append(list(relation_set))
else:
entity_set, relation_set = set(), set()
action_list_embedding_ = translation_lm.encode(action_list_, batch_size=512, convert_to_tensor=True, device=device, output_value=encoder_pooling) # lower batch_size if limited by GPU memory
for action in action_list_:
s, p, _, o = action.split(' ')
s = s[:-2]
entity_set.add(s); relation_set.add(p); entity_set.add(o)
entity_list, relation_list = list(entity_set), list(relation_set)
entity_list_embedding = translation_lm.encode(list(entity_set), batch_size=512, convert_to_tensor=True, device=device, output_value=encoder_pooling)
relation_list_embedding = translation_lm.encode(list(relation_set), batch_size=512, convert_to_tensor=True, device=device, output_value=encoder_pooling)
example_task_list = [example.split('\n')[0] for example in available_examples] # first line contains the task name
example_task_embedding = translation_lm.encode(example_task_list, batch_size=512, convert_to_tensor=True, device=device, output_value=encoder_pooling) # lower batch_size if limited by GPU memory
def find_most_similar_(query_str, corpus_embedding, corpus_set, corpus):
if query_str in corpus_set:
return query_str
query_embedding = translation_lm.encode(query_str, convert_to_tensor=True, device=device, output_value=encoder_pooling)
# calculate cosine similarity against each candidate sentence in the corpus
cos_scores = st_utils.pytorch_cos_sim(query_embedding, corpus_embedding)[0].detach().cpu().numpy()
# retrieve high-ranked index and similarity score
most_similar_idx, matching_score = np.argmax(cos_scores), np.max(cos_scores)
return corpus[most_similar_idx]
# helper function for finding similar sentence in a corpus given a query
def find_most_similar(query_str, start_entity, task, corpus_embedding, log_entities = None, action_list=None):
if start_entity != '' and start_entity != ' ':
corpus_embedding_, ids = [], []
for i in range(len(action_list)):
s = action_list[i].split("'")[0]
o = action_list[i].split(" ")[-1]
if s == start_entity and o not in log_entities and action_list[i] != task:
corpus_embedding_.append(corpus_embedding[i].unsqueeze(0))
ids.append(i)
if len(ids) == 0:
return -1, -1
corpus_embedding_ = torch.cat(corpus_embedding_).to(device)
else:
corpus_embedding_ = corpus_embedding
query_embedding = translation_lm.encode(query_str, convert_to_tensor=True, device=device, output_value=encoder_pooling)
# calculate cosine similarity against each candidate sentence in the corpus
cos_scores = st_utils.pytorch_cos_sim(query_embedding, corpus_embedding_)[0].detach().cpu().numpy()
# retrieve high-ranked index and similarity score
most_similar_idx, matching_score = np.argmax(cos_scores), np.max(cos_scores)
if start_entity != '' and start_entity != ' ':
most_similar_idx = ids[most_similar_idx]
del corpus_embedding_
return most_similar_idx, matching_score
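# candidate facts are restricted to those whose subject is the current start entity and whose object has
# not been visited yet, so each generated proof forms a simple path through the kinship graph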
def find_rule_prompt(predicate, nums, only_rule=False, reverse_rule=False):
'''
predicate: the target predicate
nums: numbers of templates that needs to find
'''
num = 0
prompts = ''
available_examples_t = available_examples
np.random.shuffle(available_examples_t)
for i in range(len(available_examples_t)):
av_exp = available_examples_t[i]
example = ''
rules = ''
steps = av_exp.split('\n')
entities_to_fake, entities = {}, set()
if steps[0].split(' ')[1] == predicate:
num += 1
for i in range(len(steps)):
step = steps[i]
s, p, _, o = step.split(' ')
s = s[:-2]
entities.add(s)
entities.add(o)
if step == steps[0]:
example += f'Task: {step}'
else:
example += f'\n Step {i}: {step}'
rules = example
for idx, k in enumerate(entities):
entities_to_fake[k] = chr(idx+ord('A'))
rules = rules.replace(k, entities_to_fake[k])
if only_rule:
return rules + '\n\n'
if reverse_rule:
prompts += example + '\n\n' + rules + '\n\n'
else:
prompts += rules + '\n\n' + example + '\n\n'
if num == nums:
break
if nums > 1:
return prompts + rules +'\n\n'
else:
return prompts
def find_rule_prompt_entity(entity, nums, only_rule=False):
'''
entity: the target entity
nums: numbers of templates that needs to find
'''
num = 0
prompts = ''
available_examples_t = available_examples
np.random.shuffle(available_examples_t)
for i in range(len(available_examples_t)):
av_exp = available_examples_t[i]
example = ''
rules = ''
steps = av_exp.split('\n')
entities_to_fake, entities = {}, set()
if steps[0].split(' ')[0][:-2] == entity:
num += 1
for i in range(len(steps)):
step = steps[i]
s, p, _, o = step.split(' ')
s = s[:-2]
entities.add(s)
entities.add(o)
if step == steps[0]:
example += f'Task: {step}'
else:
example += f'\n Step {i}: {step}'
rules = example
for idx, k in enumerate(entities):
entities_to_fake[k] = chr(idx+ord('A'))
rules = rules.replace(k, entities_to_fake[k])
if only_rule:
return rules + '\n\n'
prompts += rules + '\n\n' + example + '\n\n'
if num == nums:
break
if nums > 1:
return prompts + rules +'\n\n'
else:
return prompts
def find_random_prompt(nums, only_rule=False):
'''
entity: the target entity
nums: numbers of templates that needs to find
'''
num = 0
prompts = ''
available_examples_t = available_examples
np.random.shuffle(available_examples_t)
for i in range(nums):
av_exp = available_examples_t[i]
example = ''
rules = ''
steps = av_exp.split('\n')
entities_to_fake, entities = {}, set()
for i in range(len(steps)):
step = steps[i]
s, p, _, o = step.split(' ')
s = s[:-2]
entities.add(s)
entities.add(o)
if step == steps[0]:
example += f'Task: {step}'
else:
example += f'\n Step {i}: {step}'
rules = example
for idx, k in enumerate(entities):
entities_to_fake[k] = chr(idx+ord('A'))
rules = rules.replace(k, entities_to_fake[k])
if only_rule:
return rules + '\n\n'
prompts += rules + '\n\n' + example + '\n\n'
if nums > 1:
return prompts + rules +'\n\n'
else:
return prompts
templates = [
"" for i in range(1)
]
# define query task
with open("result_3_templates.txt","w") as f:
names = ['1.5', '1.6', '1.7', '1.8', '1.9', '1.10']
for k in range(len(all_test_samples)):
print('*'*40 + '\t Clutrr' + str(names[k]) + '\t' + '*'*40)
if 'test' in args.rule_path:
action_list_embedding = action_list_embedding_[k]
action_list = action_list_[k]
else:
action_list_embedding = action_list_embedding_
action_list = action_list_
test_samples = all_test_samples[k]
num_success = [0 for i in range(len(templates))]
num_steps = np.zeros((len(templates), MAX_STEPS))
success_tasks = np.zeros((len(test_samples), len(templates)))
for t in range(len(test_samples)):
task = test_samples[t].split('\n')[0]
print('*'*20 + f' {task} ' + '*'*20)
for k in range(len(templates)):
print('*'*10 + ' templates %d ' % (k) + '*'*10)
example = ''
start_entity = task.split("'")[0]
end_entity = task.split()[-1]
log_entities = []
curr_string = f'{task}, '
curr_prompt = f'Task: {task}'
if example == '':
example = find_rule_prompt(task.split()[1], nums=args.num_rule, only_rule=False, reverse_rule=True);
if not args.salient:
print(f'{example}Task: {task}')
curr_prompt = f'{example}Task: {task}'
for step in range(1, MAX_STEPS + 1):
best_overall_score = -np.inf
samples, log_probs = generator(curr_prompt + f'\n Step {step}: ', start_entity=' ', sampling_params=sampling_params)
for sample, log_prob in zip(samples, log_probs):
most_similar_idx, matching_score = find_most_similar(sample, start_entity, task, action_list_embedding, log_entities, action_list=action_list)
if most_similar_idx == -1 and matching_score == -1:
print('[Terminating early no action begin with %s and end with encountered entity\n' % (start_entity))
break
# rank each sample by its similarity score and likelihood score
overall_score = matching_score + BETA * log_prob
translated_action = action_list[most_similar_idx]
if overall_score > best_overall_score:
best_overall_score = overall_score
best_action = translated_action
top_samples_ids = np.argsort(log_probs)[-int(P * len(samples)):]
are_zero_length = all([len(samples[i]) == 0 for i in top_samples_ids])
below_threshold = best_overall_score < CUTOFF_THRESHOLD
if are_zero_length or most_similar_idx == -1:
print(f'\n[Terminating early because top {P*100}% of samples are all 0-length]')
break
else:
previous_action = best_action
start_entity = previous_action.split()[-1]
log_entities.append(start_entity)
formatted_action = (best_action[0] + best_action[1:]).replace('_', ' ').replace('-', ' ') # 'open_fridge' -> 'Open fridge'
curr_prompt += f'\nStep {step}: {formatted_action}'
curr_string += (best_action[0] + best_action[1:]) + ', '
if not args.salient:
print(f'Step {step}: {formatted_action}')
if start_entity == end_entity:
num_success[k] += 1
num_steps[k, step - 1] += 1
success_tasks[t, k] += 1
print(curr_prompt)
f.write(f'{curr_string[:-2]}\n')
break
print("there have %d tasks, and templates are succssful with " % (len(test_samples)), num_success)
f.write("there have %d tasks, and templates are succssful with\n" % (len(test_samples)))
f.writelines(str(num_success))
print('The hop counts of the template generation proof are respectively: ', num_steps)
            f.write('\nThe hop counts of the template generation proof are respectively:\n')
f.writelines(str(num_steps))
print('the completion status of each task is', success_tasks)
f.write('\nthe completion status of each task is\n')
f.writelines(str(success_tasks))
print('final success number of tasks is ', np.sum(np.max(success_tasks, axis=1)))
f.write('\nfinal success number of tasks is %d' % (np.sum(np.max(success_tasks, axis=1)))) | [
"['']",
"Task: PLACEHOLDER",
"PLACEHOLDERTask: PLACEHOLDER",
"\nStep PLACEHOLDER: PLACEHOLDER",
"PLACEHOLDER\n\nPLACEHOLDER\n\n"
] |
2024-01-10 | tieandrews/dashGPT | src~dashgpt~chat~chat_utils.py | # Author: Ty ANdrews
# Date: 2023-09021
import os
import openai
import platform
from dotenv import load_dotenv, find_dotenv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
import re
from dashgpt.logs import get_logger
from dashgpt.data.langchain_utils import count_tokens
# for deployment on Azure, Chroma's bundled SQLite version is out of date, so override it
# inspired from: https://gist.github.com/defulmere/8b9695e415a44271061cc8e272f3c300
if platform.system() == "Linux":
# these three lines swap the stdlib sqlite3 lib with the pysqlite3 package
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
from langchain.vectorstores import Chroma
logger = get_logger(__name__)
load_dotenv(find_dotenv())
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
def connect_to_vectorstore():
"""
Connect to the VectorStore and return a VectorStore object.
Returns
-------
VectorStore object
The VectorStore object connected to the VectorStore.
"""
embedding_function = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
chroma_db = Chroma(
persist_directory = "data/processed/reddit_jokes_chroma_db",
embedding_function = embedding_function,
collection_name = "reddit_jokes_2000"
)
return chroma_db
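# NOTE: the persist directory and collection name above are hard-coded; the Chroma collection is assumed
# to have been built in a separate ingestion step, otherwise retrieval will return no documents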
def stream_send_messages(prompt):
"""
Send a prompt to the OpenAI API and return the response.
Parameters
----------
prompt : str
The prompt to send to the OpenAI API.
Returns
-------
OpenAI ChatCompletion object
The response from the OpenAI API.
"""
# calculate the number of tokens by combining the prompt and the user prompt
total_tokens = 0
for message in prompt:
total_tokens += count_tokens(message["content"])
if total_tokens > 2048:
logger.warning(
f"Total tokens {total_tokens} exceeds maximum of 2048. Using turbo 16k context model."
)
# uncomment this to auto flip to 16k model
# model = "gpt-3.5-turbo-16k"
# instead only use the last 512 tokens of user prompt to limit abuse
prompt[-1]["content"] = prompt[-1]["content"][-512:]
else:
model = "gpt-3.5-turbo"
"""OpenAI API call. You may choose parameters here but `stream=True` is required."""
return openai.ChatCompletion.create(
model=model,
messages=prompt,
stream=True,
max_tokens=1024,
temperature=0.5,
)
def get_relevant_documents(
user_prompt,
vector_store,
k=3,
method="similarity",
):
"""
Get the most relevant documents from the VectorStore for a given user prompt.
Parameters
----------
user_prompt : str
The user prompt to search for in the VectorStore.
vector_store : Zilliz object
The object connected to the VectorStore.
method: str, optional
The method to use for searching the VectorStore, options are mmr, similarity. Default is "similarity".
Returns
-------
list of Document objects
The list of relevant documents from the VectorStore.
"""
if method == "mmr":
relevant_documents = vector_store.max_marginal_relevance_search(
query=user_prompt,
k=k,
fetch_k=10,
)
return relevant_documents
elif method == "similarity":
relevant_documents = vector_store.similarity_search_with_score(
query=user_prompt,
k=k,
)
        # take the relevant documents, which is a list of (Document, score) tuples, and convert it to a
        # list of Documents with a new metadata field called "score" on each document
for doc, score in relevant_documents:
doc.metadata["score"] = score
# only keep the documents from the relevant documents tuples, not score
relevant_documents_with_score = [doc for doc, score in relevant_documents]
# return selected_relevant_documents
return relevant_documents_with_score
else:
raise ValueError("method must be mmr or similarity")
def convert_documents_to_chat_context(relevant_documents):
"""
Convert a list of relevant documents to a chat context string.
Parameters
----------
relevant_documents : list of Document objects
The list of relevant documents to convert.
Returns
-------
str
The chat context string created from the relevant documents.
"""
# combine the page content from the relevant documents into a single string
context_str = ""
for i, doc in enumerate(relevant_documents):
context_str += f"{doc.page_content}\n"
return context_str
def convert_chat_history_to_string(
chat_history: dict, include_num_messages: int = 1, questions_only = False
) -> str:
"""
Convert a chat history dictionary to a string.
Parameters
----------
chat_history : dict
A dictionary containing the chat history.
Returns
-------
str
A string representation of the chat history.
Notes
-----
The chat history dictionary should have the following format:
{
"chat_history": [
{
"role": "user" or "assistant",
"content": "message content"
},
...
]
}
The returned string will have the following format:
"user: message content\nassistant: message content\n..."
"""
if questions_only is False:
start_index = -(2 * include_num_messages) - 1
chat_history_str = ""
for line in chat_history["chat_history"][start_index:-1]:
chat_history_str += f"{line['role']}: {line['content'].strip()}\n"
logger.debug(f"Chat history: {chat_history_str}")
elif questions_only is True:
start_index = -(2 * include_num_messages) - 1
chat_history_str = ""
for line in chat_history["chat_history"][start_index:-1]:
if line['role'] == 'user':
chat_history_str += f"{line['role']}: {line['content'].strip()}\n"
logger.debug(f"Chat history: {chat_history_str}")
return chat_history_str
| [] |
2024-01-10 | tonnitommi/example-prompt-template-assets | tasks.py | from robocorp.tasks import task
from robocorp import vault, storage, excel
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.prompts.chat import SystemMessage, HumanMessagePromptTemplate
import json
@task
def compare_addresses():
"""Read address pairs from an excel file, and compare their similiarity using OpenAI.
The prompt template and OpenAI API credentials are stored in Robocorp Control Room.
Inspired by the blog post by Benjamin Stein: https://www.haihai.ai/programming-with-llm/"""
# Get addresses from an Excel (example comes with the repo)
addresses = excel.open_workbook("addresses.xlsx").worksheet("Sheet1").as_list(header=True)
# Set up LLM using credentials from Robocorp Vault - edit to match your own entries
openai_credentials = vault.get_secret("OpenAI")
llm = ChatOpenAI(openai_api_key=openai_credentials["api-key"])
# Create the prompt template using Robocorp Asset Storage, easy to edit the prompt in the cloud
# without deploying code changes. Edit the name of the Asset to match your own.
template = ChatPromptTemplate.from_messages(
[
SystemMessage(content=("You are a helpful assistant that compares addresses for the user.")),
HumanMessagePromptTemplate.from_template(storage.get_text("example_prompt_template")),
]
)
for row in addresses:
print(f"\nComparing addresses: {row['First address']} to {row['Second address']}.")
# Run LLM chat completion by feeding in the addresses to the template
response = llm(template.format_messages(address_one=row["First address"], address_two=row["Second address"]))
response_json = json.loads(response.content)
print(f"RESULT: {response_json['result']}, because {response_json['reason']}")
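# A hypothetical sketch of what the "example_prompt_template" asset text could look
# like (the real template lives in Robocorp Control Room Asset Storage and is not part
# of this repo). It must use the {address_one} and {address_two} placeholders and ask
# for JSON with "result" and "reason" keys, because the code above parses the response
# with json.loads and reads those two fields:
#
#   Compare the following two addresses and tell whether they refer to the same place.
#   Address 1: {address_one}
#   Address 2: {address_two}
#   Answer only with JSON containing "result" and "reason" fields.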
| [
"example_prompt_template",
"You are a helpful assistant that compares addresses for the user."
] |
2024-01-10 | Xuplussss/Dialog-System-API | dialog_server_userID.py | import speech_recognition as sr
import os, io,time, random
from pprint import pformat
from opencc import OpenCC
from itertools import chain
from datetime import datetime
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import FileResponse
# from pydub import AudioSegment
from transformers import OpenAIGPTLMHeadModel, GPT2LMHeadModel, BertTokenizer
import torch
import torch.nn.functional as F
from langconv import Converter # Simplified/Traditional Chinese conversion
import soundfile
from espnet2.bin.asr_inference import Speech2Text
from pydub import AudioSegment
from pydub.silence import split_on_silence
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
cc = OpenCC('tw2s')
SPECIAL_TOKENS = ["[CLS]", "[SEP]", "[PAD]", "[speaker1]", "[speaker2]"]
device = torch.device("cuda")
random.seed(42)
torch.random.manual_seed(42)
torch.cuda.manual_seed(42)
tokenizer_class = BertTokenizer
# model_class = OpenAIGPTLMHeadModel if not args.gpt2 else GPT2LMHeadModel
model_class = OpenAIGPTLMHeadModel
tokenizer = tokenizer_class.from_pretrained("Dialog_model/0507/", do_lower_case=True)
model = model_class.from_pretrained("Dialog_model/0507/")
model.to(device)
model.eval()
## prepare translator
## prepare dialog
app = FastAPI()
step = 0
speech2text = Speech2Text('config.yaml','40epoch.pth') # ASR model
def silence_based_conversion(path, speech2text):
song = AudioSegment.from_wav(path)
# split track where silence is 0.5 seconds
# or more and get chunks
chunks = split_on_silence(song,
# must be silent for at least 0.5 seconds
# or 500 ms. adjust this value based on user
# requirement. if the speaker stays silent for
# longer, increase this value. else, decrease it.
min_silence_len = 500,
# consider it silent if quieter than -16 dBFS
# adjust this per requirement
silence_thresh = -20
)
# create a directory to store the audio chunks.
try:
os.mkdir('audio_chunks')
except(FileExistsError):
pass
# move into the directory to
# store the audio files.
# os.chdir('audio_chunks')
i = 0
text = ''
# process each chunk
for chunk in chunks:
        # Create a short (10 ms) silence chunk
        chunk_silent = AudioSegment.silent(duration = 10)
        # pad the beginning and end of the audio
        # chunk with this silence so that it doesn't
        # seem abruptly sliced.
audio_chunk = chunk_silent + chunk + chunk_silent
# export audio chunk and save it in
# the current directory.
# print("saving chunk{0}.wav".format(i))
# specify the bitrate to be 192 k
audio_chunk.export("./audio_chunks/chunk{0}.wav".format(i), bitrate ='192k', format ="wav")
y, sr = soundfile.read("audio_chunks/chunk{0}.wav".format(i))
text = text + speech2text(y)[0][0] + ','
# os.remove("audio_chunks/chunk{0}.wav".format(i))
i += 1
text = text.strip(',') + '。'
# os.chdir('..')
return text
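# Example usage (a sketch; the wav path is a placeholder and speech2text is the
# Speech2Text instance created above):
#
#   transcript = silence_based_conversion("up/input.wav", speech2text)
#   print(transcript)  # comma-separated chunk transcriptions ending with "。"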
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
    """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering."""
assert logits.dim() == 1
top_k = min(top_k, logits.size(-1))
if top_k > 0:
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cumulative_probabilities > top_p
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
def build_input_from_segments(history, reply, tokenizer, with_eos=True):
bos, eos, pad, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
sequence = [[bos]] + history + [reply + ([eos] if with_eos else [])]
sequence = [sequence[0]] + [[speaker2 if i % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
instance = {}
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [bos] + [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence[1:])
for _ in s]
return instance, sequence
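# Illustration (a sketch in terms of tokens rather than ids): with history = [h1, h2]
# and reply = r, the flattened input is
#   [CLS] [speaker1] h1 [speaker2] h2 [speaker1] r [SEP]
# and token_type_ids mark each segment with the id of the speaker it belongs to, so
# the model can distinguish the two sides of the dialogue.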
def sample_sequence(history, tokenizer, model, current_output=None):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
for i in range(70):
instance, sequence = build_input_from_segments(history, current_output, tokenizer, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], dtype=torch.long, device='cuda').unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], dtype=torch.long, device='cuda').unsqueeze(0)
logits, *_ = model(input_ids, token_type_ids=token_type_ids)
logits = logits[0, -1, :] / 0.7
logits = top_filtering(logits, top_k=0, top_p=0.9)
probs = F.softmax(logits, dim=-1)
# prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
prev = torch.topk(probs, 1)[1]
if i < 1 and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
current_output.append(prev.item())
return current_output
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.get("/dialog_text/")
def read_item(user_id:str,item_id: str, q: str = None):
history =[]
raw_text = " ".join(list(cc.convert(item_id).replace(" ", "")))
history.append(tokenize(raw_text))
with torch.no_grad():
out_ids = sample_sequence(history, tokenizer, model)
history.append(out_ids)
history = history[-(2 * 5 + 1):]
out_text = Converter('zh-hant').convert(tokenizer.decode(out_ids, skip_special_tokens=True).replace(' ','')).replace('幺','麼')
print(item_id)
print(user_id)
print(out_text)
print(datetime.now())
with open('record/dialogue_record.txt', 'a') as record_file:
record_file.write(item_id+'_eos_'+out_text+'\n')
return {"Sys":out_text}
@app.get("/get_wav/")
def main():
file_path = "zhtts_wav.wav"
return FileResponse(path=file_path, filename=file_path, media_type='text/wav')
# client:
# import requests
# r = requests.get('https://localhost:8087/get_wav/')
# open('test.wav','wb').write(r.content)
@app.post("/upload_wav_file/")
async def upload(file: UploadFile = File()):
fn = file.filename
save_path = 'up/'
if not os.path.exists(save_path):
os.mkdir(save_path)
save_file = os.path.join(save_path, fn)
f = open(save_file, 'wb')
data = await file.read()
f.write(data)
f.close()
r = sr.Recognizer()
data_asr = sr.AudioFile(save_file)
with data_asr as source:
audio = r.record(source)
print(audio)
text = r.recognize_google(audio,language = 'zh-tw')
return {'msg': f'{fn}上傳成功'}
# client:
#
# import requests
# files1 = {'file': open('output1.wav', 'rb')}
# r = requests.post('https://localhost:8087/upload_wav_file/', files=files1)
# print(r.text)
@app.post("/dialog_audio/{user_id}/")
async def upload(user_id:str, file: UploadFile = File()):
fn = file.filename
    # save path for uploaded files
save_path = 'up/'
if not os.path.exists(save_path):
os.mkdir(save_path)
save_file = os.path.join(save_path, fn)
f = open(save_file, 'wb')
data = await file.read()
f.write(data)
f.close()
start_time = time.time()
if save_file.endswith('m4a'):
target_file = save_file.replace('m4a','wav')
command = 'ffmpeg -i '+save_file+' -ac 1 -ar 16000 '+target_file
os.system(command)
os.remove(save_file)
save_file = target_file
elif save_file.endswith('mp3'):
target_file = save_file.replace('mp3','wav')
command = 'ffmpeg -i '+save_file+' '+target_file
os.system(command)
os.remove(save_file)
save_file = target_file
text = silence_based_conversion(save_file, speech2text)
end_time = time.time()
history =[]
raw_text = " ".join(list(cc.convert(text).replace(" ", "")))
history.append(tokenize(raw_text))
with torch.no_grad():
out_ids = sample_sequence(history, tokenizer, model)
history.append(out_ids)
history = history[-(2 * 5 + 1):]
out_text = Converter('zh-hant').convert(tokenizer.decode(out_ids, skip_special_tokens=True).replace(' ','')).replace('幺','麼')
print(user_id)
print(text)
print(out_text)
print(end_time-start_time)
print(datetime.now())
with open('record/dialogue_record.txt', 'a') as record_file:
record_file.write(text+'_eos_'+out_text+'_wav_'+save_file+'\n')
# os.remove(save_file)
return {"User":text, "Sys":out_text}
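# client (a sketch mirroring the client examples above; "user123" is a placeholder user id):
#
# import requests
# files1 = {'file': open('output1.wav', 'rb')}
# r = requests.post('https://localhost:8087/dialog_audio/user123/', files=files1)
# print(r.json())  # {"User": "...", "Sys": "..."}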
| [] |
2024-01-10 | vikramsubramanian/code-search-benchmarking | replicating_openai~get_embeddings.py |
import json
from openai import AzureOpenAI
import time
import numpy as np
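# The constants used below (OPEN_AI_API_VERSION, OPEN_AI_BASE, OPENAI_API_KEY and
# EMBEDDING_MODEL) are referenced but never defined in this file. A minimal sketch,
# assuming they are supplied through environment variables (the variable names and
# defaults here are assumptions, not part of the original repo):
import os
OPEN_AI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPEN_AI_BASE = os.environ.get("AZURE_OPENAI_ENDPOINT", "")
OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY", "")
EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-ada-002")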
class OpenAIEmbeddings:
def __init__(self):
"""
https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
"""
self._client = AzureOpenAI(
api_version=OPEN_AI_API_VERSION,
azure_endpoint=OPEN_AI_BASE,
api_key=OPENAI_API_KEY
)
def get_embedding(self, chunk: str):
return (
self._client.embeddings.create(
input=chunk, model=EMBEDDING_MODEL
)
.data[0]
.embedding
)
def get_embeddings_save_to_file(self, code, doc, output_file):
embeddings = []
for i, (code_chunk, doc_chunk) in enumerate(zip(code, doc)):
print(f"Processing chunk {i}")
try:
code_embedding = self.get_embedding(code_chunk)
doc_embedding = self.get_embedding(doc_chunk)
except Exception as e:
print(e)
time.sleep(60)
continue
embeddings.append(
{
"code": code_chunk,
"docstring": doc_chunk,
"code_embedding": code_embedding,
"doc_embedding": doc_embedding,
}
)
with open(output_file, "w") as f:
json.dump(embeddings, f)
def load_embeddings_from_file(path_to_embeddings):
with open(path_to_embeddings, "r") as f:
return json.load(f)
def compute_cosine_similarity(vec1, vec2):
return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
def compute_mrr(embeddings):
ranks = []
for item in embeddings:
        similarities = [
            compute_cosine_similarity(item["doc_embedding"], other["code_embedding"])
            for other in embeddings
        ]
        sorted_similarities = sorted(similarities, reverse=True)
        correct_similarity = compute_cosine_similarity(
            item["doc_embedding"], item["code_embedding"]
        )
        correct_rank = sorted_similarities.index(correct_similarity) + 1
ranks.append(correct_rank)
MRR = np.mean([1 / rank for rank in ranks])
return MRR
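# Worked example of the mean reciprocal rank above: if the matching code snippet is
# ranked 1st, 3rd and 2nd for three docstring queries, MRR = (1/1 + 1/3 + 1/2) / 3 ≈ 0.61.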
if __name__ == "__main__":
# embeddings = OpenAIEmbeddings()
# path_to_codesearchnet = "/Users/vikram/Downloads/python/python/final/jsonl/valid/python_valid_0.jsonl"
# with open(path_to_codesearchnet, "r") as f:
# lines = f.readlines()
# code_chunks = [json.loads(line)["code"] for line in lines]
# doc_chunks = [json.loads(line)["docstring"] for line in lines]
# embeddings.get_embeddings_save_to_file(code_chunks, doc_chunks, "python_embs.json")
print(compute_mrr(load_embeddings_from_file("python_embs.json")))
| [] |
2024-01-10 | AIAnytime/azure-search-openai-demo | scripts~prepdocs.py | import argparse
import base64
import glob
import html
import io
import os
import re
import time
import openai
import tiktoken
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential
from azure.identity import AzureDeveloperCliCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
HnswParameters,
PrioritizedFields,
SearchableField,
SearchField,
SearchFieldDataType,
SearchIndex,
SemanticConfiguration,
SemanticField,
SemanticSettings,
SimpleField,
VectorSearch,
VectorSearchAlgorithmConfiguration,
)
from azure.storage.blob import BlobServiceClient
from pypdf import PdfReader, PdfWriter
from tenacity import retry, stop_after_attempt, wait_random_exponential
MAX_SECTION_LENGTH = 1000
SENTENCE_SEARCH_LIMIT = 100
SECTION_OVERLAP = 100
open_ai_token_cache = {}
CACHE_KEY_TOKEN_CRED = 'openai_token_cred'
CACHE_KEY_CREATED_TIME = 'created_time'
CACHE_KEY_TOKEN_TYPE = 'token_type'
#Embedding batch support section
SUPPORTED_BATCH_AOAI_MODEL = {
'text-embedding-ada-002': {
'token_limit' : 8100,
'max_batch_size' : 16
}
}
def calculate_tokens_emb_aoai(input: str):
encoding = tiktoken.encoding_for_model(args.openaimodelname)
return len(encoding.encode(input))
def blob_name_from_file_page(filename, page = 0):
if os.path.splitext(filename)[1].lower() == ".pdf":
return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf"
else:
return os.path.basename(filename)
def upload_blobs(filename):
blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds)
blob_container = blob_service.get_container_client(args.container)
if not blob_container.exists():
blob_container.create_container()
# if file is PDF split into pages and upload each page as a separate blob
if os.path.splitext(filename)[1].lower() == ".pdf":
reader = PdfReader(filename)
pages = reader.pages
for i in range(len(pages)):
blob_name = blob_name_from_file_page(filename, i)
if args.verbose: print(f"\tUploading blob for page {i} -> {blob_name}")
f = io.BytesIO()
writer = PdfWriter()
writer.add_page(pages[i])
writer.write(f)
f.seek(0)
blob_container.upload_blob(blob_name, f, overwrite=True)
else:
blob_name = blob_name_from_file_page(filename)
with open(filename,"rb") as data:
blob_container.upload_blob(blob_name, data, overwrite=True)
def remove_blobs(filename):
if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'")
blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net", credential=storage_creds)
blob_container = blob_service.get_container_client(args.container)
if blob_container.exists():
if filename is None:
blobs = blob_container.list_blob_names()
else:
prefix = os.path.splitext(os.path.basename(filename))[0]
            blobs = filter(lambda b: re.match(rf"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(name_starts_with=os.path.splitext(os.path.basename(prefix))[0]))
for b in blobs:
if args.verbose: print(f"\tRemoving blob {b}")
blob_container.delete_blob(b)
def table_to_html(table):
table_html = "<table>"
rows = [sorted([cell for cell in table.cells if cell.row_index == i], key=lambda cell: cell.column_index) for i in range(table.row_count)]
for row_cells in rows:
table_html += "<tr>"
for cell in row_cells:
tag = "th" if (cell.kind == "columnHeader" or cell.kind == "rowHeader") else "td"
cell_spans = ""
if cell.column_span > 1: cell_spans += f" colSpan={cell.column_span}"
if cell.row_span > 1: cell_spans += f" rowSpan={cell.row_span}"
table_html += f"<{tag}{cell_spans}>{html.escape(cell.content)}</{tag}>"
table_html +="</tr>"
table_html += "</table>"
return table_html
def get_document_text(filename):
offset = 0
page_map = []
if args.localpdfparser:
reader = PdfReader(filename)
pages = reader.pages
for page_num, p in enumerate(pages):
page_text = p.extract_text()
page_map.append((page_num, offset, page_text))
offset += len(page_text)
else:
if args.verbose: print(f"Extracting text from '{filename}' using Azure Form Recognizer")
form_recognizer_client = DocumentAnalysisClient(endpoint=f"https://{args.formrecognizerservice}.cognitiveservices.azure.com/", credential=formrecognizer_creds, headers={"x-ms-useragent": "azure-search-chat-demo/1.0.0"})
with open(filename, "rb") as f:
poller = form_recognizer_client.begin_analyze_document("prebuilt-layout", document = f)
form_recognizer_results = poller.result()
for page_num, page in enumerate(form_recognizer_results.pages):
tables_on_page = [table for table in form_recognizer_results.tables if table.bounding_regions[0].page_number == page_num + 1]
# mark all positions of the table spans in the page
page_offset = page.spans[0].offset
page_length = page.spans[0].length
table_chars = [-1]*page_length
for table_id, table in enumerate(tables_on_page):
for span in table.spans:
# replace all table spans with "table_id" in table_chars array
for i in range(span.length):
idx = span.offset - page_offset + i
if idx >=0 and idx < page_length:
table_chars[idx] = table_id
# build page text by replacing characters in table spans with table html
page_text = ""
added_tables = set()
for idx, table_id in enumerate(table_chars):
if table_id == -1:
page_text += form_recognizer_results.content[page_offset + idx]
elif table_id not in added_tables:
page_text += table_to_html(tables_on_page[table_id])
added_tables.add(table_id)
page_text += " "
page_map.append((page_num, offset, page_text))
offset += len(page_text)
return page_map
def split_text(page_map, filename):
SENTENCE_ENDINGS = [".", "!", "?"]
WORDS_BREAKS = [",", ";", ":", " ", "(", ")", "[", "]", "{", "}", "\t", "\n"]
if args.verbose: print(f"Splitting '{filename}' into sections")
def find_page(offset):
num_pages = len(page_map)
for i in range(num_pages - 1):
if offset >= page_map[i][1] and offset < page_map[i + 1][1]:
return i
return num_pages - 1
all_text = "".join(p[2] for p in page_map)
length = len(all_text)
start = 0
end = length
while start + SECTION_OVERLAP < length:
last_word = -1
end = start + MAX_SECTION_LENGTH
if end > length:
end = length
else:
# Try to find the end of the sentence
while end < length and (end - start - MAX_SECTION_LENGTH) < SENTENCE_SEARCH_LIMIT and all_text[end] not in SENTENCE_ENDINGS:
if all_text[end] in WORDS_BREAKS:
last_word = end
end += 1
if end < length and all_text[end] not in SENTENCE_ENDINGS and last_word > 0:
end = last_word # Fall back to at least keeping a whole word
if end < length:
end += 1
# Try to find the start of the sentence or at least a whole word boundary
last_word = -1
while start > 0 and start > end - MAX_SECTION_LENGTH - 2 * SENTENCE_SEARCH_LIMIT and all_text[start] not in SENTENCE_ENDINGS:
if all_text[start] in WORDS_BREAKS:
last_word = start
start -= 1
if all_text[start] not in SENTENCE_ENDINGS and last_word > 0:
start = last_word
if start > 0:
start += 1
section_text = all_text[start:end]
yield (section_text, find_page(start))
last_table_start = section_text.rfind("<table")
if (last_table_start > 2 * SENTENCE_SEARCH_LIMIT and last_table_start > section_text.rfind("</table")):
# If the section ends with an unclosed table, we need to start the next section with the table.
# If table starts inside SENTENCE_SEARCH_LIMIT, we ignore it, as that will cause an infinite loop for tables longer than MAX_SECTION_LENGTH
# If last table starts inside SECTION_OVERLAP, keep overlapping
if args.verbose: print(f"Section ends with unclosed table, starting next section with the table at page {find_page(start)} offset {start} table start {last_table_start}")
start = min(end - SECTION_OVERLAP, start + last_table_start)
else:
start = end - SECTION_OVERLAP
if start + SECTION_OVERLAP < end:
yield (all_text[start:end], find_page(start))
def filename_to_id(filename):
filename_ascii = re.sub("[^0-9a-zA-Z_-]", "_", filename)
filename_hash = base64.b16encode(filename.encode('utf-8')).decode('ascii')
return f"file-{filename_ascii}-{filename_hash}"
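# For example, filename_to_id("Benefit Options.pdf") yields
# "file-Benefit_Options_pdf-<hex>", where <hex> is the base16 encoding of the UTF-8
# bytes of the original filename, giving a stable id that is safe for the search index.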
def create_sections(filename, page_map, use_vectors):
file_id = filename_to_id(filename)
for i, (content, pagenum) in enumerate(split_text(page_map, filename)):
section = {
"id": f"{file_id}-page-{i}",
"content": content,
"category": args.category,
"sourcepage": blob_name_from_file_page(filename, pagenum),
"sourcefile": filename
}
if use_vectors:
section["embedding"] = compute_embedding(content)
yield section
def before_retry_sleep(retry_state):
if args.verbose: print("Rate limited on the OpenAI embeddings API, sleeping before retrying...")
@retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep)
def compute_embedding(text):
refresh_openai_token()
return openai.Embedding.create(engine=args.openaideployment, input=text)["data"][0]["embedding"]
@retry(wait=wait_random_exponential(min=15, max=60), stop=stop_after_attempt(15), before_sleep=before_retry_sleep)
def compute_embedding_in_batch(texts):
refresh_openai_token()
emb_response = openai.Embedding.create(engine=args.openaideployment, input=texts)
return [data.embedding for data in emb_response.data]
def create_search_index():
if args.verbose: print(f"Ensuring search index {args.index} exists")
index_client = SearchIndexClient(endpoint=f"https://{args.searchservice}.search.windows.net/",
credential=search_creds)
if args.index not in index_client.list_index_names():
index = SearchIndex(
name=args.index,
fields=[
SimpleField(name="id", type="Edm.String", key=True),
SearchableField(name="content", type="Edm.String", analyzer_name="en.microsoft"),
SearchField(name="embedding", type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
hidden=False, searchable=True, filterable=False, sortable=False, facetable=False,
vector_search_dimensions=1536, vector_search_configuration="default"),
SimpleField(name="category", type="Edm.String", filterable=True, facetable=True),
SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True),
SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True)
],
semantic_settings=SemanticSettings(
configurations=[SemanticConfiguration(
name='default',
prioritized_fields=PrioritizedFields(
title_field=None, prioritized_content_fields=[SemanticField(field_name='content')]))]),
vector_search=VectorSearch(
algorithm_configurations=[
VectorSearchAlgorithmConfiguration(
name="default",
kind="hnsw",
hnsw_parameters=HnswParameters(metric="cosine")
)
]
)
)
if args.verbose: print(f"Creating {args.index} search index")
index_client.create_index(index)
else:
if args.verbose: print(f"Search index {args.index} already exists")
def update_embeddings_in_batch(sections):
batch_queue = []
copy_s = []
batch_response = {}
token_count = 0
for s in sections:
token_count += calculate_tokens_emb_aoai(s["content"])
if token_count <= SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]['token_limit'] and len(batch_queue) < SUPPORTED_BATCH_AOAI_MODEL[args.openaimodelname]['max_batch_size']:
batch_queue.append(s)
copy_s.append(s)
else:
emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue])
if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}")
for emb, item in zip(emb_responses, batch_queue):
batch_response[item["id"]] = emb
batch_queue = []
batch_queue.append(s)
token_count = calculate_tokens_emb_aoai(s["content"])
if batch_queue:
emb_responses = compute_embedding_in_batch([item["content"] for item in batch_queue])
if args.verbose: print(f"Batch Completed. Batch size {len(batch_queue)} Token count {token_count}")
for emb, item in zip(emb_responses, batch_queue):
batch_response[item["id"]] = emb
for s in copy_s:
s["embedding"] = batch_response[s["id"]]
yield s
def index_sections(filename, sections):
if args.verbose: print(f"Indexing sections from '{filename}' into search index '{args.index}'")
search_client = SearchClient(endpoint=f"https://{args.searchservice}.search.windows.net/",
index_name=args.index,
credential=search_creds)
i = 0
batch = []
for s in sections:
batch.append(s)
i += 1
if i % 1000 == 0:
results = search_client.upload_documents(documents=batch)
succeeded = sum([1 for r in results if r.succeeded])
if args.verbose: print(f"\tIndexed {len(results)} sections, {succeeded} succeeded")
batch = []
if len(batch) > 0:
results = search_client.upload_documents(documents=batch)
succeeded = sum([1 for r in results if r.succeeded])
if args.verbose: print(f"\tIndexed {len(results)} sections, {succeeded} succeeded")
def remove_from_index(filename):
if args.verbose: print(f"Removing sections from '{filename or '<all>'}' from search index '{args.index}'")
search_client = SearchClient(endpoint=f"https://{args.searchservice}.search.windows.net/",
index_name=args.index,
credential=search_creds)
while True:
filter = None if filename is None else f"sourcefile eq '{os.path.basename(filename)}'"
r = search_client.search("", filter=filter, top=1000, include_total_count=True)
if r.get_count() == 0:
break
r = search_client.delete_documents(documents=[{ "id": d["id"] } for d in r])
if args.verbose: print(f"\tRemoved {len(r)} sections from index")
# It can take a few seconds for search results to reflect changes, so wait a bit
time.sleep(2)
# refresh open ai token every 5 minutes
def refresh_openai_token():
if open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] == 'azure_ad' and open_ai_token_cache[CACHE_KEY_CREATED_TIME] + 300 < time.time():
token_cred = open_ai_token_cache[CACHE_KEY_TOKEN_CRED]
openai.api_key = token_cred.get_token("https://cognitiveservices.azure.com/.default").token
open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time()
def read_files(path_pattern: str, use_vectors: bool, vectors_batch_support: bool):
"""
Recursively read directory structure under `path_pattern`
and execute indexing for the individual files
"""
for filename in glob.glob(path_pattern):
if args.verbose: print(f"Processing '{filename}'")
if args.remove:
remove_blobs(filename)
remove_from_index(filename)
else:
if os.path.isdir(filename):
read_files(filename + "/*", use_vectors, vectors_batch_support)
continue
try:
if not args.skipblobs:
upload_blobs(filename)
page_map = get_document_text(filename)
sections = create_sections(os.path.basename(filename), page_map, use_vectors and not vectors_batch_support)
print (use_vectors and vectors_batch_support)
if use_vectors and vectors_batch_support:
sections = update_embeddings_in_batch(sections)
index_sections(os.path.basename(filename), sections)
except Exception as e:
print(f"\tGot an error while reading {filename} -> {e} --> skipping file")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.",
epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v"
)
parser.add_argument("files", help="Files to be processed")
parser.add_argument("--category", help="Value for the category field in the search index for all sections indexed in this run")
parser.add_argument("--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage")
parser.add_argument("--storageaccount", help="Azure Blob Storage account name")
parser.add_argument("--container", help="Azure Blob Storage container name")
parser.add_argument("--storagekey", required=False, help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)")
parser.add_argument("--tenantid", required=False, help="Optional. Use this to define the Azure directory where to authenticate)")
parser.add_argument("--searchservice", help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)")
parser.add_argument("--index", help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)")
parser.add_argument("--searchkey", required=False, help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)")
parser.add_argument("--openaiservice", help="Name of the Azure OpenAI service used to compute embeddings")
parser.add_argument("--openaideployment", help="Name of the Azure OpenAI model deployment for an embedding model ('text-embedding-ada-002' recommended)")
parser.add_argument("--openaimodelname", help="Name of the Azure OpenAI embedding model ('text-embedding-ada-002' recommended)")
parser.add_argument("--novectors", action="store_true", help="Don't compute embeddings for the sections (e.g. don't call the OpenAI embeddings API during indexing)")
parser.add_argument("--disablebatchvectors", action="store_true", help="Don't compute embeddings in batch for the sections")
parser.add_argument("--openaikey", required=False, help="Optional. Use this Azure OpenAI account key instead of the current user identity to login (use az login to set current user for Azure)")
parser.add_argument("--remove", action="store_true", help="Remove references to this document from blob storage and the search index")
parser.add_argument("--removeall", action="store_true", help="Remove all blobs from blob storage and documents from the search index")
parser.add_argument("--localpdfparser", action="store_true", help="Use PyPdf local PDF parser (supports only digital PDFs) instead of Azure Form Recognizer service to extract text, tables and layout from the documents")
parser.add_argument("--formrecognizerservice", required=False, help="Optional. Name of the Azure Form Recognizer service which will be used to extract text, tables and layout from the documents (must exist already)")
parser.add_argument("--formrecognizerkey", required=False, help="Optional. Use this Azure Form Recognizer account key instead of the current user identity to login (use az login to set current user for Azure)")
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
args = parser.parse_args()
# Use the current user identity to connect to Azure services unless a key is explicitly set for any of them
azd_credential = AzureDeveloperCliCredential() if args.tenantid is None else AzureDeveloperCliCredential(tenant_id=args.tenantid, process_timeout=60)
default_creds = azd_credential if args.searchkey is None or args.storagekey is None else None
search_creds = default_creds if args.searchkey is None else AzureKeyCredential(args.searchkey)
use_vectors = not args.novectors
compute_vectors_in_batch = not args.disablebatchvectors and args.openaimodelname in SUPPORTED_BATCH_AOAI_MODEL
if not args.skipblobs:
storage_creds = default_creds if args.storagekey is None else args.storagekey
if not args.localpdfparser:
# check if Azure Form Recognizer credentials are provided
if args.formrecognizerservice is None:
print("Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser.")
exit(1)
formrecognizer_creds = default_creds if args.formrecognizerkey is None else AzureKeyCredential(args.formrecognizerkey)
if use_vectors:
if args.openaikey is None:
openai.api_key = azd_credential.get_token("https://cognitiveservices.azure.com/.default").token
openai.api_type = "azure_ad"
open_ai_token_cache[CACHE_KEY_CREATED_TIME] = time.time()
open_ai_token_cache[CACHE_KEY_TOKEN_CRED] = azd_credential
open_ai_token_cache[CACHE_KEY_TOKEN_TYPE] = "azure_ad"
else:
openai.api_type = "azure"
openai.api_key = args.openaikey
openai.api_base = f"https://{args.openaiservice}.openai.azure.com"
openai.api_version = "2022-12-01"
if args.removeall:
remove_blobs(None)
remove_from_index(None)
else:
if not args.remove:
create_search_index()
print("Processing files...")
read_files(args.files, use_vectors, compute_vectors_in_batch)
| [] |
2024-01-10 | AIAnytime/azure-search-openai-demo | app~backend~lookuptool.py | import csv
from pathlib import Path
from typing import Union
from langchain.agents import Tool
from langchain.callbacks.manager import Callbacks
class CsvLookupTool(Tool):
data: dict[str, str] = {}
def __init__(self, filename: Union[str, Path], key_field: str, name: str = "lookup",
description: str = "useful to look up details given an input key as opposite to searching data with an unstructured question",
callbacks: Callbacks = None):
super().__init__(name, self.lookup, description, callbacks=callbacks)
with open(filename, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
self.data[row[key_field]] = "\n".join([f"{i}:{row[i]}" for i in row])
def lookup(self, key: str) -> str:
return self.data.get(key, "")
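# Example usage (a minimal sketch; the CSV path and key below match the defaults used
# by EmployeeInfoTool elsewhere in this repo, but are placeholders here):
#
#   tool = CsvLookupTool(filename="data/employeeinfo.csv", key_field="name")
#   tool.lookup("Employee1")  # newline-joined "column:value" pairs, or "" if not found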
| [] |
2024-01-10 | AIAnytime/azure-search-openai-demo | app~backend~approaches~readdecomposeask.py | import re
from typing import Any, Optional, Sequence
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from langchain.agents import AgentExecutor, Tool
from langchain.agents.react.base import ReActDocstoreAgent
from langchain.callbacks.manager import CallbackManager
from langchain.llms.openai import AzureOpenAI
from langchain.prompts import BasePromptTemplate, PromptTemplate
from langchain.tools.base import BaseTool
from approaches.approach import AskApproach
from langchainadapters import HtmlCallbackHandler
from text import nonewlines
class ReadDecomposeAsk(AskApproach):
def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str):
self.search_client = search_client
self.openai_deployment = openai_deployment
self.embedding_deployment = embedding_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
async def search(self, query_text: str, overrides: dict[str, Any]) -> tuple[list[str], str]:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = ""
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None)
else:
r = await self.search_client.search(query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None)
if use_semantic_captions:
results = [doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc['@search.captions'] ])) async for doc in r]
else:
results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) async for doc in r]
return results, "\n".join(results)
async def lookup(self, q: str) -> Optional[str]:
r = await self.search_client.search(q,
top = 1,
include_total_count=True,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
query_answer="extractive|count-1",
query_caption="extractive|highlight-false")
answers = await r.get_answers()
if answers and len(answers) > 0:
return answers[0].text
if await r.get_count() > 0:
return "\n".join([d['content'] async for d in r])
return None
async def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]:
search_results = None
async def search_and_store(q: str) -> Any:
nonlocal search_results
search_results, content = await self.search(q, overrides)
return content
# Use to capture thought process during iterations
cb_handler = HtmlCallbackHandler()
cb_manager = CallbackManager(handlers=[cb_handler])
llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key)
tools = [
Tool(name="Search", func=lambda _: 'Not implemented', coroutine=search_and_store, description="useful for when you need to ask with search", callbacks=cb_manager),
Tool(name="Lookup", func=lambda _: 'Not implemented', coroutine=self.lookup, description="useful for when you need to ask with lookup", callbacks=cb_manager)
]
prompt_prefix = overrides.get("prompt_template")
prompt = PromptTemplate.from_examples(
EXAMPLES, SUFFIX, ["input", "agent_scratchpad"], prompt_prefix + "\n\n" + PREFIX if prompt_prefix else PREFIX)
class ReAct(ReActDocstoreAgent):
@classmethod
def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
return prompt
agent = ReAct.from_llm_and_tools(llm, tools)
chain = AgentExecutor.from_agent_and_tools(agent, tools, verbose=True, callback_manager=cb_manager)
result = await chain.arun(q)
# Replace substrings of the form <file.ext> with [file.ext] so that the frontend can render them as links, match them with a regex to avoid
# generalizing too much and disrupt HTML snippets if present
result = re.sub(r"<([a-zA-Z0-9_ \-\.]+)>", r"[\1]", result)
return {"data_points": search_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
# Modified version of langchain's ReAct prompt that includes instructions and examples for how to cite information sources
EXAMPLES = [
"""Question: What is the elevation range for the area that the eastern sector of the
Colorado orogeny extends into?
Thought: I need to search Colorado orogeny, find the area that the eastern sector
of the Colorado orogeny extends into, then find the elevation range of the
area.
Action: Search[Colorado orogeny]
Observation: <info1.pdf> The Colorado orogeny was an episode of mountain building (an orogeny) in
Colorado and surrounding areas.
Thought: It does not mention the eastern sector. So I need to look up eastern
sector.
Action: Lookup[eastern sector]
Observation: <info2.txt> (Result 1 / 1) The eastern sector extends into the High Plains and is called
the Central Plains orogeny.
Thought: The eastern sector of Colorado orogeny extends into the High Plains. So I
need to search High Plains and find its elevation range.
Action: Search[High Plains]
Observation: <some_file.pdf> High Plains refers to one of two distinct land regions
Thought: I need to instead search High Plains (United States).
Action: Search[High Plains (United States)]
Observation: <filea.pdf> The High Plains are a subregion of the Great Plains. <another-ref.docx> From east to west, the
High Plains rise in elevation from around 1,800 to 7,000 ft (550 to 2,130
m).
Thought: High Plains rise in elevation from around 1,800 to 7,000 ft, so the answer
is 1,800 to 7,000 ft.
Action: Finish[1,800 to 7,000 ft <filea.pdf>]""",
"""Question: Musician and satirist Allie Goertz wrote a song about the "The Simpsons"
character Milhouse, who Matt Groening named after who?
Thought: The question simplifies to "The Simpsons" character Milhouse is named after
who. I only need to search Milhouse and find who it is named after.
Action: Search[Milhouse]
Observation: <info7.pdf> Milhouse Mussolini Van Houten is a recurring character in the Fox animated
television series The Simpsons voiced by Pamela Hayden and created by Matt
Groening.
Thought: The paragraph does not tell who Milhouse is named after, maybe I can look up
"named after".
Action: Lookup[named after]
Observation: <historyref2.txt> (Result 1 / 1) Milhouse was named after U.S. president Richard Nixon, whose
middle name was Milhous.
Thought: Milhouse was named after U.S. president Richard Nixon, so the answer is
Richard Nixon.
Action: Finish[Richard Nixon <historyref2.txt>]""",
"""Question: Which documentary is about Finnish rock groups, Adam Clayton Powell or The
Saimaa Gesture?
Thought: I need to search Adam Clayton Powell and The Saimaa Gesture, and find which
documentary is about Finnish rock groups.
Action: Search[Adam Clayton Powell]
Observation: <info9.pdf> Could not find [Adam Clayton Powell]. Similar: ['Adam Clayton Powell
III', 'Seventh Avenue (Manhattan)', 'Adam Clayton Powell Jr. State Office
Building', 'Isabel Washington Powell', 'Adam Powell', vAdam Clayton Powell
(film)', 'Giancarlo Esposito'].
Thought: To find the documentary, I can search Adam Clayton Powell (film).
Action: Search[Adam Clayton Powell (film)]
Observation: <data123.txt> Adam Clayton Powell is a 1989 American documentary film directed by
Richard Kilberg. The film is about the rise and fall of influential
African-American politician Adam Clayton Powell Jr.[3][4] It was later aired
as part of the PBS series The American Experience.
Thought: Adam Clayton Powell (film) is a documentary about an African-American
politician, not Finnish rock groups. So the documentary about Finnish rock
groups must instead be The Saimaa Gesture.
Action: Finish[The Saimaa Gesture <data123.txt>]""",
"""Question: What profession does Nicholas Ray and Elia Kazan have in common?
Thought: I need to search Nicholas Ray and Elia Kazan, find their professions, then
find the profession they have in common.
Action: Search[Nicholas Ray]
Observation: <files-987.png> Nicholas Ray (born Raymond Nicholas Kienzle Jr., August 7, 1911 - June 16,
1979) was an American film director, screenwriter, and actor best known for
the 1955 film Rebel Without a Cause.
Thought: Professions of Nicholas Ray are director, screenwriter, and actor. I need
to search Elia Kazan next and find his professions.
Action: Search[Elia Kazan]
Observation: <files-654.txt> Elia Kazan was an American film and theatre director, producer, screenwriter
and actor.
Thought: Professions of Elia Kazan are director, producer, screenwriter, and actor.
So profession Nicholas Ray and Elia Kazan have in common is director,
screenwriter, and actor.
Action: Finish[director, screenwriter, actor <files-987.png><files-654.txt>]""",
"""Question: Which magazine was started first Arthur's Magazine or First for Women?
Thought: I need to search Arthur's Magazine and First for Women, and find which was
started first.
Action: Search[Arthur's Magazine]
Observation: <magazines-1850.pdf> Arthur's Magazine (1844-1846) was an American literary periodical published
in Philadelphia in the 19th century.
Thought: Arthur's Magazine was started in 1844. I need to search First for Women
next.
Action: Search[First for Women]
Observation: <magazines-1900.pdf> First for Women is a woman's magazine published by Bauer Media Group in the
USA.[1] The magazine was started in 1989.
Thought: First for Women was started in 1989. 1844 (Arthur's Magazine) < 1989 (First
for Women), so Arthur's Magazine was started first.
Action: Finish[Arthur's Magazine <magazines-1850.pdf><magazines-1900.pdf>]""",
"""Question: Were Pavel Urysohn and Leonid Levin known for the same type of work?
Thought: I need to search Pavel Urysohn and Leonid Levin, find their types of work,
then find if they are the same.
Action: Search[Pavel Urysohn]
Observation: <info4444.pdf> Pavel Samuilovich Urysohn (February 3, 1898 - August 17, 1924) was a Soviet
mathematician who is best known for his contributions in dimension theory.
Thought: Pavel Urysohn is a mathematician. I need to search Leonid Levin next and
find its type of work.
Action: Search[Leonid Levin]
Observation: <datapoints_aaa.txt> Leonid Anatolievich Levin is a Soviet-American mathematician and computer
scientist.
Thought: Leonid Levin is a mathematician and computer scientist. So Pavel Urysohn
and Leonid Levin have the same type of work.
Action: Finish[yes <info4444.pdf><datapoints_aaa.txt>]""",
]
SUFFIX = """\nQuestion: {input}
{agent_scratchpad}"""
PREFIX = "Answer questions as shown in the following examples, by splitting the question into individual search or lookup actions to find facts until you can answer the question. " \
"Observations are prefixed by their source name in angled brackets, source names MUST be included with the actions in the answers. " \
"All questions must be answered from the results from search or look up actions, only facts resulting from those can be used in an answer. " \
"Answer questions as truthfully as possible, and ONLY answer the questions using the information from observations, do not speculate or use your own knowledge."
| [
"\nQuestion: {input}\n{agent_scratchpad}",
"['input', 'agent_scratchpad']",
"\n\n",
"agent_scratchpad",
"input",
"prompt_template"
] |
2024-01-10 | AIAnytime/azure-search-openai-demo | app~backend~approaches~readretrieveread.py | from typing import Any
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.callbacks.manager import CallbackManager, Callbacks
from langchain.chains import LLMChain
from langchain.llms.openai import AzureOpenAI
from approaches.approach import AskApproach
from langchainadapters import HtmlCallbackHandler
from lookuptool import CsvLookupTool
from text import nonewlines
class ReadRetrieveReadApproach(AskApproach):
"""
Attempt to answer questions by iteratively evaluating the question to see what information is missing, and once all information
is present then formulate an answer. Each iteration consists of two parts:
1. use GPT to see if we need more information
2. if more data is needed, use the requested "tool" to retrieve it.
The last call to GPT answers the actual question.
    This is inspired by the MRKL paper [1] and applied here using the implementation in Langchain.
[1] E. Karpas, et al. arXiv:2205.00445
"""
template_prefix = \
"You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. " \
"Answer the question using only the data provided in the information sources below. " \
"For tabular information return it as an html table. Do not return markdown format. " \
"Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. " \
"For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" " \
"It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). " \
"If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". " \
"Never quote tool names as sources." \
"If you cannot answer using the sources below, say that you don't know. " \
"\n\nYou can access to the following tools:"
template_suffix = """
Begin!
Question: {input}
Thought: {agent_scratchpad}"""
CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc."
def __init__(self, search_client: SearchClient, openai_deployment: str, embedding_deployment: str, sourcepage_field: str, content_field: str):
self.search_client = search_client
self.openai_deployment = openai_deployment
self.embedding_deployment = embedding_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
async def retrieve(self, query_text: str, overrides: dict[str, Any]) -> Any:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = ""
# Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top = top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None)
else:
r = await self.search_client.search(query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None)
if use_semantic_captions:
results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) async for doc in r]
else:
results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) async for doc in r]
content = "\n".join(results)
return results, content
async def run(self, q: str, overrides: dict[str, Any]) -> dict[str, Any]:
retrieve_results = None
async def retrieve_and_store(q: str) -> Any:
nonlocal retrieve_results
retrieve_results, content = await self.retrieve(q, overrides)
return content
# Use to capture thought process during iterations
cb_handler = HtmlCallbackHandler()
cb_manager = CallbackManager(handlers=[cb_handler])
acs_tool = Tool(name="CognitiveSearch",
func=lambda _: 'Not implemented',
coroutine=retrieve_and_store,
description=self.CognitiveSearchToolDescription,
callbacks=cb_manager)
employee_tool = EmployeeInfoTool("Employee1", callbacks=cb_manager)
tools = [acs_tool, employee_tool]
prompt = ZeroShotAgent.create_prompt(
tools=tools,
prefix=overrides.get("prompt_template_prefix") or self.template_prefix,
suffix=overrides.get("prompt_template_suffix") or self.template_suffix,
input_variables = ["input", "agent_scratchpad"])
llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.3, openai_api_key=openai.api_key)
chain = LLMChain(llm = llm, prompt = prompt)
agent_exec = AgentExecutor.from_agent_and_tools(
agent = ZeroShotAgent(llm_chain = chain),
tools = tools,
verbose = True,
callback_manager = cb_manager)
result = await agent_exec.arun(q)
# Remove references to tool names that might be confused with a citation
result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "")
return {"data_points": retrieve_results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
class EmployeeInfoTool(CsvLookupTool):
employee_name: str = ""
def __init__(self, employee_name: str, callbacks: Callbacks = None):
super().__init__(filename="data/employeeinfo.csv",
key_field="name",
name="Employee",
description="useful for answering questions about the employee, their benefits and other personal information",
callbacks=callbacks)
self.func = lambda _: 'Not implemented'
self.coroutine = self.employee_info
self.employee_name = employee_name
async def employee_info(self, name: str) -> str:
return self.lookup(name)
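# Example wiring of the approach (a sketch; the concrete SearchClient and deployment
# names are configured in the app setup and are placeholders here):
#
#   approach = ReadRetrieveReadApproach(
#       search_client=search_client,
#       openai_deployment="chat",
#       embedding_deployment="embedding",
#       sourcepage_field="sourcepage",
#       content_field="content")
#   result = await approach.run("What does my health plan cover?", overrides={"top": 3})
#   print(result["answer"])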
| [
"You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. Answer the question using only the data provided in the information sources below. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual data, quote the source name for each piece of data you use in the response. For example, if the question is \"What color is the sky?\" and one of the information sources says \"info123: the sky is blue whenever it's not cloudy\", then answer with \"The sky is blue [info123]\" It's important to strictly follow the format where the name of the source is in square brackets at the end of the sentence, and only up to the prefix before the colon (\":\"). If there are multiple sources, cite each one in their own square brackets. For example, use \"[info343][ref-76]\" and not \"[info343,ref-76]\". Never quote tool names as sources.If you cannot answer using the sources below, say that you don't know. \n\nYou can access to the following tools:",
"\nBegin!\n\nQuestion: {input}\n\nThought: {agent_scratchpad}",
"prompt_template_suffix",
"agent_scratchpad",
"input",
"prompt_template_prefix"
] |
2024-01-10 | AIAnytime/azure-search-openai-demo | app~backend~approaches~chatreadretrieveread.py | from typing import Any, AsyncGenerator
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from core.messagebuilder import MessageBuilder
from core.modelhelper import get_token_limit
from text import nonewlines
class ChatReadRetrieveReadApproach:
# Chat roles
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
"""
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
    top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
(answer) with that prompt.
"""
system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
"""
follow_up_questions_prompt_content = """Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook.
Use double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.
Try not to repeat questions that have already been asked.
Only generate questions and do not generate any text before or after the questions, such as 'Next Questions'"""
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
If the question is not in English, translate the question to English before generating the search query.
If you cannot generate a search query, return just the number 0.
"""
query_prompt_few_shots = [
{'role' : USER, 'content' : 'What are my health plans?' },
{'role' : ASSISTANT, 'content' : 'Show available health plans' },
{'role' : USER, 'content' : 'does my plan cover cardio?' },
{'role' : ASSISTANT, 'content' : 'Health plan cardio coverage' }
]
def __init__(self, search_client: SearchClient, chatgpt_deployment: str, chatgpt_model: str, embedding_deployment: str, sourcepage_field: str, content_field: str):
self.search_client = search_client
self.chatgpt_deployment = chatgpt_deployment
self.chatgpt_model = chatgpt_model
self.embedding_deployment = embedding_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.chatgpt_token_limit = get_token_limit(chatgpt_model)
async def run_until_final_call(self, history: list[dict[str, str]], overrides: dict[str, Any], should_stream: bool = False) -> tuple:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
user_q = 'Generate search query for: ' + history[-1]["user"]
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
messages = self.get_messages_from_history(
self.query_prompt_template,
self.chatgpt_model,
history,
user_q,
self.query_prompt_few_shots,
self.chatgpt_token_limit - len(user_q)
)
chat_completion = await openai.ChatCompletion.acreate(
deployment_id=self.chatgpt_deployment,
model=self.chatgpt_model,
messages=messages,
temperature=0.0,
max_tokens=32,
n=1)
query_text = chat_completion.choices[0].message.content
if query_text.strip() == "0":
query_text = history[-1]["user"] # Use the last user input if we failed to generate a better query
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
query_vector = (await openai.Embedding.acreate(engine=self.embedding_deployment, input=query_text))["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = None
# Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None)
else:
r = await self.search_client.search(query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None)
if use_semantic_captions:
results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) async for doc in r]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r]
content = "\n".join(results)
follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
        # Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
prompt_override = overrides.get("prompt_override")
if prompt_override is None:
system_message = self.system_message_chat_conversation.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt)
elif prompt_override.startswith(">>>"):
system_message = self.system_message_chat_conversation.format(injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt)
else:
system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt)
messages = self.get_messages_from_history(
system_message,
self.chatgpt_model,
history,
history[-1]["user"]+ "\n\nSources:\n" + content, # Model does not handle lengthy system messages well. Moving sources to latest user conversation to solve follow up questions prompt.
max_tokens=self.chatgpt_token_limit)
msg_to_display = '\n\n'.join([str(message) for message in messages])
extra_info = {"data_points": results, "thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>" + msg_to_display.replace('\n', '<br>')}
chat_coroutine = openai.ChatCompletion.acreate(
deployment_id=self.chatgpt_deployment,
model=self.chatgpt_model,
messages=messages,
temperature=overrides.get("temperature") or 0.7,
max_tokens=1024,
n=1,
stream=should_stream)
return (extra_info, chat_coroutine)
async def run_without_streaming(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> dict[str, Any]:
extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=False)
chat_content = (await chat_coroutine).choices[0].message.content
extra_info["answer"] = chat_content
return extra_info
async def run_with_streaming(self, history: list[dict[str, str]], overrides: dict[str, Any]) -> AsyncGenerator[dict, None]:
extra_info, chat_coroutine = await self.run_until_final_call(history, overrides, should_stream=True)
yield extra_info
async for event in await chat_coroutine:
yield event
def get_messages_from_history(self, system_prompt: str, model_id: str, history: list[dict[str, str]], user_conv: str, few_shots = [], max_tokens: int = 4096) -> list:
message_builder = MessageBuilder(system_prompt, model_id)
# Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message.
for shot in few_shots:
message_builder.append_message(shot.get('role'), shot.get('content'))
user_content = user_conv
append_index = len(few_shots) + 1
message_builder.append_message(self.USER, user_content, index=append_index)
for h in reversed(history[:-1]):
if bot_msg := h.get("bot"):
message_builder.append_message(self.ASSISTANT, bot_msg, index=append_index)
if user_msg := h.get("user"):
message_builder.append_message(self.USER, user_msg, index=append_index)
if message_builder.token_length > max_tokens:
break
messages = message_builder.messages
return messages
| [
"Show available health plans",
"Health plan cardio coverage",
"What are my health plans?",
"Next Questions",
"Generate three very brief follow-up questions that the user would likely ask next about their healthcare plan and employee handbook.\nUse double angle brackets to reference the questions, e.g. <<Are there exclusions for prescriptions?>>.\nTry not to repeat questions that have already been asked.\nOnly generate questions and do not generate any text before or after the questions, such as 'Next Questions'",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.\nGenerate a search query based on the conversation and the new question.\nDo not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nIf the question is not in English, translate the question to English before generating the search query.\nIf you cannot generate a search query, return just the number 0.\n",
"suggest_followup_questions",
"prompt_override",
"does my plan cover cardio?"
] |
2024-01-10 | AIAnytime/azure-search-openai-demo | tests~conftest.py | from collections import namedtuple
from unittest import mock
import openai
import pytest
import pytest_asyncio
from azure.search.documents.aio import SearchClient
import app
MockToken = namedtuple("MockToken", ["token", "expires_on"])
class MockAzureCredential:
async def get_token(self, uri):
return MockToken("mock_token", 9999999999)
@pytest.fixture
def mock_openai_embedding(monkeypatch):
async def mock_acreate(*args, **kwargs):
return {"data": [{"embedding": [0.1, 0.2, 0.3]}]}
monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate)
@pytest.fixture
def mock_openai_chatcompletion(monkeypatch):
class AsyncChatCompletionIterator:
def __init__(self, answer):
self.num = 1
self.answer = answer
def __aiter__(self):
return self
async def __anext__(self):
if self.num == 1:
self.num = 0
return openai.util.convert_to_openai_object({"choices": [{"delta": {"content": self.answer}}]})
else:
raise StopAsyncIteration
async def mock_acreate(*args, **kwargs):
messages = kwargs["messages"]
if messages[-1]["content"] == "Generate search query for: What is the capital of France?":
answer = "capital of France"
else:
answer = "The capital of France is Paris."
if "stream" in kwargs and kwargs["stream"] is True:
return AsyncChatCompletionIterator(answer)
else:
return openai.util.convert_to_openai_object({"choices": [{"message": {"content": answer}}]})
monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate)
@pytest.fixture
def mock_acs_search(monkeypatch):
class Caption:
def __init__(self, text):
self.text = text
class AsyncSearchResultsIterator:
def __init__(self):
self.num = 1
def __aiter__(self):
return self
async def __anext__(self):
if self.num == 1:
self.num = 0
return {
"sourcepage": "Benefit_Options-2.pdf",
"sourcefile": "Benefit_Options.pdf",
"content": "There is a whistleblower policy.",
"embeddings": [],
"category": None,
"id": "file-Benefit_Options_pdf-42656E656669745F4F7074696F6E732E706466-page-2",
"@search.score": 0.03279569745063782,
"@search.reranker_score": 3.4577205181121826,
"@search.highlights": None,
"@search.captions": [Caption("Caption: A whistleblower policy.")],
}
else:
raise StopAsyncIteration
async def mock_search(*args, **kwargs):
return AsyncSearchResultsIterator()
monkeypatch.setattr(SearchClient, "search", mock_search)
@pytest_asyncio.fixture
async def client(monkeypatch, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search):
monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
monkeypatch.setenv("AZURE_OPENAI_SERVICE", "test-openai-service")
monkeypatch.setenv("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "test-chatgpt")
monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo")
monkeypatch.setenv("AZURE_OPENAI_EMB_DEPLOYMENT", "test-ada")
with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
mock_default_azure_credential.return_value = MockAzureCredential()
quart_app = app.create_app()
async with quart_app.test_app() as test_app:
quart_app.config.update({"TESTING": True})
yield test_app.test_client()
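# A test using these fixtures might look like the sketch below; the route name and payload shape are
# assumptions about app.py, which is not shown here:
#   @pytest.mark.asyncio
#   async def test_ask(client):
#       response = await client.post("/ask", json={"question": "What is the capital of France?", "approach": "rtr"})
#       assert response.status_code == 200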
| [
"There is a whistleblower policy."
] |
2024-01-10 | jakethekoenig/whimper | plugin~whimper.py | import itertools
import os
import wave
from textwrap import dedent
import pyaudio
import pynvim
import openai
from faster_whisper import WhisperModel
openai.api_key = os.getenv("OPENAI_API_KEY")
AUDIO_FILE = "temp.wav"
CHUNK = 512
RATE = 44100
WHISPER_BATCH_SECS = 2
GPT_BATCH_SECS = 4
NUM_CHUNKS = 12000
audio = pyaudio.PyAudio()
MAX_SECONDS = 360 # TODO: break things up so I send at most 30 s to whisper
gpt_model = "gpt-3.5-turbo"
# TODO: I don't say the language but I should pick it up from context and let the model know in case it's from the beginning of a file.
system_prompt_content = """
You will be given a few lines of CODE and then a TRANSCRIPT of spoken code. Please
answer with just your best guess of the intended code continuation given the
transcript. Please only return the code continuation. The goal is for your response to
be inserted directly below the lines of CODE. Do not annotate or explain your code in
any way.
"""
system_prompt_content = dedent(system_prompt_content).strip()
system_prompt = {"role": "system",
"content": system_prompt_content}
def write_audio(data):
waveFile = wave.open(AUDIO_FILE, 'wb')
waveFile.setnchannels(1)
waveFile.setsampwidth(audio.get_sample_size(pyaudio.paInt16))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(data))
waveFile.close()
def segments_to_transcript(segments):
transcript = ""
for segment in segments:
transcript += segment.text + " "
return transcript
@pynvim.plugin
class Whimper:
def __init__(self, nvim):
self.nvim = nvim
def set_code_context(self):
self.start_line = self.nvim.current.buffer.mark('.')[0]
MAX_LINES = 20
start_context_line = max(0, self.start_line - MAX_LINES)
lines = self.nvim.current.buffer[start_context_line:self.start_line]
self.code_context = "\n".join(lines)
def gpt_prompt(self, transcript):
history = [system_prompt]
history += [{"role": "user", "content": "CODE:\n"+self.code_context}]
history += [{"role": "user", "content": "TRANSCRIPT: "+transcript}]
return history
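    # For illustration, with code context "def add(a, b):" and transcript "return a plus b" (hypothetical values),
    # gpt_prompt returns:
    #   [system_prompt,
    #    {"role": "user", "content": "CODE:\ndef add(a, b):"},
    #    {"role": "user", "content": "TRANSCRIPT: return a plus b"}]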
def setup(self):
# TODO: replace 3 with selection menu
        # I keep a buffer that's much larger than the chunks I read so I don't lose frames when I call Whisper and/or GPT.
self.stream = audio.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True, input_device_index=3, frames_per_buffer=CHUNK*50)
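        # Rough headroom math: CHUNK*50 = 512*50 = 25,600 frames, i.e. about 0.58 s of audio at 44,100 Hz
        # buffered between reads, so short stalls while Whisper/GPT run should not drop frames.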
# TODO: figure out CUDA
self.whisper_model_size = "tiny"
self.whisper_model = WhisperModel(self.whisper_model_size)
self.initial_paste_value = self.nvim.command_output('set paste?')
self.initial_stl_value = self.nvim.command_output('set stl?')
self.nvim.feedkeys(':set paste\n')
self.nvim.feedkeys(':let &stl="[Recording Audio]"\n')
self.nvim.feedkeys("i")
self.sent_so_far = ""
self.set_code_context() # Don't need this for transcribe but it's cheap
def teardown(self):
self.stream.close()
self.nvim.feedkeys("\x03")
self.nvim.command('set {}'.format(self.initial_paste_value))
self.nvim.command('set {}'.format(self.initial_stl_value))
history = []
def send_response(self, text):
# TODO: detect typing and stop the model
# It's possible to rewrite this with no ifs and it probably doesn't even matter for efficiency.
if self.sent_so_far == text[:len(self.sent_so_far)]:
self.nvim.feedkeys(text[len(self.sent_so_far):])
else:
current_line = self.nvim.current.buffer.mark('.')[0]
if current_line == 1+self.start_line:
self.nvim.feedkeys("\x03cc{}".format(text)) # TODO: multi line changes.
else:
self.nvim.feedkeys("\x03c{}k{}".format(current_line - self.start_line, text))
self.sent_so_far = text
@pynvim.function("Transcribe")
def transcribe(self, args):
self.setup()
data = []
last_whisper_run = 0
for i in range(NUM_CHUNKS):
if i * CHUNK / RATE > MAX_SECONDS:
break
data += [self.stream.read(CHUNK)]
if (i - last_whisper_run) * CHUNK / RATE > WHISPER_BATCH_SECS:
last_whisper_run = i
# TODO: pass data directly to whisper
write_audio(data)
segments, info = self.whisper_model.transcribe(AUDIO_FILE, beam_size=5, language="en")
transcript = segments_to_transcript(segments)
if "stop" in transcript.lower():
break
# TODO: detect typing and stop the model
self.send_response(transcript)
self.teardown()
@pynvim.function("Whimper")
def whimper(self, args):
self.setup()
data = []
last_gpt_run = 0
history = []
for i in range(NUM_CHUNKS):
if i * CHUNK / RATE > MAX_SECONDS:
break
data += [self.stream.read(CHUNK)]
if (i - last_gpt_run) * CHUNK / RATE > GPT_BATCH_SECS:
last_gpt_run = i
# TODO: pass data directly to whisper
write_audio(data)
segments, info = self.whisper_model.transcribe(AUDIO_FILE, beam_size=5, language="en")
transcript = segments_to_transcript(segments)
if "stop" in transcript.lower():
break
# TODO: remember previous GPT response and send it to GPT to minimize needed tokens.
history = self.gpt_prompt(transcript)
response = openai.ChatCompletion.create(
model=gpt_model,
messages=history
# stream=True
)
self.send_response(response["choices"][0]["message"]["content"])
self.teardown()
| [
"{'role': 'system', 'content': PLACEHOLDER}",
"CODE:\n",
"TRANSCRIPT: PLACEHOLDER",
"\nYou will be given a few lines of CODE and then a TRANSCRIPT of spoken code. Please\nanswer with just your best guess of the intended code continuation given the\ntranscript. Please only return the code continuation. The goal is for your response to\nbe inserted directly below the lines of CODE. Do not annotate or explain your code in\nany way.\n"
] |
2024-01-10 | Aman95495/Jarvis1 | my_openai.py | import os
import openai
from config import apikey
openai.api_key = apikey
# ChatCompletion.create expects a chat model via `model=` (the old `engine=` parameter belongs to the
# legacy Completion API / Azure deployments), and every message needs both a "role" and a "content".
# The model name below is illustrative.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {
            "role": "user",
            "content": "Write a letter to my boss requesting a salary increment."
        }
    ],
    temperature=1,
    max_tokens=256,
    top_p=1,
    frequency_penalty=0,
    presence_penalty=0
)
print(response["choices"][0]["message"]["content"])
| [
"write a letter to boss for salary increament"
] |
2024-01-10 | Aman95495/Jarvis1 | source_code2.py | import os
import subprocess
import win32com.client
import speech_recognition as sr
import webbrowser
import openai
import random
import datetime
import wikipedia
from config import apikey
from config import weather
import requests
# This helps Jarvis speak (Windows SAPI text-to-speech).
speaker = win32com.client.Dispatch("SAPI.SpVoice")
# Take a command, i.e. this function converts a spoken command to text and returns it.
def takeCommand():
r=sr.Recognizer()
with sr.Microphone() as source:
r.pause_threshold = 0.6
r.energy_threshold = 300
audio = r.listen(source)
try:
print("Recognizing....")
query = r.recognize_google(audio, language="en-in")
print(f'User Said: {query}')
return query
except Exception as e:
return "Sorry Sir Some Error Occured."
# calling openai function
def ai(prompt):
openai.api_key = apikey
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
try:
return response.choices[0].text
except Exception as e:
return str(e)
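# Illustrative call (hypothetical prompt): ai("Explain recursion in one sentence") returns the
# completion text on success, or the exception message as a string on failure.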
# asking for weather details
def askweather(location):
# Replace with your OpenWeatherMap API key
api_key = weather
# Define the parameters for your request (e.g., city name and units)
params = {
"q": location, # Replace with the desired location (city, country)
"units": "metric", # Use "imperial" for Fahrenheit
"appid": api_key
}
# Define the base API URL for the "weather" endpoint
base_url = "https://api.openweathermap.org/data/2.5/weather"
# Construct the complete URL by combining the base URL and parameters
api_url = f"{base_url}?q={params['q']}&units={params['units']}&appid={params['appid']}"
# Send the GET request
response = requests.get(api_url)
# Check if the request was successful (status code 200)
if response.status_code == 200:
data = response.json()
# Print the weather data
'''
print("City:", data["name"])
print("Temperature:", data["main"]["temp"], "°C")
print("Description:", data["weather"][0]["description"])
'''
speaker.speak(f"Sir, in {data['name']}, today's temperature is {data['main']['temp']} degrees Celsius with {data['weather'][0]['description']} conditions.")
else:
print("Error:", response.status_code)
speaker.speak("Sorry Sir Some Error Occurred!")
# Wish Function
def wishMe():
hour=int(datetime.datetime.now().hour)
if 0 <= hour < 4:
speaker.speak("Good night Sir!")
elif 4 <= hour < 12:
speaker.speak("Good morning Sir!")
elif 12 <= hour < 16:
speaker.speak("Good afternoon Sir!")
else:
speaker.speak("Good evening Sir!")
# Main Function
if __name__ == '__main__':
wishMe()
speaker.speak(" I am Jarvis, How may i help you?")
while True:
print("Listening....")
query = input("command :: ")
print(f"User Said :: {query}")
#### For Opening Different Sites Using Command "open <site name>"
sites = [
["youtube", "https://www.youtube.com"],
["google", "https://www.google.com"],
["facebook", "https://www.facebook.com"],
["twitter", "https://www.twitter.com"],
["leetcode", "https://leetcode.com"],
["hackerearth", "https://www.hackerearth.com"],
["Wikipedia", "https://www.wikipedia.org"],
["Tinkercad", "https://www.tinkercad.com"],
["LinkedIn", "https://www.linkedin.com"],
["AngelList", "https://www.angel.co"],
["Google Scholar", "https://scholar.google.com"],
["Coursera", "https://www.coursera.org"],
["edX", "https://www.edx.org"],
["Khan Academy", "https://www.khanacademy.org"],
["MIT OpenCourseWare", "https://ocw.mit.edu"],
["Harvard Online Courses", "https://online-learning.harvard.edu"],
["Stanford Online", "https://online.stanford.edu"],
["Udacity", "https://www.udacity.com"],
["Codecademy", "https://www.codecademy.com"],
["Duolingo", "https://www.duolingo.com"],
["TED Talks", "https://www.ted.com"],
["National Geographic Kids", "https://kids.nationalgeographic.com"],
["NASA", "https://www.nasa.gov"],
["Smithsonian Institution", "https://www.si.edu"],
["History.com", "https://www.history.com"],
["Discovery Channel", "https://www.discovery.com"],
["Britannica", "https://www.britannica.com"],
["OpenStax", "https://openstax.org"],
["Project Gutenberg", "https://www.gutenberg.org"],
["SparkNotes", "https://www.sparknotes.com"],
["Chemguide", "http://www.chemguide.co.uk"],
["Geology.com", "https://geology.com"],
["Internet Archive", "https://archive.org"],
["National Archives", "https://www.archives.gov"],
["Smithsonian Learning Lab", "https://learninglab.si.edu"]
]
for site in sites:
if f'open {site[0]}'.lower() in query:
speaker.speak(f'Opening {site[0]} sir...')
webbrowser.open(site[1])
### Music
if "open music" in query:
musicPath = "C:\\Users\\DELL\\Music"
songs = os.listdir(musicPath)
print(songs)
if songs:
speaker.speak("Opening Music Sir!")
random_song = random.choice(songs)
song_path = os.path.join(musicPath, random_song)
try:
# Use subprocess to open the music file in the background
subprocess.Popen(['start', '', song_path], shell=True)
except Exception as e:
print(f"Error: {e}")
else:
speaker.speak("Sorry Sir Can't Play Music")
elif 'search google for' in query:
query = query.replace("search google for", "")
speaker.speak(f"Searching Google for {query}")
webbrowser.open(f"https://www.google.com/search?q={query}")
elif 'open notepad' in query:
speaker.speak("Opening Notepad Sir")
subprocess.Popen(["notepad.exe"])
elif 'open file explorer' in query:
speaker.speak("Opening file explorer Sir")
subprocess.Popen(["explorer.exe"])
elif 'open code' in query:
speaker.speak("Opening Visual Studio Code Sir")
codePath = "C:\\Users\\DELL\\Desktop\\Gallery\\Application Setup\\VS code\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'open python' in query:
speaker.speak("Opening Python IDLE Sir")
codePath = "C:\\Program Files\\Python311\\Lib\\idlelib\\idle.pyw"
os.startfile(codePath)
elif 'open html' in query:
speaker.speak("Opening notepad++ Sir")
codePath = 'C:\\Users\\DELL\\OneDrive\\Desktop\\Gallery\\Application Setup\\Notepad++\\notepad++'
os.startfile(codePath)
elif 'what time is it' in query:
current_time = datetime.datetime.now().strftime("%I:%M %p")
speaker.speak(f"Sir, The current time is {current_time}")
elif 'wikipedia' in query:
speaker.speak('Searching Wikipedia...')
query = query.replace("wikipedia","")
results = wikipedia.summary(query, sentences=2)
speaker.speak("According to Wikipedia")
print(results)
speaker.speak(results)
elif 'search ai' in query:
query = query.replace("search ai","")
speaker.speak("According to ai :: ")
speaker.speak(ai(prompt=query))
        elif 'tell me about' in query:
            query = query.replace('tell me about','')
            # Assumption: this branch was left empty in the original; route the query to the AI helper like 'search ai'.
            speaker.speak(ai(prompt=query))
elif 'today weather condition for ' in query:
query = query.replace('today weather condition for ','')
askweather(query)
elif 'jarvis quit' in query:
speaker.speak("If you have any more questions or need further assistance in the future, feel free to reach out. Have a great day!")
break
| [] |
2024-01-10 | harriet-fisher/pong-audio | pong-audio-1.py | """
Based on: https://gist.github.com/xjcl/8ce64008710128f3a076
Modified by PedroLopes and ShanYuanTeng for Intro to HCI class but credit remains with author
HOW TO RUN HOST LOCALLY:
> python3 pong-audio.py host
HOW TO RUN HOST FOR CONNECTION:
> python3 pong-audio.py host --host_ip 127.0.0.1
HOW TO PLAY ON HOST VISUALLY:
Play like a regular pong:
Player 1 controls the left paddle: UP (W) DOWN (S)
Player 2 controls the right paddle: UP (O) DOWN (L)
HOW TO CONNECT TO HOST AS PLAYER 1
> python3 pong-audio.py player --host_ip 127.0.0.1 --host_port 5005 --player_ip 127.0.0.1 --player_port 5007
HOW TO CONNECT TO HOST AS PLAYER 2
> python3 pong-audio.py player --host_ip 127.0.0.1 --host_port 5006 --player_ip 127.0.0.1 --player_port 5008
about IP and ports: 127.0.0.1 means your own computer; change it to play across computers on the same network. Port numbers are picked to avoid conflicts.
CODE YOUR AUDIO CONTROL FOR PLAYER!
p.s.: this needs 10x10 image in the same directory: "white_square.png".
"""
import time
import math
import random
import pyglet
import sys
from playsound import playsound
import argparse
from pyo import *
from pysinewave import SineWave
import threading
from gtts import gTTS
import sounddevice as sd
import speech_recognition as sr
import aubio
import numpy as num
import pyaudio
import wave
import tempfile
import soundfile as sf
from whisper_mic.whisper_mic import WhisperMic
from typing import Optional
import openai
from pythonosc import osc_server
from pythonosc import dispatcher
from pythonosc import udp_client
mode = ''
debug = False
quit = False
pause = True
sine_pitch = 0
sine_pan = 0
instructions_playing = False
sine_playing = False
score_playing = False
host_ip = "127.0.0.1"
host_port_1 = 5005 # you are player 1 if you talk to this port
host_port_2 = 5006
player_1_ip = "127.0.0.1"
player_2_ip = "127.0.0.1"
player_1_port = 5007
player_2_port = 5008
player_ip = "127.0.0.1"
player_port = 0
host_port = 0
paddle_1 = 225
paddle_2 = 225
paddle_1_direction = 0
paddle_2_direction = 0
volume = 0
sine_wave = None
# store how many powerups each player has
p1_activated = 0
p2_activated = 0
last_power_up = time.time()
power_up_duration = 10
power_up_type = 0
speech_recognition_active = True
latest_voice_command = None
level = 1
game_start = 0
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description='Program description')
parser.add_argument('mode', help='host, player (ip & port required)')
parser.add_argument('--host_ip', type=str, required=False)
parser.add_argument('--host_port', type=int, required=False)
parser.add_argument('--player_ip', type=str, required=False)
parser.add_argument('--player_port', type=int, required=False)
parser.add_argument('--debug', action='store_true', help='show debug info')
args = parser.parse_args()
print("> run as " + args.mode)
mode = args.mode
if (args.host_ip):
host_ip = args.host_ip
if (args.host_port):
host_port = args.host_port
if (args.player_ip):
player_ip = args.player_ip
if (args.player_port):
player_port = args.player_port
if (args.debug):
debug = True
# Host
# -------------------------------------#
# used to send messages to players (game state etc)
client_1 = None
client_2 = None
# functions receiving messages from players (game control etc)
def on_receive_game_level(address, args, l):
global level
level = l
if (client_1 != None):
client_1.send_message("/level", l)
if (client_2 != None):
client_2.send_message("/level", l)
def on_receive_game_start(address, args, g):
global game_start, pause, sine_wave
game_start = g
if game_start == 2:
pause = True
if (client_1 != None):
client_1.send_message("/gen_stop", 0)
client_1.send_message("/instructions", 0)
if (client_2 != None):
client_2.send_message("/gen_stop", 0)
client_2.send_message("/instructions", 0)
if game_start == 0:
pause = True
if (client_1 != None):
client_1.send_message("/gen_stop", 0)
if (client_2 != None):
client_2.send_message("/gen_stop", 0)
if game_start == 1:
pause = False
if (client_1 != None):
client_1.send_message("/gen_sine", 0)
if (client_2 != None):
client_2.send_message("/gen_sine", 0)
def on_receive_paddle_1(address, args, paddle):
global paddle_1
paddle_1 = paddle
if (client_1 != None):
client_1.send_message("/paddle", [paddle_1, 0, 1])
def on_receive_movepaddle_1(address, args, direction):
global paddle_1, paddle_1_direction
paddle_1_direction = direction
def on_receive_movepaddle_2(address, args, direction):
global paddle_2, paddle_2_direction
paddle_2_direction = direction
def on_receive_connection_1(address, args, ip):
global client_1
global player_1_ip
player_1_ip = ip
client_1 = udp_client.SimpleUDPClient(player_1_ip, player_1_port)
print("> player 1 connected: " + ip)
def on_receive_paddle_2(address, args, paddle):
global paddle_2
if (client_2 != None):
client_2.send_message("/paddle", [0, paddle_2, 2])
paddle_2 = paddle
def on_receive_quit(address, args, b):
global quit, microphone_thread, speech_thread
print("Quitting...")
quit = True
server_1.shutdown()
server_2.shutdown()
microphone_thread.join()
speech_thread.join()
def on_receive_connection_2(address, args, ip):
global client_2
global player_2_ip
player_2_ip = ip
client_2 = udp_client.SimpleUDPClient(player_2_ip, player_2_port)
print("> player 2 connected: " + ip)
def on_receive_bigpaddle_1(address, args, b):
global p1_activated
global last_power_up
if (power_up_type == 3):
p1_activated = 1
last_power_up = time.time()
if (client_1 != None):
client_1.send_message("/p1bigpaddle", 0)
if (client_2 != None):
client_2.send_message("/p1bigpaddle", 0)
def on_receive_bigpaddle_2(address, args, b):
global p2_activated
global last_power_up
if (power_up_type == 4):
p2_activated = 1
last_power_up = time.time()
if (client_1 != None):
client_1.send_message("/p2bigpaddle", 0)
if (client_2 != None):
client_2.send_message("/p2bigpaddle", 0)
def on_receive_play_scores(address, *args):
score_text = args[0] + args[1]
play_scores(score_text)
dispatcher_1 = dispatcher.Dispatcher()
dispatcher_1.map("/p1", on_receive_paddle_1, "p")
dispatcher_1.map("/l", on_receive_game_level, "l")
dispatcher_1.map("/g", on_receive_game_start, "g")
dispatcher_1.map("/c", on_receive_connection_1, "c")
dispatcher_1.map("/b", on_receive_bigpaddle_1, "b")
dispatcher_1.map("/mp", on_receive_movepaddle_1, "mp")
dispatcher_1.map("/q", on_receive_quit, "q")
dispatcher_1.map("/s", on_receive_play_scores, "s")
dispatcher_2 = dispatcher.Dispatcher()
dispatcher_2.map("/p2", on_receive_paddle_2, "p")
dispatcher_2.map("/l", on_receive_game_level, "l")
dispatcher_2.map("/g", on_receive_game_start, "g")
dispatcher_2.map("/c", on_receive_connection_2, "c")
dispatcher_2.map("/b", on_receive_bigpaddle_2, "b")
dispatcher_2.map("/mp", on_receive_movepaddle_2, "mp")
dispatcher_2.map("/q", on_receive_quit, "q")
dispatcher_2.map("/s", on_receive_play_scores, "s")
# -------------------------------------#
# Player
# TODO: add audio output here so that you can play the game eyes-free
# -------------------------------------#
#play some fun sounds?
'''s = Server().boot()
s.start()'''
print(sd.query_devices())
#SET DESIRED OUTPUT/INPUT HERE IF YOU'RE TESTING @TAs
sd.default.device = [0, 1]
sine_wave = SineWave(pitch = 440, pitch_per_second=10, channels=2)
def hit():
playsound('hit.wav', False)
def bounce():
playsound('bounce.wav', False)
def paddle_limit():
playsound('limit.wav', False)
def wii_groan():
playsound('wii_groan.mp3', False)
def power_available(power):
if power < 3:
playsound('freeze_ray.wav', False)
else:
playsound('power_available.wav', False)
def level_sound(level):
if level == 1:
playsound('level_1.mp3', False)
elif level == 2:
playsound('level_2.mp3', False)
else:
playsound('level_3.mp3', False)
def instructions():
global sine_wave, instructions_playing
sfx = ['sound_when/sound_when_paddle.mp3','sound_when/sound_when_OOB.mp3',
'sound_when/sound_when_walls.mp3', 'sound_when/sound_when_powerup.mp3',
'sound_when/sound_when_frozen.mp3']
for idx, s in enumerate(sfx):
playsound(s, False)
time.sleep(2)
if idx == 0:
hit()
elif idx == 1:
wii_groan()
        elif idx == 2:
            bounce()
        elif idx == 3:
            power_available(3)
        elif idx == 4:
            power_available(1)
time.sleep(3)
if not instructions_playing:
instructions_playing = True
playsound('instructions.wav', False)
instructions_playing = False
def power_sound(player):
if player == 1:
playsound('p1_power.mp3', False)
elif player == 2:
playsound('p2_power.mp3', False)
def sine_wave_gen():
global paddle_1, paddle_2, game_start, sine_wave, quit, sine_playing
sine_wave = SineWave(pitch = sine_pitch, pitch_per_second=10, channels=2)
if not sine_playing:
sine_playing = True
sine_wave.play()
def sine_wave_stop():
global paddle_1, paddle_2, game_start, sine_wave, quit, sine_playing
sine_wave.stop()
sine_playing = False
def update_sinewave(pan=None, pitch=None, vol=None):
global sine_wave, paddle_1, paddle_2
if pitch is not None:
sine_wave.set_pitch(pitch)
if pan is not None:
sine_wave.channel_side = pan
if vol is not None:
sine_wave.set_volume(volume)
def play_scores(text):
score_obj = gTTS(text=text, lang='en')
f = "score.mp3"
score_obj.save(f)
playsound("score.mp3", False)
return
if mode == 'p1':
host_port = host_port_1
if mode == 'p2':
host_port = host_port_2
if (mode == 'p1') or (mode == 'p2'):
client = udp_client.SimpleUDPClient(host_ip, host_port)
print("> connected to server at "+host_ip+":"+str(host_port))
# functions receiving messages from host
def on_receive_ball(address, *args):
max_pitch = 15
min_pitch = -5
pitch = min_pitch + ((675 - args[1]) / 675) * (max_pitch - min_pitch)
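    # e.g. y = 0 (top of the field) maps to pitch 15 and y = 675 (bottom) maps to pitch -5;
    # the left half of the field (x < 600) pans the tone to channel 1, the right half to channel 0.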
if args[0] < 600:
pan = 1
elif 600 <= args[0]:
pan = 0
update_sinewave(pan, pitch)
#print("> ball position: (" + str(args[0]) + ", " + str(args[1]) + ")")
def on_receive_paddle(address, *args):
#print("> paddle position: (" + str(args[0]) + ", " + str(args[1]) + ")")
#print("curr_x: " + str(args[1]))
#print("curr_y: " + str(args[0]))
if args[2] == 1:
volume = 4 - (args[0] / 675)
if args[0] > 250:
volume = volume * -1
#print("receive paddle " + str(volume))
update_sinewave(pan=None, pitch=None, vol=volume)
elif args[2] == 2:
volume = 4 - (args[1] / 675)
if args[1] > 250:
volume = volume * -1
#print("receive paddle " + str(volume))
update_sinewave(pan=None, pitch=None, vol=volume)
def on_receive_hitpaddle(address, *args):
hit()
#print("> ball hit at paddle " + str(args[0]) )
pass
def on_receive_ballout(address, *args):
#print("> ball went out on left/right side: " + str(args[0]) )
wii_groan()
def on_receive_gen_sine(address, *args):
sine_wave_gen()
def on_receive_gen_stop(address, *args):
sine_wave_stop()
def on_receive_ballbounce(address, *args):
bounce()
#print("> ball bounced on up/down side: " + str(args[0]) )
def on_receive_scores(address, *args):
global sine_wave
p1_score = str(args[0])
p2_score = str(args[1])
score = "" + p1_score + " to " + p2_score
play_scores(score)
#print("> scores now: " + str(args[0]) + " vs. " + str(args[1]))
def on_receive_level(address, *args):
level_sound(args[0])
def on_receive_powerup(address, *args):
power_available(args[0])
#print("> powerup now: " + str(args[0]))
# 1 - freeze p1
# 2 - freeze p2
    # 3 - a big paddle becomes available to p1 (takes effect only if p1 activates it)
    # 4 - a big paddle becomes available to p2 (takes effect only if p2 activates it)
def on_receive_instructions(address, *args):
instructions()
def on_receive_p1_limit(address, *args):
print("p1's paddle hit the limit of screen")
paddle_limit()
def on_receive_p2_limit(address, *args):
print("p2's paddle hit the limit of screen")
paddle_limit()
def on_receive_p1_bigpaddle(address, *args):
print("> p1 has a big paddle now")
power_sound(1)
# when p1 activates their big paddle
def on_receive_p2_bigpaddle(address, *args):
print("> p2 has a big paddle now")
power_sound(2)
# when p2 activates their big paddle
dispatcher_player = dispatcher.Dispatcher()
dispatcher_player.map("/ball", on_receive_ball)
dispatcher_player.map("/paddle", on_receive_paddle)
dispatcher_player.map("/ballout", on_receive_ballout)
dispatcher_player.map("/ballbounce", on_receive_ballbounce)
dispatcher_player.map("/hitpaddle", on_receive_hitpaddle)
dispatcher_player.map("/scores", on_receive_scores)
dispatcher_player.map("/playscores", on_receive_play_scores)
dispatcher_player.map("/level", on_receive_level)
dispatcher_player.map("/p1limit", on_receive_p1_limit)
dispatcher_player.map("/p2limit", on_receive_p2_limit)
dispatcher_player.map("/powerup", on_receive_powerup)
dispatcher_player.map("/p1bigpaddle", on_receive_p1_bigpaddle)
dispatcher_player.map("/p2bigpaddle", on_receive_p2_bigpaddle)
dispatcher_player.map("/p1limit", on_receive_p1_limit)
dispatcher_player.map("/p2limit", on_receive_p2_limit)
dispatcher_player.map("/instructions", on_receive_instructions)
dispatcher_player.map("/gen_sine", on_receive_gen_sine)
dispatcher_player.map("/gen_stop", on_receive_gen_stop)
# -------------------------------------#
# Player: speech recognition library
# -------------------------------------#
# threading so that listenting to speech would not block the whole program
# speech recognition (default using google, requiring internet)
# PyAudio object.
p = pyaudio.PyAudio()
# Open stream.
stream = p.open(format=pyaudio.paFloat32,
channels=1, rate=44100, input=True,
frames_per_buffer=1024)
# Aubio's pitch detection.
pDetection = aubio.pitch("default", 2048,
2048//2, 44100)
# Set unit.
pDetection.set_unit("Hz")
pDetection.set_silence(-40)
# -------------------------------------#
# keeping score of points:
p1_score = 0
p2_score = 0
p1_score_text = "zero"
p2_score_text = "zero"
selected_device_index = 1
recognizer = sr.Recognizer()
# Player: speech recognition functions using google api
# TODO: you can use this for input, add function like "client.send_message()" to control the host game
# -------------------------------------#
'''print("Available audio input devices:")
for i in range(p.get_device_count()):
dev = p.get_device_info_by_index(i)
if dev['maxInputChannels'] > 0:
print(f"Device index {i}: {dev['name']}")'''
def sense_microphone():
global quit
global paddle_1, paddle_2, mode
global debug
    # Continuously sample the microphone; without a loop this thread would read a single chunk and exit.
    while not quit:
        data = stream.read(1024, exception_on_overflow=False)
        samples = num.frombuffer(data, dtype=aubio.float_type)
        # Compute the pitch of the microphone input
        pitch = pDetection(samples)[0]
        # Compute the energy (volume) of the mic input
        volume = num.sum(samples**2)/len(samples)
        # Format the volume output so that at most
        # it has six decimal numbers.
        volume = "{:.6f}".format(volume)
        # uncomment these lines if you want pitch or volume
        if debug:
            print("pitch "+str(pitch)+" volume "+str(volume))
def listen_to_speech():
global quit, latest_voice_command
print("[speech recognition] Thread started")
mic = WhisperMic(model="base", english=True, verbose=False, energy=300, pause=0.5, save_file=False)
while not quit:
try:
recog_results = mic.listen()
latest_voice_command = recog_results.lower()
print(f"[speech recognition] Recognized: {latest_voice_command}")
speech_processor(latest_voice_command)
except sr.UnknownValueError:
print("[speech recognition] Whisper could not understand audio")
except sr.RequestError as e:
print(f"[speech recognition] Could not request results from Whisper service; {e}")
py = 0
def speech_processor(command):
global quit
global paddle_1, paddle_2, paddle_1_direction, paddle_2_direction, py
number_commands = {'one':1, 'two':2, 'three':3, 'four':4, 'five':5, 'six':6, 'seven':7, 'eight':8, 'nine':9, 'ten':10}
if 'start' in command or 'play' in command:
client.send_message('/g', 1)
if 'pause' in command:
client.send_message('/g', 0)
if 'menu' in command:
client.send_message('/g', 0)
if 'quit' in command:
client.send_message('/q', 0)
if 'level one' in command or 'level 1' in command:
client.send_message('/l', 1)
elif 'level two' in command or 'level 2' in command:
client.send_message('/l', 2)
elif 'level three' in command or 'level 3' in command:
client.send_message('/l', 3)
for num_com, num in number_commands.items():
if num_com in command or str(num) in command:
            if mode == 'p1':
                py = (num/10)*675
                client.send_message('/p1', py)
                break
            elif mode == 'p2':
                py = (num/10)*675  # compute the target position here too; the original reused a stale global value
                client.send_message('/p2', py)
                break
if 'power' in command:
client.send_message('/b',0)
if 'up' in command:
client.send_message('/mp',-1)
elif 'down' in command:
client.send_message('/mp', 1)
elif 'stop' in command:
client.send_message('/mp', 0)
if 'instructions' in command or 'help' in command:
client.send_message('/g',2)
if 'score' in command or 'scores' in command:
client.send_message('/s', 0)
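# Illustrative mappings: saying "level two" sends /l 2 to the host; as p1, saying "seven" sends /p1
# with py = (7/10)*675 = 472.5, i.e. 70% of the field height measured down from the top.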
# -------------------------------------#
# Player: pitch & volume detection
# TODO: you can use this for input, add function like "client.send_message()" to control the host game
# -------------------------------------#
# -------------------------------------#
# Host game mechanics: no need to change below
class Ball(object):
def __init__(self):
self.debug = 0
self.TO_SIDE = 5
self.x = 50.0 + self.TO_SIDE
self.y = float( random.randint(0, 675) )
self.x_old = self.x # coordinates in the last frame
self.y_old = self.y
self.vec_x = 2**0.5 / 2 # sqrt(2)/2
self.vec_y = random.choice([-1, 1]) * 2**0.5 / 2
class Player(object):
def __init__(self, NUMBER, screen_WIDTH=1200):
"""NUMBER must be 0 (left player) or 1 (right player)."""
self.NUMBER = NUMBER
self.x = 50.0 + (screen_WIDTH - 100) * NUMBER
self.y = 50.0
self.last_movements = [0]*4 # short movement history
# used for bounce calculation
self.up_key, self.down_key = None, None
if NUMBER == 0:
self.up_key = pyglet.window.key.W
self.down_key = pyglet.window.key.S
elif NUMBER == 1:
self.up_key = pyglet.window.key.O
self.down_key = pyglet.window.key.L
class Model(object):
"""Model of the entire game. Has two players and one ball."""
def __init__(self, DIMENSIONS=(1200, 675)):
"""DIMENSIONS is a tuple (WIDTH, HEIGHT) of the field."""
# OBJECTS
WIDTH = DIMENSIONS[0]
self.players = [Player(0, WIDTH), Player(1, WIDTH)]
self.ball = Ball()
# DATA
self.pressed_keys = set() # set has no duplicates
self.quit_key = pyglet.window.key.Q
self.p1activate_key = pyglet.window.key.E
self.p2activate_key = pyglet.window.key.P
self.menu_key = pyglet.window.key.SPACE
self.level_1_key = pyglet.window.key._1
self.level_2_key = pyglet.window.key._2
self.level_3_key = pyglet.window.key._3
self.instructions_key = pyglet.window.key.I
self.speed = 4 # in pixels per frame
self.ball_speed = self.speed #* 2.5
self.WIDTH, self.HEIGHT = DIMENSIONS
# STATE VARS
self.menu = 0 # 0: menu, 1: game, 2: instructions
self.level = 1
self.paused = True
self.i = 0 # "frame count" for debug
self.powerup = 0 # (0=none, 1=player_1, 2=player_2)
def reset_ball(self, who_scored):
"""Place the ball anew on the loser's side."""
if debug: print(str(who_scored)+" scored. reset.")
self.ball.y = float( random.randint(0, self.HEIGHT) )
self.ball.vec_y = random.choice([-1, 1]) * 2**0.5 / 2
if who_scored == 0:
self.ball.x = self.WIDTH - 50.0 - self.ball.TO_SIDE
self.ball.vec_x = - 2**0.5 / 2
elif who_scored == 1:
self.ball.x = 50.0 + self.ball.TO_SIDE
self.ball.vec_x = + 2**0.5 / 2
elif who_scored == "debug":
self.ball.x = 70 # in paddle atm -> usage: hold f
self.ball.y = self.ball.debug
self.ball.vec_x = -1
self.ball.vec_y = 0
self.ball.debug += 0.2
if self.ball.debug > 100:
self.ball.debug = 0
def check_if_oob_top_bottom(self):
"""Called by update_ball to recalc. a ball above/below the screen."""
# bounces. if -- bounce on top of screen. elif -- bounce on bottom.
b = self.ball
if b.y - b.TO_SIDE < 0:
illegal_movement = 0 - (b.y - b.TO_SIDE)
b.y = 0 + b.TO_SIDE + illegal_movement
b.vec_y *= -1
if (client_1 != None):
client_1.send_message("/ballbounce", 1)
if (client_2 != None):
client_2.send_message("/ballbounce", 1)
elif b.y + b.TO_SIDE > self.HEIGHT:
illegal_movement = self.HEIGHT - (b.y + b.TO_SIDE)
b.y = self.HEIGHT - b.TO_SIDE + illegal_movement
b.vec_y *= -1
if (client_1 != None):
client_1.send_message("/ballbounce", 2)
if (client_2 != None):
client_2.send_message("/ballbounce", 2)
def check_if_oob_sides(self):
global p2_score, p1_score
"""Called by update_ball to reset a ball left/right of the screen."""
b = self.ball
if b.x + b.TO_SIDE < 0: # leave on left
self.reset_ball(1)
p2_score+=1
if (client_1 != None):
client_1.send_message("/ballout", 1)
client_1.send_message("/scores", [p1_score, p2_score])
if (client_2 != None):
client_2.send_message("/ballout", 1)
client_2.send_message("/scores", [p1_score, p2_score])
elif b.x - b.TO_SIDE > self.WIDTH: # leave on right
p1_score+=1
self.reset_ball(0)
if (client_1 != None):
client_1.send_message("/ballout", 2)
client_1.send_message("/scores", [p1_score, p2_score])
if (client_2 != None):
client_2.send_message("/ballout", 2)
client_2.send_message("/scores", [p1_score, p2_score])
def check_if_paddled(self):
"""Called by update_ball to recalc. a ball hit with a player paddle."""
b = self.ball
p0, p1 = self.players[0], self.players[1]
angle = math.acos(b.vec_y)
factor = random.randint(5, 15)
cross0 = (b.x < p0.x + 2*b.TO_SIDE) and (b.x_old >= p0.x + 2*b.TO_SIDE)
cross1 = (b.x > p1.x - 2*b.TO_SIDE) and (b.x_old <= p1.x - 2*b.TO_SIDE)
if p1_activated == 1 and power_up_type == 3:
bounding_1 = 25 * 5
else:
bounding_1 = 25 * 2
if cross0 and -bounding_1 < b.y - p0.y < bounding_1:
hit()
if (client_1 != None):
client_1.send_message("/hitpaddle", 1)
if (client_2 != None):
client_2.send_message("/hitpaddle", 1)
if debug: print("hit at "+str(self.i))
illegal_movement = p0.x + 2*b.TO_SIDE - b.x
b.x = p0.x + 2*b.TO_SIDE + illegal_movement
# angle -= sum(p0.last_movements) / factor / self.ball_speed
b.vec_y = math.cos(angle)
b.vec_x = (1**2 - b.vec_y**2) ** 0.5
else:
if p2_activated == 1 and power_up_type == 4:
bounding = 25 * 5
else:
bounding = 25 * 2
if cross1 and -bounding < b.y - p1.y < bounding:
hit()
if (client_1 != None):
client_1.send_message("/hitpaddle", 2)
if (client_2 != None):
client_2.send_message("/hitpaddle", 2)
if debug: print("hit at "+str(self.i))
illegal_movement = p1.x - 2*b.TO_SIDE - b.x
b.x = p1.x - 2*b.TO_SIDE + illegal_movement
# angle -= sum(p1.last_movements) / factor / self.ball_speed
b.vec_y = math.cos(angle)
b.vec_x = - (1**2 - b.vec_y**2) ** 0.5
# -------------- Ball position: you can find it here -------
def update_ball(self):
"""
Update ball position with post-collision detection.
I.e. Let the ball move out of bounds and calculate
where it should have been within bounds.
When bouncing off a paddle, take player velocity into
consideration as well. Add a small factor of random too.
"""
global client_1
global client_2
global snd_left, snd_right
self.i += 1 # "debug"
b = self.ball
b.x_old, b.y_old = b.x, b.y
b.x += b.vec_x * self.ball_speed
b.y += b.vec_y * self.ball_speed
self.check_if_oob_top_bottom() # oob: out of bounds
self.check_if_oob_sides()
self.check_if_paddled()
if (client_1 != None):
client_1.send_message("/ball", [b.x, b.y])
if (client_2 != None):
client_2.send_message("/ball", [b.x, b.y])
def toggle_menu(self):
global game_start
if (self.menu != 0):
self.menu = 0
game_start = 0
self.paused = True
else:
self.menu = 1
game_start = 1
self.paused = False
    def toggle_instructions(self):
        global game_start
        # This runs on the host, where there is no `client` object; notify connected players directly instead.
        if self.menu != 2:
            self.menu = 2
            game_start = 2
            self.paused = True
            if (client_1 != None):
                client_1.send_message("/instructions", 0)
            if (client_2 != None):
                client_2.send_message("/instructions", 0)
        else:
            self.menu = 0
            game_start = 0
            self.paused = False
def update(self):
"""Work through all pressed keys, update and call update_ball."""
global paddle_1, paddle_1_direction
global paddle_2, paddle_2_direction
global p1_activated
global p2_activated
global snd_left, snd_right, pause, latest_voice_command
pks = self.pressed_keys
if quit:
sys.exit(1)
if self.quit_key in pks:
exit(0)
if self.menu_key in pks:
self.toggle_menu()
pks.remove(self.menu_key)
if self.instructions_key in pks:
self.toggle_instructions()
pks.remove(self.instructions_key)
if self.p1activate_key in pks:
# print("E pressed to send power up on 1")
if power_up_type == 3:
p1_activated = 1
last_power_up = time.time() #pedro added 2023
# else:
#print("... but there's none active for P1")
pks.remove(self.p1activate_key)
if self.p2activate_key in pks:
# print("P pressed to send power up by P2")
if power_up_type == 4:
p2_activated = 1
last_power_up = time.time() #pedro added 2023
# else:
# print("... but there's none active for P2")
pks.remove(self.p2activate_key)
if self.level_1_key in pks:
self.level = 1
self.ball_speed = self.speed
pks.remove(self.level_1_key)
if self.level_2_key in pks:
self.level = 2
self.ball_speed = self.speed * 1.5
pks.remove(self.level_2_key)
if self.level_3_key in pks:
self.level = 3
self.ball_speed = self.speed * 2
pks.remove(self.level_3_key)
if pyglet.window.key.R in pks and debug:
self.reset_ball(1)
if pyglet.window.key.F in pks and debug:
self.reset_ball("debug")
if not self.paused:
p1 = self.players[0]
p2 = self.players[1]
if power_up_type == 1:
pass
else:
if (paddle_1 != 0):
p1.y = paddle_1
paddle_1 = 0
if p1.up_key in pks and p1.down_key not in pks:
p1.y -= (1.5*self.speed)
elif p1.up_key not in pks and p1.down_key in pks:
p1.y += (1.5*self.speed)
if power_up_type == 1:
pass
else:
if paddle_1_direction == -1:
if p1.y - self.speed > 0:
p1.y -= self.speed
else:
paddle_1_direction = 0
client_1.send_message("/p1limit", 0)
if (client_1 != None):
client_1.send_message("/paddle", [p1.y, p2.y, 1])
if paddle_1_direction == 1:
if p1.y + self.speed < 675:
p1.y += self.speed
else:
paddle_1_direction = 0
if (client_1 != None):
client_1.send_message("/paddle", [p1.y, p2.y, 1])
if power_up_type == 2:
pass
else:
if (paddle_2 != 0):
p2.y = paddle_2
paddle_2 = 0
if p2.up_key in pks and p2.down_key not in pks:
p2.y -= (1.5*self.speed)
elif p2.up_key not in pks and p2.down_key in pks:
p2.y += (1.5*self.speed)
if power_up_type == 2:
pass
else:
if paddle_2_direction == -1:
if p2.y - self.speed > 0:
p2.y -= self.speed
else:
paddle_2_direction = 0
client_2.send_message("/p2limit", 0)
if (client_2 != None):
client_2.send_message("/paddle", [p1.y, p2.y, 2])
if paddle_2_direction == 1:
if p2.y + self.speed < 675:
p2.y += self.speed
else:
paddle_2_direction = 0
if (client_2 != None):
client_2.send_message("/paddle", [p1.y, p2.y, 2])
self.update_ball()
class Controller(object):
def __init__(self, model):
self.m = model
def on_key_press(self, symbol, modifiers):
# `a |= b`: mathematical or. add to set a if in set a or b.
# equivalent to `a = a | b`.
# p0 holds down both keys => p1 controls break # PYGLET!? D:
self.m.pressed_keys |= set([symbol])
def on_key_release(self, symbol, modifiers):
if symbol in self.m.pressed_keys:
self.m.pressed_keys.remove(symbol)
def update(self):
self.m.update()
class View(object):
def __init__(self, window, model):
self.w = window
self.m = model
# ------------------ IMAGES --------------------#
# "white_square.png" is a 10x10 white image
lplayer = pyglet.resource.image("white_square.png")
self.player_spr = pyglet.sprite.Sprite(lplayer)
def redraw_game(self):
# ------------------ PLAYERS --------------------#
TO_SIDE = self.m.ball.TO_SIDE
idx = 0
for p in self.m.players:
idx = idx + 1
self.player_spr.x = p.x//1 - TO_SIDE
            # oh god! pyglet's (0, 0) is bottom left! madness.
self.player_spr.y = self.w.height - (p.y//1 + TO_SIDE)
self.player_spr.draw() # these 3 lines: pretend-paddle
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
# print ("----")
# print (p1_activated)
# print (p2_activated)
# print(power_up_type)
if idx == 2 and p2_activated == 1 and power_up_type == 4:
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 14*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
# do the same for p1
if idx == 1 and p1_activated == 1 and power_up_type == 3:
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y += 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 14*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
self.player_spr.y -= 2*TO_SIDE; self.player_spr.draw()
# ------------------ BALL --------------------#
self.player_spr.x = self.m.ball.x//1 - TO_SIDE
self.player_spr.y = self.w.height - (self.m.ball.y//1 + TO_SIDE)
self.player_spr.draw()
def redraw_menu(self):
global level
self.m.level = level
if (level == 1):
self.m.ball_speed = self.m.speed
elif (level == 2):
self.m.ball_speed = self.m.speed*2
elif (level == 3):
self.m.ball_speed = self.m.speed*3
self.start_label = pyglet.text.Label("press space to start", font_name=None, font_size=36, x=self.w.width//2, y=self.w.height//2, anchor_x='center', anchor_y='center')
self.level_label = pyglet.text.Label("easy | hard | insane", font_name=None, font_size=24, x=self.w.width//2, y=self.w.height//2+100, anchor_x='center', anchor_y='center')
if (self.m.level == 1):
self.level_indicator_label = pyglet.text.Label("------", font_name=None, font_size=24, x=self.w.width//2-105, y=self.w.height//2+80, anchor_x='center', anchor_y='center')
elif (self.m.level == 2):
self.level_indicator_label = pyglet.text.Label("------", font_name=None, font_size=24, x=self.w.width//2-12, y=self.w.height//2+80, anchor_x='center', anchor_y='center')
elif (self.m.level == 3):
self.level_indicator_label = pyglet.text.Label("---------", font_name=None, font_size=24, x=self.w.width//2+92, y=self.w.height//2+80, anchor_x='center', anchor_y='center')
self.start_label.draw()
self.level_label.draw()
self.level_indicator_label.draw()
def redraw_instructions(self):
window_height = self.w.height
instruction_y_start = window_height - 60
self.instructions_label = pyglet.text.Label("Instructions", font_name=None, font_size=36, x=self.w.width//2, y=instruction_y_start, anchor_x='center', anchor_y='center', color=(255, 255, 255, 255))
self.instructions_label.draw()
instruction_lines = [
"Listen to the pitch to find the y-position of the ball. Pay attention to the stereo sound to find the x-position.",
"You will hear sounds when you hit the ball and when the ball bounces on the walls.",
"When your paddle is closer to the top of the screen, the ball's frequency will be louder, and vice-versa.",
"1. To start the game, say 'start' or 'play'",
"2. To pause the game, say 'pause' or 'menu'",
"3. To quit the game, say 'quit'",
"4. To choose a level, say 'Level + [Level Number]'",
"5. To move your paddle continuously, say 'up' or 'down' then 'stop' when you want to stop moving",
"6. To move your paddle incrementally, say a number between 1 and 10, 10 being the top",
"7. To activate a powerup, say 'power'"
]
line_height = 20
vertical_spacing = 20
current_y = instruction_y_start - 60
for line in instruction_lines:
label = pyglet.text.Label(line, font_name=None, font_size=18, x=self.w.width//2, y=current_y, anchor_x='center', anchor_y='center', color=(255, 255, 255, 255))
label.draw()
current_y -= (line_height + vertical_spacing)
class Window(pyglet.window.Window):
def __init__(self, *args, **kwargs):
DIM = (1200, 675) # DIMENSIONS
super(Window, self).__init__(width=DIM[0], height=DIM[1],
*args, **kwargs)
# ------------------ MVC --------------------#
the_window = self
self.model = Model(DIM)
self.view2 = View(the_window, self.model)
self.controller = Controller(self.model)
# ------------------ CLOCK --------------------#
fps = 60.0
pyglet.clock.schedule_interval(self.update, 1.0/fps)
#pyglet.clock.set_fps_limit(fps)
self.score_label = pyglet.text.Label(str(p1_score)+':'+str(p2_score), font_name=None, font_size=36, x=self.width//2, y=self.height//2, anchor_x='center', anchor_y='center')
self.powerup_status_label = pyglet.text.Label("status: ", font_name=None, font_size=16, x=self.width//2, y=self.height//8, anchor_x='center', anchor_y='center')
def on_key_release(self, symbol, modifiers):
self.controller.on_key_release(symbol, modifiers)
def on_key_press(self, symbol, modifiers):
self.controller.on_key_press(symbol, modifiers)
def on_close(self):
pyglet.app.exit()
def update(self, *args, **kwargs):
global last_power_up
global power_up_duration
global power_up_type
global p1_activated
global p2_activated
global quit
# make more efficient (save last position, draw black square
# over that and the new square, don't redraw _entire_ frame.)
self.clear()
self.controller.update()
self.model.menu = game_start
if quit:
self.close()
if (game_start == 1):
self.model.paused = False
else:
self.model.paused = True
if self.model.menu == 2:
self.view2.redraw_instructions()
elif self.model.menu == 1:
self.view2.redraw_game()
self.score_label.draw()
else:
self.view2.redraw_menu()
if game_start == 1:
if (time.time() > last_power_up + random.randint(20,32)):
last_power_up = time.time()
power_up_type = random.randint(1,4)
# print("new powerup: " + str(power_up_type))
# 1 - freeze p1
# 2 - freeze p2
                # 3 - adds a big paddle to p1, not used yet
                # 4 - adds a big paddle to p2, not used yet
if (client_1 != None):
                    # fix power up you / opponent freeze
client_1.send_message("/powerup", power_up_type)
if (client_2 != None):
client_2.send_message("/powerup", power_up_type)
if (power_up_type != 0 and time.time() > last_power_up + power_up_duration):
# print("reset powerup")
power_up_type = 0
p1_activated = 0
p2_activated = 0
if (client_1 != None):
client_1.send_message("/powerup", 0)
if (client_2 != None):
client_2.send_message("/powerup", 0)
self.score_label.text = str(p1_score)+':'+str(p2_score)
if power_up_type == 1:
power_up_status_add = " P1 is frozen!"
elif power_up_type == 2:
power_up_status_add = " P2 is frozen!"
elif power_up_type == 3:
power_up_status_add = " P1 could use big-paddle now!"
elif power_up_type == 4:
power_up_status_add = " P2 could use big-paddle now!"
else:
power_up_status_add = " no active power ups"
self.powerup_status_label.text = "powerup status: " + power_up_status_add
self.powerup_status_label.draw()
if mode == 'host':
# OSC thread
# -------------------------------------#
server_1 = osc_server.ThreadingOSCUDPServer((host_ip, host_port_1), dispatcher_1)
server_1_thread = threading.Thread(target=server_1.serve_forever)
server_1_thread.daemon = True
server_1_thread.start()
server_2 = osc_server.ThreadingOSCUDPServer((host_ip, host_port_2), dispatcher_2)
server_2_thread = threading.Thread(target=server_2.serve_forever)
server_2_thread.daemon = True
server_2_thread.start()
print("> server opens at ip: "+host_ip)
print("> instruction: player 1 connects to "+str(host_port_1) + ", listen at "+str(player_1_port))
print("> instruction: player 2 connects to "+str(host_port_2) + ", listen at "+str(player_2_port))
# -------------------------------------#
# Host: pygame starts
if mode == 'host':
window = Window()
pyglet.app.run()
if (mode == 'p1') or (mode == 'p2'):
microphone_thread = threading.Thread(target=sense_microphone, args=())
microphone_thread.daemon = True
microphone_thread.start()
speech_thread = threading.Thread(target=listen_to_speech, args=())
speech_thread.daemon = True
speech_thread.start()
# Player
if mode == 'p1':
player_port = player_1_port
if mode == 'p2':
player_port = player_2_port
if (mode == 'p1') or (mode == 'p2'):
# OSC thread
# -------------------------------------#
player_server = osc_server.ThreadingOSCUDPServer((player_ip, player_port), dispatcher_player)
player_server_thread = threading.Thread(target=player_server.serve_forever)
player_server_thread.daemon = True
player_server_thread.start()
# -------------------------------------#
client.send_message("/c", player_ip)
# manual input for debugging
while True:
m = input("> send: ")
cmd = m.split(' ')
if len(cmd) == 2:
client.send_message("/"+cmd[0], int(cmd[1]))
if len(cmd) == 1:
client.send_message("/"+cmd[0], 0)
# this is how client send messages to server
# send paddle position 200 (it should be between 0 - 450):
# client.send_message('/p', 200)
# set level to 3:
# client.send_message('/l', 3)
# start the game:
# client.send_message('/g', 1)
# pause the game:
# client.send_message('/g', 0)
# big paddle if received power up:
# client.send_message('/b', 0)
| [] |
2024-01-10 | ahassan275/vetting | vetting_questions.py | from langchain.document_loaders.csv_loader import CSVLoader
import re
loader = CSVLoader(file_path="_Software Vetting Request and Resolution List - SW Vetting Template.csv")
data = loader.load()
extracted_questions = [row.page_content.split('\n: ')[1].split('\n')[0] for row in data[31:74]]
# extracted_fields = []
# for string in extracted_questions:
# match = re.search(r'(\d+\.\d+)\s+(.*)', string)
# if match:
# numbered_field = match.group(1)
# text = match.group(2)
# extracted_fields.append((numbered_field, text))
#
# extracted_dict_list = [{'number': field, 'question': text} for field, text in extracted_fields]
print(extracted_questions) | [] |
2024-01-10 | ahassan275/vetting | vetting_search.py | import streamlit as st
from langchain.agents import AgentType, initialize_agent, Tool
from langchain.document_loaders import PyPDFLoader
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain_core.prompts import PromptTemplate
import openai
import os
import requests
from vetting_questions import extracted_questions
import uuid
from docx import Document
from langchain.schema import SystemMessage
import base64
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
# Set OpenAI API key
openai.api_key = os.environ["OPENAI_API_KEY"]
# GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]
# OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
# GOOGLE_CSE_ID = st.secrets["GOOGLE_CSE_ID"]
GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]
GOOGLE_CSE_ID = os.environ["GOOGLE_CSE_ID"]
def get_file_content_as_string(file_path):
with open(file_path, 'rb') as f:
binary_file_data = f.read()
return base64.b64encode(binary_file_data).decode('utf-8')
def create_download_link(file_path, file_name):
file_content = get_file_content_as_string(file_path)
href = f'<a href="data:application/vnd.openxmlformats-officedocument.wordprocessingml.document;base64,{file_content}" download="{file_name}">Download the responses</a>'
return href
@st.cache_resource
def process_document(file_path):
loader = PyPDFLoader(file_path)
pages = loader.load_and_split()
splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = splitter.split_documents(pages)
embeddings = OpenAIEmbeddings()
retriever = FAISS.from_documents(docs, embeddings).as_retriever()
return retriever
def google_search(query):
try:
endpoint = "https://www.googleapis.com/customsearch/v1"
params = {
"key": GOOGLE_API_KEY,
"cx": GOOGLE_CSE_ID,
"q": query
}
response = requests.get(endpoint, params=params)
results = response.json().get("items", [])
return [result["link"] for result in results]
except Exception as e:
st.error(f"Error during web search: {e}")
return []
import tempfile
def handle_uploaded_file(uploaded_file):
with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp:
tmp.write(uploaded_file.getvalue())
return tmp.name
def vetting_assistant_page():
st.title("Vetting Assistant Chatbot")
if "uploaded_pdf_path" not in st.session_state or "retriever" not in st.session_state:
uploaded_file = st.file_uploader("Upload a PDF containing the terms of service", type=["pdf"])
if uploaded_file:
file_path = handle_uploaded_file(uploaded_file)
st.session_state.uploaded_pdf_path = file_path
st.session_state.retriever = process_document(st.session_state.uploaded_pdf_path)
else:
st.write("Using previously uploaded PDF. If you want to use a different PDF, please refresh the page.")
app_name = st.text_input("Enter the name of the app:")
if "retriever" in st.session_state:
llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo-16k")
tools = [
Tool(
name="vetting_tool",
description="Tool for retrieving infomration related to security and privacy",
func=RetrievalQA.from_llm(llm=llm, retriever=st.session_state.retriever, return_source_documents=True)
)
]
# tool = create_retriever_tool(
# st.session_state.retriever,
# "search_terms_service",
# "Searches and returns an application's privacy and data policies and terms of use.",
# )
# tools = [tool]
agent_kwargs = {
"system_message": SystemMessage(content="You are an intelligent Vetting Assistant, "
"expertly designed to analyze and extract key "
"information from terms of service documents. "
"Your goal is to assist users in understanding "
"complex legal documents and provide clear, "
"concise answers to their queries.")
}
agent = initialize_agent(agent=AgentType.OPENAI_FUNCTIONS, tools=tools, llm=llm, agent_kwargs=agent_kwargs,
verbose=True)
# agent = create_conversational_retrieval_agent(llm, tools)
st.write("Ask any question related to the vetting process:")
query_option = st.selectbox("Choose a predefined query:", extracted_questions)
user_input = st.text_input("Your Question:", value=query_option)
if st.button('Start Vetting') and user_input:
with st.spinner('Processing your question...'):
try:
response = agent.run(user_input)
# response = agent({"input": "user_input"})
st.write(f"Answer: {response}")
except Exception as e:
st.error(f"An error occurred: {e}")
st.write("Note: The chatbot retrieves answers from the uploaded document.")
if 'running_queries' not in st.session_state:
st.session_state.running_queries = False
placeholder_message = f"{app_name} is being vetted for compliance and its policies provided in context. Does {app_name} meet this criteria?"
all_queries = [f"{question} {placeholder_message}" for question in extracted_questions]
if st.button('Run All Queries'):
with st.spinner('Processing all queries...'):
st.session_state.running_queries = True
doc = Document()
doc.add_heading('Vetting Assistant Responses', 0)
for question in all_queries:
if not st.session_state.running_queries:
break
try:
response = agent.run(question)
doc.add_heading('Q:', level=1)
doc.add_paragraph(question)
doc.add_heading('A:', level=1)
doc.add_paragraph(response)
except Exception as e:
doc.add_paragraph(f"Error for question '{question}': {e}")
doc_path = "vetting_responses.docx"
doc.save(doc_path)
st.markdown(create_download_link(doc_path, "vetting_responses.docx"), unsafe_allow_html=True)
if st.button('Stop Queries'):
st.session_state.running_queries = False
if st.button('Search Web'):
with st.spinner('Searching the web...'):
links = google_search(user_input)
st.write("Top search results:")
for link in links:
st.write(link)
def pdf_chatbot_page():
st.title("PDF-based Chatbot")
if "uploaded_pdf_path" not in st.session_state:
uploaded_file = st.file_uploader("Upload a PDF", type=["pdf"])
if uploaded_file:
file_path = handle_uploaded_file(uploaded_file)
st.session_state.uploaded_pdf_path = file_path
st.session_state.retriever = process_document(st.session_state.uploaded_pdf_path)
else:
st.write("Using previously uploaded PDF. If you want to use a different PDF, please refresh the page.")
if "retriever" in st.session_state:
llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo-16k")
tools = [
Tool(
name="pdf_tool",
description="Tool for querying based on document content",
func=RetrievalQA.from_chain_type(llm=llm, retriever=st.session_state.retriever)
)
]
agent_kwargs = {
"system_message": SystemMessage(content="You are an intelligent Vetting Assistant, "
"expertly designed to analyze and extract key "
"information from terms of service documents. "
"Your goal is to assist users in understanding "
"complex legal documents and provide clear, "
"concise answers to their queries.")
}
agent = initialize_agent(agent=AgentType.OPENAI_FUNCTIONS, tools=tools, llm=llm, agent_kwargs=agent_kwargs,
verbose=True)
instructions_container = st.container()
with instructions_container:
st.header("Instructions")
st.write("""
- This chatbot provides answers based on the content of the uploaded PDF.
- Type in your question in the chat input below.
- Adjust the slider to control the specificity of the chatbot's responses.
""")
input_container = st.container()
with input_container:
temperature = st.slider("Adjust chatbot specificity:", min_value=0.0, max_value=1.0, value=0.5, step=0.05)
llm.temperature = temperature
chat_container = st.container()
with chat_container:
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
user_input = st.text_input("Ask a question about the uploaded PDF:")
if st.button('Query PDF') and user_input:
with st.spinner('Processing your question...'):
try:
response = agent.run(user_input)
st.session_state.messages.append({"role": "assistant", "content": response})
with st.chat_message("assistant"):
st.markdown(response)
except Exception as e:
st.error(f"An error occurred: {e}")
if st.button('Search Web'):
with st.spinner('Searching the web...'):
links = google_search(user_input)
st.write("Top search results:")
for link in links:
st.write(link)
# Streamlit UI Configuration
st.set_page_config(page_title="Vetting Assistant Chatbot", layout="wide", initial_sidebar_state="expanded")
page = st.sidebar.selectbox("Choose a Tool:", ["Vetting Assistant", "PDF Chatbot"])
if page == "Vetting Assistant":
vetting_assistant_page()
elif page == "PDF Chatbot":
pdf_chatbot_page()
| [
"You are an intelligent Vetting Assistant, expertly designed to analyze and extract key information from terms of service documents. Your goal is to assist users in understanding complex legal documents and provide clear, concise answers to their queries."
] |
2024-01-10 | ahassan275/vetting | vetting_app.py | import streamlit as st
from langchain.agents import AgentType
from langchain.agents import initialize_agent, Tool
from langchain.callbacks import StreamlitCallbackHandler
from langchain.llms import OpenAI
from langchain.utilities import DuckDuckGoSearchAPIWrapper
import re
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
import pathlib
from langchain.vectorstores.faiss import FAISS
from langchain.chat_models import ChatOpenAI
import openai
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader
from pydantic import BaseModel, Field
from vetting_questions import extracted_dict_list
import random
import os
# from streamlit_agent.callbacks.capturing_callback_handler import playback_callbacks
# from streamlit_agent.clear_results import with_clear_container
#
# openai_api_key = st.secrets["OPENAI_API_KEY"]
openai.api_key = os.environ["OPENAI_API_KEY"]
MAX_API_CALLS = 25 # set your limit
# Initialize count of API calls
if "api_calls" not in st.session_state:
st.session_state.api_calls = 0
def chat_with_agent(input_text):
response = agent({"input": input_text})
return response['output']
class DocumentInput(BaseModel):
question: str = Field()
llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo-16k")
tools = []
files = [
{
"name": "dedoose-terms-of-service",
"path": "TERMS OF SERVICE.pdf",
},
]
for file in files:
loader = PyPDFLoader(file["path"])
pages = loader.load_and_split()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(pages)
embeddings = OpenAIEmbeddings()
retriever = FAISS.from_documents(docs, embeddings).as_retriever()
# Wrap retrievers in a Tool
tools.append(
Tool(
args_schema=DocumentInput,
name=file["name"],
description=f"useful when you want to answer questions about {file['name']}",
func=RetrievalQA.from_chain_type(llm=llm, retriever=retriever)
)
)
llm = ChatOpenAI(
temperature=0,
model="gpt-3.5-turbo",
)
agent = initialize_agent(
agent=AgentType.OPENAI_FUNCTIONS,
tools=tools,
llm=llm,
verbose=True,
)
st.set_page_config(page_title="Vetting Assistant")
st.title("Vetting Assistant")
for question_dict in extracted_dict_list:
user_input = question_dict['question']
st.chat_message("user").write(user_input)
with st.chat_message("assistant"):
st_callback = StreamlitCallbackHandler(st.container())
response = agent.run(user_input, callbacks=[st_callback])
st.write(response)
# Initialize chat_history
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
# Display chat messages from history
for message in st.session_state.chat_history:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if "run_count" not in st.session_state:
st.session_state.run_count = 0
if st.button('Start'):
if st.session_state.run_count >= 1:
st.warning("You have reached the maximum number of runs for this session.")
else:
st.session_state.run_count += 1
# Select 3 random questions
selected_questions = random.sample(extracted_dict_list, 3)
# Loop over selected questions
for question_dict in selected_questions:
user_input = question_dict['question']
# Save user's message to chat history
st.session_state.chat_history.append({"role": "user", "content": user_input})
with st.chat_message("assistant"):
st_callback = StreamlitCallbackHandler(st.container())
response = agent.run(user_input, callbacks=[st_callback])
st.write(response)
# Save assistant's response to chat history
st.session_state.chat_history.append({"role": "assistant", "content": response})
# for question in extracted_dict_list:
# input_text = question['question']
# response = chat_with_agent(input_text)
# print(f"Question: {input_text}")
# print(f"Response: {response}")
# print()
| [] |
2024-01-10 | ahassan275/vetting | vetting_retrieval.py | import aiohttp
import streamlit as st
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.retrievers.web_research import WebResearchRetriever
import os
import streamlit as st
import requests
import os
import openai
import requests
import json
# Google Search API setup
st.set_page_config(page_title="Vetting Assistant")
st.title("Vetting Assistant")
def google_search(query):
endpoint = "https://www.googleapis.com/customsearch/v1"
params = {
"key": os.environ["GOOGLE_API_KEY"],
"cx": os.environ["GOOGLE_CSE_ID"],
"q": query
}
response = requests.get(endpoint, params=params)
results = response.json().get("items", [])
return [result["link"] for result in results]
def get_page_content(url):
try:
response = requests.get(url)
response.raise_for_status()
return response.text
except requests.RequestException as e:
st.error(f"Error fetching content for {url}: {e}")
return None
def extract_insights_from_content(content):
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": f"Extract insights from this content: {content[:500]}..."}
# Using the first 500 characters for brevity
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
)
return response["choices"][0]["message"]["content"]
def run_conversation(question):
messages = [{"role": "user", "content": question}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
)
response_message = response["choices"][0]["message"]["content"]
return response_message
# Streamlit UI
st.header("Vetting Assistant")
st.write("Ask any question related to the vetting process:")
# User input
question = st.text_input("Ask a question:")
if question:
try:
# Get AI's response to the question
ai_response = run_conversation(question)
# Use AI's response as a query for Google Search
links = google_search(ai_response)
st.write("AI's response:")
st.write(ai_response)
st.write("Top search results based on AI's response:")
for link in links[:4]: # Only consider the first 4 links
st.write(link)
# Fetch the content of each link
content = get_page_content(link)
if content:
# Extract insights from the content using OpenAI
insights = extract_insights_from_content(content)
st.write("Extracted Insights:")
st.write(insights)
except Exception as e:
st.error(f"An error occurred: {e}")
| [
"Extract insights from this content: PLACEHOLDER...",
"You are a helpful assistant."
] |
2024-01-10 | edmondsylar/ACE_Framework | CORE_DEMOS~iACEui~src~ace~app~base~base_layer.py | import asyncio
import logging
import aio_pika
from abc import ABC
from base.settings import Settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from base import ai
from base import prompts
import openai
import re
import tiktoken
import json
from database.connection import get_db
from database.dao import (
get_layer_state_by_name,
get_layer_config,
get_active_ancestral_prompt,
update_layer_state,
)
from database.dao_models import LayerConfigModel, AncestralPromptModel
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class BaseLayer(ABC):
def __init__(self, settings: Settings):
self.settings = settings
self.loop = asyncio.get_event_loop()
self.connection = None
self.channel = None
self.llm_messages = []
self.ancestral_prompt: AncestralPromptModel
self.layer_config: LayerConfigModel
self._fetch_layer_config()
self._fetch_ancestral_prompt()
async def data_bus_message_handler(self, message: aio_pika.IncomingMessage):
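        # In debug mode, wait for the layer's process_messages flag before handling, then clear it and process one Data Bus message.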
logger.info("data_bus_message_handler")
# try:
if self.settings.debug:
await self.wait_for_signal()
with get_db() as db:
update_layer_state(
db=db,
layer_name=self.settings.role_name,
process_messages=False,
)
await self._process_message(
message=message,
source_bus="Data Bus",
)
await message.ack()
# except:
# await message.nack()
async def control_bus_message_handler(self, message: aio_pika.IncomingMessage):
logger.info("control_bus_message_handler")
# try:
if self.settings.debug:
await self.wait_for_signal()
await self._process_message(
message=message,
source_bus="Control Bus",
)
await message.ack()
# except:
# await message.nack()
async def wait_for_signal(self):
while True:
logger.info("wait_for_signal")
with get_db() as session:
process_messages = get_layer_state_by_name(
db=session,
layer_name=self.settings.role_name,
).process_messages
logger.info(f"{process_messages=}")
if process_messages:
break
await asyncio.sleep(3)
async def _process_message(
self, message: aio_pika.IncomingMessage, source_bus: str
):
logger.info(f"Processing message from {source_bus}")
# if debug == True
self._fetch_layer_config()
self._fetch_ancestral_prompt()
await self._handle_bus_message(
message=message,
source_bus=source_bus,
)
def _reason(
self,
input: str,
source_bus: str,
):
return ai.reason(
ancestral_prompt=self.ancestral_prompt.prompt,
input=input,
source_bus=source_bus,
prompts=self.layer_config.prompts,
llm_model_parameters=self.layer_config.llm_model_parameters,
llm_messages=self.llm_messages,
)
async def _handle_bus_message(self, message: aio_pika.IncomingMessage, source_bus):
logger.info(f"handling message from {source_bus}")
reasoning_completion = self._reason(
            input=message.body.decode(),
source_bus=source_bus,
)
logger.info(f"{reasoning_completion=}")
data_bus_message, control_bus_message = self._determine_action(
source_bus,
reasoning_completion,
)
logger.info(f"{data_bus_message=}")
logger.info(f"{control_bus_message=}")
logger.info(f"{ai.determine_none(data_bus_message['content'])=}")
logger.info(f"{ai.determine_none(control_bus_message['content'])=}")
if ai.determine_none(data_bus_message['content']) != "none":
await self._publish(
queue_name=self.settings.data_bus_pub_queue,
message=data_bus_message,
destination_bus="Data Bus",
source_bus=source_bus,
input_message=message,
reasoning_message=reasoning_completion,
)
self.llm_messages.append(data_bus_message)
if ai.determine_none(control_bus_message['content']) != "none":
await self._publish(
queue_name=self.settings.control_bus_pub_queue,
message=control_bus_message,
destination_bus="Control Bus",
source_bus=source_bus,
input_message=message,
reasoning_message=reasoning_completion,
)
# create setting to disable this.
self.llm_messages.append(control_bus_message)
self._compact_llm_messages()
def _determine_action(
self,
source_bus,
reasoning_completion,
):
return ai.determine_action(
ancestral_prompt=self.ancestral_prompt.prompt,
source_bus=source_bus,
reasoning_completion=reasoning_completion,
prompts=self.layer_config.prompts,
llm_model_parameters=self.layer_config.llm_model_parameters,
role_name=self.settings.role_name,
llm_messages=self.llm_messages,
)
async def _publish(
self,
queue_name,
message,
destination_bus,
source_bus,
input_message: aio_pika.IncomingMessage,
reasoning_message,
):
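        # Publish the layer's output with tracing headers that link it back to the input message and its reasoning.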
exchange = await create_exchange(
connection=self.connection,
queue_name=queue_name,
)
headers = {
"source_bus": source_bus,
"parent_message_id": str(input_message.message_id),
"destination_bus": destination_bus,
"layer_name": self.settings.role_name or "user input",
"llm_messages": json.dumps(self.llm_messages),
"config_id": str(self.layer_config.config_id),
"input": input_message.body.decode(),
"reasoning": json.dumps(reasoning_message),
}
logger.info(f"message {headers=}")
message_body = aio_pika.Message(
body=message["content"].encode(),
headers=headers,
delivery_mode=aio_pika.DeliveryMode.PERSISTENT,
content_type="text/plain",
)
logger.info(f"publishing {queue_name=}, {destination_bus=}, {source_bus=}")
await exchange.publish(
message_body,
routing_key=queue_name,
)
async def _connect(self):
self.connection = await get_connection(
loop=self.loop,
amqp_host_name=self.settings.amqp_host_name,
username=self.settings.amqp_username,
password=self.settings.amqp_password,
role_name=self.settings.role_name,
)
self.channel = await self.connection.channel()
logger.info(f"{self.settings.role_name} connection established...")
async def _subscribe(self):
nb_queue = await self.channel.declare_queue(
self.settings.data_bus_sub_queue,
durable=True,
)
sb_queue = await self.channel.declare_queue(
self.settings.control_bus_sub_queue,
durable=True,
)
await nb_queue.consume(self.data_bus_message_handler)
await sb_queue.consume(self.control_bus_message_handler)
def _compact_llm_messages(self):
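        # Summarize the stored message history once its token count exceeds the configured memory budget.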
token_count = 0
for message in self.llm_messages:
token_count += self._count_tokens(message)
logger.info(f"Current {token_count=}")
if token_count > self.settings.memory_max_tokens:
logger.info("compacting initiated...")
self._update_llm_messages()
token_count = self._count_tokens(self.llm_messages[0])
logger.info(f"After compaction memory {token_count=}")
else:
logger.info("No compaction required")
def _update_llm_messages(self):
openai.api_key = self.settings.openai_api_key
identity = {"role": "system", "content": self.layer_config.prompts.identity}
summarization_prompt = {
"role": "user",
"content": prompts.memory_compaction_prompt,
}
conversation = [identity] + self.llm_messages + [summarization_prompt]
completion = openai.ChatCompletion.create(
model=self.settings.model,
messages=conversation,
temperature=self.settings.temperature,
)
self.llm_messages = [completion.choices[0].message]
    def _count_tokens(self, message: dict) -> int:
encoding = tiktoken.encoding_for_model(self.settings.model)
logger.info(f"{message=}")
num_tokens = len(encoding.encode(message["content"]))
return num_tokens
def _fetch_layer_config(self):
with get_db() as db:
config = get_layer_config(
db=db,
layer_name=self.settings.role_name,
)
self.layer_config = LayerConfigModel.model_validate(config)
def _fetch_ancestral_prompt(self):
with get_db() as db:
prompt = get_active_ancestral_prompt(db=db)
self.ancestral_prompt = AncestralPromptModel.model_validate(prompt)
async def _run_layer(self):
logger.info(f"Running {self.settings.role_name}")
await self._connect()
await self._subscribe()
logger.info(
f"{self.settings.role_name} Subscribed to {self.settings.data_bus_sub_queue} and {self.settings.control_bus_sub_queue}"
)
def run(self):
self.loop.create_task(self._run_layer())
try:
self.loop.run_forever()
finally:
self.loop.close()
| [
"[identity] + self.llm_messages + [summarization_prompt]",
"content"
] |
2024-01-10 | Kailuo-Lai/VChat-BigDL | models~llm_model.py | import os
from langchain.chains import ConversationalRetrievalChain, StuffDocumentsChain
from langchain.prompts import PromptTemplate
from bigdl.llm.langchain.llms import TransformersLLM
from langchain.vectorstores import FAISS
from langchain.text_splitter import CharacterTextSplitter
from bigdl.llm.langchain.embeddings import TransformersEmbeddings
from langchain import LLMChain
from models.helsinki_model import Translator
from utils.utils import new_cd
parent_dir = os.path.dirname(__file__)
condense_template = """
Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
You can assume the discussion is about the video content.
Chat History:
{chat_history}
Follow Up Question: {question}
Standalone question:
"""
qa_template = """
You are an AI assistant designed for answering questions about a video.
You are given a timeline document. The document records what people see and hear from a single video.
Try to connect this information and provide a conversational answer.
=========
{context}
=========
Question: {question}
Answer:
"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(condense_template)
QA_PROMPT = PromptTemplate(template=qa_template, input_variables=["question", "context"])
# DOC_PROMPT = PromptTemplate.from_template("Video Clip {video_clip}: {page_content}")
DOC_PROMPT = PromptTemplate.from_template("{page_content}")
class LlmReasoner():
def __init__(self, args):
self.history = []
self.llm_version = args.llm_version
self.embed_version = args.embed_version
self.qa_chain = None
self.vectorstore = None
self.top_k = args.top_k
self.qa_max_new_tokens = args.qa_max_new_tokens
self.init_model()
def init_model(self):
with new_cd(parent_dir):
self.llm = TransformersLLM.from_model_id_low_bit(f"../checkpoints/{self.llm_version}")
self.llm.streaming = False
self.embeddings = TransformersEmbeddings.from_model_id(model_id=f"../checkpoints/{self.embed_version}")
self.question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT)
self.answer_generator = LLMChain(llm=self.llm, prompt=QA_PROMPT,
llm_kwargs={"max_new_tokens": self.qa_max_new_tokens})
self.doc_chain = StuffDocumentsChain(llm_chain=self.answer_generator, document_prompt=DOC_PROMPT,
document_variable_name='context')
self.text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0, keep_separator=True)
# self.text_splitter = CharacterTextSplitter()
# self.translator = Translator()
def create_qa_chain(self, input_log):
texts = self.text_splitter.split_text(input_log)
self.vectorstore = FAISS.from_texts(texts, self.embeddings, metadatas=[{"video_clip": str(i)} for i in range(len(texts))])
retriever = self.vectorstore.as_retriever(search_kwargs={"k": self.top_k})
self.qa_chain = ConversationalRetrievalChain(retriever=retriever,
question_generator=self.question_generator,
combine_docs_chain=self.doc_chain,
return_generated_question = True,
return_source_documents = True,
rephrase_question=False)
def __call__(self, question):
response = self.qa_chain({"question": question, "chat_history": self.history})
answer = response["answer"]
generated_question = response["generated_question"]
source_documents = response["source_documents"]
self.history.append([question, answer])
return self.history, generated_question, source_documents
def clean_history(self):
self.history = [] | [
"{page_content}",
"question",
"\nYou are an AI assistant designed for answering questions about a video.\nYou are given a timeline document. The document records what people see and hear from a single video.\nTry to connet these information and provide a conversational answer.\n=========\n{context}\n=========\nQuestion: {question}\nAnswer: \n",
"context",
"\nGiven the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\nYou can assume the discussion is about the video content.\nChat History:\n{chat_history}\nFollow Up Question: {question}\nStandalone question:\n"
] |
2024-01-10 | gustavz/DataChad | datachad~backend~deeplake.py | import time
from datetime import datetime
from glob import glob
import deeplake
from deeplake.client.client import DeepLakeBackendClient
from deeplake.util.bugout_reporter import deeplake_reporter
from langchain.schema import Document
from langchain.vectorstores import DeepLake, VectorStore
from datachad.backend.constants import (
DATA_PATH,
DEFAULT_USER,
LOCAL_DEEPLAKE,
STORE_DOCS_EXTRA,
VERBOSE,
)
from datachad.backend.io import clean_string_for_storing
from datachad.backend.loader import load_data_source, split_docs
from datachad.backend.logging import logger
from datachad.backend.models import STORES, get_embeddings
from datachad.backend.utils import clean_string_for_storing
SPLIT = "-_-"
def list_deeplake_datasets(
org_id: str = "",
token: str = None,
) -> None:
"""List all available Deep Lake cloud datasets for a given user / orgnaization.
Removed from deeplake in: https://github.com/activeloopai/deeplake/pull/2182/files
"""
deeplake_reporter.feature_report(
feature_name="list",
parameters={"org_id": org_id},
)
def get_datasets(self, workspace: str):
LIST_DATASETS = "/api/datasets/{}"
suffix_public = LIST_DATASETS.format("public")
suffix_user = LIST_DATASETS.format("all")
if workspace:
res_datasets = self.get_workspace_datasets(workspace, suffix_public, suffix_user)
else:
public_datasets = self.request(
"GET",
suffix_public,
endpoint=self.endpoint(),
).json()
user_datasets = self.request(
"GET",
suffix_user,
endpoint=self.endpoint(),
).json()
res_datasets = public_datasets + user_datasets
return [ds["_id"] for ds in res_datasets]
client = DeepLakeBackendClient(token=token)
client.get_datasets = get_datasets
datasets = client.get_datasets(client, workspace=org_id)
return datasets
def get_deeplake_dataset_path(dataset_name: str, credentials: dict) -> str:
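    # Local mode stores datasets under DATA_PATH; otherwise build an Activeloop hub:// path for the user's org.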
if LOCAL_DEEPLAKE:
dataset_path = str(DATA_PATH / dataset_name)
else:
dataset_path = f"hub://{credentials['activeloop_id']}/{dataset_name}"
return dataset_path
def delete_all_deeplake_datasets(credentials: dict) -> None:
datasets = list_deeplake_datasets(credentials["activeloop_id"], credentials["activeloop_token"])
for dataset in datasets:
path = f"hub://{dataset}"
logger.info(f"Deleting dataset: {path}")
deeplake.delete(path, token=credentials["activeloop_token"], force=True)
def get_existing_deeplake_vector_store_paths(credentials: dict) -> list[str]:
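    # Local mode lists dataset folders on disk; cloud mode lists the user's Deep Lake datasets via the API.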
if LOCAL_DEEPLAKE:
return glob(str(DATA_PATH / "*"), recursive=False)
else:
dataset_names = list_deeplake_datasets(
credentials["activeloop_id"], credentials["activeloop_token"]
)
        dataset_paths = [f"hub://{name}" for name in dataset_names]
        return dataset_paths
def get_or_create_deeplake_vector_store_paths_for_user(
credentials: dict, store_type: str
) -> list[str]:
all_paths = get_existing_deeplake_vector_store_paths(credentials)
# TODO: replace DEFAULT_USER with user id once stored in credentials
user_paths = [
p
for p in all_paths
if p.split(SPLIT)[-1] == DEFAULT_USER and p.split(SPLIT)[-2] == store_type
]
return user_paths
def get_or_create_deeplake_vector_store_display_name(dataset_path: str) -> str:
splits = dataset_path.split(SPLIT)
return f"{splits[-4]} ({splits[-3][:4]}-{splits[-3][4:6]}-{splits[-3][6:8]})"
def get_unique_deeplake_vector_store_path(store_type: str, name: str, credentials: dict) -> str:
store_type_dict = {STORES.KNOWLEDGE_BASE: "kb", STORES.SMART_FAQ: "faq"}
dataset_name = (
# [-4] vector store name
f"{SPLIT}{name}"
# [-3]: creation time
f"{SPLIT}{datetime.now().strftime('%Y%m%d%H%M%S')}"
# [-2]: vector store type
f"{SPLIT}{store_type_dict[store_type]}"
# [-1]: user
f"{SPLIT}{DEFAULT_USER}"
)
dataset_path = get_deeplake_dataset_path(dataset_name, credentials)
return dataset_path
def get_deeplake_docs_path(data_source: str, options: dict, credentials: dict) -> str:
dataset_name = clean_string_for_storing(data_source)
dataset_name += "-docs"
    dataset_path = get_deeplake_dataset_path(dataset_name, credentials)
return dataset_path
def load_docs_from_deeplake(docs_path: str, credentials: dict) -> list[Document]:
ds = deeplake.load(docs_path, token=credentials["activeloop_token"])
metadatas = ds["metadata"].data()["value"]
texts = ds["text"].data()["value"]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
return docs
def store_docs_to_deeplake(docs: list[Document], docs_path: str, credentials: dict):
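    # Persist raw documents (text + metadata) to their own Deep Lake dataset so they can be reloaded without re-parsing.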
ds = deeplake.empty(docs_path, token=credentials["activeloop_token"])
ds.create_tensor(
"text",
htype="text",
create_id_tensor=False,
create_sample_info_tensor=False,
create_shape_tensor=False,
chunk_compression="lz4",
)
ds.create_tensor(
"metadata",
htype="json",
create_id_tensor=False,
create_sample_info_tensor=False,
create_shape_tensor=False,
chunk_compression="lz4",
)
for doc in docs:
ds.append(
{
"text": doc.page_content,
"metadata": doc.metadata,
}
)
ds.commit()
logger.info(f"Stored docs to: {docs_path}")
def load_data_sources_or_docs_from_deeplake(
data_sources: list[str], options: dict, credentials: dict
) -> list[Document]:
docs = []
for data_source in data_sources:
if STORE_DOCS_EXTRA:
docs_path = get_deeplake_docs_path(data_source, options, credentials)
if deeplake.exists(docs_path, token=credentials["activeloop_token"]):
logger.info(f"Docs exist -> loading docs: {docs_path}")
docs.extend(load_docs_from_deeplake(docs_path, credentials))
else:
logger.info(
f"Docs do not exist for data source -> loading data source: {data_source}"
)
docs.extend(load_data_source(data_source))
store_docs_to_deeplake(docs, docs_path, credentials)
logger.info(f"Docs {docs_path} loaded!")
else:
docs.extend(load_data_source(data_source))
return docs
def get_or_create_deeplake_vector_store(
data_sources: list[str],
vector_store_path: str,
store_type: str,
options: dict,
credentials: dict,
) -> VectorStore:
t_start = time.time()
embeddings = get_embeddings(options, credentials)
if deeplake.exists(vector_store_path, token=credentials["activeloop_token"]):
logger.info(f"Vector Store '{vector_store_path}' exists -> loading")
vector_store = DeepLake(
dataset_path=vector_store_path,
read_only=True,
embedding_function=embeddings,
token=credentials["activeloop_token"],
)
else:
logger.info(f"Vector Store '{vector_store_path}' does not exist -> uploading")
docs = load_data_sources_or_docs_from_deeplake(data_sources, options, credentials)
docs = split_docs(docs, store_type, options)
vector_store = DeepLake.from_documents(
docs,
embeddings,
dataset_path=vector_store_path,
token=credentials["activeloop_token"],
verbose=VERBOSE,
)
logger.info(f"Vector Store {vector_store_path} loaded in {round(time.time() - t_start)}s!")
return vector_store
| [] |
2024-01-10 | gustavz/DataChad | datachad~backend~jobs.py | import io
from langchain.chains.base import Chain
from langchain.schema import BaseChatMessageHistory
from langchain.schema.vectorstore import VectorStore
from datachad.backend.chain import get_multi_chain
from datachad.backend.deeplake import (
get_or_create_deeplake_vector_store,
get_unique_deeplake_vector_store_path,
)
from datachad.backend.io import delete_files, save_files
from datachad.backend.models import STORES
def create_vector_store(
data_source: str | None,
files: list[io.BytesIO],
store_type: str,
name: str,
options: dict,
credentials: dict,
) -> VectorStore:
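    # Save uploaded files to disk, build a fresh vector store from all data sources, then clean the files up again.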
file_data_source = save_files(files, name)
vector_store_path = get_unique_deeplake_vector_store_path(store_type, name, credentials)
vector_store = get_or_create_deeplake_vector_store(
data_sources=[ds for ds in [data_source, file_data_source] if ds],
vector_store_path=vector_store_path,
store_type=store_type,
options=options,
credentials=credentials,
)
delete_files(files, name)
return vector_store
def create_chain(
use_vanilla_llm: bool,
    knowledge_bases: list[str],
smart_faq: str,
chat_history: BaseChatMessageHistory,
options: dict,
credentials: dict,
) -> Chain:
knowledge_bases = [
get_or_create_deeplake_vector_store(
data_sources=[],
vector_store_path=path,
store_type=STORES.KNOWLEDGE_BASE,
options=options,
credentials=credentials,
)
for path in knowledge_bases
]
if smart_faq:
smart_faq = get_or_create_deeplake_vector_store(
data_sources=[],
vector_store_path=smart_faq,
store_type=STORES.SMART_FAQ,
options=options,
credentials=credentials,
)
chain = get_multi_chain(
use_vanilla_llm, knowledge_bases, smart_faq, chat_history, options, credentials
)
return chain
| [] |
2024-01-10 | gustavz/DataChad | datachad~backend~loader.py | import os
import re
import shutil
from pathlib import Path
from langchain.document_loaders import (
CSVLoader,
EverNoteLoader,
GitLoader,
NotebookLoader,
OnlinePDFLoader,
PyPDFium2Loader,
PythonLoader,
TextLoader,
UnstructuredEPubLoader,
UnstructuredFileLoader,
UnstructuredHTMLLoader,
UnstructuredMarkdownLoader,
UnstructuredODTLoader,
UnstructuredPowerPointLoader,
UnstructuredWordDocumentLoader,
WebBaseLoader,
)
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from tqdm import tqdm
from datachad.backend.constants import DATA_PATH
from datachad.backend.logging import logger
from datachad.backend.models import STORES, get_tokenizer
class SmartFAQSplitter:
def split_documents(self, documents: list[Document]) -> list[Document]:
"""
Splits the given text into a list of strings based on the regex patterns of numbered lists.
Each new list item is separated by two blank lines like this:
1. First item
Some description here.
1. some numbered list
2. beloing to the first item
2. Second item
Another description.
a) another list
b) but with characters
3. Third item
And another one.
- a list with dashes
- more items
"""
splitted_documents = []
for document in documents:
split_text = re.split(r"(?=\n\n\d+\.)", document.page_content.strip())
filtered_text = [re.sub(r"^\n+|\n+$", "", section) for section in split_text]
splitted_documents.extend(
[
Document(
page_content=text,
metadata={"faq_no": int(re.findall(r"\d", text)[0])},
)
for text in filtered_text
]
)
return splitted_documents
class AutoGitLoader:
def __init__(self, data_source: str) -> None:
self.data_source = data_source
def load(self) -> list[Document]:
# We need to try both common main branches
# Thank you github for the "master" to "main" switch
# we need to make sure the data path exists
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH)
repo_name = self.data_source.split("/")[-1].split(".")[0]
repo_path = str((DATA_PATH / repo_name).absolute())
clone_url = self.data_source
if os.path.exists(repo_path):
clone_url = None
branches = ["main", "master"]
for branch in branches:
try:
docs = GitLoader(repo_path, clone_url, branch).load()
break
except Exception as e:
logger.error(f"Error loading git: {e}")
if os.path.exists(repo_path):
# cleanup repo afterwards
shutil.rmtree(repo_path)
try:
return docs
except:
raise RuntimeError("Error loading git. Make sure to use HTTPS GitHub repo links.")
FILE_LOADER_MAPPING = {
".csv": (CSVLoader, {"encoding": "utf-8"}),
".doc": (UnstructuredWordDocumentLoader, {}),
".docx": (UnstructuredWordDocumentLoader, {}),
".enex": (EverNoteLoader, {}),
".epub": (UnstructuredEPubLoader, {}),
".html": (UnstructuredHTMLLoader, {}),
".md": (UnstructuredMarkdownLoader, {}),
".odt": (UnstructuredODTLoader, {}),
".pdf": (PyPDFium2Loader, {}),
".ppt": (UnstructuredPowerPointLoader, {}),
".pptx": (UnstructuredPowerPointLoader, {}),
".txt": (TextLoader, {"encoding": "utf8"}),
".ipynb": (NotebookLoader, {}),
".py": (PythonLoader, {}),
# Add more mappings for other file extensions and loaders as needed
}
WEB_LOADER_MAPPING = {
".git": (AutoGitLoader, {}),
".pdf": (OnlinePDFLoader, {}),
}
def load_document(
file_path: str,
mapping: dict = FILE_LOADER_MAPPING,
default_loader: BaseLoader = UnstructuredFileLoader,
) -> Document:
# Choose loader from mapping, load default if no match found
ext = "." + file_path.rsplit(".", 1)[-1]
if ext in mapping:
loader_class, loader_args = mapping[ext]
loader = loader_class(file_path, **loader_args)
else:
loader = default_loader(file_path)
return loader.load()
def load_directory(path: str, silent_errors=True) -> list[Document]:
# We don't load hidden files starting with "."
all_files = list(Path(path).rglob("**/[!.]*"))
results = []
with tqdm(total=len(all_files), desc="Loading documents", ncols=80) as pbar:
for file in all_files:
try:
results.extend(load_document(str(file)))
except Exception as e:
if silent_errors:
logger.error(f"failed to load {file}")
else:
raise e
pbar.update()
return results
def load_data_source(data_source: str) -> list[Document]:
# Ugly thing that decides how to load data
# It aint much, but it's honest work
is_web = data_source.startswith("http")
is_dir = os.path.isdir(data_source)
is_file = os.path.isfile(data_source)
try:
if is_dir:
docs = load_directory(data_source)
elif is_file:
docs = load_document(data_source)
elif is_web:
docs = load_document(data_source, WEB_LOADER_MAPPING, WebBaseLoader)
else:
raise TypeError
return docs
except Exception as e:
error_msg = f"Failed to load your data source '{data_source}'."
logger.error(error_msg)
e.args += (error_msg,)
raise e
def split_docs(docs: list[Document], store_type: str, options: dict) -> list[Document]:
if store_type == STORES.SMART_FAQ:
text_splitter = SmartFAQSplitter()
else:
tokenizer = get_tokenizer(options)
def length_function(text: str) -> int:
# count chunks like the embeddings model tokenizer does
return len(tokenizer.encode(text))
chunk_overlap = int(options["chunk_size"] * options["chunk_overlap_pct"] / 100)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=options["chunk_size"],
chunk_overlap=chunk_overlap,
length_function=length_function,
separators=["\n\n", "#", "\.", "!", "\?", "\n", ",", " ", ""],
)
splitted_docs = text_splitter.split_documents(docs)
logger.info(f"Loaded: {len(splitted_docs)} document chucks")
return splitted_docs
| [] |
2024-01-10 | gustavz/DataChad | datachad~streamlit~helper.py | import os
import deeplake
import openai
import streamlit as st
from dotenv import load_dotenv
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.openai_info import get_openai_token_cost_for_model
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from datachad.backend.constants import (
CHUNK_OVERLAP_PCT,
CHUNK_SIZE,
DEFAULT_KNOWLEDGE_BASES,
DEFAULT_SMART_FAQ,
DISTANCE_METRIC,
K_FETCH_K_RATIO,
MAX_TOKENS,
MAXIMAL_MARGINAL_RELEVANCE,
TEMPERATURE,
)
from datachad.backend.deeplake import (
get_or_create_deeplake_vector_store_display_name,
get_or_create_deeplake_vector_store_paths_for_user,
)
from datachad.backend.jobs import create_chain, create_vector_store
from datachad.backend.logging import logger
from datachad.backend.models import MODELS, get_tokenizer
from datachad.streamlit.constants import PAGE_ICON
# loads environment variables
load_dotenv()
def init_session_state():
# Initialise all session state variables with defaults
SESSION_DEFAULTS = {
# general usage
"usage": {},
"chat_history": StreamlitChatMessageHistory(),
# authentication
"openai_api_key": "",
"activeloop_token": "",
"activeloop_id": "",
"credentals": {},
"auth_ok": False,
# data upload
"uploaded_files": None,
"data_type": None,
"data_name": None,
# data selection
"chain": None,
"knowledge_bases": DEFAULT_KNOWLEDGE_BASES,
"smart_faq": DEFAULT_SMART_FAQ,
# advanced options
"model": MODELS.GPT35TURBO,
"k_fetch_k_ratio": K_FETCH_K_RATIO,
"chunk_size": CHUNK_SIZE,
"chunk_overlap_pct": CHUNK_OVERLAP_PCT,
"temperature": TEMPERATURE,
"max_tokens": MAX_TOKENS,
"distance_metric": DISTANCE_METRIC,
"maximal_marginal_relevance": MAXIMAL_MARGINAL_RELEVANCE,
}
for k, v in SESSION_DEFAULTS.items():
if k not in st.session_state:
st.session_state[k] = v
def authenticate() -> None:
# Validate all credentials are set and correct
# Check for env variables to enable local dev and deployments with shared credentials
openai_api_key = (
st.session_state["openai_api_key"]
or os.environ.get("OPENAI_API_KEY")
or st.secrets.get("OPENAI_API_KEY")
)
activeloop_token = (
st.session_state["activeloop_token"]
or os.environ.get("ACTIVELOOP_TOKEN")
or st.secrets.get("ACTIVELOOP_TOKEN")
)
activeloop_id = (
st.session_state["activeloop_id"]
or os.environ.get("ACTIVELOOP_ID")
or st.secrets.get("ACTIVELOOP_ID")
)
if not (openai_api_key and activeloop_token and activeloop_id):
st.session_state["auth_ok"] = False
st.error("Credentials neither set nor stored", icon=PAGE_ICON)
return
try:
# Try to access openai and deeplake
with st.session_state["info_container"], st.spinner("Authentifying..."):
openai.api_key = openai_api_key
openai.models.list()
deeplake.exists(
f"hub://{activeloop_id}/DataChad-Authentication-Check",
token=activeloop_token,
)
except Exception as e:
logger.error(f"Authentication failed with {e}")
st.session_state["auth_ok"] = False
st.error("Authentication failed", icon=PAGE_ICON)
return
# store credentials in the session state
st.session_state["auth_ok"] = True
st.session_state["credentials"] = {
"openai_api_key": openai_api_key,
"activeloop_token": activeloop_token,
"activeloop_id": activeloop_id,
}
msg = "Authentification successful!"
st.session_state["info_container"].info(msg, icon=PAGE_ICON)
logger.info(msg)
def get_options() -> dict:
return {
key: st.session_state[key]
for key in [
"model",
"k_fetch_k_ratio",
"chunk_size",
"chunk_overlap_pct",
"temperature",
"max_tokens",
"distance_metric",
"maximal_marginal_relevance",
]
}
def update_vector_store() -> None:
try:
with st.session_state["info_container"], st.spinner("Updating Vector Stores..."):
options = get_options()
create_vector_store(
data_source=st.session_state["data_source"],
files=st.session_state["uploaded_files"],
store_type=st.session_state["data_type"],
name=st.session_state["data_name"],
options=options,
credentials=st.session_state["credentials"],
)
msg = (
f"Vector Store built for "
f"uploaded files: {st.session_state['uploaded_files']} "
f"and store type: {st.session_state['data_type']}"
f"with name: {st.session_state['data_name']}"
f"and options: {options}"
)
logger.info(msg)
st.session_state["info_container"].info("Upload successful!", icon=PAGE_ICON)
except Exception as e:
msg = f"Failed to build vectore chain with error: {e}"
logger.error(msg)
st.session_state["info_container"].error(msg, icon=PAGE_ICON)
def update_chain() -> None:
try:
with st.session_state["info_container"], st.spinner("Updating Knowledge Base..."):
st.session_state["chat_history"].clear()
options = get_options()
st.session_state["chain"] = create_chain(
use_vanilla_llm=st.session_state["use_vanilla_llm"],
knowledge_bases=st.session_state["knowledge_bases"],
smart_faq=st.session_state["smart_faq"],
chat_history=st.session_state["chat_history"],
options=options,
credentials=st.session_state["credentials"],
)
msg = (
f"Language chain built for "
f"knowledge base: {st.session_state['knowledge_bases']} "
f"and smart faq: {st.session_state['smart_faq']}"
f"with options: {options}"
)
logger.info(msg)
st.session_state["info_container"].info("Selection successful!", icon=PAGE_ICON)
except Exception as e:
msg = f"Failed to build language chain with error: {e}"
logger.error(msg)
st.session_state["info_container"].error(msg, icon=PAGE_ICON)
def get_existing_smart_faqs_and_default_index() -> list[str]:
smart_faqs = get_or_create_deeplake_vector_store_paths_for_user(
st.session_state["credentials"], "faq"
)
index = 0
if DEFAULT_SMART_FAQ and DEFAULT_SMART_FAQ in smart_faqs:
# we pick the first smart faq as default
# so we must sort it to the front
smart_faqs = set(smart_faqs)
smart_faqs.remove(DEFAULT_SMART_FAQ)
smart_faqs = [DEFAULT_SMART_FAQ] + list(smart_faqs)
index = 1
# first option should always be None
smart_faqs = [None] + smart_faqs
return smart_faqs, index
def get_existing_knowledge_bases() -> list[str]:
return get_or_create_deeplake_vector_store_paths_for_user(st.session_state["credentials"], "kb")
def format_vector_stores(item: str) -> str:
if item is not None:
return get_or_create_deeplake_vector_store_display_name(item)
return item
class StreamHandler(BaseCallbackHandler):
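    # Streams LLM tokens into a Streamlit container as they arrive and tracks how many chain steps have finished.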
def __init__(self, container: st.delta_generator.DeltaGenerator, initial_text: str = ""):
self.container = container
self.stream_text = initial_text
self.chain_state = 0
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.stream_text += token
self.container.markdown(self.stream_text)
def on_chain_end(self, outputs, **kwargs) -> None:
self.chain_state += 1
class PrintRetrievalHandler(BaseCallbackHandler):
def __init__(self, container):
self.status = container.status("**Context Retrieval**")
def on_retriever_start(self, serialized: dict, query: str, **kwargs) -> None:
self.status.write(f"**Question:** {query}")
self.status.update(label=f"**Context Retrieval:** {query}")
def on_retriever_end(self, documents, **kwargs) -> None:
for idx, doc in enumerate(documents):
try:
source = os.path.basename(doc.metadata["source"])
page = doc.metadata.get("page")
output = f"___\n**Source {idx}:** {source}"
output += f" (page {page+1})" if page is not None else ""
self.status.write(output)
except:
pass
self.status.markdown(doc.page_content)
self.status.update(state="complete")
class UsageHandler(BaseCallbackHandler):
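    # Tracks prompt/completion tokens and cost per request and accumulates the totals in st.session_state["usage"].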
prompt = ""
total_tokens = 0
prompt_tokens = 0
completion_tokens = 0
successful_requests = 0
total_cost = 0
def update_usage(self) -> None:
usage_properties = [
"total_tokens",
"prompt_tokens",
"completion_tokens",
"successful_requests",
"total_cost",
]
for prop in usage_properties:
value = getattr(self, prop, 0)
setattr(self, prop, 0)
st.session_state["usage"].setdefault(prop, 0)
st.session_state["usage"][prop] += value
def calculate_costs(self) -> None:
model = st.session_state["model"]
tokenizer = get_tokenizer({"model": model})
self.prompt_tokens = len(tokenizer.encode(self.prompt))
self.total_tokens = self.prompt_tokens + self.completion_tokens
completion_cost = get_openai_token_cost_for_model(
model.name, self.completion_tokens, is_completion=True
)
prompt_cost = get_openai_token_cost_for_model(model.name, self.prompt_tokens)
self.total_cost += prompt_cost + completion_cost
def on_llm_new_token(self, **kwargs) -> None:
self.completion_tokens += 1
def on_chat_model_start(self, serialized, messages, **kwargs) -> None:
self.successful_requests += 1
self.prompt += messages[0][0].content
def on_chain_end(self, outputs, **kwargs) -> None:
self.calculate_costs()
self.update_usage()
| [
"0"
] |
2024-01-10 | gustavz/DataChad | datachad~backend~models.py | from dataclasses import dataclass
from typing import Any
import streamlit as st
import tiktoken
from langchain.base_language import BaseLanguageModel
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings.openai import Embeddings, OpenAIEmbeddings
from transformers import AutoTokenizer
from datachad.backend.constants import LOCAL_EMBEDDINGS, MODEL_PATH
from datachad.backend.logging import logger
class Enum:
@classmethod
def all(cls) -> list[Any]:
return [v for k, v in cls.__dict__.items() if not k.startswith("_")]
@dataclass
class Model:
name: str
embedding: str
context: int
def __str__(self) -> str:
return self.name
class STORES(Enum):
KNOWLEDGE_BASE = "Knowledge Base"
SMART_FAQ = "Smart FAQ"
class EMBEDDINGS(Enum):
# Add more embeddings as needed
OPENAI = "text-embedding-ada-002"
HUGGINGFACE = "sentence-transformers/all-MiniLM-L6-v2"
class MODELS(Enum):
# Add more models as needed
GPT35TURBO = Model(
name="gpt-3.5-turbo",
embedding=EMBEDDINGS.OPENAI,
context=4096,
)
GPT35TURBO16K = Model(
name="gpt-3.5-turbo-16k",
embedding=EMBEDDINGS.OPENAI,
context=16385,
)
GPT4 = Model(
name="gpt-4",
embedding=EMBEDDINGS.OPENAI,
context=8192,
)
def get_model(options: dict, credentials: dict) -> BaseLanguageModel:
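    # Instantiate the streaming LangChain chat model that matches the selected model option; unsupported models are reported via st.error.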
match options["model"].name:
case model_name if model_name.startswith("gpt"):
model = ChatOpenAI(
model_name=options["model"].name,
temperature=options["temperature"],
openai_api_key=credentials["openai_api_key"],
streaming=True,
)
# Added models need to be cased here
case _default:
msg = f"Model {options['model'].name} not supported!"
logger.error(msg)
st.error(msg)
            exit()
return model
def get_embeddings(options: dict, credentials: dict) -> Embeddings:
match options["model"].embedding:
case embedding if (embedding == EMBEDDINGS.HUGGINGFACE or LOCAL_EMBEDDINGS):
embeddings = HuggingFaceEmbeddings(
model_name=EMBEDDINGS.HUGGINGFACE, cache_folder=str(MODEL_PATH)
)
case EMBEDDINGS.OPENAI:
embeddings = OpenAIEmbeddings(
model=EMBEDDINGS.OPENAI,
disallowed_special=(),
openai_api_key=credentials["openai_api_key"],
)
# Added embeddings need to be cased here
case _default:
msg = f"Embeddings {options['model'].embedding} not supported!"
logger.error(msg)
st.error(msg)
            exit()
return embeddings
def get_tokenizer(options: dict) -> Embeddings:
match options["model"].embedding:
case embedding if (embedding == EMBEDDINGS.HUGGINGFACE or LOCAL_EMBEDDINGS):
tokenizer = AutoTokenizer.from_pretrained(EMBEDDINGS.HUGGINGFACE)
case EMBEDDINGS.OPENAI:
tokenizer = tiktoken.encoding_for_model(EMBEDDINGS.OPENAI)
# Added tokenizers need to be cased here
case _default:
msg = f"Tokenizer {options['model'].embedding} not supported!"
logger.error(msg)
st.error(msg)
            exit()
return tokenizer
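# Illustrative usage sketch (not part of the original module). The option/credential
# keys shown are inferred from how get_model, get_embeddings and get_tokenizer read
# them above; treat the concrete values as placeholders.
#
#   options = {"model": MODELS.GPT35TURBO, "temperature": 0.7}
#   credentials = {"openai_api_key": "sk-..."}
#   llm = get_model(options, credentials)
#   embeddings = get_embeddings(options, credentials)
#   tokenizer = get_tokenizer(options)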
| [] |
2024-01-10 | cleveriot/workshop-api | aimodule.py | import os
import openai
import tiktoken
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_sentiment(msg):
"""
Get the sentiment from a text message
parameters:
msg (string): The text message
returns:
sentiment (string): The sentiment detected in the message
"""
response = openai.Completion.create(
model="text-davinci-003",
prompt="Classify the sentiment in this messge: {}".format(msg),
temperature=0,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
# Transform response in dict
r_dict = response.to_dict()
# Get only the 'text' value
sentiment = r_dict.get('choices')[0].get('text').replace('\n','')
return sentiment
def write_feedback(msg):
"""
Write a message to answer a given site feedback
parameters:
msg (string): The text message
returns:
feedback (string): The feedback message to post
"""
response = openai.Completion.create(
model="text-davinci-003",
prompt="Evaluate the sentiment and the message idiom and reply with a kindly message with a follow-up question if the sentiment is negative. If positive, just reply only with a kindly message: {}".format(msg),
temperature=0,
max_tokens=180,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
# Transform response in dict
r_dict = response.to_dict()
# Get only the 'text' value
feedback = r_dict.get('choices')[0].get('text').replace('\n','')
return feedback
def summarize(text):
"""
Summarizes the provided text using the text-davinci-003 language model from OpenAI.
Args:
text (str): The text to be summarized.
Returns:
response (object): The response object containing the summary generated by the model.
"""
tokens = get_tokens(text)
print("Tokens: {}".format(tokens.nb_tokens))
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"Sumariza a reclamação a seguir para um responsável da qualidade do atendimento. Se houver menção da qualidade do atendimento customer care, também referir : {text} ",
temperature=1,
max_tokens=tokens.nb_tokens,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return response
def get_tokens(text,enc_name="p50k_base"):
"""
Returns a class object containing information about the tokens generated from the provided text.
Args:
text (str): The text for which tokens will be generated.
enc_name (str, optional): The name of the encoding scheme to be used. Defaults to "p50k_base".
Returns:
token_class (object): A class object containing information about the generated tokens.
"""
encoding = tiktoken.get_encoding(enc_name)
token_integers = encoding.encode(text)
nb_tokens = len(token_integers)
token_bytes = [encoding.decode_single_token_bytes(token) for token in token_integers]
tokens = {"nb_tokens": nb_tokens, "token_bytes": token_bytes, "token_integers": token_integers}
token_class = type("tokens", (), tokens)
return token_class
| [
"Evaluate the sentiment and the message idiom and reply with a kindly message with a follow-up question if the sentiment is negative. If positive, just reply only with a kindly message: PLACEHOLDER",
"Sumariza a reclamação a seguir para um responsável da qualidade do atendimento. Se houver menção da qualidade do atendimento customer care, também referir : PLACEHOLDER ",
"Classify the sentiment in this messge: PLACEHOLDER"
] |
2024-01-10 | evilmonkey19/vector_embeddings_course | mini-qa.py | import os
from langchain.vectorstores.cassandra import Cassandra
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain.llms.openai import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from datasets import load_dataset
ASTRA_DB_SECURE_BUNDLE_PATH = os.environ['ASTRA_DB_SECURE_BUNDLE_PATH']
ASTRA_DB_APPLICATION_TOKEN = os.environ['ASTRA_DB_APPLICATION_TOKEN']
ASTRA_DB_CLIENT_ID = os.environ['ASTRA_DB_CLIENT_ID']
ASTRA_DB_CLIENT_SECRET = os.environ['ASTRA_DB_CLIENT_SECRET']
ASTRA_DB_KEYSPACE = os.environ['ASTRA_DB_KEYSPACE']
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
cloud_config = {
'secure_connect_bundle': ASTRA_DB_SECURE_BUNDLE_PATH,
}
auth_provider = PlainTextAuthProvider(ASTRA_DB_CLIENT_ID, ASTRA_DB_CLIENT_SECRET)
cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)
astraSession = cluster.connect()
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
myEmbedding = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
myCassandraVStore = Cassandra(
embedding = myEmbedding,
session = astraSession,
keyspace = ASTRA_DB_KEYSPACE,
table_name = "qa_mini_demo",
)
print("Loading data from huggingface...")
myDataset = load_dataset("Biddls/Onion_News", split="train")
headlines = myDataset["text"][:50]
print("Generating embeddings and storing in AstraDB...")
myCassandraVStore.add_texts(headlines)
print(f"Inserted {len(headlines)} headlines.")
vectorIndex = VectorStoreIndexWrapper(vectorstore=myCassandraVStore)
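# the wrapper exposes a query() helper that retrieves similar headlines and asks the LLM to answer from them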
first_question = True
while True:
if first_question:
query_text = input("Enter your question (or type 'quit' to exit): ")
first_question = False
else:
query_text = input("What's your next question (or type 'quit' to exit): ")
if query_text.lower() == 'quit':
break
print(f"QUESTION: {query_text}")
answer = vectorIndex.query(query_text, llm=llm).strip()
print(f"ANSWER: {answer}")
print("DOCUMENTS BY RELEVANCE:")
for doc, score in myCassandraVStore.similarity_search_with_score(query_text, k=4):
print(" %0.4f \" %s ...\"" % (score, doc.page_content[:60])) | [] |
2024-01-10 | intel/certified-developer | MLOps_Professional~lab8~sample~PickerBot.py | from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.indexes import VectorstoreIndexCreator
from langchain.embeddings import HuggingFaceEmbeddings
from datasets import load_dataset
import pandas as pd
import time
import os
class PickerBot():
def __init__(self, data, model):
self.data = data
self.model = model
def data_proc(self):
if not os.path.isfile(self.data):
# Download the customer service robot support dialogue from hugging face
dataset = load_dataset("FunDialogues/customer-service-apple-picker-maintenance", cache_dir=None)
# Convert the dataset to a pandas dataframe
dialogues = dataset['train']
df = pd.DataFrame(dialogues, columns=['id', 'description', 'dialogue'])
# Print the first 5 rows of the dataframe
df.head()
# only keep the dialogue column
dialog_df = df['dialogue']
# save the data to txt file
dialog_df.to_csv(self.data, sep=' ', index=False)
else:
print('data already exists in path.')
def create_vectorstore(self, chunk_size: int = 500, overlap: int = 25):
loader = TextLoader(self.data)
# Text Splitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=overlap)
# Embed the document and store into chroma DB
self.index = VectorstoreIndexCreator(embedding= HuggingFaceEmbeddings(), text_splitter=text_splitter).from_loaders([loader])
def inference(self, user_input: str, context_verbosity: bool = False, top_k: int=2):
# perform similarity search and retrieve the context from our documents
results = self.index.vectorstore.similarity_search(user_input, k=top_k)
# join all context information into one string
context = "\n".join([document.page_content for document in results])
if context_verbosity:
print(f"Retrieving information related to your question...")
print(f"Found this content which is most similar to your question: {context}")
template = """
Please use the following apple picker technical support related questions to answer questions.
Context: {context}
---
This is the user's question: {question}
Answer: This is what our auto apple picker technical expert suggest."""
prompt = PromptTemplate(template=template, input_variables=["context", "question"]).partial(context=context)
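        # run the user question through the prompt (context already partially applied) and the local LLM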
llm_chain = LLMChain(prompt=prompt, llm=self.model)
print("Processing the information with gpt4all...\n")
start_time = time.time()
response = llm_chain.run(user_input)
elapsed_time_milliseconds = (time.time() - start_time) * 1000
tokens = len(response.split())
time_per_token_milliseconds = elapsed_time_milliseconds / tokens if tokens != 0 else 0
        processed_response = response + f" --> {time_per_token_milliseconds:.4f} milliseconds/token AND Time taken for response: {elapsed_time_milliseconds:.2f} milliseconds"
        return processed_response | [
"\n Please use the following apple picker technical support related questions to answer questions. \n Context: {context}\n ---\n This is the user's question: {question}\n Answer: This is what our auto apple picker technical expert suggest.",
"question",
"context"
] |
2024-01-10 | lukovnikov/improved-diffusion | improved_diffusion~logger.py | """
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [] # [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
# lines.append(dashes)
self.file.write(" ".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
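        # incremental running mean: new_mean = (old_mean * n + val) / (n + 1)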
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
| [] |
2024-01-10 | sanchit-gandhi/seq2seq-speech | run_speech_recognition_whisper.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning OpenAI Whisper models for speech recognition.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
# flake8: noqa: E501
import logging
import os
import re
import torchaudio
import whisper
import sys
import tempfile
from dataclasses import dataclass, field
from typing import Optional, Dict, Union, List
import numpy as np
import torch
import datasets
from datasets import DatasetDict, load_dataset
import transformers
from torch import nn
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
Seq2SeqTrainer,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
import wandb
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/tokenizer we are going to fine-tune from.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={"help": "Path to pretrained model or model identifier from OpenAI Whisper NGC."}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co or OpenAI Whisper NGC."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
manifest_path: str = field(
default="data",
metadata={
"help": "Manifest path."
},
)
tokenizer_path: str = field(
default="tokenizers",
metadata={
"help": "Tokenizer path."
},
)
freeze_encoder: bool = field(
default=False,
metadata={"help": "Freeze the acoustic encoder of the model. Recommend when fine-tuning on small datasets."}
)
num_beams: int = field(
default=1,
metadata={"help": "Number of beams for evaluation."},
)
length_penalty: float = field(
default=1.0,
metadata={"help": "Length penalty for evaluation."},
)
use_adam8bit: bool = field(
default=False,
metadata={"help": "Whether to use bitsandbytes 8bit AdamW optimiser."}
)
dropout_rate: float = field(
default=0.0,
metadata={"help": "The dropout ratio for all dropout layers (default=0)."}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: str = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
dataset_cache_dir: Optional[str] = field(
default=None, metadata={"help": "Path to cache directory for saving and loading datasets"}
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of test examples to this "
"value if set."
},
)
audio_column_name: str = field(
default="audio",
metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
)
text_column_name: str = field(
default="text",
metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
)
max_duration_in_seconds: float = field(
default=20.0,
metadata={
"help": "Truncate training audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
},
)
min_duration_in_seconds: float = field(
default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
)
max_eval_duration_in_seconds: float = field(
default=None,
metadata={
"help": "Truncate eval/test audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
min_target_length: Optional[int] = field(
default=0,
metadata={
"help": "The minimum total sequence length for target text after tokenization. Sequences shorter "
"than this will be filtered."
},
)
preprocessing_only: bool = field(
default=False,
metadata={
"help": "Whether to only do data preprocessing and skip training. "
"This is especially useful when data preprocessing errors out in distributed training due to timeout. "
"In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
"so that the cached datasets can consequently be loaded in distributed training"
},
)
train_split_name: str = field(
default="train",
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
},
)
eval_split_name: str = field(
default="validation",
metadata={
"help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"
},
)
test_split_name: str = field(
default="test",
metadata={"help": "The name of the test data set split to use (via the datasets library). Defaults to 'test'"},
)
do_lower_case: bool = field(
default=True,
metadata={"help": "Whether the target text should be lower cased."},
)
wandb_project: str = field(
default="speech-recognition-whisper",
metadata={"help": "The name of the wandb project."},
)
torchaudio_resampler: bool = field(
default=False,
metadata={
"help": "Whether to use torchaudio to resample. If `False` (default) will use the default datataset backed."
}
)
def write_wandb_pred(pred_str, label_str, prefix="eval"):
# convert str data to a wandb compatible format
str_data = [[label_str[i], pred_str[i]] for i in range(len(pred_str))]
# we'll log all predictions for the last epoch
wandb.log(
{
f"{prefix}/predictions": wandb.Table(
columns=["label_str", "pred_str"], data=str_data
)
},
)
def to_pad_to_mel(array):
"""Static function which:
1. Pads/trims a list of audio arrays to a max length of 30s
2. Computes log-mel filter coefficients from padded/trimmed audio sequences
Inputs:
array: list of audio arrays
Returns:
input_ids: torch.tensor of log-mel filter bank coefficients
"""
padded_input = whisper.pad_or_trim(np.asarray(array, dtype=np.float32))
input_ids = whisper.log_mel_spectrogram(padded_input)
return input_ids
def to_mel_to_pad(array):
"""Static function which:
1. Computes log-mel filter coefficients from padded/trimmed audio sequences
2. Pads/trims a list of audio arrays to a max length of 30s
Inputs:
array: list of audio arrays
Returns:
input_ids: torch.tensor of log-mel filter bank coefficients
"""
mels = whisper.log_mel_spectrogram(np.asarray(array, dtype=np.float32))
input_ids = whisper.pad_or_trim(mels, 3000)
return input_ids
@dataclass
class WhisperDataCollatorWithPadding:
"""
Data collator that dynamically pads the audio inputs received. An EOS token is appended to the labels sequences.
They are then dynamically padded to max length.
Args:
eos_token_id (`int`)
The end-of-sentence token for the Whisper tokenizer. Ensure to set for sequences to terminate before
generation max length.
"""
eos_token_id: int
time_stamp_token_id: int
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
"""
Since Whisper models don't have a HF processor defined (feature extractor + tokenizer), we'll pad by hand...
"""
# split inputs and labels since they have to be of different lengths
# and need different padding methods
input_ids = [feature["input_ids"] for feature in features]
labels = [feature["labels"] for feature in features]
# first, pad the audio inputs to max_len
input_ids = torch.concat([to_pad_to_mel(input_val)[None, :] for input_val in input_ids])
# next, append the eos token to our sequence of labels
labels = [lab + [self.eos_token_id] for lab in labels]
# finally, pad the target labels to max_len
label_lengths = [len(lab) for lab in labels]
max_label_len = max(label_lengths)
labels = [np.pad(lab, (0, max_label_len - lab_len), 'constant', constant_values=-100) for lab, lab_len in zip(labels, label_lengths)]
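        # -100 is the ignore index of the cross-entropy loss, so padded label positions are not trained on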
batch = {"labels": labels}
batch = {k: torch.tensor(np.array(v), requires_grad=False) for k, v in batch.items()}
batch["input_ids"] = input_ids
return batch
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Set wandb project ID before instantiating the Trainer
os.environ["WANDB_PROJECT"] = data_args.wandb_project
report_to_wandb = "wandb" in training_args.report_to
sample_rate = 16_000
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# load the model
if os.path.isfile(model_args.model_name_or_path):
checkpoint = torch.load(model_args.model_name_or_path)
need_to_rewrite_checkpoint = any(k.startswith("decoder.blocks") and ".mlp.3" in k for k in checkpoint.keys())
if need_to_rewrite_checkpoint:
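            # checkpoints saved after dropout was inserted into the decoder MLP Sequential
            # have fc2 stored at index 3; remap those keys back to index 2 before loading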
new_checkpoint = {}
for k, v in checkpoint.items():
if k.startswith("decoder.blocks") and "mlp" in k.split("."):
if int(k.split(".mlp.")[-1].split(".")[0]) in [2, 4]:
continue
elif int(k.split(".mlp.")[-1].split(".")[0]) == 3:
k = k.replace(".mlp.3", ".mlp.2")
new_checkpoint[k] = v
with tempfile.TemporaryDirectory() as tmp:
file = os.path.join(tmp, "model.pt")
torch.save(new_checkpoint, file)
model = whisper.Whisper.load_trained(file)
else:
model = whisper.Whisper.load_trained(model_args.model_name_or_path)
del checkpoint
else:
model = whisper.load_model(model_args.model_name_or_path, dropout_rate=model_args.dropout_rate)
if training_args.do_train:
# set the dropout for the MLP layers -> we do this here as the MLP layers are written as a 'sequential'
# so changing the modelling code gives mis-matches in the state-dict
if not model_args.freeze_encoder:
# only apply dropout when training the encoder
for block_idx in range(len(model.encoder.blocks)):
mlp_layer = model.encoder.blocks[block_idx].mlp
# going very verbose to explain what we're doing here!
fc1 = mlp_layer[0]
act_fn = mlp_layer[1]
dropout = nn.Dropout(p=model_args.dropout_rate)
fc2 = mlp_layer[2]
model.encoder.blocks[block_idx].mlp = nn.Sequential(fc1, act_fn, dropout, fc2, dropout)
        # wrap the decoder MLPs with dropout exactly once; repeating this inside the
        # freeze_encoder branch would re-wrap the Sequential and silently replace fc2
        for block_idx in range(len(model.decoder.blocks)):
            mlp_layer = model.decoder.blocks[block_idx].mlp
            fc1 = mlp_layer[0]
            act_fn = mlp_layer[1]
            dropout_1 = nn.Dropout(p=model_args.dropout_rate)
            fc2 = mlp_layer[2]
            dropout_2 = nn.Dropout(p=model_args.dropout_rate)
            model.decoder.blocks[block_idx].mlp = nn.Sequential(fc1, act_fn, dropout_1, fc2, dropout_2)
# load the tokenizer
whisper_tok = whisper.tokenizer.get_tokenizer(False, task="transcribe", language="en")
tokenizer = whisper_tok.tokenizer
tokenizer.pad_token = tokenizer.eos_token
# 4. Load dataset
raw_datasets = DatasetDict()
if training_args.do_train:
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.train_split_name,
cache_dir=data_args.dataset_cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if training_args.do_eval:
raw_datasets["eval"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.eval_split_name,
cache_dir=data_args.dataset_cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if training_args.do_predict:
test_split = data_args.test_split_name.split("+")
for split in test_split:
raw_datasets[split] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=split,
cache_dir=data_args.dataset_cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
if not training_args.do_train and not training_args.do_eval and not training_args.do_predict:
raise ValueError(
"Cannot not train, not do evaluation and not do prediction. At least one of "
"training, evaluation or prediction has to be done."
)
# if not training, there is no need to run multiple epochs
if not training_args.do_train:
training_args.num_train_epochs = 1
if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names:
raise ValueError(
f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
)
if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names:
raise ValueError(
f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--text_column_name` to the correct text column - one of "
f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
)
# 6. Resample speech dataset ALWAYS
if data_args.torchaudio_resampler:
# TODO: remove hardcoding of orig sr
resampler = torchaudio.transforms.Resample(8_000, sample_rate)
else:
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=sample_rate)
)
resampler = None
# 7. Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
max_input_length = int(data_args.max_duration_in_seconds * sample_rate)
min_input_length = min(int(data_args.min_duration_in_seconds * sample_rate), 1)
max_eval_input_length = int(data_args.max_eval_duration_in_seconds * sample_rate) if data_args.max_eval_duration_in_seconds else None
max_target_length = data_args.max_target_length
min_target_length = data_args.min_target_length
audio_column_name = data_args.audio_column_name
num_workers = data_args.preprocessing_num_workers
text_column_name = data_args.text_column_name
do_lower_case = data_args.do_lower_case
dataset_name = data_args.dataset_name
# Define tokens to ignore/replace
tedlium_contractions = [" 's", " 't", " 're", " 've", " 'm", " 'll", " 'd", " 'clock", " 'all"]
gigaspeech_punctuation = {" <comma>": ",", " <period>": ".", " <questionmark>": "?", " <exclamationpoint>": "!"}
gigaspeech_disfluencies = ["<other>", "<sil>"]
swb_disfluencies = ["[noise]", "[laughter]", "[silence]", "[vocalized-noise]", "<a_aside>", "<b_aside>", "<e_aside>",
"[laughter-", "_1", "[laugh]", "[sigh]", "[cough]", "[mn]", "[breath]", "[lipsmack]",
"[sneeze]", "[skip]", "[pause]", "(%hesitation)", "(%HESITATION)"]
swb_punctuations = ["{", "}", "[", "]-", "]", "((", "))", "(", ")"]
swb_fillers = r"\b(uh|uhm|um|hmm|mm|mhm|mmm)\b"
earnings_disfluencies = ["<noise>", "<crosstalk>", "<affirmative>", "<inaudible>", "inaudible", "<laugh>", "<silence>"]
ignore_segments = ["ignore_time_segment_in_scoring", "<noise>", "<music>", "[noise]", "[laughter]", "[silence]",
"[vocalized-noise]", "<crosstalk>", "<affirmative>", "<inaudible>", "<laugh>", ""]
ignore_segments = ignore_segments + gigaspeech_disfluencies + swb_disfluencies + earnings_disfluencies
if training_args.do_train and data_args.max_train_samples is not None:
raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
if training_args.do_eval and data_args.max_eval_samples is not None:
raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
if training_args.do_predict and data_args.max_predict_samples is not None:
for split in test_split:
raw_datasets[split] = raw_datasets[split].select(range(data_args.max_predict_samples))
# filter data where the targets are ignored in scoring
def is_target_labels(input_str):
return input_str.lower() not in ignore_segments
raw_datasets = raw_datasets.filter(
is_target_labels,
num_proc=num_workers,
input_columns=[text_column_name],
desc="filtering data where the targets are ignored in scoring",
)
def prepare_dataset(batch):
# pre-process audio
try:
sample = batch[audio_column_name]
except ValueError:
# E22: some samples are empty (no audio). Reading the empty audio array will trigger
# a soundfile ValueError. For now, we'll manually set these arrays to a zero array.
# They will be filtered in the subsequent filtering stage and so are
# explicitly ignored during training.
sample = {"array": np.array([0.]), "sampling_rate": sample_rate}
if resampler is not None:
speech_tensor = torch.FloatTensor(sample["array"])
speech_tensor = speech_tensor.squeeze()
speech_tensor = resampler(speech_tensor)
sample["array"] = speech_tensor.numpy()
sample["sampling_rate"] = resampler.new_freq
# For training Whisper we perform the audio preprocessing in the WhisperDataCollator
# => we only need to supply it with the raw audio values
batch["input_ids"] = sample["array"]
batch["input_lengths"] = len(batch["input_ids"])
# 'Error correction' of targets
input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
# LibriSpeech ASR
if dataset_name == "librispeech_asr":
pass # no error correction necessary
# VoxPopuli
if dataset_name == "google/xtreme_s":
pass # no error correction necessary
# Common Voice 9
if dataset_name == "mozilla-foundation/common_voice_9_0":
if input_str.startswith('"') and input_str.endswith('"'):
# we can remove trailing quotation marks as they do not affect the transcription
input_str = input_str[1:-1]
# replace double quotation marks with single
input_str = input_str.replace('""', '"')
# TED-LIUM (Release 3)
if dataset_name == "LIUM/tedlium":
# delete the <unk> token from the text
input_str = input_str.replace("<unk>", "")
# replace spaced apostrophes with un-spaced (it 's -> it's)
for contraction in tedlium_contractions:
input_str = input_str.replace(contraction, contraction[1:])
# GigaSpeech
if dataset_name == "speechcolab/gigaspeech":
for disfluency in gigaspeech_disfluencies:
input_str = input_str.replace(disfluency, "")
# convert spelled out punctuation to symbolic form
for punctuation, replacement in gigaspeech_punctuation.items():
input_str = input_str.replace(punctuation, replacement)
# SWB: hide the path to the private HF dataset
if "switchboard" in dataset_name:
# In one conversation people speak some German phrases that are tagged as
# <german (( ja wohl )) > -- we remove these
input_str = re.sub("<[^>]*>", "", input_str)
# Remove junk tokens
for disfluency in swb_disfluencies:
input_str = input_str.replace(disfluency, "")
# normalise acronyms (Fisher: u_.c_.l_.a., SWBD: u c l a)
input_str = input_str.replace("_.", " ").replace(".", "")
# Replace partially pronounced words (square brackets + hyphen): westmin[ster]- to westmin- or -[go]ing to -ing
# Replace anomalous words (square brackets + backslack): [lemguini/linguini] to linguini
# Replace the combo of the two: [lem[guini]-/linguini] to lem-
# Example: we [ah/are] -[go]ing to westmin[ster]- for [lem[guini]-/linguini]
# Target: we ah -ing to westmin- for lem-
# Treat anomalous words first then destroy the content of all square brackets (partially pronounced words)
# First treat partially pronounced anomalous words by removing correct word: [lem[guini]-/linguini] to [lem[guini]-
input_str = re.sub(r"\-\/.*?\]", "-", input_str)
# Now replace anomalous words with their correct transcriptions: [lemguini/linguini] to linguini
split_str = input_str.split("/")
if len(split_str) > 1:
input_str = " ".join(
[" ".join([" ".join(i.split(" ")[:-1]) for i in split_str])] + [split_str[-1].split(" ")[-1]])
# Remove the trailing brackets on the start/end of words
processed_str = []
for word in input_str.split():
if word[0] == "[":
processed_str.append(word[1:])
elif word[-1] == "]":
processed_str.append(word[:-1])
else:
processed_str.append(word)
# Stick the processed words back together
input_str = " ".join(processed_str)
# Now we can remove all words in square brackets: -[go]ing to -ing
input_str = re.sub(r"\-\[(.*?)\]", "-", input_str)
# westmin[ster]- to westmin-
input_str = re.sub(r"\[(.*?)\]\-", "-", input_str)
# tech[n]ology to tech-ology
input_str = re.sub(r"\[(.*?)\]", "-", input_str)
# partially pronounced words are now done!
# remove erroneous punctuations (curly braces, trailing square brackets, etc.)
for punctuation in swb_punctuations:
input_str = input_str.replace(punctuation, "")
# Remove fillers from the train set not present in the test set
input_str = re.sub(swb_fillers, "", input_str)
# Earnings 22: still figuring out best segmenting method. Thus, dataset name subject to change
if "earnings22" in dataset_name:
# Remove the 100ms offset at the end of the sample
sampling_rate = sample["sampling_rate"]
offset = int(100 * (10 ** -3) * sampling_rate)
batch["input_ids"] = sample["array"][:-offset]
batch["input_lengths"] = len(batch["input_ids"])
# Remove junk tokens
for disfluency in earnings_disfluencies:
input_str = input_str.replace(disfluency, "")
# SPGISpeech
if dataset_name == "kensho/spgispeech":
pass # no error correction necessary
# JIWER compliance (for WER/CER calc.)
# remove multiple spaces
input_str = re.sub(r"\s\s+", " ", input_str)
# strip trailing spaces
input_str = input_str.strip()
# Finally, we tokenize the processed text
batch["labels"] = tokenizer(input_str).input_ids
return batch
vectorized_datasets = raw_datasets.map(
prepare_dataset,
remove_columns=next(iter(raw_datasets.values())).column_names,
num_proc=num_workers,
desc="preprocess train dataset",
)
# filter training data with inputs longer than max_input_length
def is_audio_in_length_range(input_length):
return min_input_length < input_length < max_input_length
if training_args.do_train:
vectorized_datasets["train"] = vectorized_datasets["train"].filter(
is_audio_in_length_range,
num_proc=num_workers,
input_columns=["input_lengths"],
)
if max_eval_input_length is not None:
# filter training data with inputs longer than max_input_length
def is_eval_audio_in_length_range(input_length):
return min_input_length < input_length < max_eval_input_length
if training_args.do_eval:
vectorized_datasets["eval"] = vectorized_datasets["eval"].filter(
is_eval_audio_in_length_range,
num_proc=num_workers,
input_columns=["input_lengths"],
)
if training_args.do_predict:
for split in test_split:
vectorized_datasets[split] = vectorized_datasets[split].filter(
is_eval_audio_in_length_range,
num_proc=num_workers,
input_columns=["input_lengths"],
)
# filter training data with targets shorter than min_target_length or longer than max_target_length
def is_labels_in_length_range(labels):
return min_target_length < len(labels) < max_target_length
if training_args.do_train:
vectorized_datasets["train"] = vectorized_datasets["train"].filter(
is_labels_in_length_range,
num_proc=num_workers,
input_columns=["labels"],
)
# filter data with targets empty sentences
def is_labels_greater_than_min(labels):
return len(labels) > 0
vectorized_datasets = vectorized_datasets.filter(
is_labels_greater_than_min,
num_proc=num_workers,
input_columns=["labels"],
)
# for large datasets it is advised to run the preprocessing on a
    # single machine first with `args.preprocessing_only` since there will most likely
# be a timeout when running the script in distributed mode.
# In a second step `args.preprocessing_only` can then be set to `False` to load the
# cached dataset
if data_args.preprocessing_only:
cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
logger.info(f"Data preprocessing finished. Files cached at {cache}.")
return
if model_args.freeze_encoder:
model.freeze_encoder()
logging.info("Model encoder has been frozen")
# 8. Load Metric
metric_wer = datasets.load_metric("wer")
metric_cer = datasets.load_metric("cer")
def compute_metrics(pred):
pred_ids = pred.predictions
pred.label_ids[pred.label_ids == -100] = tokenizer.eos_token_id
pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
pred_str = [x.lstrip().strip() for x in pred_str]
# we do not want to group tokens when computing the metrics
label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
wer = metric_wer.compute(predictions=pred_str, references=label_str)
cer = metric_cer.compute(predictions=pred_str, references=label_str)
return {"wer": wer, "cer": cer}
def compute_metrics_and_predictions(pred):
pred_ids = pred.predictions
pred.label_ids[pred.label_ids == -100] = tokenizer.eos_token_id
pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
pred_str = [x.lstrip().strip() for x in pred_str]
# we do not want to group tokens when computing the metrics
label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
wer = metric_wer.compute(predictions=pred_str, references=label_str)
cer = metric_cer.compute(predictions=pred_str, references=label_str)
return {"wer": wer, "cer": cer, "pred_str": pred_str, "label_str": label_str}
class WhisperTrainer(Seq2SeqTrainer):
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
self.model.save_to(save_path=os.path.join(output_dir, model_args.model_name_or_path + ".whisper"))
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Define data collator
eos = tokenizer.eos_token_id
t_stamp = tokenizer("<|notimestamps|>").input_ids[0]
whisper_data_collator = WhisperDataCollatorWithPadding(eos_token_id=eos, time_stamp_token_id=t_stamp)
# make sure model uses 50257 as BOS
bos = tokenizer("<|startoftranscript|>").input_ids[0]
model.config.decoder_start_token_id = bos
# Initialize Trainer
trainer = WhisperTrainer(
model=model,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=vectorized_datasets['train'] if training_args.do_train else None,
eval_dataset=vectorized_datasets['eval'] if training_args.do_eval else None,
data_collator=whisper_data_collator,
)
# 8. Finally, we can start training
# Training
if training_args.do_train:
# use last checkpoint if exist
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples
if data_args.max_train_samples is not None
else len(vectorized_datasets["train"])
)
metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Change decoding strategy for final eval/predict
# if training_args.do_eval or training_args.do_predict:
# trainer.model.num_beams = 2
trainer.compute_metrics = compute_metrics_and_predictions
results = {}
if training_args.do_eval:
if not training_args.do_train and report_to_wandb:
# manually init wandb
wandb.init(project=data_args.wandb_project, name=training_args.run_name)
# Have to run this as a predict step, otherwise trainer will try to log the pred/label strings to wandb
eval_results = trainer.predict(vectorized_datasets["eval"], metric_key_prefix="eval", num_beams=model_args.num_beams, length_penalty=model_args.length_penalty)
metrics = eval_results.metrics
max_eval_samples = (
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
)
metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
pred_str = metrics.pop("eval_pred_str", None)
label_str = metrics.pop("eval_label_str", None)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if report_to_wandb:
metrics = {os.path.join("eval", k[len("eval") + 1:]): v for k, v in metrics.items()}
wandb.log(metrics)
write_wandb_pred(pred_str, label_str, prefix="eval")
if training_args.do_predict:
if not training_args.do_train and not training_args.do_eval and report_to_wandb:
# manually init wandb
wandb.init(project=data_args.wandb_project, name=training_args.run_name)
for split in test_split:
predict_results = trainer.predict(
vectorized_datasets[split], metric_key_prefix=split, num_beams=model_args.num_beams, length_penalty=model_args.length_penalty)
metrics = predict_results.metrics
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(vectorized_datasets[split])
)
metrics[f"{split}_samples"] = min(max_predict_samples, len(vectorized_datasets[split]))
pred_str = metrics.pop(f"{split}_pred_str", None)
label_str = metrics.pop(f"{split}_label_str", None)
trainer.log_metrics(split, metrics)
trainer.save_metrics(split, metrics)
if report_to_wandb:
metrics = {os.path.join(split, k[len(split)+1:]): v for k, v in metrics.items()}
wandb.log(metrics)
write_wandb_pred(pred_str, label_str, prefix=split)
# Write model card and (optionally) push to hub
config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "speech-recognition",
"tags": ["automatic-speech-recognition", data_args.dataset_name],
"dataset_args": (
f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:"
f" {data_args.eval_split_name}"
),
"dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
}
if "common_voice" in data_args.dataset_name:
kwargs["language"] = config_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
return results
if __name__ == "__main__":
main()
| [] |
2024-01-10 | f1tenth/f1tenth_gym_onboard | src~f110_gym~f110_core.py | #!/usr/bin/env python
from __future__ import print_function
import os, sys, cv2, math, time
import numpy as np
import msgpack
import msgpack_numpy as m
from collections import deque
#ROS Dependencies
import roslib, rospy
import numpy as np
from std_msgs.msg import String
from ackermann_msgs.msg import AckermannDriveStamped, AckermannDrive
from sensor_msgs.msg import Image, LaserScan, Joy
from cv_bridge import CvBridge, CvBridgeError
__author__ = 'Dhruv Karthik <[email protected]>'
class Env(object):
"""
    Stripped-down version of OpenAI Gym's Env class
"""
# Set this in SOME subclasses
metadata = {'render.modes': []}
reward_range = (-float('inf'), float('inf'))
spec = None
# Set these in ALL subclasses
action_space = None
observation_space = None
ser_msg_length = 0
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): an action provided by the agent
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (bool): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
raise NotImplementedError
def reset(self, **kwargs):
"""Resets the state of the environment and returns an initial observation.
Returns:
observation (object): the initial observation.
"""
raise NotImplementedError
def serialize_obs(self):
"""Returns a function that allows you to serialize each observation as a multipart"""
raise NotImplementedError
class f110Env(Env):
""" Implements a Gym Environment & neccessary funcs for the F110 Autonomous RC Car(similar structure to gym.Env or gym.Wrapper)
"""
def __init__(self):
rospy.init_node("Gym_Recorder", anonymous=True, disable_signals=True)
#At least need LIDAR, IMG & STEER for everything here to work
self.obs_info = {
'lidar': {'topic':'/scan', 'type':LaserScan, 'callback':self.lidar_callback},
'img': {'topic':'/usb_cam/image_raw', 'type':Image, 'callback':self.img_callback},
'steer':{'topic':'/vesc/low_level/ackermann_cmd_mux/output', 'type':AckermannDriveStamped, 'callback':self.steer_callback}
}
#one observation could be 4 consecutive readings, so init deque for safety
self.latest_obs = deque(maxlen=4)
self.latest_reading_dict = {}
self.record = False
self.rev = False
self.last_step_time = time.time()
#misc
self.bridge = CvBridge()
self.history= deque(maxlen=500) #for reversing during reset
#GYM Properties (set in subclasses)
self.observation_space = ['lidar', 'steer', 'img']
self.action_space = ['angle', 'speed']
self.ser_msg_length = 4
self.joy_array = []
self.setup_subs()
#Subscribe to joy (to access record_button) & publish to ackermann
self.joy_sub = rospy.Subscriber('/vesc/joy', Joy, self.joy_callback)
self.drive_pub = rospy.Publisher("vesc/high_level/ackermann_cmd_mux/input/nav_0", AckermannDriveStamped, queue_size=20)
############ GYM METHODS ###################################
def _get_obs(self):
"""
Returns latest observation
"""
while(len(self.latest_obs) == 0):
rospy.sleep(0.04)
obs_dict = self.latest_obs[-1]
return obs_dict
def reset(self, **kwargs):
"""
Reverse until we're not 'tooclose'
"""
print("\n RESETTING_ENV")
self.record = False
self.rev = True
obs = self._get_obs()
while(self.tooclose()):
self.reverse()
#Back up a bit more
for i in range(10):
dmsg = self.get_drive_msg(0.0, -2.0)
self.drive_pub.publish(dmsg)
self.record = True
self.rev = False
#TODO: consider sleeping a few milliseconds?
self.latest_obs.clear()
return obs
def get_reward(self):
"""
TODO:Implement reward functionality
"""
return 0
def step(self, action):
"""
Action should be a steer_dict = {"angle":float, "speed":float}
"""
#execute action
drive_msg = self.get_drive_msg(action.get("angle"), action.get("speed"), flip_angle=-1.0)
self.drive_pub.publish(drive_msg)
#get reward & check if done & return
obs = self._get_obs()
reward = self.get_reward()
done = self.tooclose()
info = {'record':self.record, 'buttons':self.joy_array}
self.latest_obs.clear()
return obs, reward, done, info
def serialize_obs(self):
""" Currently assume obs consists of sensor [lidar, steer, img]
"""
def _ser(obs_dict):
lidar_dump = msgpack.dumps(obs_dict["lidar"])
steer_dump = msgpack.dumps(obs_dict["steer"])
cv_img = obs_dict["img"]
cv_md = dict(
dtype=str(cv_img.dtype),
shape=cv_img.shape,
)
cv_md_dump = msgpack.dumps(cv_md)
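            # dtype/shape metadata travels alongside the raw image bytes so the receiver can rebuild the numpy array.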
multipart_msg = [lidar_dump, steer_dump, cv_md_dump, cv_img]
return multipart_msg
return _ser
############ GYM METHODS ###################################
############ ROS HANDLING METHODS ###################################
def setup_subs(self):
"""
Initializes subscribers w/ obs_info & returns a list of subscribers
"""
obs_info = self.obs_info
makesub = lambda subdict : rospy.Subscriber(subdict['topic'], subdict['type'], subdict['callback'])
sublist = []
for topic in obs_info:
sublist.append(makesub(obs_info[topic]))
self.sublist = sublist
def add_to_history(self, data):
        if data.drive.steering_angle is not None and abs(data.drive.steering_angle) > 0.05:
steer_dict = {"angle":data.drive.steering_angle, "speed":data.drive.speed}
for i in range(40):
self.history.append(steer_dict)
def steer_callback(self, data):
if data.drive.steering_angle > 0.34:
data.drive.steering_angle = 0.34
elif data.drive.steering_angle < -0.34:
data.drive.steering_angle = -0.34
steer = dict(
angle = -1.0 * data.drive.steering_angle,
steering_angle_velocity = data.drive.steering_angle_velocity,
speed = data.drive.speed
)
self.latest_reading_dict["steer"] = steer
self.add_to_history(data) #add steering commands to history
def lidar_callback(self, data):
lidar = dict(
angle_min = data.angle_min,
angle_increment = data.angle_increment,
ranges = data.ranges
)
self.latest_reading_dict["lidar"] = lidar
def joy_callback(self, data):
record_button = data.buttons[1]
if record_button:
self.record = True
else:
self.record = False
self.joy_array = list(data.buttons)
def set_status_str(self, prefix=''):
status_str = ''
if self.record:
status_str = 'True'
else:
status_str = 'False'
sys.stdout.write(prefix + "curr_recording: %s" % status_str)
sys.stdout.flush()
def is_reading_complete(self):
#checks if all the readings are present in latest_reading_dict
base_check = "lidar" in self.latest_reading_dict and "steer" in self.latest_reading_dict
return base_check
def base_preprocessing(self, cv_img):
cv_img = cv2.resize(cv_img, None, fx=0.5, fy=0.5)
return cv_img
def update_latest_obs(self):
self.latest_obs.append(self.latest_reading_dict)
self.latest_reading_dict = {}
def img_callback(self, data):
self.set_status_str(prefix='\r')
        # img_callback adds the latest reading to self.latest_obs
if self.is_reading_complete():
try:
cv_img = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
cv_img = self.base_preprocessing(cv_img)
self.latest_reading_dict["img"] = cv_img
#at this point, the reading must be done
self.update_latest_obs()
def get_drive_msg(self, angle, speed, flip_angle=1.0):
drive_msg = AckermannDriveStamped()
drive_msg.header.stamp = rospy.Time.now()
drive_msg.header.frame_id = "odom"
drive_msg.drive.steering_angle = flip_angle * angle
drive_msg.drive.speed = speed
return drive_msg
def reverse(self):
"""
Uses self.history to back out
"""
sign = lambda x: (1, -1)[x < 0]
default_steer_dict = {"angle":0.0, "speed":-1.0}
try:
steer_dict = self.history.pop()
except:
steer_dict = default_steer_dict
rev_angle = steer_dict["angle"]
rev_speed = -2.0
#print("REVERSE {rev_angle}".format(rev_angle = rev_angle))
drive_msg = self.get_drive_msg(rev_angle, rev_speed)
self.drive_pub.publish(drive_msg)
def tooclose(self):
"""
Uses self.latest_obs to determine if we are too_close (currently uses LIDAR)
"""
tc = True
if len(self.latest_obs) > 0:
reading = self.latest_obs[-1]
#Use LIDAR Reading to check if we're too close
lidar = reading["lidar"]
ranges = lidar.get("ranges")
angle_min = lidar.get("angle_min")
            angle_incr = lidar.get("angle_increment")
rfrac = lambda st, en : ranges[int(st*len(ranges)):int(en*len(ranges))]
mindist = lambda r, min_range : np.nanmin(r[r != -np.inf]) <= min_range
#ensure that boundaries are met in each region
r1 = rfrac(0, 1./4.)
r2 = rfrac(1./4., 3./4.)
r3 = rfrac(3./4., 1.)
if mindist(r1, 0.2) or mindist(r2, 0.4) or mindist(r3, 0.2):
tc = True
else:
tc = False
else:
tc = False
return tc
############ ROS HANDLING METHODS ###################################
class f110Wrapper(Env):
"""
Wraps the f110Env to allow a modular transformation.
    This class is the base class for all wrappers. Subclasses can override some methods to change the behaviour of the original f110Env without touching the original code.
"""
def __init__(self, env):
self.env = env
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def step(self, action):
return self.env.step(action)
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def compute_reward(self, info):
return self.env.get_reward()
class f110ObservationWrapper(f110Wrapper):
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
return self.observation(observation)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return self.observation(observation), reward, done, info
def observation(self, observation):
raise NotImplementedError
def serialize_obs(self):
raise NotImplementedError
class f110RewardWrapper(f110Wrapper):
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return observation, self.reward(reward), done, info
def reward(self, reward):
raise NotImplementedError
class f110ActionWrapper(f110Wrapper):
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
return self.env.step(self.action(action))
def action(self, action):
raise NotImplementedError
def reverse_action(self, action):
raise NotImplementedError
| [] |
2024-01-10 | kai1130/Projects | UniFi%20Ricardian%20Contracts~code~EntityExtract.py | import os
import re
import json
import openai
import c_utils
from datetime import timedelta
from dateutil import relativedelta
from dateutil.relativedelta import relativedelta
import dateutil.parser as dparser
os.environ['OPENAI_API_KEY'] = 'secret'
openai.api_key = os.getenv("OPENAI_API_KEY")
def extract_fields(prompt):
res = openai.Completion.create(
engine='text-davinci-001',
temperature=0,
prompt=prompt)
return res['choices'][0]['text']
def extract_instrument(instrument_test):
instrument_question = 'What is the financial instrument?'
instrument_train = '''secret'''
instrument_prompt = f'{instrument_question}\n{instrument_train}\n{instrument_test}'
res = c_utils.strip(extract_fields(instrument_prompt))
return res
def extract_nominal(nominal_test):
nominal_question = 'What is nominal currency and amount?'
nominal_train = '''secret'''
nominal_prompt = f'{nominal_question}\n{nominal_train}\n{nominal_test}'
res = c_utils.strip(extract_fields(nominal_prompt))
currency, amt = res.split(',')[:2]
res_dict = {'currency':currency, 'amt':amt}
res_link = {'currency':(nominal_test, currency), 'amt':(nominal_test, amt)}
return res_dict, res_link
def extract_interest(interest_test):
interest_question = 'What is the interest rate?'
interest_train = '''secret'''
interest_prompt = f'{interest_question}\n{interest_train}\n{interest_test}'
res = extract_fields(interest_prompt)
if 'round' in interest_test:
res = f'{res},rounded'
else:
res = f'{res},not rounded'
rate, interval, rounded = c_utils.strip(res).split(',')[:3]
res_dict = {'rate': rate, 'interval': interval, 'rounded': rounded}
res_link = {'rate': (interest_test, rate), 'interval': (interest_test, interval), 'rounded': (interest_test, rounded)}
return res_dict, res_link
def parse_absolute_time(time_str):
abs_time = dparser.parse(time_str,fuzzy=True).date()
return abs_time
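# Parse relative durations such as "2 years 3 months" out of free text into a dateutil relativedelta.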
def parse_relative_time(time_str):
time_params = {}
years = re.search('((?P<years>\d+)\s*year)', time_str)
months = re.search('((?P<months>\d+)\s*month)', time_str)
weeks = re.search('((?P<weeks>\d+)\s*week)', time_str)
days = re.search('((?P<days>\d+)\s*day)', time_str)
if years:
time_params = c_utils.merge_dicts(time_params, years.groupdict())
if months:
time_params = c_utils.merge_dicts(time_params, months.groupdict())
if weeks:
time_params = c_utils.merge_dicts(time_params, weeks.groupdict())
if days:
time_params = c_utils.merge_dicts(time_params, days.groupdict())
rel_time = relativedelta(**{k: int(v) for k, v in time_params.items()}) if time_params else None
return rel_time
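# The end date can be given either as an absolute date or as a duration relative to the start date.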
def extract_date(end_str, start_str):
time_params = {'start_date': None,
'end_date': None,
'time_diff': None}
time_params['start_date'] = parse_absolute_time(start_str)
time_params['end_date'] = None
time_params['time_diff'] = parse_relative_time(end_str)
if not time_params['time_diff']:
time_params['end_date'] = parse_absolute_time(end_str)
time_params['time_diff'] = relativedelta(time_params['end_date'], time_params['start_date'])
else:
time_params['end_date'] = time_params['start_date']+time_params['time_diff']
time_link = {'start_date': (start_str, time_params['start_date']), 'end_date': ((start_str, end_str), time_params['end_date']), 'time_diff': ((start_str, end_str), time_params['time_diff'])}
return time_params, time_link
def extract_tokenName(tokenName):
res_dict = {'tokenName': str(tokenName).strip()}
res_link = {'tokenName': (tokenName, str(tokenName).strip())}
return res_dict, res_link
def extract_tokenSymbol(tokenSymbol):
res_dict = {'tokenSymbol':str(tokenSymbol).strip()}
res_link = {'tokenSymbol':(tokenSymbol,str(tokenSymbol).strip())}
return res_dict, res_link
def extract_multiple(multiple_test):
multiple_question = 'What is the multiple?'
multiple_train = '''secret'''
instrument_prompt = f'{multiple_question}\n{multiple_train}\n{multiple_test}'
res = int(re.search(r'\d+', extract_fields(instrument_prompt)).group())
res_dict = {'multiple':res}
res_link = {'multiple':(multiple_test, res)}
return res_dict, res_link
| [
"PLACEHOLDER\nPLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | daniel-rychlewski/hsi-toolbox | examples~automated_deep_compression~ADC.py | #
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""To execute this code:
$ time python3 compress_classifier.py --arch=plain20_cifar ../../../data.cifar10 --resume=checkpoint.plain20_cifar.pth.tar --lr=0.05 --amc --amc-protocol=mac-constrained --amc-target-density=0.5 -p=50
Coach installation:
===================
After creating the virtual environment and installing Distiller's Python package dependencies, go ahead and
setup Coach per: https://github.com/NervanaSystems/coach#installation.
Make sure that you install Coach's package dependencies into the same virtual environment that already contains
Distiller's dependency packages. You do this by ensuring that Distiller's virtual environment is the active environment
when you install Coach.
*NOTE: you may need to update TensorFlow to the expected version:
$ pip3 install tensorflow==1.9.0
Finally, if you are running Coach in a development environment, you need to tell the Python runtime where to find
the Coach code:
$ export PYTHONPATH=<path-to-coach-code>
Spinningup installation:
========================
Spinup requires that we use exactly Python 3.6, so if you are not using this Python version, see the instructions here:
http://ubuntuhandbook.org/index.php/2017/07/install-python-3-6-1-in-ubuntu-16-04-lts/
$ sudo update-alternatives --config python3
For Python 3.6 you may also need to install a new virtual-env:
$ sudo apt-get install python3.6-venv
Then create and activate your venv, and populate it with the Distiller packages:
$ python3 -m venv distiller_env_python3.6
$ source distiller_env_python3.6/bin/activate
$ pip3 install -r requirements.txt
You want to install Spinup into this venv. First clone Spinup and then install it into your venv:
$ cd <spinningup-repo>
$ sudo apt-get install python3.6-dev
$ pip3 install -e .
https://spinningup.openai.com/en/latest/user/installation.html?highlight=license
"""
import math
import os
import copy
import logging
import numpy as np
import torch
import csv
try:
import gym
except ImportError as e:
print("WARNING: to use automated compression you will need to install extra packages")
print("See instructions in the header of examples/automated_deep_compression/ADC.py")
raise e
from gym import spaces
import distiller
from collections import OrderedDict, namedtuple
from types import SimpleNamespace
from distiller import normalize_module_name, SummaryGraph
from examples.automated_deep_compression.adc_random_env import random_agent
# Choose which RL library to use: Coach from Intel AI Lab, or Spinup from OpenAI
#RLLIB = "spinup"
RLLIB = "coach"
msglogger = logging.getLogger()
Observation = namedtuple('Observation', ['n', 'c', 'h', 'w', 'stride', 'k', 'MACs', 'reduced', 'rest', 'prev_a'])
LayerDesc = namedtuple('LayerDesc', ['t', 'n', 'c', 'h', 'w', 'stride', 'k', 'MACs', 'reduced', 'rest'])
LayerDescLen = len(LayerDesc._fields)
ALMOST_ONE = 0.9999
class CSVFile(object):
def __init__(self, fname, headers):
"""Create the CSV file and write the column names"""
with open(fname, 'w') as f:
writer = csv.writer(f)
writer.writerow(headers)
self.fname = fname
def add_record(self, fields):
# We close the file each time to flush on every write, and protect against data-loss on crashes
with open(self.fname, 'a') as f:
writer = csv.writer(f)
writer.writerow(fields)
class AMCStatsFile(CSVFile):
def __init__(self, fname):
headers = ['episode', 'top1', 'reward', 'total_macs', 'normalized_macs',
'normalized_nnz', 'ckpt_name', 'action_history', 'agent_action_history']
super().__init__(fname, headers)
class FineTuneStatsFile(CSVFile):
def __init__(self, fname):
headers = ['episode', 'ft_top1_list']
super().__init__(fname, headers)
def is_using_continuous_action_space(agent):
return agent in ("DDPG", "ClippedPPO-continuous", "Random-policy")
if RLLIB == "spinup":
import tensorflow as tf
from spinup.algos.ddpg import core
from .ddpg import ddpg
def ddpg_spinup(env1, env2):
from spinup.utils.run_utils import setup_logger_kwargs
exp_name = "Test"
seed = 0
# The number and size of the Actor-Critic MLP hidden layers
layers, hid = 2, 300
logger_kwargs = setup_logger_kwargs(exp_name) # , seed)
ddpg.ddpg(env=env1, test_env=env2, actor_critic=core.mlp_actor_critic,
ac_kwargs=dict(hidden_sizes=[hid]*layers, output_activation=tf.sigmoid),
gamma=1, # discount rate
seed=seed,
epochs=400,
replay_size=2000,
batch_size=64,
start_steps=env1.amc_cfg.num_heatup_epochs,
steps_per_epoch=800 * env1.num_layers(), # every 50 episodes perform 10 episodes of testing
act_noise=0.5,
pi_lr=1e-4,
q_lr=1e-3,
logger_kwargs=logger_kwargs)
if RLLIB == "coach":
from rl_coach.base_parameters import TaskParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.schedules import ConstantSchedule, PieceWiseSchedule, ExponentialSchedule
def log_amc_config(amc_cfg):
try:
msglogger.info('AMC configuration:')
for k, v in amc_cfg.items():
msglogger.info("\t{} : {}".format(k, v))
except TypeError as e:
pass
def count_conv_layer(model):
"""Count the number of Convolution layers exist in this model"""
conv_cnt = 0
for module in model.modules():
if type(module) == torch.nn.Conv2d:
conv_cnt += 1
return conv_cnt
def mac_constrained_experimental_reward_fn(env, top1, top5, vloss, total_macs):
"""A more intuitive reward for constraining the compute and optimizing the
accuracy under this constraint.
"""
macs_normalized = total_macs/env.dense_model_macs
reward = top1/100
if macs_normalized > (env.amc_cfg.target_density+0.002):
reward = -3 - macs_normalized
else:
reward += 1
return reward
def harmonic_mean_reward_fn(env, top1, top5, vloss, total_macs):
"""This reward is based on the idea of weighted harmonic mean
Balance compute and accuracy provided a beta value that weighs the two components.
See: https://en.wikipedia.org/wiki/F1_score
"""
beta = 1
#beta = 0.75 # How much to favor accuracy
macs_normalized = total_macs/env.dense_model_macs
reward = (1 + beta**2) * top1/100 * macs_normalized / (beta**2 * macs_normalized + top1/100)
return reward
def amc_reward_fn(env, top1, top5, vloss, total_macs):
"""This reward punishes the agent when it produces networks that don't comply with the MACs resource-constraint,
(the negative reward is in proportion to the network density). Otherwise, the reward is the Top1 accuracy.
"""
if not env.is_macs_constraint_achieved(total_macs):
current_density = total_macs / env.dense_model_macs
reward = env.amc_cfg.target_density - current_density
else:
reward = top1/100
return reward
def do_adc_internal(model, args, optimizer_data, validate_fn, save_checkpoint_fn, train_fn):
dataset = args.dataset
arch = args.arch
perform_thinning = True # args.amc_thinning
num_ft_epochs = args.amc_ft_epochs
action_range = args.amc_action_range
np.random.seed()
conv_cnt = count_conv_layer(model)
msglogger.info("Executing AMC: RL agent - %s RL library - %s", args.amc_agent_algo, RLLIB)
# Create a dictionary of parameters that Coach will handover to DistillerWrapperEnvironment
# Once it creates it.
services = distiller.utils.MutableNamedTuple({
'validate_fn': validate_fn,
'save_checkpoint_fn': save_checkpoint_fn,
'train_fn': train_fn})
app_args = distiller.utils.MutableNamedTuple({
'dataset': dataset,
'arch': arch,
'optimizer_data': optimizer_data})
amc_cfg = distiller.utils.MutableNamedTuple({
'protocol': args.amc_protocol,
'agent_algo': args.amc_agent_algo,
'perform_thinning': perform_thinning,
'num_ft_epochs': num_ft_epochs,
'action_range': action_range,
'conv_cnt': conv_cnt,
'reward_frequency': args.amc_reward_frequency,
'ft_frequency': args.amc_ft_frequency,
'pruning_pattern': "filters"}) # "channels"}) #
#net_wrapper = NetworkWrapper(model, app_args, services)
#return sample_networks(net_wrapper, services)
if args.amc_protocol == "accuracy-guaranteed":
amc_cfg.target_density = None
amc_cfg.reward_fn = lambda env, top1, top5, vloss, total_macs: -(1-top1/100) * math.log(total_macs)
amc_cfg.action_constrain_fn = None
elif args.amc_protocol == "mac-constrained":
amc_cfg.target_density = args.amc_target_density
amc_cfg.reward_fn = lambda env, top1, top5, vloss, total_macs: top1/100 #(90.5 - top1) / 10
amc_cfg.action_constrain_fn = DistillerWrapperEnvironment.get_action
elif args.amc_protocol == "mac-constrained-experimental":
amc_cfg.target_density = args.amc_target_density
amc_cfg.reward_fn = amc_reward_fn
amc_cfg.action_constrain_fn = None
else:
raise ValueError("{} is not supported currently".format(args.amc_protocol))
steps_per_episode = conv_cnt
if args.amc_agent_algo == "DDPG":
amc_cfg.heatup_noise = 0.5
amc_cfg.initial_training_noise = 0.5
amc_cfg.training_noise_decay = 0.996 # 0.998
amc_cfg.num_heatup_epochs = args.amc_heatup_epochs
amc_cfg.num_training_epochs = args.amc_training_epochs
training_noise_duration = amc_cfg.num_training_epochs * steps_per_episode
heatup_duration = amc_cfg.num_heatup_epochs * steps_per_episode
if amc_cfg.agent_algo == "Random-policy":
return random_agent(DistillerWrapperEnvironment(model, app_args, amc_cfg, services))
if RLLIB == "spinup":
msglogger.info("AMC: Using spinup")
env1 = DistillerWrapperEnvironment(model, app_args, amc_cfg, services)
env2 = DistillerWrapperEnvironment(model, app_args, amc_cfg, services)
ddpg_spinup(env1, env2)
else:
msglogger.info("AMC: Using coach")
# When we import the graph_manager from the ADC_DDPG preset, we implicitly instruct
# Coach to create and use our DistillerWrapperEnvironment environment.
# So Distiller calls Coach, which creates the environment, trains the agent, and ends.
if args.amc_agent_algo == "DDPG":
from examples.automated_deep_compression.presets.ADC_DDPG import graph_manager, agent_params
agent_params.exploration.noise_percentage_schedule = PieceWiseSchedule([
(ConstantSchedule(amc_cfg.heatup_noise), EnvironmentSteps(heatup_duration)),
(ExponentialSchedule(amc_cfg.initial_training_noise, 0, amc_cfg.training_noise_decay),
EnvironmentSteps(training_noise_duration))])
# agent_params.exploration.noise_percentage_schedule = ConstantSchedule(0)
elif "ClippedPPO" in args.amc_agent_algo:
from examples.automated_deep_compression.presets.ADC_ClippedPPO import graph_manager, agent_params
# These parameters are passed to the Distiller environment
graph_manager.env_params.additional_simulator_parameters = {'model': model,
'app_args': app_args,
'amc_cfg': amc_cfg,
'services': services}
coach_logs_dir = os.path.join(msglogger.logdir, 'coach')
os.mkdir(coach_logs_dir)
task_parameters = TaskParameters(experiment_path=coach_logs_dir)
graph_manager.create_graph(task_parameters)
graph_manager.improve()
# This is a temporary hack!
resnet50_params = ["module.layer1.0.conv1.weight", "module.layer1.0.conv2.weight",
"module.layer1.1.conv1.weight", "module.layer1.1.conv2.weight",
"module.layer1.2.conv1.weight", "module.layer1.2.conv2.weight",
"module.layer2.0.conv1.weight", "module.layer2.0.conv2.weight",
"module.layer2.1.conv1.weight", "module.layer2.1.conv2.weight",
"module.layer2.2.conv1.weight", "module.layer2.2.conv2.weight",
"module.layer2.3.conv1.weight", "module.layer2.3.conv2.weight",
"module.layer3.0.conv1.weight", "module.layer3.0.conv2.weight",
"module.layer3.1.conv1.weight", "module.layer3.1.conv2.weight",
"module.layer3.2.conv1.weight", "module.layer3.2.conv2.weight",
"module.layer3.3.conv1.weight", "module.layer3.3.conv2.weight",
"module.layer3.4.conv1.weight", "module.layer3.4.conv2.weight",
"module.layer3.5.conv1.weight", "module.layer3.5.conv2.weight",
"module.layer4.0.conv1.weight", "module.layer4.0.conv2.weight",
"module.layer4.1.conv1.weight", "module.layer4.1.conv2.weight",
"module.layer4.2.conv1.weight", "module.layer4.2.conv2.weight"]
resnet20_params = ["module.layer1.0.conv1.weight", "module.layer1.1.conv1.weight", "module.layer1.2.conv1.weight",
"module.layer2.0.conv1.weight", "module.layer2.1.conv1.weight", "module.layer2.2.conv1.weight",
"module.layer3.0.conv1.weight", "module.layer3.1.conv1.weight", "module.layer3.2.conv1.weight"]
resnet56_params = ["module.layer1.0.conv1.weight", "module.layer1.1.conv1.weight", "module.layer1.2.conv1.weight",
"module.layer1.3.conv1.weight", "module.layer1.4.conv1.weight", "module.layer1.5.conv1.weight",
"module.layer1.6.conv1.weight", "module.layer1.7.conv1.weight", "module.layer1.8.conv1.weight",
"module.layer2.0.conv1.weight", "module.layer2.1.conv1.weight", "module.layer2.2.conv1.weight",
"module.layer2.3.conv1.weight", "module.layer2.4.conv1.weight", "module.layer2.5.conv1.weight",
"module.layer2.6.conv1.weight", "module.layer2.7.conv1.weight", "module.layer2.8.conv1.weight",
"module.layer3.0.conv1.weight", "module.layer3.1.conv1.weight", "module.layer3.2.conv1.weight",
"module.layer3.3.conv1.weight", "module.layer3.4.conv1.weight", "module.layer3.5.conv1.weight",
"module.layer3.6.conv1.weight", "module.layer3.7.conv1.weight", "module.layer3.8.conv1.weight"]
plain20_params = ["module.layer1.0.conv1.weight", "module.layer1.0.conv2.weight",
"module.layer1.1.conv1.weight", "module.layer1.1.conv2.weight",
"module.layer1.2.conv1.weight", "module.layer1.2.conv2.weight",
"module.layer2.0.conv1.weight", "module.layer2.0.conv2.weight",
"module.layer2.1.conv1.weight", "module.layer2.1.conv2.weight",
"module.layer2.2.conv1.weight", "module.layer2.2.conv2.weight",
"module.layer3.0.conv1.weight", "module.layer3.0.conv2.weight",
"module.layer3.1.conv1.weight", "module.layer3.1.conv2.weight",
"module.layer3.2.conv1.weight", "module.layer3.2.conv2.weight"]
resnet50_layers = [param[:-len(".weight")] for param in resnet50_params]
resnet20_layers = [param[:-len(".weight")] for param in resnet20_params]
resnet56_layers = [param[:-len(".weight")] for param in resnet56_params]
plain20_layers = [param[:-len(".weight")] for param in plain20_params]
class NetworkWrapper(object):
def __init__(self, model, app_args, services):
self.app_args = app_args
self.services = services
self.conv_layers, _, _ = self.collect_conv_details(model)
self.reset(model)
def get_model_resources_requirements(self, model=None):
if model is None:
model = self.model
_, total_macs, total_nnz = collect_conv_details(model, self.app_args.dataset, True)
return total_macs, total_nnz
@property
def arch(self):
return self.app_args.arch
def collect_conv_details(self, model):
# Temporary ugly hack!
resnet_layers = None
if self.app_args.arch == "resnet20_cifar":
resnet_layers = resnet20_layers
elif self.app_args.arch == "resnet56_cifar":
resnet_layers = resnet56_layers
elif self.app_args.arch == "resnet50":
resnet_layers = resnet50_layers
elif self.app_args.arch == "plain20_cifar":
resnet_layers = plain20_layers
return collect_conv_details(model, self.app_args.dataset, True, resnet_layers)
def num_layers(self):
return len(self.conv_layers)
def get_layer(self, idx):
try:
return self.conv_layers[idx]
except KeyError:
return None
def get_layer_macs(self, layer):
"""Return the number of MACs required to compute <layer>'s Convolution"""
if layer is None:
return 0
conv_module = distiller.model_find_module(self.model, layer.name)
# MACs = volume(OFM) * (#IFM * K^2)
dense_macs = (conv_module.out_channels * layer.ofm_h * layer.ofm_w) * (conv_module.in_channels * layer.k**2)
return dense_macs
def reset(self, model):
self.model = model
self.zeros_mask_dict = distiller.create_model_masks_dict(self.model)
def create_scheduler(self):
scheduler = distiller.CompressionScheduler(self.model)
masks = {param_name: masker.mask for param_name, masker in self.zeros_mask_dict.items()}
scheduler.load_state_dict(state={'masks_dict': masks})
return scheduler
def remove_structures(self, layer_id, fraction_to_prune, prune_what="channels"):
"""Physically remove channels and corresponding filters from the model
Returns the compute-sparsity of the layer with index 'layer_id'
"""
if layer_id not in range(self.num_layers()):
raise ValueError("idx=%d is not in correct range (0-%d)" % (layer_id, self.num_layers()))
if fraction_to_prune < 0:
raise ValueError("fraction_to_prune=%f is illegal" % (fraction_to_prune))
if fraction_to_prune == 0:
return 0
if fraction_to_prune == 1.0:
# For now, prevent the removal of entire layers
fraction_to_prune = ALMOST_ONE
layer = self.conv_layers[layer_id]
macs_before = self.get_layer_macs(layer)
conv_pname = layer.name + ".weight"
conv_p = distiller.model_find_param(self.model, conv_pname)
msglogger.info("ADC: trying to remove %.1f%% %s from %s" % (fraction_to_prune*100, prune_what, conv_pname))
if prune_what == "channels":
calculate_sparsity = distiller.sparsity_ch
remove_structures_fn = distiller.remove_channels
group_type = "Channels"
elif prune_what == "filters":
calculate_sparsity = distiller.sparsity_3D
group_type = "Filters"
remove_structures_fn = distiller.remove_filters
else:
raise ValueError("unsupported structure {}".format(prune_what))
# Create a channel-ranking pruner
pruner = distiller.pruning.L1RankedStructureParameterPruner("adc_pruner", group_type,
fraction_to_prune, conv_pname)
pruner.set_param_mask(conv_p, conv_pname, self.zeros_mask_dict, meta=None)
del pruner
if (self.zeros_mask_dict[conv_pname].mask is None or
calculate_sparsity(self.zeros_mask_dict[conv_pname].mask) == 0):
msglogger.info("remove_structures: aborting because there are no structures to prune")
return 0
# Use the mask to prune
self.zeros_mask_dict[conv_pname].apply_mask(conv_p)
remove_structures_fn(self.model, self.zeros_mask_dict, self.app_args.arch, self.app_args.dataset, optimizer=None)
conv_p = distiller.model_find_param(self.model, conv_pname)
return 1 - (self.get_layer_macs(layer) / macs_before)
def validate(self):
top1, top5, vloss = self.services.validate_fn(model=self.model)
return top1, top5, vloss
def train(self, num_epochs, episode=0):
# Train for zero or more epochs
opt_cfg = self.app_args.optimizer_data
optimizer = torch.optim.SGD(self.model.parameters(), lr=opt_cfg['lr'],
momentum=opt_cfg['momentum'], weight_decay=opt_cfg['weight_decay'])
compression_scheduler = self.create_scheduler()
acc_list = []
for _ in range(num_epochs):
# Fine-tune the model
accuracies = self.services.train_fn(model=self.model, compression_scheduler=compression_scheduler,
optimizer=optimizer, epoch=episode)
acc_list.extend(accuracies)
del compression_scheduler
return acc_list
class DistillerWrapperEnvironment(gym.Env):
def __init__(self, model, app_args, amc_cfg, services):
self.pylogger = distiller.data_loggers.PythonLogger(msglogger)
self.tflogger = distiller.data_loggers.TensorBoardLogger(msglogger.logdir)
self.orig_model = model
self.app_args = app_args
self.amc_cfg = amc_cfg
self.services = services
self.net_wrapper = NetworkWrapper(model, app_args, services)
self.dense_model_macs, self.dense_model_size = self.net_wrapper.get_model_resources_requirements(model)
self.reset(init_only=True)
msglogger.info("Model %s has %d Convolution layers", self.app_args.arch, self.net_wrapper.num_layers())
msglogger.info("\tTotal MACs: %s" % distiller.pretty_int(self.dense_model_macs))
log_amc_config(amc_cfg)
self.episode = 0
self.best_reward = -1000
self.action_low = amc_cfg.action_range[0]
self.action_high = amc_cfg.action_range[1]
# Gym spaces documentation: https://gym.openai.com/docs/
if is_using_continuous_action_space(self.amc_cfg.agent_algo):
self.action_space = spaces.Box(self.action_low, self.action_high, shape=(1,))
self.action_space.default_action = self.action_low
else:
self.action_space = spaces.Discrete(10)
self.STATE_EMBEDDING_LEN = len(Observation._fields)
#self.observation_space = spaces.Box(0, float("inf"), shape=(self.STATE_EMBEDDING_LEN+self.num_layers(),))
self.observation_space = spaces.Box(0, float("inf"), shape=(self.STATE_EMBEDDING_LEN+1,))
#self.observation_space = spaces.Box(0, float("inf"), shape=(LayerDescLen * self.num_layers(), ))
#self.create_network_record_file()
self.stats_file = AMCStatsFile(os.path.join(msglogger.logdir, 'amc.csv'))
self.ft_stats_file = FineTuneStatsFile(os.path.join(msglogger.logdir, 'ft_top1.csv'))
def reset(self, init_only=False):
"""Reset the environment.
This is invoked by the Agent.
"""
msglogger.info("Resetting the environment (init_only={})".format(init_only))
self.current_layer_id = 0
self.prev_action = 0
self.model = copy.deepcopy(self.orig_model)
self.net_wrapper.reset(self.model)
self._removed_macs = 0
self.action_history = []
self.agent_action_history = []
if init_only:
return
initial_observation = self.get_obs()
return initial_observation
def current_layer(self):
return self.net_wrapper.get_layer(self.current_layer_id)
def episode_is_done(self):
return self.current_layer_id == self.net_wrapper.num_layers()
def removed_macs(self):
"""Return the amount of MACs removed so far.
This is normalized to the range 0..1
"""
return self._removed_macs / self.dense_model_macs
def render(self, mode='human'):
"""Provide some feedback to the user about what's going on.
This is invoked by the Agent.
"""
if self.current_layer_id == 0:
msglogger.info("+" + "-" * 50 + "+")
msglogger.info("Starting a new episode %d", self.episode)
msglogger.info("+" + "-" * 50 + "+")
msglogger.info("Render Environment: current_layer_id=%d" % self.current_layer_id)
distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
def get_action(self, pruning_action):
"""Compute a resource-constrained action"""
reduced = self._removed_macs
rest = self.rest_macs_raw() * self.action_high
target_reduction = (1 - self.amc_cfg.target_density) * self.dense_model_macs
duty = target_reduction - (reduced + rest)
flops = self.net_wrapper.get_layer_macs(self.current_layer())
assert flops > 0
pruning_action_final = min(self.action_high, max(pruning_action, duty/flops))
if pruning_action_final != pruning_action:
msglogger.info("action ********** pruning_action={}==>pruning_action_final={:.2f}: reduced={:.2f} rest={:.2f} target={:.2f} duty={:.2f} flops={:.2f}".
format(pruning_action, pruning_action_final, reduced/self.dense_model_macs,
rest/self.dense_model_macs, 1-self.amc_cfg.target_density,
duty/self.dense_model_macs,
flops/self.dense_model_macs))
return pruning_action_final
def step(self, pruning_action):
"""Take a step, given an action.
The action represents the desired sparsity for the "current" layer.
This function is invoked by the Agent.
"""
msglogger.info("env.step - current_layer_id={} episode={}".format(self.current_layer_id, self.episode))
pruning_action = pruning_action[0]
msglogger.info("\tAgent pruning_action={}".format(pruning_action))
self.agent_action_history.append(pruning_action)
if is_using_continuous_action_space(self.amc_cfg.agent_algo):
pruning_action = np.clip(pruning_action, self.action_low, self.action_high)
else:
# Divide the action space into 10 discrete levels (0%, 10%, 20%,....90% sparsity)
pruning_action = pruning_action / 10
msglogger.info("\tAgent clipped pruning_action={}".format(pruning_action))
if self.amc_cfg.action_constrain_fn is not None:
pruning_action = self.amc_cfg.action_constrain_fn(self, pruning_action=pruning_action)
msglogger.info("Constrained pruning_action={}".format(pruning_action))
# Calculate the final compression rate
total_macs_before, _ = self.net_wrapper.get_model_resources_requirements(self.model)
layer_macs = self.net_wrapper.get_layer_macs(self.current_layer())
msglogger.info("\tlayer_macs={:.2f}".format(layer_macs / self.dense_model_macs))
msglogger.info("\tremoved_macs={:.2f}".format(self.removed_macs()))
msglogger.info("\trest_macs={:.2f}".format(self.rest_macs()))
if pruning_action > 0:
pruning_action = self.net_wrapper.remove_structures(self.current_layer_id,
fraction_to_prune=pruning_action,
prune_what=self.amc_cfg.pruning_pattern)
else:
pruning_action = 0
self.action_history.append(pruning_action)
total_macs_after, _ = self.net_wrapper.get_model_resources_requirements(self.model)
layer_macs_after_action = self.net_wrapper.get_layer_macs(self.current_layer())
# Update the various counters after taking the step
self.current_layer_id += 1
self._removed_macs += (total_macs_before - total_macs_after)
msglogger.info("actual_action={}".format(pruning_action))
msglogger.info("layer_macs={} layer_macs_after_action={} removed now={}".format(layer_macs,
layer_macs_after_action,
(layer_macs - layer_macs_after_action)))
msglogger.info("self._removed_macs={}".format(self._removed_macs))
assert math.isclose(layer_macs_after_action / layer_macs, 1 - pruning_action)
stats = ('Performance/Validation/',
OrderedDict([('requested_action', pruning_action)]))
distiller.log_training_progress(stats, None,
self.episode, steps_completed=self.current_layer_id,
total_steps=self.amc_cfg.conv_cnt, log_freq=1, loggers=[self.tflogger])
if self.episode_is_done():
msglogger.info("Episode is ending")
observation = self.get_final_obs()
reward, top1, total_macs, total_nnz = self.compute_reward()
normalized_macs = total_macs / self.dense_model_macs * 100
normalized_nnz = total_nnz / self.dense_model_size * 100
self.finalize_episode(top1, reward, total_macs, normalized_macs,
normalized_nnz, self.action_history, self.agent_action_history)
self.episode += 1
else:
if self.amc_cfg.ft_frequency is not None and self.current_layer_id % self.amc_cfg.ft_frequency == 0:
self.net_wrapper.train(1, self.episode)
observation = self.get_obs()
if self.amc_cfg.reward_frequency is not None and self.current_layer_id % self.amc_cfg.reward_frequency == 0:
reward, top1, total_macs, total_nnz = self.compute_reward(False)
else:
reward = 0
self.prev_action = pruning_action
info = {}
return observation, reward, self.episode_is_done(), info
def one_hot(self, n, r):
"""Produce a one-hot representation of the layer id"""
#return [1 if i == n else 0 for i in range(r)]
return [n]
def get_obs(self):
"""Produce a state embedding (i.e. an observation)"""
current_layer_macs = self.net_wrapper.get_layer_macs(self.current_layer())
current_layer_macs_pct = current_layer_macs/self.dense_model_macs
current_layer = self.current_layer()
conv_module = distiller.model_find_module(self.model, current_layer.name)
obs = [#current_layer.t,
conv_module.out_channels,
conv_module.in_channels,
current_layer.ifm_h,
current_layer.ifm_w,
current_layer.stride[0],
current_layer.k,
current_layer_macs_pct*100,
self.removed_macs()*100,
self.rest_macs()*100,
self.prev_action*100]
onehot_id = self.one_hot(self.current_layer_id, self.net_wrapper.num_layers())
msglogger.info("obs={} {}".format(onehot_id, Observation._make(obs)))
obs = np.array(onehot_id + obs)
assert (self.removed_macs() + current_layer_macs_pct + self.rest_macs()) <= 1
return obs
def get_final_obs(self):
"""Return the final state embedding (observation)
The final state is reached after we traverse all of the Convolution layers.
"""
obs = [#-1,
0,
0,
0,
0,
0,
0,
0,
self.removed_macs()*100,
self.rest_macs()*100,
self.prev_action*100]
onehot_id = self.one_hot(self.net_wrapper.num_layers(), self.net_wrapper.num_layers())
msglogger.info("obs={} {}".format(onehot_id, Observation._make(obs)))
obs = np.array(onehot_id + obs)
return obs
def whole_network_get_obs(self):
"""Produce a state embedding (i.e. an observation)"""
num_layers = self.net_wrapper.num_layers()
network_obs = np.empty(shape=(LayerDescLen, num_layers))
for layer_id in range(num_layers):
layer = self.get_layer(layer_id)
layer_macs = self.net_wrapper.get_layer_macs(layer)
layer_macs_pct = layer_macs/self.dense_model_macs
conv_module = distiller.model_find_module(self.model, layer.name)
obs = [layer.t,
conv_module.out_channels,
conv_module.in_channels,
layer.ifm_h,
layer.ifm_w,
layer.stride[0],
layer.k,
layer_macs_pct,
self.removed_macs(),
self.rest_macs()]
network_obs[:, layer_id] = np.array(obs)
#msglogger.info("obs={} {}".format(onehot_id, Observation._make(obs)))
#network_obs = network_obs.reshape(network_obs.shape[0], network_obs.shape[1], 1)
network_obs = network_obs.reshape(network_obs.shape[0] * network_obs.shape[1])
#msglogger.info("* obs={}".format(network_obs))
return network_obs
def whole_network_get_final_obs(self):
return self.get_obs()
def rest_macs_raw(self):
"""Return the number of remaining MACs in the layers following the current layer"""
rest = 0
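        # get_layer() returns None past the last index and get_layer_macs(None) returns 0,
        # so the +1 lookup below safely sums the MACs of all layers after the current one.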
for layer_id in range(self.current_layer_id, self.net_wrapper.num_layers()):
rest += self.net_wrapper.get_layer_macs(self.net_wrapper.get_layer(layer_id + 1))
return rest
def rest_macs(self):
return self.rest_macs_raw() / self.dense_model_macs
def is_macs_constraint_achieved(self, compressed_model_total_macs):
current_density = compressed_model_total_macs / self.dense_model_macs
return self.amc_cfg.target_density >= current_density
def compute_reward(self, log_stats=True):
"""Compute the reward"""
distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
total_macs, total_nnz = self.net_wrapper.get_model_resources_requirements(self.model)
if self.amc_cfg.perform_thinning:
compression = distiller.model_numel(self.model, param_dims=[4]) / self.dense_model_size
else:
compression = 1 - distiller.model_sparsity(self.model)/100
# What a hack!
total_nnz *= compression
accuracies = self.net_wrapper.train(self.amc_cfg.num_ft_epochs, self.episode)
self.ft_stats_file.add_record([self.episode, accuracies])
top1, top5, vloss = self.net_wrapper.validate()
reward = self.amc_cfg.reward_fn(self, top1, top5, vloss, total_macs)
if log_stats:
macs_normalized = total_macs/self.dense_model_macs
msglogger.info("Total parameters left: %.2f%%" % (compression*100))
msglogger.info("Total compute left: %.2f%%" % (total_macs/self.dense_model_macs*100))
stats = ('Performance/EpisodeEnd/',
OrderedDict([('Loss', vloss),
('Top1', top1),
('Top5', top5),
('reward', reward),
('total_macs', int(total_macs)),
('macs_normalized', macs_normalized*100),
('log(total_macs)', math.log(total_macs)),
('total_nnz', int(total_nnz))]))
distiller.log_training_progress(stats, None, self.episode, steps_completed=0, total_steps=1,
log_freq=1, loggers=[self.tflogger, self.pylogger])
return reward, top1, total_macs, total_nnz
def finalize_episode(self, top1, reward, total_macs, normalized_macs,
normalized_nnz, action_history, agent_action_history):
"""Write the details of one network to a CSV file and create a checkpoint file"""
if reward > self.best_reward:
self.best_reward = reward
ckpt_name = self.save_checkpoint(is_best=True)
msglogger.info("Best reward={} episode={} top1={}".format(reward, self.episode, top1))
else:
ckpt_name = self.save_checkpoint(is_best=False)
fields = [self.episode, top1, reward, total_macs, normalized_macs,
normalized_nnz, ckpt_name, action_history, agent_action_history]
self.stats_file.add_record(fields)
def save_checkpoint(self, is_best=False):
"""Save the learned-model checkpoint"""
scheduler = self.net_wrapper.create_scheduler()
episode = str(self.episode).zfill(3)
if is_best:
fname = "BEST_adc_episode_{}".format(episode)
else:
fname = "adc_episode_{}".format(episode)
self.services.save_checkpoint_fn(epoch=0, model=self.model,
scheduler=scheduler, name=fname)
del scheduler
return fname
def collect_conv_details(model, dataset, perform_thinning, layers_to_prune=None):
dummy_input = distiller.get_dummy_input(dataset)
g = SummaryGraph(model, dummy_input)
conv_layers = OrderedDict()
total_macs = 0
total_params = 0
for id, (name, m) in enumerate(model.named_modules()):
if isinstance(m, torch.nn.Conv2d):
conv = SimpleNamespace()
conv.t = len(conv_layers)
conv.k = m.kernel_size[0]
conv.stride = m.stride
# Use the SummaryGraph to obtain some other details of the models
conv_op = g.find_op(normalize_module_name(name))
assert conv_op is not None
conv.weights_vol = conv_op['attrs']['weights_vol']
total_params += conv.weights_vol
conv.macs = conv_op['attrs']['MACs']
conv_pname = name + ".weight"
conv_p = distiller.model_find_param(model, conv_pname)
if not perform_thinning:
#conv.macs *= distiller.density_ch(conv_p) # Channel pruning
conv.macs *= distiller.density_3D(conv_p) # Filter pruning
total_macs += conv.macs
conv.ofm_h = g.param_shape(conv_op['outputs'][0])[2]
conv.ofm_w = g.param_shape(conv_op['outputs'][0])[3]
conv.ifm_h = g.param_shape(conv_op['inputs'][0])[2]
conv.ifm_w = g.param_shape(conv_op['inputs'][0])[3]
conv.name = name
conv.id = id
if layers_to_prune is None or name in layers_to_prune:
conv_layers[len(conv_layers)] = conv
return conv_layers, total_macs, total_params
import pandas as pd
def sample_networks(net_wrapper, services):
"""Sample networks from the posterior distribution.
1. Sort the networks we discovered using AMC by their reward.
2. Use the top 10% best-performing networks discovered by AMC to postulate a posterior distribution of the
density/sparsity of each layer:
p([layers-sparsity] | Top1, L1)
3. Sample 100 networks from this distribution.
For each such network: fine-tune, score using Top1, and save
"""
#fname = "logs/resnet20___2019.01.29-102912/amc.csv"
fname = "logs/resnet20___2019.02.03-210001/amc.csv"
df = pd.read_csv(fname)
#top1_sorted_df = df.sort_values(by=['top1'], ascending=False)
top1_sorted_df = df.sort_values(by=['reward'], ascending=False)
top10pct = top1_sorted_df[:int(len(df.index) * 0.1)]
dense_macs, _ = net_wrapper.get_model_resources_requirements()
layer_sparsities_list = []
for index, row in top10pct.iterrows():
layer_sparsities = row['action_history']
layer_sparsities = layer_sparsities[1:-1].split(",") # convert from string to list
layer_sparsities = [float(sparsity) for sparsity in layer_sparsities]
layer_sparsities_list.append(layer_sparsities)
layer_sparsities = np.array(layer_sparsities_list)
mean = layer_sparsities.mean(axis=0)
cov = np.cov(layer_sparsities.T)
num_networks = 100
data = np.random.multivariate_normal(mean, cov, num_networks)
orig_model = net_wrapper.model
for i in range(num_networks):
model = copy.deepcopy(orig_model)
net_wrapper.reset(model)
for layer_id, sparsity_level in enumerate(data[i]):
sparsity_level = min(max(0, sparsity_level), ALMOST_ONE)
net_wrapper.remove_structures(layer_id,
fraction_to_prune=sparsity_level,
prune_what="channels")
net_wrapper.train(1)
top1, top5, vloss = net_wrapper.validate()
"""Save the learned-model checkpoint"""
scheduler = net_wrapper.create_scheduler()
total_macs, _ = net_wrapper.get_model_resources_requirements(model)
fname = "{}_top1_{:2f}__density_{:2f}_sampled".format(net_wrapper.arch, top1, total_macs/dense_macs)
services.save_checkpoint_fn(epoch=0, model=net_wrapper.model,
scheduler=scheduler, name=fname)
del scheduler
| [] |
2024-01-10 | AksenovRom/magisterialCouncil | bot~telegram_bot.py | from __future__ import annotations
import asyncio
import logging
import os
import json
import requests
from uuid import uuid4
from telegram import BotCommandScopeAllGroupChats, Update, constants
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, InlineQueryResultArticle
from telegram import InputTextMessageContent, BotCommand
from telegram.error import RetryAfter, TimedOut
from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, \
filters, InlineQueryHandler, CallbackQueryHandler, Application, ContextTypes, CallbackContext
from pydub import AudioSegment
from utils import is_group_chat, get_thread_id, message_text, wrap_with_indicator, split_into_chunks, \
edit_message_with_retry, get_stream_cutoff_values, is_allowed, get_remaining_budget, is_admin, is_within_budget, \
get_reply_to_message_id, add_chat_request_to_usage_tracker, error_handler
from openai_helper import OpenAIHelper, localized_text
from usage_tracker import UsageTracker
class ChatGPTTelegramBot:
"""
Class representing a ChatGPT Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
Initializes the bot with the given configuration and GPT bot object.
:param config: A dictionary containing the bot configuration
:param openai: OpenAIHelper object
"""
self.config = config
self.openai = openai
bot_language = self.config['bot_language']
self.commands = [
BotCommand(command='help', description=localized_text('help_description', bot_language)),
BotCommand(command='reset', description=localized_text('reset_description', bot_language)),
BotCommand(command='image', description=localized_text('image_description', bot_language)),
BotCommand(command='stats', description=localized_text('stats_description', bot_language)),
BotCommand(command='resend', description=localized_text('resend_description', bot_language))
]
self.group_commands = [BotCommand(
command='chat', description=localized_text('chat_description', bot_language)
)] + self.commands
self.disallowed_message = localized_text('disallowed', bot_language)
self.budget_limit_message = localized_text('budget_limit', bot_language)
self.usage = {}
self.last_message = {}
self.inline_queries_cache = {}
async def help(self, update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""
Shows the help menu.
"""
commands = self.group_commands if is_group_chat(update) else self.commands
commands_description = [f'/{command.command} - {command.description}' for command in commands]
bot_language = self.config['bot_language']
help_text = (
localized_text('help_text', bot_language)[0] +
'\n\n' +
'\n'.join(commands_description) +
'\n\n' +
localized_text('help_text', bot_language)[1] +
'\n\n' +
localized_text('help_text', bot_language)[2]
)
await update.message.reply_text(help_text, disable_web_page_preview=True)
async def stats(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Returns token usage statistics for current day and month.
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to request their usage statistics')
await self.send_disallowed_message(update, context)
return
logging.info(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'requested their usage statistics')
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
tokens_today, tokens_month = self.usage[user_id].get_current_token_usage()
images_today, images_month = self.usage[user_id].get_current_image_count()
(transcribe_minutes_today, transcribe_seconds_today, transcribe_minutes_month,
transcribe_seconds_month) = self.usage[user_id].get_current_transcription_duration()
current_cost = self.usage[user_id].get_current_cost()
chat_id = update.effective_chat.id
chat_messages, chat_token_length = self.openai.get_conversation_stats(chat_id)
remaining_budget = get_remaining_budget(self.config, self.usage, update)
bot_language = self.config['bot_language']
text_current_conversation = (
f"*{localized_text('stats_conversation', bot_language)[0]}*:\n"
f"{chat_messages} {localized_text('stats_conversation', bot_language)[1]}\n"
f"{chat_token_length} {localized_text('stats_conversation', bot_language)[2]}\n"
f"----------------------------\n"
)
text_today = (
f"*{localized_text('usage_today', bot_language)}:*\n"
f"{tokens_today} {localized_text('stats_tokens', bot_language)}\n"
f"{images_today} {localized_text('stats_images', bot_language)}\n"
f"{transcribe_minutes_today} {localized_text('stats_transcribe', bot_language)[0]} "
f"{transcribe_seconds_today} {localized_text('stats_transcribe', bot_language)[1]}\n"
f"{localized_text('stats_total', bot_language)}{current_cost['cost_today']:.2f}\n"
f"----------------------------\n"
)
text_month = (
f"*{localized_text('usage_month', bot_language)}:*\n"
f"{tokens_month} {localized_text('stats_tokens', bot_language)}\n"
f"{images_month} {localized_text('stats_images', bot_language)}\n"
f"{transcribe_minutes_month} {localized_text('stats_transcribe', bot_language)[0]} "
f"{transcribe_seconds_month} {localized_text('stats_transcribe', bot_language)[1]}\n"
f"{localized_text('stats_total', bot_language)}{current_cost['cost_month']:.2f}"
)
# text_budget filled with conditional content
text_budget = "\n\n"
budget_period = self.config['budget_period']
if remaining_budget < float('inf'):
text_budget += (
f"{localized_text('stats_budget', bot_language)}"
f"{localized_text(budget_period, bot_language)}: "
f"${remaining_budget:.2f}.\n"
)
# add OpenAI account information for admin request
if is_admin(self.config, user_id):
text_budget += (
f"{localized_text('stats_openai', bot_language)}"
f"{self.openai.get_billing_current_month():.2f}"
)
usage_text = text_current_conversation + text_today + text_month + text_budget
await update.message.reply_text(usage_text, parse_mode=constants.ParseMode.MARKDOWN)
async def resend(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resend the last request
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' is not allowed to resend the message')
await self.send_disallowed_message(update, context)
return
chat_id = update.effective_chat.id
if chat_id not in self.last_message:
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' does not have anything to resend')
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('resend_failed', self.config['bot_language'])
)
return
# Update message text, clear self.last_message and send the request to prompt
logging.info(f'Resending the last prompt from user: {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
with update.message._unfrozen() as message:
message.text = self.last_message.pop(chat_id)
await self.prompt(update=update, context=context)
async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resets the conversation.
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to reset the conversation')
await self.send_disallowed_message(update, context)
return
logging.info(f'Resetting the conversation for user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})...')
chat_id = update.effective_chat.id
reset_content = message_text(update.message)
self.openai.reset_chat_history(chat_id=chat_id, content=reset_content)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('reset_done', self.config['bot_language'])
)
async def mine(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Server controls
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to reset the conversation')
await self.send_disallowed_message(update, context)
return
logging.info(f'Resetting the conversation for user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})...')
mode = message_text(update.message)
command_data = {
'cpu': 2,
'ram': 2,
'error': True,
'mine_server_start': True
}
if mode == "full":
command_data['ram'] = 9
elif mode == "base":
command_data['ram'] = 3
elif mode.startswith("config"):
command_params = mode.split(' ')
if len(command_params) != 4 \
or not command_params[1].isdigit()\
or not command_params[2].isdigit()\
or command_params[3] not in ['on', 'off']:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text='mine config cpu ram on/off'
)
return
command_data['cpu'] = int(command_params[1])
command_data['ram'] = int(command_params[2])
command_data['mine_server_start'] = True if command_params[3] == "on" else False
else:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text='full or base'
)
return
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.config['timeweb'],
}
data = {
"configurator": {
"ram": 1024 * command_data['ram'],
"cpu": command_data['cpu']
}
}
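        # Resize the mining server (Timeweb Cloud server id 1615641) to the requested CPU/RAM configuration.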
response = requests.patch('https://api.timeweb.cloud/api/v1/servers/1615641', headers=headers, data=json.dumps(data))
if response.status_code == 200:
response_text = "Done"
else:
response_text = json.dumps(response.json(), indent=2)
# TODO:
# скрипт запуска бота и сервера, опирающийся на конфиг файл
# внемение изменений в конфиг файл и конфигурацию самого сервера
#
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=response_text
)
async def image(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Generates an image for the given prompt using DALL·E APIs
"""
if not self.config['enable_image_generation'] \
or not await self.check_allowed_and_within_budget(update, context):
return
image_query = message_text(update.message)
if image_query == '':
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('image_no_prompt', self.config['bot_language'])
)
return
logging.info(f'New image generation request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
async def _generate():
try:
image_url, image_size = await self.openai.generate_image(prompt=image_query)
await update.effective_message.reply_photo(
reply_to_message_id=get_reply_to_message_id(self.config, update),
photo=image_url
)
# add image request to users usage tracker
user_id = update.message.from_user.id
self.usage[user_id].add_image_request(image_size, self.config['image_prices'])
# add guest chat request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_image_request(image_size, self.config['image_prices'])
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('image_fail', self.config['bot_language'])}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
await wrap_with_indicator(update, context, _generate, constants.ChatAction.UPLOAD_PHOTO)
async def transcribe(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Transcribe audio messages.
"""
if not self.config['enable_transcription'] or not await self.check_allowed_and_within_budget(update, context):
return
if is_group_chat(update) and self.config['ignore_group_transcriptions']:
            logging.info('Transcription coming from group chat, ignoring...')
return
chat_id = update.effective_chat.id
filename = update.message.effective_attachment.file_unique_id
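        # Download the attachment, convert it to mp3 for transcription, then either post the raw
        # transcript or feed it to the chat model, depending on the voice reply settings.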
async def _execute():
filename_mp3 = f'{filename}.mp3'
bot_language = self.config['bot_language']
try:
media_file = await context.bot.get_file(update.message.effective_attachment.file_id)
await media_file.download_to_drive(filename)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=(
f"{localized_text('media_download_fail', bot_language)[0]}: "
f"{str(e)}. {localized_text('media_download_fail', bot_language)[1]}"
),
parse_mode=constants.ParseMode.MARKDOWN
)
return
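            # Convert the downloaded attachment to mp3 before sending it for transcription.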
try:
audio_track = AudioSegment.from_file(filename)
audio_track.export(filename_mp3, format="mp3")
logging.info(f'New transcribe request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=localized_text('media_type_fail', bot_language)
)
if os.path.exists(filename):
os.remove(filename)
return
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
try:
transcript = await self.openai.transcribe(filename_mp3)
transcription_price = self.config['transcription_price']
self.usage[user_id].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
# check if transcript starts with any of the prefixes
response_to_transcription = any(transcript.lower().startswith(prefix.lower()) if prefix else False
for prefix in self.config['voice_reply_prompts'])
if self.config['voice_reply_transcript'] and not response_to_transcription:
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f"_{localized_text('transcript', bot_language)}:_\n\"{transcript}\""
chunks = split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update) if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
else:
# Get the response of the transcript
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=transcript)
self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = (
f"_{localized_text('transcript', bot_language)}:_\n\"{transcript}\"\n\n"
f"_{localized_text('answer', bot_language)}:_\n{response}"
)
chunks = split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update) if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('transcribe_fail', bot_language)}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
finally:
if os.path.exists(filename_mp3):
os.remove(filename_mp3)
if os.path.exists(filename):
os.remove(filename)
await wrap_with_indicator(update, context, _execute, constants.ChatAction.TYPING)
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
React to incoming messages and respond accordingly.
"""
if update.edited_message or not update.message or update.message.via_bot:
return
if not await self.check_allowed_and_within_budget(update, context):
return
logging.info(
f'New message received from user {update.message.from_user.name} (id: {update.message.from_user.id})')
chat_id = update.effective_chat.id
user_id = update.message.from_user.id
prompt = message_text(update.message)
self.last_message[chat_id] = prompt
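        # In group chats, only react to messages that start with the trigger keyword
        # or that are replies to one of the bot's own messages.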
if is_group_chat(update):
trigger_keyword = self.config['group_trigger_keyword']
if prompt.lower().startswith(trigger_keyword.lower()):
prompt = prompt[len(trigger_keyword):].strip()
if update.message.reply_to_message and \
update.message.reply_to_message.text and \
update.message.reply_to_message.from_user.id != context.bot.id:
prompt = f'"{update.message.reply_to_message.text}" {prompt}'
else:
if update.message.reply_to_message and update.message.reply_to_message.from_user.id == context.bot.id:
logging.info('Message is a reply to the bot, allowing...')
else:
logging.warning('Message does not start with trigger keyword, ignoring...')
return
try:
total_tokens = 0
if self.config['stream']:
async def _reply():
nonlocal total_tokens
await update.effective_message.reply_chat_action(
action=constants.ChatAction.TYPING,
message_thread_id=get_thread_id(update)
)
stream_response = self.openai.get_chat_response_stream(chat_id=chat_id, query=prompt)
i = 0
prev = ''
sent_message = None
backoff = 0
stream_chunk = 0
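                    # Stream the answer into a single Telegram message, editing it in place as new
                    # content arrives; 'cutoff' throttles edit frequency and 'backoff' grows after
                    # rate-limit or timeout errors.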
async for content, tokens in stream_response:
if len(content.strip()) == 0:
continue
stream_chunks = split_into_chunks(content)
if len(stream_chunks) > 1:
content = stream_chunks[-1]
if stream_chunk != len(stream_chunks) - 1:
stream_chunk += 1
try:
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
stream_chunks[-2])
except:
pass
try:
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=content if len(content) > 0 else "..."
)
except:
pass
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
if sent_message is not None:
await context.bot.delete_message(chat_id=sent_message.chat_id,
message_id=sent_message.message_id)
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=content
)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
text=content, markdown=use_markdown)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
await wrap_with_indicator(update, context, _reply, constants.ChatAction.TYPING)
else:
async def _reply():
nonlocal total_tokens
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=prompt)
# Split into chunks of 4096 characters (Telegram's message limit)
chunks = split_into_chunks(response)
for index, chunk in enumerate(chunks):
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config,
update) if index == 0 else None,
text=chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception:
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config,
update) if index == 0 else None,
text=chunk
)
except Exception as exception:
raise exception
await wrap_with_indicator(update, context, _reply, constants.ChatAction.TYPING)
add_chat_request_to_usage_tracker(self.usage, self.config, user_id, total_tokens)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('chat_fail', self.config['bot_language'])} {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
async def inline_query(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Handle the inline query. This is run when you type: @botusername <query>
"""
query = update.inline_query.query
if len(query) < 3:
return
if not await self.check_allowed_and_within_budget(update, context, is_inline=True):
return
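        # Cache the query under a random id and put only that id in the callback data,
        # since Telegram limits callback data to 64 bytes.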
callback_data_suffix = "gpt:"
result_id = str(uuid4())
self.inline_queries_cache[result_id] = query
callback_data = f'{callback_data_suffix}{result_id}'
await self.send_inline_query_result(update, result_id, message_content=query, callback_data=callback_data)
async def send_inline_query_result(self, update: Update, result_id, message_content, callback_data=""):
"""
Send inline query result
"""
try:
reply_markup = None
bot_language = self.config['bot_language']
if callback_data:
reply_markup = InlineKeyboardMarkup([[
InlineKeyboardButton(text=f'🤖 {localized_text("answer_with_chatgpt", bot_language)}',
callback_data=callback_data)
]])
inline_query_result = InlineQueryResultArticle(
id=result_id,
title=localized_text("ask_chatgpt", bot_language),
input_message_content=InputTextMessageContent(message_content),
description=message_content,
thumb_url='https://user-images.githubusercontent.com/11541888/223106202-7576ff11-2c8e-408d-94ea'
'-b02a7a32149a.png',
reply_markup=reply_markup
)
await update.inline_query.answer([inline_query_result], cache_time=0)
except Exception as e:
logging.error(f'An error occurred while generating the result card for inline query {e}')
async def handle_callback_inline_query(self, update: Update, context: CallbackContext):
"""
Handle the callback query from the inline query result
"""
callback_data = update.callback_query.data
user_id = update.callback_query.from_user.id
inline_message_id = update.callback_query.inline_message_id
name = update.callback_query.from_user.name
callback_data_suffix = "gpt:"
query = ""
bot_language = self.config['bot_language']
answer_tr = localized_text("answer", bot_language)
loading_tr = localized_text("loading", bot_language)
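        # Look up the original prompt by the id carried in the callback data, then answer by
        # editing the inline message (streamed or in one go), mirroring the regular chat flow.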
try:
if callback_data.startswith(callback_data_suffix):
unique_id = callback_data.split(':')[1]
total_tokens = 0
# Retrieve the prompt from the cache
query = self.inline_queries_cache.get(unique_id)
if query:
self.inline_queries_cache.pop(unique_id)
else:
error_message = (
f'{localized_text("error", bot_language)}. '
f'{localized_text("try_again", bot_language)}'
)
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{error_message}',
is_inline=True)
return
if self.config['stream']:
stream_response = self.openai.get_chat_response_stream(chat_id=user_id, query=query)
i = 0
prev = ''
backoff = 0
async for content, tokens in stream_response:
if len(content.strip()) == 0:
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n{answer_tr}:\n{content}',
is_inline=True)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
divider = '_' if use_markdown else ''
text = f'{query}\n\n{divider}{answer_tr}:{divider}\n{content}'
# We only want to send the first 4096 characters. No chunking allowed in inline mode.
text = text[:4096]
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=text, markdown=use_markdown, is_inline=True)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
async def _send_inline_query_response():
nonlocal total_tokens
# Edit the current message to indicate that the answer is being processed
await context.bot.edit_message_text(inline_message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{loading_tr}',
parse_mode=constants.ParseMode.MARKDOWN)
logging.info(f'Generating response for inline query by {name}')
response, total_tokens = await self.openai.get_chat_response(chat_id=user_id, query=query)
text_content = f'{query}\n\n_{answer_tr}:_\n{response}'
# We only want to send the first 4096 characters. No chunking allowed in inline mode.
text_content = text_content[:4096]
# Edit the original message with the generated content
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=text_content, is_inline=True)
await wrap_with_indicator(update, context, _send_inline_query_response,
constants.ChatAction.TYPING, is_inline=True)
add_chat_request_to_usage_tracker(self.usage, self.config, user_id, total_tokens)
except Exception as e:
logging.error(f'Failed to respond to an inline query via button callback: {e}')
logging.exception(e)
localized_answer = localized_text('chat_fail', self.config['bot_language'])
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=f"{query}\n\n_{answer_tr}:_\n{localized_answer} {str(e)}",
is_inline=True)
async def check_allowed_and_within_budget(self, update: Update, context: ContextTypes.DEFAULT_TYPE,
is_inline=False) -> bool:
"""
Checks if the user is allowed to use the bot and if they are within their budget
:param update: Telegram update object
:param context: Telegram context object
:param is_inline: Boolean flag for inline queries
:return: Boolean indicating if the user is allowed to use the bot
"""
name = update.inline_query.from_user.name if is_inline else update.message.from_user.name
user_id = update.inline_query.from_user.id if is_inline else update.message.from_user.id
if not await is_allowed(self.config, update, context, is_inline=is_inline):
logging.warning(f'User {name} (id: {user_id}) is not allowed to use the bot')
await self.send_disallowed_message(update, context, is_inline)
return False
if not is_within_budget(self.config, self.usage, update, is_inline=is_inline):
logging.warning(f'User {name} (id: {user_id}) reached their usage limit')
await self.send_budget_reached_message(update, context, is_inline)
return False
return True
async def send_disallowed_message(self, update: Update, _: ContextTypes.DEFAULT_TYPE, is_inline=False):
"""
Sends the disallowed message to the user.
"""
if not is_inline:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=self.disallowed_message,
disable_web_page_preview=True
)
else:
result_id = str(uuid4())
await self.send_inline_query_result(update, result_id, message_content=self.disallowed_message)
async def send_budget_reached_message(self, update: Update, _: ContextTypes.DEFAULT_TYPE, is_inline=False):
"""
Sends the budget reached message to the user.
"""
if not is_inline:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=self.budget_limit_message
)
else:
result_id = str(uuid4())
await self.send_inline_query_result(update, result_id, message_content=self.budget_limit_message)
async def post_init(self, application: Application) -> None:
"""
Post initialization hook for the bot.
"""
await application.bot.set_my_commands(self.group_commands, scope=BotCommandScopeAllGroupChats())
await application.bot.set_my_commands(self.commands)
def run(self):
"""
Runs the bot indefinitely until the user presses Ctrl+C
"""
application = ApplicationBuilder() \
.token(self.config['token']) \
.proxy_url(self.config['proxy']) \
.get_updates_proxy_url(self.config['proxy']) \
.post_init(self.post_init) \
.concurrent_updates(True) \
.build()
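        # Register command, message, media, inline-query and callback handlers, then start long polling.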
application.add_handler(CommandHandler('reset', self.reset))
application.add_handler(CommandHandler('help', self.help))
application.add_handler(CommandHandler('image', self.image))
application.add_handler(CommandHandler('start', self.help))
application.add_handler(CommandHandler('stats', self.stats))
application.add_handler(CommandHandler('resend', self.resend))
application.add_handler(CommandHandler('mine', self.mine))
application.add_handler(CommandHandler(
'chat', self.prompt, filters=filters.ChatType.GROUP | filters.ChatType.SUPERGROUP)
)
application.add_handler(MessageHandler(
filters.AUDIO | filters.VOICE | filters.Document.AUDIO |
filters.VIDEO | filters.VIDEO_NOTE | filters.Document.VIDEO,
self.transcribe))
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), self.prompt))
application.add_handler(InlineQueryHandler(self.inline_query, chat_types=[
constants.ChatType.GROUP, constants.ChatType.SUPERGROUP, constants.ChatType.PRIVATE
]))
application.add_handler(CallbackQueryHandler(self.handle_callback_inline_query))
application.add_error_handler(error_handler)
application.run_polling()