date_collected
stringclasses 1
value | repo_name
stringlengths 6
116
| file_name
stringlengths 2
220
| file_contents
stringlengths 13
357k
| prompts
sequence |
---|---|---|---|---|
2024-01-10 | RiptidePzh/LLM_Chatbot_App | friend_replica~recollection.py | import json
import os
from datetime import datetime
from typing import Dict, List
from friend_replica.format_chat import format_chat_history, split_chat_data
from friend_replica.semantic_search import Chat
from langchain.prompts import PromptTemplate
class LanguageModelwithRecollection():
'''
Wrap GPT4ALL models and Chat memory up.
'''
def __init__(self,
model,
chat: Chat,
debug: bool=False,
num_context: int=15,
num_search: int=3,
threshold: float=.5
) -> None:
self.model = model
self.chat = chat if chat.chat_config else print("Please first pass chat_config to initialize Chat with one friend.")
self.debug = debug
self.num_context = num_context
self.num_search = num_search
self.threshold = threshold
def generate_thoughts(self, friend_input, key_word_only=False):
if self.chat.chat_config.language == "english":
template = """[[INST]]<<SYS>> Be consise. Reply with the topic summary content only.
<</SYS>>
Summarize the topic of the given sentences into less than three words:
'''
{friend_input}
'''
Topic Summary:
[[/INST]] """
else:
template = """请用不超过三个中文短语概括句子内容,请只用这些中文短语作为回答:
[Round 1]
问:昨天那场音乐会真的爆炸好听,我哭死
答:昨天 音乐会
[Round 2]
问:还记得我上周跟你提到的那本机器学习教材吗?
答:上周 机器学习 教材
[Round 3]
问:{friend_input}
答:"""
prompt = PromptTemplate(
template=template,
input_variables=[
'friend_input'
],
)
prompt_text = prompt.format(friend_input=friend_input)
key_word = self.model(prompt_text) if self.chat.chat_config.language == "english" else self.model(prompt_text)[len(prompt_text):]
if self.debug:
print(key_word)
if not key_word_only:
thoughts = self.chat.semantic_search(
key_word,
friend_name=self.chat.chat_config.friend_name,
debug=False,
num_context=self.num_context,
k=self.num_search,
threshold=self.threshold
)
return thoughts, key_word
else:
return key_word
def generalize_personality(self, chat_block:List[Dict]):
'''
Generate personality for the chat and store the personality in json file for future usage.
Input: One chat_block, a list of concatenated chat messages (List[Dict])
Output: LLM summary of peronality (str),
stored in personality_{friend_name}.json under chat_history directory
'''
if self.chat.chat_config.language == "english":
prompt_template = """[[INST]]<<SYS>>Be as concise and in-depth as possible. Reply in one to two sentences with the summary content only.
<</SYS>>
Summarize in one to two sentences the personality of {my_name} and the relationship between {friend_name} and {my_name}, from the chat history given below:
'''
{chat_history}
'''
Short summary:
[[/INST]] """
else:
prompt_template = """
从过往聊天记录中,总结{my_name}的性格特点,以及{my_name}和{friend_name}之间的人际关系。
过往聊天:
'''
{chat_history}
'''
"""
prompt = PromptTemplate(
template=prompt_template,
input_variables=[
'my_name',
'friend_name',
'chat_history',
],
)
prompt_text = prompt.format(
my_name=self.chat.chat_config.my_name,
friend_name=self.chat.chat_config.friend_name,
chat_history='\n'.join(format_chat_history(chat_block, chat_config=self.chat.chat_config, for_read=True)),
)
if self.chat.chat_config.language == "english":
personality = self.model(prompt_text)
else:
personality = self.model(prompt_text)[len(prompt_text):]
return personality
def personality_archive(self):
'''
Generate personality archive for the chat.
Input: the chat model, since personality_archive should work on all the chat_blocks
Output: memory_archive (List[Dict])
with keys "time_interval", "memory", "key_word" in each entry
also stored in memory_{friend_name}.json file under chat_history directory
'''
personality_archive = []
for block in self.chat.chat_blocks:
personality = self.generalize_personality(block)
time_interval = (block[0]['msgCreateTime'], block[-1]['msgCreateTime'])
personality_entry = {
'time_interval': time_interval,
'personality': personality,
}
personality_archive.append(personality_entry)
start_time = datetime.fromtimestamp(time_interval[0]).strftime('%Y-%m-%d %H:%M')
end_time = datetime.fromtimestamp(time_interval[1]).strftime('%Y-%m-%d %H:%M')
print(f"######## Personality entry from {start_time} to {end_time}:")
print(personality)
personality_archive.sort(key=lambda x: x['time_interval'][0])
json_data = json.dumps(personality_archive, indent=4)
output_js = os.path.join(self.chat.friend_path, f'personality_{self.chat.chat_config.friend_name}.json')
with open(output_js, 'w', encoding='utf-8') as json_file:
json_file.write(json_data)
print(f"######## Finished Personality Archive Initialization of friend '{self.chat.chat_config.friend_name}'")
return personality_archive
def summarize_memory(self, chat_block:List[Dict]):
'''
Summarize block of chat history.
Input: One chat_block, a list of concatenated chat messages (List[Dict])
Output: LLM summary of the chat_block memory (str)
'''
if self.chat.chat_config.language == "english":
template = """[[INST]]<<SYS>>Be concise. Reply with the summary content only.
<</SYS>>
Summarize the main idea of the following conversation.
'''
{chat_block}
'''
Summary:
[[/INST]]"""
else:
template = """请用一句话简短地概括下列聊天记录的整体思想.
[Round 1]
对话:
friend: 中午去哪吃?
me: 西域美食吃吗
friend: 西域美食
friend: 好油啊
friend: 想吃点好的
me: 那要不去万达那边?
friend: 行的行的
总结:
以上对话发生在2023年8月16日中午,我和我的朋友在商量中饭去哪里吃,经过商量后决定去万达。
[Round 2]
对话:
{chat_block}
总结:"""
prompt = PromptTemplate(
template=template,
input_variables=["chat_block"],
)
prompt_text = prompt.format(chat_block='\n'.join(format_chat_history(chat_block, chat_config=self.chat.chat_config, for_read=True)))
return self.model(prompt_text) if self.chat.chat_config.language == "english" else self.model(prompt_text)[len(prompt_text):]
def memory_archive(self):
'''
Generate memory archive for the chat.
Input: The whole chat object, since memory_archive should work on all the chat_blocks
Output: memory_archive (List[Dict])
with keys "time_interval", "memory", "key_word" in each entry
also stored in memory_{friend_name}.json file under chat_history directory
'''
memory_archive = []
for block in self.chat.chat_blocks:
memory = self.summarize_memory(block)
key_word = self.generate_thoughts(memory, key_word_only=True)
# Deal with fickle output of LLM
if "Sure" in key_word or "\n" in key_word:
key_word = key_word.split('\n')[-1].strip('\"')
key_word = key_word.strip()
time_interval = (block[0]['msgCreateTime'], block[-1]['msgCreateTime'])
memory_entry = {
"time_interval": time_interval,
"memory": memory,
"key_word": key_word,
}
memory_archive.append(memory_entry)
start_time = datetime.fromtimestamp(time_interval[0]).strftime('%Y-%m-%d %H:%M')
end_time = datetime.fromtimestamp(time_interval[1]).strftime('%Y-%m-%d %H:%M')
print(f"####### Memory entry from {start_time} to {end_time}: ")
print("Memory:", memory)
print("Key Word:", key_word)
print("######## ")
json_data = json.dumps(memory_archive, indent=4)
output_js = os.path.join(self.chat.friend_path, f'memory_{self.chat.chat_config.friend_name}.json')
os.makedirs(os.path.dirname(output_js), exist_ok=True)
with open(output_js, 'w', encoding='utf-8') as json_file:
json_file.write(json_data)
print(f"######## Finished Memory Archive Initialization of friend '{self.chat.chat_config.friend_name}'")
return memory_archive
def chat_with_archive(self):
'''
Chat with memory and personality archive.
'''
chat_blocks = self.chat.chat_blocks
# Load Personality Archive
personality_archive = os.path.join(self.chat.friend_path, f'personality_{self.chat.chat_config.friend_name}.json')
if os.path.exists(personality_archive):
with open(personality_archive,'r', encoding='utf-8') as json_file:
personality_archive = json.load(json_file)
else:
# Initialize Personality Archive if not initialized before
personality_archive = self.personality_archive()
# Load Memory Archive
memory_archive = os.path.join(self.chat.friend_path, f'memory_{self.chat.chat_config.friend_name}.json')
if os.path.exists(memory_archive):
with open(memory_archive,'r', encoding='utf-8') as json_file:
memory_archive = json.load(json_file)
else:
# Initialize Memory Archive if not initialized before
memory_archive = self.memory_archive()
# # Collect recent memories and recent personality
# current_time = datetime.now().timestamp()
# recent_memories = []
# recent_personality = []
# for memory_entry, personality_entry in zip(memory_archive, personality_archive):
# entry_time = memory_entry['time_interval'][1]
# if current_time - entry_time > 60 * 60 * 24 * 30: # Only show memories within a month
# continue
# else:
# recent_memories.append(memory_entry)
# recent_personality.append(personality_entry)
# Auto Reply with display of recent memories
auto_reply = f"Hi, {self.chat.chat_config.friend_name}! I'm the agent bot of {self.chat.chat_config.my_name}. I have memory of us discussing these topics:\n"
for i, memory_entry in enumerate(memory_archive):
str_time = datetime.fromtimestamp(memory_entry['time_interval'][1]).strftime('%m.%d')
auto_reply += f"#{i} {str_time}: {memory_entry['key_word']}\n"
auto_reply += "Do you want to continue on any of these?"
print(auto_reply)
input_index = input("Enter the # of the topic if you want to continue: ")
# If user wants to continue on previous topics
if input_index.isdigit():
input_index = int(input_index)
if input_index < len(memory_archive):
# Reply with the topic summary
reply = f"Okay! Let's continue on [{memory_archive[input_index]['key_word']}]\n"
memory = memory_archive[input_index]['memory']
reply += "I recall last time: " + memory
print(reply)
friend_input = input("What do you think?")
print(f'{self.chat.chat_config.friend_name}: {friend_input}')
assert len(chat_blocks) == len(memory_archive) and len(chat_blocks) == len(personality_archive)
matching_chat_block = chat_blocks[input_index]
personality = personality_archive[input_index]['personality']
# # Grab the original chat_block that matches the time interval of the memory
# start_time, end_time = recent_memories[input_index]['time_interval']
# for chat_block in chat_blocks:
# if start_time == chat_block[0]['msgCreateTime'] and end_time==chat_block[-1]['msgCreateTime']:
# matching_chat_block = chat_block
# break
if self.chat.chat_config.language == "english":
prompt_template = """[[INST]]<<SYS>>You are roleplaying a robot with the personality of {my_name} in a casual online chat with {friend_name}.
as described here: {personality}.
Refer to Memory as well as Recent Conversation , respond to the latest message of {friend_name}.
Start the short, casual response with {my_name}:
<</SYS>>
Memory:
'''
{memory}
'''
Recent Conversation:
'''
{recent_chat}
'''
{friend_name}: {friend_input}
[[/INST]] """
prompt_text = prompt_template.format(
my_name=self.chat.chat_config.my_name,
friend_name=self.chat.chat_config.friend_name,
personality=personality,
memory=memory,
recent_chat='\n'.join(format_chat_history(matching_chat_block, self.chat.chat_config, for_read=True)),
friend_input=friend_input,
)
if self.chat.chat_config.language == "english":
out = self.model(prompt_text, stop='\n')
else:
out = self.model(prompt_text)[len(prompt_text):].split('\n')[0]
return out
else:
# If user doesn't want to continue on previous topics
friend_input = input("Alright! Let's talk about something else. What do you want to talk about?")
return self.chat_with_recollection(friend_input)
def chat_with_recollection(
self,
friend_input,
current_chat: str = None,
):
chat_blocks = self.chat.chat_blocks
personality_data = os.path.join(self.chat.friend_path, f'personality_{self.chat.chat_config.friend_name}.json')
if os.path.exists(personality_data):
with open(personality_data,'r', encoding='utf-8') as json_file:
personality_data = json.load(json_file)
personality = personality_data[-1]['personality']
else:
personality = self.generalize_personality(chat_blocks[-1])
recollections, key_words = self.generate_thoughts(friend_input)
recollections = '\n\n'.join(['\n'.join(format_chat_history(recollection, self.chat.chat_config, for_read=True)) for recollection in recollections])
if self.debug:
print(recollections)
if self.chat.chat_config.language == "english":
prompt_template = """[[INST]]<<SYS>>You are roleplaying a robot with the personality of {my_name} in a casual online chat with {friend_name}.
as described here: {personality}.
Refer to Memory as well as Recent Conversation , respond to the latest message of {friend_name} with one sentence only.
Start the short, casual response with {my_name}:
<</SYS>>
Memory:
'''
{recollections}
'''
Recent Conversation:
'''
{recent_chat}
'''
{current_chat}
{friend_name}: {friend_input}
[[/INST]] """
else:
prompt_template = """接下来请你扮演一个在一场随性的网络聊天中拥有{my_name}性格特征的角色。
首先从过往聊天记录中,根据{my_name}的性格特点{personatlity},并掌握{my_name}和{friend_name}之间的人际关系。
之后,运用近期聊天内容以及记忆中的信息,回复{friend_name}发送的消息。
请用一句话,通过简短、随意的方式用{my_name}的身份进行回复:
记忆:
'''
{recollections}
'''
近期聊天:
'''
{recent_chat}
'''
{current_chat}
{friend_name}: {friend_input}
"""
prompt = PromptTemplate(
template=prompt_template,
input_variables=[
'my_name',
'friend_name',
'recent_chat',
'recollections',
'friend_input',
'current_chat',
'personality'
],
)
if self.debug:
print(chat_blocks[-1])
prompt_text = prompt.format(
my_name=self.chat.chat_config.my_name,
friend_name=self.chat.chat_config.friend_name,
personality=personality,
recent_chat='\n'.join(format_chat_history(chat_blocks[-1], self.chat.chat_config, for_read=True)),
recollections=recollections,
friend_input=friend_input,
current_chat=current_chat
)
if self.chat.chat_config.language == "english":
out = self.model(prompt_text, stop='\n')
else:
out = self.model(prompt_text)[len(prompt_text):].split('\n')[0]
return out
def __call__(
self,
friend_input,
current_chat
):
return self.chat_with_recollection(friend_input, current_chat)
| [
"\n",
"friend_input",
"请用不超过三个中文短语概括句子内容,请只用这些中文短语作为回答:\n \n [Round 1]\n 问:昨天那场音乐会真的爆炸好听,我哭死\n 答:昨天 音乐会\n \n [Round 2]\n 问:还记得我上周跟你提到的那本机器学习教材吗?\n 答:上周 机器学习 教材\n \n [Round 3]\n 问:{friend_input}\n 答:",
"recollections",
"chat_block",
"\n 从过往聊天记录中,总结{my_name}的性格特点,以及{my_name}和{friend_name}之间的人际关系。\n \n 过往聊天:\n '''\n {chat_history}\n '''\n\n ",
"chat_history",
"[[INST]]<<SYS>>You are roleplaying a robot with the personality of {my_name} in a casual online chat with {friend_name}.\n as described here: {personality}.\n Refer to Memory as well as Recent Conversation , respond to the latest message of {friend_name}.\n Start the short, casual response with {my_name}: \n <</SYS>>\n \n Memory:\n '''\n {memory}\n '''\n\n Recent Conversation:\n '''\n {recent_chat}\n '''\n\n {friend_name}: {friend_input}\n [[/INST]] ",
"friend_name",
"接下来请你扮演一个在一场随性的网络聊天中拥有{my_name}性格特征的角色。\n 首先从过往聊天记录中,根据{my_name}的性格特点{personatlity},并掌握{my_name}和{friend_name}之间的人际关系。\n 之后,运用近期聊天内容以及记忆中的信息,回复{friend_name}发送的消息。\n 请用一句话,通过简短、随意的方式用{my_name}的身份进行回复:\n \n 记忆:\n '''\n {recollections}\n '''\n\n 近期聊天:\n '''\n {recent_chat}\n '''\n \n\n {current_chat}\n {friend_name}: {friend_input}\n \n ",
"personality",
"current_chat",
"[[INST]]<<SYS>> Be consise. Reply with the topic summary content only.\n <</SYS>>\n Summarize the topic of the given sentences into less than three words:\n '''\n {friend_input}\n '''\n Topic Summary:\n [[/INST]] ",
"recent_chat",
"my_name",
"[[INST]]<<SYS>>You are roleplaying a robot with the personality of {my_name} in a casual online chat with {friend_name}.\n as described here: {personality}.\n Refer to Memory as well as Recent Conversation , respond to the latest message of {friend_name} with one sentence only.\n Start the short, casual response with {my_name}: \n <</SYS>>\n \n Memory:\n '''\n {recollections}\n '''\n\n Recent Conversation:\n '''\n {recent_chat}\n '''\n\n {current_chat}\n {friend_name}: {friend_input}\n [[/INST]] ",
"[[INST]]<<SYS>>Be as concise and in-depth as possible. Reply in one to two sentences with the summary content only.\n <</SYS>>\n Summarize in one to two sentences the personality of {my_name} and the relationship between {friend_name} and {my_name}, from the chat history given below:\n '''\n {chat_history}\n '''\n Short summary:\n [[/INST]] ",
"[[INST]]<<SYS>>Be concise. Reply with the summary content only.\n <</SYS>>\n Summarize the main idea of the following conversation.\n '''\n {chat_block}\n '''\n Summary:\n [[/INST]]",
"请用一句话简短地概括下列聊天记录的整体思想.\n \n [Round 1]\n 对话:\n friend: 中午去哪吃?\n me: 西域美食吃吗\n friend: 西域美食\n friend: 好油啊\n friend: 想吃点好的\n me: 那要不去万达那边?\n friend: 行的行的\n \n 总结:\n 以上对话发生在2023年8月16日中午,我和我的朋友在商量中饭去哪里吃,经过商量后决定去万达。\n \n [Round 2]\n 对话:\n {chat_block}\n \n 总结:"
] |
2024-01-10 | TheVaxly/ChatBot-Larry- | commands~img.py | import openai
import os
OPENAI_API_KEY = os.getenv("img")
openai.api_key = OPENAI_API_KEY
def gen(prompt):
response = openai.Image.create(
prompt=prompt,
model="image-alpha-001"
)
image_url = response['data'][0]['url']
return image_url | [] |
2024-01-10 | GMahlerTheTragic/civic | scripts~civic_evidence_gpt4_query.py | import json
from time import sleep
import torch
from openai import OpenAI
import pandas as pd
import os
from torchmetrics.classification import MulticlassF1Score
from torchmetrics.classification import Accuracy
from civic.config import DATA_PROCESSED_DIR
client = OpenAI()
label_to_num = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4}
def get_gpt_4_example(prepend_string, abstract):
return f"""Metadata:\n\"{prepend_string}\"\nAbstract:\n:\"{abstract}\""""
def get_random_few_shot_examples(n_samples_per_evidence_level, is_test=False):
if is_test:
sampled_df = pd.read_csv(
os.path.join(DATA_PROCESSED_DIR, "civic_evidence_test_gpt4.csv")
)
else:
df = pd.read_csv(os.path.join(DATA_PROCESSED_DIR, "civic_evidence_train.csv"))
sampled_df = pd.DataFrame()
for value in df["evidenceLevel"].unique():
sampled_rows = df[df["evidenceLevel"] == value].sample(
n_samples_per_evidence_level
)
sampled_df = pd.concat([sampled_df, sampled_rows], axis=0)
out = [
{
"example": get_gpt_4_example(
sampled_df.iloc[i]["prependString"],
sampled_df.iloc[i]["sourceAbstract"],
),
"label": sampled_df.iloc[i]["evidenceLevel"],
}
for i in range(sampled_df.shape[0])
]
return out
def _get_prompt_from_sample(sample):
return [
{
"role": "user",
"content": f"Extract the level of clinical significance from this combination of metadata and abstract:\n{sample['example']}",
},
{
"role": "assistant",
"content": f"""{sample['label']}""",
},
]
def gpt4_query(examples, prompt):
messages = (
[
{
"role": "system",
"content": "You are an expert on rare tumor treatments. In the following you will be presented with "
+ "abstracts from medical research papers. These abstracts deal with treatment approaches for rare "
+ "cancers as characterized by their specific genomic variants. Your job is to infer the level of "
+ "clinical significance of the investigations described in the abstract from the abstract and relevant"
+ 'metadata. The labels should range from "A" (indicating the strongest clinical significance) to '
+ '"E" (indicating the weakest clinical significance). You will answer machine-like with exactly one '
+ "character (the level of clinical significance you think is appropriate). You will be presented with "
"examples.",
},
*examples,
prompt,
],
)
completion = client.chat.completions.create(
model="gpt-4-1106-preview", messages=messages[0]
)
print(completion)
projected_label = completion.choices[0].message.content
return projected_label
def do_gpt4_evaluation(n_shots):
f1_score = MulticlassF1Score(num_classes=5, average=None)
macro_f1_score = MulticlassF1Score(num_classes=5, average="macro")
micro_f1_score = MulticlassF1Score(num_classes=5, average="micro")
accuracy = Accuracy(task="multiclass", num_classes=5)
train_examples = [
_get_prompt_from_sample(sample)
for sample in get_random_few_shot_examples(n_shots)
]
examples = sum(train_examples, [])
test_examples = [
_get_prompt_from_sample(sample)
for sample in get_random_few_shot_examples(5, is_test=True)
]
predicted_labels = []
actual_labels = []
for i in range(len(test_examples)):
if (i + 1) % 4 == 0:
sleep(10)
val = gpt4_query(examples, test_examples[i][0])
projected_label = label_to_num.get(val)
actual_label = label_to_num.get(test_examples[i][1]["content"])
predicted_labels.append(projected_label)
actual_labels.append(actual_label)
print(f"Projected label : {projected_label}")
print(f"Actual label : {actual_label}")
return {
"f1-scores": f1_score(
torch.tensor(predicted_labels), torch.tensor(actual_labels)
).tolist(),
"micro-f1-score": micro_f1_score(
torch.tensor(predicted_labels), torch.tensor(actual_labels)
).item(),
"macro-f1-score": macro_f1_score(
torch.tensor(predicted_labels), torch.tensor(actual_labels)
).item(),
"accuracy": accuracy(
torch.tensor(predicted_labels), torch.tensor(actual_labels)
).item(),
}
def main():
metrics_dict = {}
n_shots = 10
print(f"using approx {n_shots * 700 * 5 * 25} tokens")
metrics = do_gpt4_evaluation(n_shots)
print(metrics)
metrics_dict[str(n_shots)] = metrics
with open(f"gpt4_results__nshots{n_shots}.json", "w") as json_file:
json.dump(metrics_dict, json_file)
if __name__ == "__main__":
main()
| [
"Extract the level of clinical significance from this combination of metadata and abstract:\nPLACEHOLDER",
"PLACEHOLDER",
"You are an expert on rare tumor treatments. In the following you will be presented with abstracts from medical research papers. These abstracts deal with treatment approaches for rare cancers as characterized by their specific genomic variants. Your job is to infer the level of clinical significance of the investigations described in the abstract from the abstract and relevantmetadata. The labels should range from \"A\" (indicating the strongest clinical significance) to \"E\" (indicating the weakest clinical significance). You will answer machine-like with exactly one character (the level of clinical significance you think is appropriate). You will be presented with examples."
] |
2024-01-10 | OpenNLG/OpenBA | training~megatron~model~bert_model.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""BERT model...."""
import torch
from megatron import get_args
from megatron.core import tensor_parallel
from megatron.model.enums import AttnMaskType
from megatron.model.language_model import parallel_lm_logits
from megatron.model.language_model import get_language_model
from megatron.model import LayerNorm
from megatron.model.utils import openai_gelu, erf_gelu
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule
def bert_extended_attention_mask(attention_mask):
# We create a 3D attention mask from a 2D tensor mask.
# [b, 1, s]
attention_mask_b1s = attention_mask.unsqueeze(1)
# [b, s, 1]
attention_mask_bs1 = attention_mask.unsqueeze(2)
# [b, s, s]
attention_mask_bss = attention_mask_b1s * attention_mask_bs1
# [b, 1, s, s]
extended_attention_mask = attention_mask_bss.unsqueeze(1)
# Convert attention mask to binary:
extended_attention_mask = (extended_attention_mask < 0.5)
return extended_attention_mask
def bert_position_ids(token_ids):
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long,
device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
class BertLMHead(MegatronModule):
"""Masked LM head for Bert
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
hidden_size: hidden size
init_method: init method for weight initialization
layernorm_epsilon: tolerance for layer norm divisions
parallel_output: whether output logits being distributed or not.
"""
def __init__(self, mpu_vocab_size, hidden_size, init_method,
layernorm_epsilon, parallel_output):
super(BertLMHead, self).__init__()
args = get_args()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
tensor_parallel.set_tensor_model_parallel_attributes(self.bias, True, 0, 1)
self.parallel_output = parallel_output
self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
setattr(self.dense.weight, 'sequence_parallel', args.sequence_parallel)
setattr(self.dense.bias, 'sequence_parallel', args.sequence_parallel)
self.layernorm = LayerNorm(hidden_size,
eps=layernorm_epsilon,
sequence_parallel=args.sequence_parallel)
self.gelu = torch.nn.functional.gelu
if args.openai_gelu:
self.gelu = openai_gelu
elif args.onnx_safe:
self.gelu = erf_gelu
def forward(self, hidden_states, word_embeddings_weight):
hidden_states = self.dense(hidden_states)
hidden_states = self.gelu(hidden_states)
hidden_states = self.layernorm(hidden_states)
output = parallel_lm_logits(hidden_states,
word_embeddings_weight,
self.parallel_output,
bias=self.bias)
return output
def post_language_model_processing(lm_output, pooled_output,
lm_head, binary_head,
lm_labels,
logit_weights,
fp16_lm_cross_entropy):
# Output.
lm_logits = lm_head(
lm_output, logit_weights)
binary_logits = None
if binary_head is not None:
binary_logits = binary_head(pooled_output)
if lm_labels is None:
# [s b h] => [b s h]
return lm_logits.transpose(0,1).contiguous(), binary_logits
else:
# [b s] => [s b]
lm_labels = lm_labels.transpose(0,1).contiguous()
# lm_logits : [s, b, h] and lm_labels: [s, b]
if fp16_lm_cross_entropy:
assert lm_logits.dtype == torch.half
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels)
else:
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(),
lm_labels)
# [s, b] => [b s]
lm_loss = lm_loss.transpose(0,1).contiguous()
return lm_loss, binary_logits
class BertModel(MegatronModule):
"""Bert Language model."""
def __init__(self,
num_tokentypes=2,
add_binary_head=True,
parallel_output=True,
pre_process=True,
post_process=True):
super(BertModel, self).__init__()
args = get_args()
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.add_binary_head = add_binary_head
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.return_embeddings = args.output_bert_embeddings
if self.return_embeddings:
assert self.post_process and self.add_binary_head
init_method = init_method_normal(args.init_method_std)
scaled_init_method = scaled_init_method_normal(args.init_method_std,
args.num_layers)
self.language_model, self._language_model_key = get_language_model(
num_tokentypes=num_tokentypes,
add_pooler=self.add_binary_head,
encoder_attn_mask_type=AttnMaskType.padding,
init_method=init_method,
scaled_init_method=scaled_init_method,
pre_process=self.pre_process,
post_process=self.post_process)
self.initialize_word_embeddings(init_method_normal)
if self.post_process:
self.lm_head = BertLMHead(
self.word_embeddings_weight().size(0),
args.hidden_size, init_method, args.layernorm_epsilon, parallel_output)
self._lm_head_key = 'lm_head'
self.binary_head = None
if self.add_binary_head:
self.binary_head = get_linear_layer(args.hidden_size, 2,
init_method)
self._binary_head_key = 'binary_head'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, bert_model_input, attention_mask,
tokentype_ids=None, lm_labels=None):
extended_attention_mask = bert_extended_attention_mask(attention_mask)
input_ids = bert_model_input
position_ids = bert_position_ids(input_ids)
lm_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids
)
if self.post_process and self.add_binary_head:
lm_output, pooled_output = lm_output
# Return pooled output (e.g., when computing Bert embeddings).
if self.return_embeddings:
# Sum attention mask.
embeddings = torch.transpose(lm_output, 0, 1)
masks = torch.sum(attention_mask, dim=1)
# Collect masked embeddings.
output = torch.zeros(
size=(embeddings.shape[0], embeddings.shape[2]),
dtype=torch.float32,
device=torch.cuda.current_device())
for i, (embedding, mask) in enumerate(zip(embeddings, masks)):
output[i, :] = torch.mean(embedding[1: mask - 1], dim=0)
return output
else:
pooled_output = None
if self.post_process:
return post_language_model_processing(lm_output, pooled_output,
self.lm_head, self.binary_head,
lm_labels,
self.word_embeddings_weight(),
self.fp16_lm_cross_entropy)
else:
return lm_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process:
state_dict_[self._lm_head_key] \
= self.lm_head.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process and self.add_binary_head:
state_dict_[self._binary_head_key] \
= self.binary_head.state_dict(prefix=prefix, keep_vars=keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(prefix=prefix, keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process:
self.lm_head.load_state_dict(
state_dict[self._lm_head_key], strict=strict)
if self.post_process and self.add_binary_head:
self.binary_head.load_state_dict(
state_dict[self._binary_head_key], strict=strict)
# Load word_embeddings.
if self.post_process and not self.pre_process:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict)
| [] |
2024-01-10 | wangxuqi/langchain-ChatGLM | text_splitter~chinese_text_splitter.py | from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
from configs.model_config import CHUNK_SIZE
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, sentence_size: int = CHUNK_SIZE, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
self.sentence_size = sentence_size
def split_text1(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", "\n", text)
text = re.sub('\s', ' ', text)
text = text.replace("\n\n", "")
sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))') # del :;
sent_list = []
for ele in sent_sep_pattern.split(text):
if sent_sep_pattern.match(ele) and sent_list:
sent_list[-1] += ele
elif ele:
sent_list.append(ele)
return sent_list
def split_text(self, text: str) -> List[str]: ##此处需要进一步优化逻辑
if self.pdf:
text = re.sub(r"\n{3,}", r"\n", text)
text = re.sub('\s', " ", text)
text = re.sub("\n\n", "", text)
text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text) # 单字符断句符
text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text) # 英文省略号
text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text) # 中文省略号
text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
# 如果双引号前有终止符,那么双引号才是句子的终点,把分句符\n放到双引号后,注意前面的几句都小心保留了双引号
text = text.rstrip() # 段尾如果有多余的\n就去掉它
# 很多规则中会考虑分号;,但是这里我把它忽略不计,破折号、英文双引号等同样忽略,需要的再做些简单调整即可。
ls = [i for i in text.split("\n") if i]
for ele in ls:
if len(ele) > self.sentence_size:
ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
ele1_ls = ele1.split("\n")
for ele_ele1 in ele1_ls:
if len(ele_ele1) > self.sentence_size:
ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
ele2_ls = ele_ele2.split("\n")
for ele_ele2 in ele2_ls:
if len(ele_ele2) > self.sentence_size:
ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
ele2_id = ele2_ls.index(ele_ele2)
ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
ele2_id + 1:]
ele_id = ele1_ls.index(ele_ele1)
ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
id = ls.index(ele)
ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
return ls
| [] |
2024-01-10 | wangxuqi/langchain-ChatGLM | startup.py | from multiprocessing import Process, Queue
import multiprocessing as mp
import subprocess
import sys
import os
from pprint import pprint
# 设置numexpr最大线程数,默认为CPU核心数
try:
import numexpr
n_cores = numexpr.utils.detect_number_of_cores()
os.environ["NUMEXPR_MAX_THREADS"] = str(n_cores)
except:
pass
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from configs.model_config import EMBEDDING_DEVICE, EMBEDDING_MODEL, llm_model_dict, LLM_MODEL, LLM_DEVICE, LOG_PATH, \
logger
from configs.server_config import (WEBUI_SERVER, API_SERVER, OPEN_CROSS_DOMAIN, FSCHAT_CONTROLLER, FSCHAT_MODEL_WORKERS,
FSCHAT_OPENAI_API, fschat_controller_address, fschat_model_worker_address,
fschat_openai_api_address, )
from server.utils import MakeFastAPIOffline, FastAPI
import argparse
from typing import Tuple, List
from configs import VERSION
def set_httpx_timeout(timeout=60.0):
import httpx
httpx._config.DEFAULT_TIMEOUT_CONFIG.connect = timeout
httpx._config.DEFAULT_TIMEOUT_CONFIG.read = timeout
httpx._config.DEFAULT_TIMEOUT_CONFIG.write = timeout
def create_controller_app(
dispatch_method: str,
) -> FastAPI:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.controller import app, Controller
controller = Controller(dispatch_method)
sys.modules["fastchat.serve.controller"].controller = controller
MakeFastAPIOffline(app)
app.title = "FastChat Controller"
return app
def create_model_worker_app(**kwargs) -> Tuple[argparse.ArgumentParser, FastAPI]:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.model_worker import app, GptqConfig, AWQConfig, ModelWorker, worker_id
import argparse
import threading
import fastchat.serve.model_worker
# workaround to make program exit with Ctrl+c
# it should be deleted after pr is merged by fastchat
def _new_init_heart_beat(self):
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=fastchat.serve.model_worker.heart_beat_worker, args=(self,), daemon=True,
)
self.heart_beat_thread.start()
ModelWorker.init_heart_beat = _new_init_heart_beat
parser = argparse.ArgumentParser()
args = parser.parse_args([])
# default args. should be deleted after pr is merged by fastchat
args.gpus = None
args.max_gpu_memory = "20GiB"
args.load_8bit = False
args.cpu_offloading = None
args.gptq_ckpt = None
args.gptq_wbits = 16
args.gptq_groupsize = -1
args.gptq_act_order = False
args.awq_ckpt = None
args.awq_wbits = 16
args.awq_groupsize = -1
args.num_gpus = 1
args.model_names = []
args.conv_template = None
args.limit_worker_concurrency = 5
args.stream_interval = 2
args.no_register = False
for k, v in kwargs.items():
setattr(args, k, v)
if args.gpus:
if args.num_gpus is None:
args.num_gpus = len(args.gpus.split(','))
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
gptq_config = GptqConfig(
ckpt=args.gptq_ckpt or args.model_path,
wbits=args.gptq_wbits,
groupsize=args.gptq_groupsize,
act_order=args.gptq_act_order,
)
awq_config = AWQConfig(
ckpt=args.awq_ckpt or args.model_path,
wbits=args.awq_wbits,
groupsize=args.awq_groupsize,
)
worker = ModelWorker(
controller_addr=args.controller_address,
worker_addr=args.worker_address,
worker_id=worker_id,
model_path=args.model_path,
model_names=args.model_names,
limit_worker_concurrency=args.limit_worker_concurrency,
no_register=args.no_register,
device=args.device,
num_gpus=args.num_gpus,
max_gpu_memory=args.max_gpu_memory,
load_8bit=args.load_8bit,
cpu_offloading=args.cpu_offloading,
gptq_config=gptq_config,
awq_config=awq_config,
stream_interval=args.stream_interval,
conv_template=args.conv_template,
)
sys.modules["fastchat.serve.model_worker"].worker = worker
sys.modules["fastchat.serve.model_worker"].args = args
sys.modules["fastchat.serve.model_worker"].gptq_config = gptq_config
MakeFastAPIOffline(app)
app.title = f"FastChat LLM Server ({LLM_MODEL})"
return app
def create_openai_api_app(
controller_address: str,
api_keys: List = [],
) -> FastAPI:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.openai_api_server import app, CORSMiddleware, app_settings
app.add_middleware(
CORSMiddleware,
allow_credentials=True,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
app_settings.controller_address = controller_address
app_settings.api_keys = api_keys
MakeFastAPIOffline(app)
app.title = "FastChat OpeanAI API Server"
return app
def _set_app_seq(app: FastAPI, q: Queue, run_seq: int):
if run_seq == 1:
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
q.put(run_seq)
elif run_seq > 1:
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
while True:
no = q.get()
if no != run_seq - 1:
q.put(no)
else:
break
q.put(run_seq)
def run_controller(q: Queue, run_seq: int = 1):
import uvicorn
app = create_controller_app(FSCHAT_CONTROLLER.get("dispatch_method"))
_set_app_seq(app, q, run_seq)
host = FSCHAT_CONTROLLER["host"]
port = FSCHAT_CONTROLLER["port"]
uvicorn.run(app, host=host, port=port)
def run_model_worker(
model_name: str = LLM_MODEL,
controller_address: str = "",
q: Queue = None,
run_seq: int = 2,
):
import uvicorn
kwargs = FSCHAT_MODEL_WORKERS[model_name].copy()
host = kwargs.pop("host")
port = kwargs.pop("port")
model_path = llm_model_dict[model_name].get("local_model_path", "")
kwargs["model_path"] = model_path
kwargs["model_names"] = [model_name]
kwargs["controller_address"] = controller_address or fschat_controller_address()
kwargs["worker_address"] = fschat_model_worker_address()
app = create_model_worker_app(**kwargs)
_set_app_seq(app, q, run_seq)
uvicorn.run(app, host=host, port=port)
def run_openai_api(q: Queue, run_seq: int = 3):
import uvicorn
controller_addr = fschat_controller_address()
app = create_openai_api_app(controller_addr) # todo: not support keys yet.
_set_app_seq(app, q, run_seq)
host = FSCHAT_OPENAI_API["host"]
port = FSCHAT_OPENAI_API["port"]
uvicorn.run(app, host=host, port=port)
def run_api_server(q: Queue, run_seq: int = 4):
from server.api import create_app
import uvicorn
app = create_app()
_set_app_seq(app, q, run_seq)
host = API_SERVER["host"]
port = API_SERVER["port"]
uvicorn.run(app, host=host, port=port)
def run_webui(q: Queue, run_seq: int = 5):
host = WEBUI_SERVER["host"]
port = WEBUI_SERVER["port"]
while True:
no = q.get()
if no != run_seq - 1:
q.put(no)
else:
break
q.put(run_seq)
p = subprocess.Popen(["streamlit", "run", "webui.py",
"--server.address", host,
"--server.port", str(port)])
p.wait()
def parse_args() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--all-webui",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers, run api.py and webui.py",
dest="all_webui",
)
parser.add_argument(
"--all-api",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers, run api.py",
dest="all_api",
)
parser.add_argument(
"--llm-api",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers",
dest="llm_api",
)
parser.add_argument(
"-o",
"--openai-api",
action="store_true",
help="run fastchat's controller/openai_api servers",
dest="openai_api",
)
parser.add_argument(
"-m",
"--model-worker",
action="store_true",
help="run fastchat's model_worker server with specified model name. specify --model-name if not using default LLM_MODEL",
dest="model_worker",
)
parser.add_argument(
"-n",
"--model-name",
type=str,
default=LLM_MODEL,
help="specify model name for model worker.",
dest="model_name",
)
parser.add_argument(
"-c",
"--controller",
type=str,
help="specify controller address the worker is registered to. default is server_config.FSCHAT_CONTROLLER",
dest="controller_address",
)
parser.add_argument(
"--api",
action="store_true",
help="run api.py server",
dest="api",
)
parser.add_argument(
"-w",
"--webui",
action="store_true",
help="run webui.py server",
dest="webui",
)
args = parser.parse_args()
return args
def dump_server_info(after_start=False):
import platform
import langchain
import fastchat
from configs.server_config import api_address, webui_address
print("\n\n")
print("=" * 30 + "Langchain-Chatchat Configuration" + "=" * 30)
print(f"操作系统:{platform.platform()}.")
print(f"python版本:{sys.version}")
print(f"项目版本:{VERSION}")
print(f"langchain版本:{langchain.__version__}. fastchat版本:{fastchat.__version__}")
print("\n")
print(f"当前LLM模型:{LLM_MODEL} @ {LLM_DEVICE}")
pprint(llm_model_dict[LLM_MODEL])
print(f"当前Embbedings模型: {EMBEDDING_MODEL} @ {EMBEDDING_DEVICE}")
if after_start:
print("\n")
print(f"服务端运行信息:")
if args.openai_api:
print(f" OpenAI API Server: {fschat_openai_api_address()}/v1")
print(" (请确认llm_model_dict中配置的api_base_url与上面地址一致。)")
if args.api:
print(f" Chatchat API Server: {api_address()}")
if args.webui:
print(f" Chatchat WEBUI Server: {webui_address()}")
print("=" * 30 + "Langchain-Chatchat Configuration" + "=" * 30)
print("\n\n")
if __name__ == "__main__":
import time
mp.set_start_method("spawn")
queue = Queue()
args = parse_args()
if args.all_webui:
args.openai_api = True
args.model_worker = True
args.api = True
args.webui = True
elif args.all_api:
args.openai_api = True
args.model_worker = True
args.api = True
args.webui = False
elif args.llm_api:
args.openai_api = True
args.model_worker = True
args.api = False
args.webui = False
dump_server_info()
logger.info(f"正在启动服务:")
logger.info(f"如需查看 llm_api 日志,请前往 {LOG_PATH}")
processes = {}
if args.openai_api:
process = Process(
target=run_controller,
name=f"controller({os.getpid()})",
args=(queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["controller"] = process
process = Process(
target=run_openai_api,
name=f"openai_api({os.getpid()})",
args=(queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["openai_api"] = process
if args.model_worker:
process = Process(
target=run_model_worker,
name=f"model_worker({os.getpid()})",
args=(args.model_name, args.controller_address, queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["model_worker"] = process
if args.api:
process = Process(
target=run_api_server,
name=f"API Server{os.getpid()})",
args=(queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["api"] = process
if args.webui:
process = Process(
target=run_webui,
name=f"WEBUI Server{os.getpid()})",
args=(queue, len(processes) + 1),
daemon=True,
)
process.start()
processes["webui"] = process
try:
# log infors
while True:
no = queue.get()
if no == len(processes):
time.sleep(0.5)
dump_server_info(True)
break
else:
queue.put(no)
if model_worker_process := processes.get("model_worker"):
model_worker_process.join()
for name, process in processes.items():
if name != "model_worker":
process.join()
except:
if model_worker_process := processes.get("model_worker"):
model_worker_process.terminate()
for name, process in processes.items():
if name != "model_worker":
process.terminate()
# 服务启动后接口调用示例:
# import openai
# openai.api_key = "EMPTY" # Not support yet
# openai.api_base = "http://localhost:8888/v1"
# model = "chatglm2-6b"
# # create a chat completion
# completion = openai.ChatCompletion.create(
# model=model,
# messages=[{"role": "user", "content": "Hello! What is your name?"}]
# )
# # print the completion
# print(completion.choices[0].message.content)
| [] |
2024-01-10 | wangxuqi/langchain-ChatGLM | server~knowledge_base~kb_service~default_kb_service.py | from typing import List
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from server.knowledge_base.kb_service.base import KBService
class DefaultKBService(KBService):
def do_create_kb(self):
pass
def do_drop_kb(self):
pass
def do_add_doc(self, docs: List[Document], embeddings: Embeddings):
pass
def do_clear_vs(self):
pass
def vs_type(self) -> str:
return "default"
def do_init(self):
pass
def do_search(self):
pass
def do_insert_multi_knowledge(self):
pass
def do_insert_one_knowledge(self):
pass
def do_delete_doc(self):
pass
| [] |
2024-01-10 | wangxuqi/langchain-ChatGLM | chains~llmchain_with_history.py | from langchain.chat_models import ChatOpenAI
from configs.model_config import llm_model_dict, LLM_MODEL
from langchain import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
model = ChatOpenAI(
streaming=True,
verbose=True,
# callbacks=[callback],
openai_api_key=llm_model_dict[LLM_MODEL]["api_key"],
openai_api_base=llm_model_dict[LLM_MODEL]["api_base_url"],
model_name=LLM_MODEL
)
human_prompt = "{input}"
human_message_template = HumanMessagePromptTemplate.from_template(human_prompt)
chat_prompt = ChatPromptTemplate.from_messages(
[("human", "我们来玩成语接龙,我先来,生龙活虎"),
("ai", "虎头虎脑"),
("human", "{input}")])
chain = LLMChain(prompt=chat_prompt, llm=model, verbose=True)
print(chain({"input": "恼羞成怒"})) | [
"human",
"[('human', '我们来玩成语接龙,我先来,生龙活虎'), ('ai', '虎头虎脑'), ('human', '{input}')]",
"{input}",
"我们来玩成语接龙,我先来,生龙活虎"
] |
2024-01-10 | wangxuqi/langchain-ChatGLM | server~knowledge_base~kb_doc_api.py | import os
import urllib
from fastapi import File, Form, Body, Query, UploadFile
from configs.model_config import (DEFAULT_VS_TYPE, EMBEDDING_MODEL, VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD)
from server.utils import BaseResponse, ListResponse
from server.knowledge_base.utils import validate_kb_name, list_docs_from_folder, KnowledgeFile
from fastapi.responses import StreamingResponse, FileResponse
import json
from server.knowledge_base.kb_service.base import KBServiceFactory
from typing import List, Dict
from langchain.docstore.document import Document
class DocumentWithScore(Document):
score: float = None
def search_docs(query: str = Body(..., description="用户输入", examples=["你好"]),
knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
top_k: int = Body(VECTOR_SEARCH_TOP_K, description="匹配向量数"),
score_threshold: float = Body(SCORE_THRESHOLD, description="知识库匹配相关度阈值,取值范围在0-1之间,SCORE越小,相关度越高,取到1相当于不筛选,建议设置在0.5左右", ge=0, le=1),
) -> List[DocumentWithScore]:
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return []
docs = kb.search_docs(query, top_k, score_threshold)
data = [DocumentWithScore(**x[0].dict(), score=x[1]) for x in docs]
return data
async def list_docs(
knowledge_base_name: str
) -> ListResponse:
if not validate_kb_name(knowledge_base_name):
return ListResponse(code=403, msg="Don't attack me", data=[])
knowledge_base_name = urllib.parse.unquote(knowledge_base_name)
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return ListResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}", data=[])
else:
all_doc_names = kb.list_docs()
return ListResponse(data=all_doc_names)
async def upload_doc(file: UploadFile = File(..., description="上传文件"),
knowledge_base_name: str = Form(..., description="知识库名称", examples=["kb1"]),
override: bool = Form(False, description="覆盖已有文件"),
not_refresh_vs_cache: bool = Form(False, description="暂不保存向量库(用于FAISS)"),
) -> BaseResponse:
if not validate_kb_name(knowledge_base_name):
return BaseResponse(code=403, msg="Don't attack me")
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
file_content = await file.read() # 读取上传文件的内容
try:
kb_file = KnowledgeFile(filename=file.filename,
knowledge_base_name=knowledge_base_name)
if (os.path.exists(kb_file.filepath)
and not override
and os.path.getsize(kb_file.filepath) == len(file_content)
):
# TODO: filesize 不同后的处理
file_status = f"文件 {kb_file.filename} 已存在。"
return BaseResponse(code=404, msg=file_status)
with open(kb_file.filepath, "wb") as f:
f.write(file_content)
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件上传失败,报错信息为: {e}")
try:
kb.add_doc(kb_file, not_refresh_vs_cache=not_refresh_vs_cache)
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件向量化失败,报错信息为: {e}")
return BaseResponse(code=200, msg=f"成功上传文件 {kb_file.filename}")
async def delete_doc(knowledge_base_name: str = Body(..., examples=["samples"]),
doc_name: str = Body(..., examples=["file_name.md"]),
delete_content: bool = Body(False),
not_refresh_vs_cache: bool = Body(False, description="暂不保存向量库(用于FAISS)"),
) -> BaseResponse:
if not validate_kb_name(knowledge_base_name):
return BaseResponse(code=403, msg="Don't attack me")
knowledge_base_name = urllib.parse.unquote(knowledge_base_name)
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
if not kb.exist_doc(doc_name):
return BaseResponse(code=404, msg=f"未找到文件 {doc_name}")
try:
kb_file = KnowledgeFile(filename=doc_name,
knowledge_base_name=knowledge_base_name)
kb.delete_doc(kb_file, delete_content, not_refresh_vs_cache=not_refresh_vs_cache)
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件删除失败,错误信息:{e}")
return BaseResponse(code=200, msg=f"{kb_file.filename} 文件删除成功")
async def update_doc(
knowledge_base_name: str = Body(..., examples=["samples"]),
file_name: str = Body(..., examples=["file_name"]),
not_refresh_vs_cache: bool = Body(False, description="暂不保存向量库(用于FAISS)"),
) -> BaseResponse:
'''
更新知识库文档
'''
if not validate_kb_name(knowledge_base_name):
return BaseResponse(code=403, msg="Don't attack me")
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
try:
kb_file = KnowledgeFile(filename=file_name,
knowledge_base_name=knowledge_base_name)
if os.path.exists(kb_file.filepath):
kb.update_doc(kb_file, not_refresh_vs_cache=not_refresh_vs_cache)
return BaseResponse(code=200, msg=f"成功更新文件 {kb_file.filename}")
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件更新失败,错误信息是:{e}")
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件更新失败")
async def download_doc(
knowledge_base_name: str = Query(..., examples=["samples"]),
file_name: str = Query(..., examples=["test.txt"]),
):
'''
下载知识库文档
'''
if not validate_kb_name(knowledge_base_name):
return BaseResponse(code=403, msg="Don't attack me")
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
try:
kb_file = KnowledgeFile(filename=file_name,
knowledge_base_name=knowledge_base_name)
if os.path.exists(kb_file.filepath):
return FileResponse(
path=kb_file.filepath,
filename=kb_file.filename,
media_type="multipart/form-data")
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 读取文件失败,错误信息是:{e}")
return BaseResponse(code=500, msg=f"{kb_file.filename} 读取文件失败")
async def recreate_vector_store(
knowledge_base_name: str = Body(..., examples=["samples"]),
allow_empty_kb: bool = Body(True),
vs_type: str = Body(DEFAULT_VS_TYPE),
embed_model: str = Body(EMBEDDING_MODEL),
):
'''
recreate vector store from the content.
this is usefull when user can copy files to content folder directly instead of upload through network.
by default, get_service_by_name only return knowledge base in the info.db and having document files in it.
set allow_empty_kb to True make it applied on empty knowledge base which it not in the info.db or having no documents.
'''
async def output():
kb = KBServiceFactory.get_service(knowledge_base_name, vs_type, embed_model)
if not kb.exists() and not allow_empty_kb:
yield {"code": 404, "msg": f"未找到知识库 ‘{knowledge_base_name}’"}
else:
kb.create_kb()
kb.clear_vs()
docs = list_docs_from_folder(knowledge_base_name)
for i, doc in enumerate(docs):
try:
kb_file = KnowledgeFile(doc, knowledge_base_name)
yield json.dumps({
"code": 200,
"msg": f"({i + 1} / {len(docs)}): {doc}",
"total": len(docs),
"finished": i,
"doc": doc,
}, ensure_ascii=False)
if i == len(docs) - 1:
not_refresh_vs_cache = False
else:
not_refresh_vs_cache = True
kb.add_doc(kb_file, not_refresh_vs_cache=not_refresh_vs_cache)
except Exception as e:
print(e)
yield json.dumps({
"code": 500,
"msg": f"添加文件‘{doc}’到知识库‘{knowledge_base_name}’时出错:{e}。已跳过。",
})
return StreamingResponse(output(), media_type="text/event-stream")
| [] |
2024-01-10 | wangxuqi/langchain-ChatGLM | server~knowledge_base~kb_service~faiss_kb_service.py | import os
import shutil
from configs.model_config import (
KB_ROOT_PATH,
CACHED_VS_NUM,
EMBEDDING_MODEL,
EMBEDDING_DEVICE,
SCORE_THRESHOLD
)
from server.knowledge_base.kb_service.base import KBService, SupportedVSType
from functools import lru_cache
from server.knowledge_base.utils import get_vs_path, load_embeddings, KnowledgeFile
from langchain.vectorstores import FAISS
from langchain.embeddings.base import Embeddings
from langchain.embeddings.huggingface import HuggingFaceEmbeddings,HuggingFaceBgeEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from typing import List
from langchain.docstore.document import Document
from server.utils import torch_gc
# make HuggingFaceEmbeddings hashable
def _embeddings_hash(self):
if isinstance(self, HuggingFaceEmbeddings):
return hash(self.model_name)
elif isinstance(self, HuggingFaceBgeEmbeddings):
return hash(self.model_name)
elif isinstance(self, OpenAIEmbeddings):
return hash(self.model)
HuggingFaceEmbeddings.__hash__ = _embeddings_hash
OpenAIEmbeddings.__hash__ = _embeddings_hash
HuggingFaceBgeEmbeddings.__hash__ = _embeddings_hash
_VECTOR_STORE_TICKS = {}
_VECTOR_STORE_TICKS = {}
@lru_cache(CACHED_VS_NUM)
def load_vector_store(
knowledge_base_name: str,
embed_model: str = EMBEDDING_MODEL,
embed_device: str = EMBEDDING_DEVICE,
embeddings: Embeddings = None,
tick: int = 0, # tick will be changed by upload_doc etc. and make cache refreshed.
):
print(f"loading vector store in '{knowledge_base_name}'.")
vs_path = get_vs_path(knowledge_base_name)
if embeddings is None:
embeddings = load_embeddings(embed_model, embed_device)
if not os.path.exists(vs_path):
os.makedirs(vs_path)
if "index.faiss" in os.listdir(vs_path):
search_index = FAISS.load_local(vs_path, embeddings, normalize_L2=True)
else:
# create an empty vector store
doc = Document(page_content="init", metadata={})
search_index = FAISS.from_documents([doc], embeddings, normalize_L2=True)
ids = [k for k, v in search_index.docstore._dict.items()]
search_index.delete(ids)
search_index.save_local(vs_path)
if tick == 0: # vector store is loaded first time
_VECTOR_STORE_TICKS[knowledge_base_name] = 0
return search_index
def refresh_vs_cache(kb_name: str):
"""
make vector store cache refreshed when next loading
"""
_VECTOR_STORE_TICKS[kb_name] = _VECTOR_STORE_TICKS.get(kb_name, 0) + 1
print(f"知识库 {kb_name} 缓存刷新:{_VECTOR_STORE_TICKS[kb_name]}")
class FaissKBService(KBService):
vs_path: str
kb_path: str
def vs_type(self) -> str:
return SupportedVSType.FAISS
@staticmethod
def get_vs_path(knowledge_base_name: str):
return os.path.join(FaissKBService.get_kb_path(knowledge_base_name), "vector_store")
@staticmethod
def get_kb_path(knowledge_base_name: str):
return os.path.join(KB_ROOT_PATH, knowledge_base_name)
def do_init(self):
self.kb_path = FaissKBService.get_kb_path(self.kb_name)
self.vs_path = FaissKBService.get_vs_path(self.kb_name)
def do_create_kb(self):
if not os.path.exists(self.vs_path):
os.makedirs(self.vs_path)
load_vector_store(self.kb_name)
def do_drop_kb(self):
self.clear_vs()
shutil.rmtree(self.kb_path)
def do_search(self,
query: str,
top_k: int,
score_threshold: float = SCORE_THRESHOLD,
embeddings: Embeddings = None,
) -> List[Document]:
search_index = load_vector_store(self.kb_name,
embeddings=embeddings,
tick=_VECTOR_STORE_TICKS.get(self.kb_name))
docs = search_index.similarity_search_with_score(query, k=top_k, score_threshold=score_threshold)
return docs
def do_add_doc(self,
docs: List[Document],
embeddings: Embeddings,
**kwargs,
):
vector_store = load_vector_store(self.kb_name,
embeddings=embeddings,
tick=_VECTOR_STORE_TICKS.get(self.kb_name, 0))
vector_store.add_documents(docs)
torch_gc()
if not kwargs.get("not_refresh_vs_cache"):
vector_store.save_local(self.vs_path)
refresh_vs_cache(self.kb_name)
def do_delete_doc(self,
kb_file: KnowledgeFile,
**kwargs):
embeddings = self._load_embeddings()
vector_store = load_vector_store(self.kb_name,
embeddings=embeddings,
tick=_VECTOR_STORE_TICKS.get(self.kb_name, 0))
ids = [k for k, v in vector_store.docstore._dict.items() if v.metadata["source"] == kb_file.filepath]
if len(ids) == 0:
return None
vector_store.delete(ids)
if not kwargs.get("not_refresh_vs_cache"):
vector_store.save_local(self.vs_path)
refresh_vs_cache(self.kb_name)
return True
def do_clear_vs(self):
shutil.rmtree(self.vs_path)
os.makedirs(self.vs_path)
refresh_vs_cache(self.kb_name)
def exist_doc(self, file_name: str):
if super().exist_doc(file_name):
return "in_db"
content_path = os.path.join(self.kb_path, "content")
if os.path.isfile(os.path.join(content_path, file_name)):
return "in_folder"
else:
return False
| [] |
2024-01-10 | wangxuqi/langchain-ChatGLM | server~knowledge_base~kb_service~milvus_kb_service.py | from typing import List
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import Milvus
from configs.model_config import SCORE_THRESHOLD, kbs_config
from server.knowledge_base.kb_service.base import KBService, SupportedVSType
from server.knowledge_base.utils import KnowledgeFile
class MilvusKBService(KBService):
milvus: Milvus
@staticmethod
def get_collection(milvus_name):
from pymilvus import Collection
return Collection(milvus_name)
@staticmethod
def search(milvus_name, content, limit=3):
search_params = {
"metric_type": "L2",
"params": {"nprobe": 10},
}
c = MilvusKBService.get_collection(milvus_name)
return c.search(content, "embeddings", search_params, limit=limit, output_fields=["content"])
def do_create_kb(self):
pass
def vs_type(self) -> str:
return SupportedVSType.MILVUS
def _load_milvus(self, embeddings: Embeddings = None):
if embeddings is None:
embeddings = self._load_embeddings()
self.milvus = Milvus(embedding_function=embeddings,
collection_name=self.kb_name, connection_args=kbs_config.get("milvus"))
def do_init(self):
self._load_milvus()
def do_drop_kb(self):
self.milvus.col.drop()
def do_search(self, query: str, top_k: int,score_threshold: float, embeddings: Embeddings):
# todo: support score threshold
self._load_milvus(embeddings=embeddings)
return self.milvus.similarity_search_with_score(query, top_k)
def add_doc(self, kb_file: KnowledgeFile, **kwargs):
"""
向知识库添加文件
"""
docs = kb_file.file2text()
self.milvus.add_documents(docs)
from server.db.repository.knowledge_file_repository import add_doc_to_db
status = add_doc_to_db(kb_file)
return status
def do_add_doc(self, docs: List[Document], embeddings: Embeddings, **kwargs):
pass
def do_delete_doc(self, kb_file: KnowledgeFile, **kwargs):
filepath = kb_file.filepath.replace('\\', '\\\\')
delete_list = [item.get("pk") for item in
self.milvus.col.query(expr=f'source == "{filepath}"', output_fields=["pk"])]
self.milvus.col.delete(expr=f'pk in {delete_list}')
def do_clear_vs(self):
if self.milvus.col:
self.milvus.col.drop()
if __name__ == '__main__':
# 测试建表使用
from server.db.base import Base, engine
Base.metadata.create_all(bind=engine)
milvusService = MilvusKBService("test")
milvusService.add_doc(KnowledgeFile("README.md", "test"))
milvusService.delete_doc(KnowledgeFile("README.md", "test"))
milvusService.do_drop_kb()
print(milvusService.search_docs("测试"))
| [] |
2024-01-10 | wangxuqi/langchain-ChatGLM | server~llm_api.py | from multiprocessing import Process, Queue
import multiprocessing as mp
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from configs.model_config import llm_model_dict, LLM_MODEL, LLM_DEVICE, LOG_PATH, logger
from server.utils import MakeFastAPIOffline
host_ip = "0.0.0.0"
controller_port = 20001
model_worker_port = 20002
openai_api_port = 8888
base_url = "http://127.0.0.1:{}"
def set_httpx_timeout(timeout=60.0):
import httpx
httpx._config.DEFAULT_TIMEOUT_CONFIG.connect = timeout
httpx._config.DEFAULT_TIMEOUT_CONFIG.read = timeout
httpx._config.DEFAULT_TIMEOUT_CONFIG.write = timeout
def create_controller_app(
dispatch_method="shortest_queue",
):
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.controller import app, Controller
controller = Controller(dispatch_method)
sys.modules["fastchat.serve.controller"].controller = controller
MakeFastAPIOffline(app)
app.title = "FastChat Controller"
return app
def create_model_worker_app(
worker_address=base_url.format(model_worker_port),
controller_address=base_url.format(controller_port),
model_path=llm_model_dict[LLM_MODEL].get("local_model_path"),
device=LLM_DEVICE,
gpus=None,
max_gpu_memory="20GiB",
load_8bit=False,
cpu_offloading=None,
gptq_ckpt=None,
gptq_wbits=16,
gptq_groupsize=-1,
gptq_act_order=False,
awq_ckpt=None,
awq_wbits=16,
awq_groupsize=-1,
model_names=[LLM_MODEL],
num_gpus=1, # not in fastchat
conv_template=None,
limit_worker_concurrency=5,
stream_interval=2,
no_register=False,
):
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.model_worker import app, GptqConfig, AWQConfig, ModelWorker, worker_id
import argparse
import threading
import fastchat.serve.model_worker
# workaround to make program exit with Ctrl+c
# it should be deleted after pr is merged by fastchat
def _new_init_heart_beat(self):
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=fastchat.serve.model_worker.heart_beat_worker, args=(self,), daemon=True,
)
self.heart_beat_thread.start()
ModelWorker.init_heart_beat = _new_init_heart_beat
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.model_path = model_path
args.model_names = model_names
args.device = device
args.load_8bit = load_8bit
args.gptq_ckpt = gptq_ckpt
args.gptq_wbits = gptq_wbits
args.gptq_groupsize = gptq_groupsize
args.gptq_act_order = gptq_act_order
args.awq_ckpt = awq_ckpt
args.awq_wbits = awq_wbits
args.awq_groupsize = awq_groupsize
args.gpus = gpus
args.num_gpus = num_gpus
args.max_gpu_memory = max_gpu_memory
args.cpu_offloading = cpu_offloading
args.worker_address = worker_address
args.controller_address = controller_address
args.conv_template = conv_template
args.limit_worker_concurrency = limit_worker_concurrency
args.stream_interval = stream_interval
args.no_register = no_register
if args.gpus:
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
if gpus and num_gpus is None:
num_gpus = len(gpus.split(','))
args.num_gpus = num_gpus
gptq_config = GptqConfig(
ckpt=gptq_ckpt or model_path,
wbits=args.gptq_wbits,
groupsize=args.gptq_groupsize,
act_order=args.gptq_act_order,
)
awq_config = AWQConfig(
ckpt=args.awq_ckpt or args.model_path,
wbits=args.awq_wbits,
groupsize=args.awq_groupsize,
)
# torch.multiprocessing.set_start_method('spawn')
worker = ModelWorker(
controller_addr=args.controller_address,
worker_addr=args.worker_address,
worker_id=worker_id,
model_path=args.model_path,
model_names=args.model_names,
limit_worker_concurrency=args.limit_worker_concurrency,
no_register=args.no_register,
device=args.device,
num_gpus=args.num_gpus,
max_gpu_memory=args.max_gpu_memory,
load_8bit=args.load_8bit,
cpu_offloading=args.cpu_offloading,
gptq_config=gptq_config,
awq_config=awq_config,
stream_interval=args.stream_interval,
conv_template=args.conv_template,
)
sys.modules["fastchat.serve.model_worker"].worker = worker
sys.modules["fastchat.serve.model_worker"].args = args
sys.modules["fastchat.serve.model_worker"].gptq_config = gptq_config
MakeFastAPIOffline(app)
app.title = f"FastChat LLM Server ({LLM_MODEL})"
return app
def create_openai_api_app(
controller_address=base_url.format(controller_port),
api_keys=[],
):
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.openai_api_server import app, CORSMiddleware, app_settings
app.add_middleware(
CORSMiddleware,
allow_credentials=True,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
app_settings.controller_address = controller_address
app_settings.api_keys = api_keys
MakeFastAPIOffline(app)
app.title = "FastChat OpeanAI API Server"
return app
def run_controller(q):
import uvicorn
app = create_controller_app()
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
q.put(1)
uvicorn.run(app, host=host_ip, port=controller_port)
def run_model_worker(q, *args, **kwargs):
import uvicorn
app = create_model_worker_app(*args, **kwargs)
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
while True:
no = q.get()
if no != 1:
q.put(no)
else:
break
q.put(2)
uvicorn.run(app, host=host_ip, port=model_worker_port)
def run_openai_api(q):
import uvicorn
app = create_openai_api_app()
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
while True:
no = q.get()
if no != 2:
q.put(no)
else:
break
q.put(3)
uvicorn.run(app, host=host_ip, port=openai_api_port)
if __name__ == "__main__":
mp.set_start_method("spawn")
queue = Queue()
logger.info(llm_model_dict[LLM_MODEL])
model_path = llm_model_dict[LLM_MODEL]["local_model_path"]
logger.info(f"如需查看 llm_api 日志,请前往 {LOG_PATH}")
if not model_path:
logger.error("local_model_path 不能为空")
else:
controller_process = Process(
target=run_controller,
name=f"controller({os.getpid()})",
args=(queue,),
daemon=True,
)
controller_process.start()
model_worker_process = Process(
target=run_model_worker,
name=f"model_worker({os.getpid()})",
args=(queue,),
# kwargs={"load_8bit": True},
daemon=True,
)
model_worker_process.start()
openai_api_process = Process(
target=run_openai_api,
name=f"openai_api({os.getpid()})",
args=(queue,),
daemon=True,
)
openai_api_process.start()
try:
model_worker_process.join()
controller_process.join()
openai_api_process.join()
except KeyboardInterrupt:
model_worker_process.terminate()
controller_process.terminate()
openai_api_process.terminate()
# 服务启动后接口调用示例:
# import openai
# openai.api_key = "EMPTY" # Not support yet
# openai.api_base = "http://localhost:8888/v1"
# model = "chatglm2-6b"
# # create a chat completion
# completion = openai.ChatCompletion.create(
# model=model,
# messages=[{"role": "user", "content": "Hello! What is your name?"}]
# )
# # print the completion
# print(completion.choices[0].message.content)
| [] |
2024-01-10 | wangxuqi/langchain-ChatGLM | server~knowledge_base~kb_service~pg_kb_service.py | from typing import List
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import PGVector
from sqlalchemy import text
from configs.model_config import EMBEDDING_DEVICE, kbs_config
from server.knowledge_base.kb_service.base import SupportedVSType, KBService
from server.knowledge_base.utils import load_embeddings, KnowledgeFile
class PGKBService(KBService):
pg_vector: PGVector
def _load_pg_vector(self, embedding_device: str = EMBEDDING_DEVICE, embeddings: Embeddings = None):
_embeddings = embeddings
if _embeddings is None:
_embeddings = load_embeddings(self.embed_model, embedding_device)
self.pg_vector = PGVector(embedding_function=_embeddings,
collection_name=self.kb_name,
connection_string=kbs_config.get("pg").get("connection_uri"))
def do_init(self):
self._load_pg_vector()
def do_create_kb(self):
pass
def vs_type(self) -> str:
return SupportedVSType.PG
def do_drop_kb(self):
with self.pg_vector.connect() as connect:
connect.execute(text(f'''
-- 删除 langchain_pg_embedding 表中关联到 langchain_pg_collection 表中 的记录
DELETE FROM langchain_pg_embedding
WHERE collection_id IN (
SELECT uuid FROM langchain_pg_collection WHERE name = '{self.kb_name}'
);
-- 删除 langchain_pg_collection 表中 记录
DELETE FROM langchain_pg_collection WHERE name = '{self.kb_name}';
'''))
connect.commit()
def do_search(self, query: str, top_k: int, score_threshold: float, embeddings: Embeddings):
# todo: support score threshold
self._load_pg_vector(embeddings=embeddings)
return self.pg_vector.similarity_search_with_score(query, top_k)
def add_doc(self, kb_file: KnowledgeFile, **kwargs):
"""
向知识库添加文件
"""
docs = kb_file.file2text()
self.pg_vector.add_documents(docs)
from server.db.repository.knowledge_file_repository import add_doc_to_db
status = add_doc_to_db(kb_file)
return status
def do_add_doc(self, docs: List[Document], embeddings: Embeddings, **kwargs):
pass
def do_delete_doc(self, kb_file: KnowledgeFile, **kwargs):
with self.pg_vector.connect() as connect:
filepath = kb_file.filepath.replace('\\', '\\\\')
connect.execute(
text(
''' DELETE FROM langchain_pg_embedding WHERE cmetadata::jsonb @> '{"source": "filepath"}'::jsonb;'''.replace(
"filepath", filepath)))
connect.commit()
def do_clear_vs(self):
self.pg_vector.delete_collection()
if __name__ == '__main__':
from server.db.base import Base, engine
Base.metadata.create_all(bind=engine)
pGKBService = PGKBService("test")
pGKBService.create_kb()
pGKBService.add_doc(KnowledgeFile("README.md", "test"))
pGKBService.delete_doc(KnowledgeFile("README.md", "test"))
pGKBService.drop_kb()
print(pGKBService.search_docs("测试"))
| [] |
2024-01-10 | wangxuqi/langchain-ChatGLM | server~chat~knowledge_base_chat.py | from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import (llm_model_dict, LLM_MODEL, PROMPT_TEMPLATE,
VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD)
from server.chat.utils import wrap_done
from server.utils import BaseResponse
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, List, Optional
import asyncio
from langchain.prompts.chat import ChatPromptTemplate
from server.chat.utils import History
from server.knowledge_base.kb_service.base import KBService, KBServiceFactory
import json
import os
from urllib.parse import urlencode
from server.knowledge_base.kb_doc_api import search_docs
def knowledge_base_chat(query: str = Body(..., description="用户输入", examples=["你好"]),
knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
top_k: int = Body(VECTOR_SEARCH_TOP_K, description="匹配向量数"),
score_threshold: float = Body(SCORE_THRESHOLD, description="知识库匹配相关度阈值,取值范围在0-1之间,SCORE越小,相关度越高,取到1相当于不筛选,建议设置在0.5左右", ge=0, le=1),
history: List[History] = Body([],
description="历史对话",
examples=[[
{"role": "user",
"content": "我们来玩成语接龙,我先来,生龙活虎"},
{"role": "assistant",
"content": "虎头虎脑"}]]
),
stream: bool = Body(False, description="流式输出"),
local_doc_url: bool = Body(False, description="知识文件返回本地路径(true)或URL(false)"),
request: Request = None,
):
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
history = [History.from_data(h) for h in history]
async def knowledge_base_chat_iterator(query: str,
kb: KBService,
top_k: int,
history: Optional[List[History]],
) -> AsyncIterable[str]:
callback = AsyncIteratorCallbackHandler()
model = ChatOpenAI(
streaming=True,
verbose=True,
callbacks=[callback],
openai_api_key=llm_model_dict[LLM_MODEL]["api_key"],
openai_api_base=llm_model_dict[LLM_MODEL]["api_base_url"],
model_name=LLM_MODEL,
openai_proxy=llm_model_dict[LLM_MODEL].get("openai_proxy")
)
docs = search_docs(query, knowledge_base_name, top_k, score_threshold)
context = "\n".join([doc.page_content for doc in docs])
input_msg = History(role="user", content=PROMPT_TEMPLATE).to_msg_template(False)
chat_prompt = ChatPromptTemplate.from_messages(
[i.to_msg_template() for i in history] + [input_msg])
chain = LLMChain(prompt=chat_prompt, llm=model)
# Begin a task that runs in the background.
task = asyncio.create_task(wrap_done(
chain.acall({"context": context, "question": query}),
callback.done),
)
source_documents = []
for inum, doc in enumerate(docs):
filename = os.path.split(doc.metadata["source"])[-1]
if local_doc_url:
url = "file://" + doc.metadata["source"]
else:
parameters = urlencode({"knowledge_base_name": knowledge_base_name, "file_name":filename})
url = f"{request.base_url}knowledge_base/download_doc?" + parameters
text = f"""出处 [{inum + 1}] [{filename}]({url}) \n\n{doc.page_content}\n\n"""
source_documents.append(text)
if stream:
async for token in callback.aiter():
# Use server-sent-events to stream the response
yield json.dumps({"answer": token,
"docs": source_documents},
ensure_ascii=False)
else:
answer = ""
async for token in callback.aiter():
answer += token
yield json.dumps({"answer": answer,
"docs": source_documents},
ensure_ascii=False)
await task
return StreamingResponse(knowledge_base_chat_iterator(query, kb, top_k, history),
media_type="text/event-stream")
| [
"虎头虎脑",
"我们来玩成语接龙,我先来,生龙活虎"
] |
2024-01-10 | sbucarion/merger-arb | scraper~scraper.py | import re
import time
import requests
from bs4 import BeautifulSoup
import openai
import pdfkit
import sqlite3
import os
from datetime import datetime
def get_all_filings(soup):
#Go through all filings with html text and accerssion
filings = []
for row in soup.findAll("tr"):
if "Accession Number" in row.text:
filings.append(row)
return filings
def get_acc_no(text):
# Extract the accession number using regex
match = re.search(r"Accession Number: (\d{10}-\d{2}-\d{6})", text)
if match:
accession_number = match.group(1)
return (accession_number)
def get_filing_metadata(filing):
for links in filing.findAll('a'):
href = links['href']
if ".htm" in href:
#"click" on link (just request that link)
x = requests.get(r"http://sec.gov" + href, headers=headers)
soup = BeautifulSoup(x.text, "html.parser")
cik = re.search(r"CIK:\s+(\d+)", soup.text).group(1)
for _ in soup.findAll('a'):
if "ix?doc=" in _['href']:
partial_link = _['href'].split("/ix?doc=")[-1]
filing_link = "http://sec.gov" + partial_link
return filing_link, cik
def get_filing_time(filing):
time_data = filing.findAll('td')[3]
date = time_data.contents[0]
time = time_data.contents[2]
datetime_obj = datetime.strptime(date + " " + time, '%Y-%m-%d %H:%M:%S')
unix_time = int(datetime_obj.timestamp())
return unix_time
def get_filing(filing_link):
raw_filing = BeautifulSoup(requests.get(filing_link, headers=headers).text, "html.parser").find("body").text
filing = clean_filing(raw_filing)
return filing
def clean_filing(raw_filing):
filing = raw_filing.replace("\n", " ").replace("\xa0", " ").strip()
filing = " ".join(filing.split())
filing = "UNITED STATES SECURITIES AND EXCHANGE COMMISSION" + \
filing.split("UNITED STATES SECURITIES AND EXCHANGE COMMISSION")[-1]
return filing.lower()
def is_filing_merger(filing_text):
#Need to make more solid determination
if "merger" not in filing_text:
return False
if "item 1.01".lower() in filing_text and "item 7.01".lower() in filing_text:
return True
return False
headers = {
"User-agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36"
}
base_url = r"https://www.sec.gov/cgi-bin/browse-edgar?company=&CIK=&type=8-K&owner=include&count=100&action=getcurrent"
current_dir = os.getcwd() + "\\"
path_wkhtmltopdf = current_dir.split("scraper")[0] + "wkhtmltopdf\\bin\\wkhtmltopdf.exe"
config = pdfkit.configuration(wkhtmltopdf=path_wkhtmltopdf)
DB_PATH = current_dir.split("scraper")[0] + "database\\filing_data.sqlite3"
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
while True:
latest_8k_filings = requests.get(base_url, headers=headers).text
soup = BeautifulSoup(latest_8k_filings, "html.parser")
#Iterate through all filings on page
filings = get_all_filings(soup)
x = time.time()
cursor.execute("SELECT accession_no, unix_number FROM seen_filings")
seen = cursor.fetchall()
all_accession_numbers = {row[0] for row in seen}
max_unix_number = max({row[1] for row in seen})
for filing in filings:
filing_acc_no = get_acc_no(filing.findAll('td')[2].text)
if filing_acc_no in all_accession_numbers:
time.sleep(2)
continue
filing_link, company_cik = get_filing_metadata(filing)
filing_time = get_filing_time(filing)
if max_unix_number > filing_time:
time.sleep(2)
continue
filing_text = get_filing(filing_link)
if is_filing_merger(filing_text.lower()):
print(True, filing_link)
try:
#Store metadata to db
cursor.execute("INSERT INTO data (accession_no,cik, unix_number) VALUES (?, ?, ?)",
(filing_acc_no, company_cik, filing_time))
# Commit the changes to the database
conn.commit()
except:
time.sleep(2)
continue
#Save as pdf to 8k folder
filings_path = current_dir.split("scraper")[0] + "8ks\\"
filing_path = filings_path + filing_acc_no + ".pdf"
pdfkit.from_url(filing_link, filing_path, configuration=config)
try:
cursor.execute("INSERT INTO seen_filings (accession_no, unix_number) VALUES (?, ?)",
(filing_acc_no, filing_time))
conn.commit()
except:
time.sleep(2)
continue
print("Scanned: ", filing_link)
print("_______________________________")
print("\n")
cursor.execute("SELECT accession_no, unix_number FROM seen_filings")
seen = cursor.fetchall()
all_accession_numbers = {row[0] for row in seen}
max_unix_number = max({row[1] for row in seen})
time.sleep(2) | [] |
2024-01-10 | entelecheia/thematos | src~thematos~models~base.py | import logging
import sys
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import tomotopy as tp
from hyfi import HyFI
from hyfi.task import BatchTask
from thematos.datasets import Corpus
from .config import LdaConfig, TrainConfig, TrainSummaryConfig, WordcloudConfig
from .prior import WordPrior
from .types import CoherenceMetrics, ModelSummary
logger = logging.getLogger(__name__)
class TopicModel(BatchTask):
_config_group_ = "/model"
_config_name_ = "topic"
task_name: str = "topic"
batch_name: str = "model"
model_type: str = "BASE"
wordprior: WordPrior = WordPrior()
corpus: Corpus = Corpus()
model_args: LdaConfig = LdaConfig()
train_args: TrainConfig = TrainConfig()
train_summary_args: TrainSummaryConfig = TrainSummaryConfig()
wc_args: WordcloudConfig = WordcloudConfig()
coherence_metric_list: List[str] = ["u_mass", "c_uci", "c_npmi", "c_v"]
eval_coherence: bool = True
set_wordprior: bool = False
autosave: bool = True
save_full: bool = True
verbose: bool = False
# internal attributes
_model_: Optional[Any] = None
_timestamp_: Optional[str] = None
_coherence_metrics_: Optional[CoherenceMetrics] = None
_model_summary_: Optional[ModelSummary] = None
_ll_per_words_: List[Tuple[int, float]] = []
_doc_ids_: List[Any] = None
_doc_topic_dists_df_: Optional[pd.DataFrame] = None
_topic_term_dists_df_: Optional[pd.DataFrame] = None
@property
def model_id(self) -> str:
model_type = self.model_type.upper()
margs = [model_type, self.batch_id, f"k({self.model_args.k})"]
return "_".join(margs)
@property
def model(self):
if self._model_ is None:
raise ValueError("Model has not been trained yet.")
return self._model_
@property
def coherence_metrics_dict(self) -> Dict:
return self._coherence_metrics_.model_dump() if self._coherence_metrics_ else {}
@property
def model_summary_dict(self) -> Dict:
return self._model_summary_.model_dump() if self._model_summary_ else {}
@property
def train_args_dict(self) -> Dict:
return self.train_args.kwargs if self.train_args else {}
@property
def model_args_dict(self) -> Dict:
return self.model_args.kwargs if self.model_args else {}
@property
def timestamp(self) -> str:
if self._timestamp_ is None:
raise ValueError("Model has not been trained yet.")
return self._timestamp_
@property
def tp_corpus(self) -> tp.utils.Corpus:
return self.corpus.corpus
@property
def doc_ids(self) -> List[Any]:
if not self._doc_ids_:
self._doc_ids_ = self.corpus.doc_ids
return self._doc_ids_
@property
def ll_per_words(self) -> Optional[pd.DataFrame]:
if not self._ll_per_words_:
logger.warning("No log-likelihood per word found.")
return None
return pd.DataFrame(self._ll_per_words_, columns=["iter", "ll_per_word"])
@property
def doc_topic_dists(self) -> np.ndarray:
dist_ = np.stack([doc.get_topic_dist() for doc in self.model.docs])
dist_ /= dist_.sum(axis=1, keepdims=True)
return dist_
@property
def topic_term_dists(self) -> np.ndarray:
return np.stack(
[self.model.get_topic_word_dist(k) for k in range(self.model.k)]
)
@property
def doc_num_words(self) -> np.ndarray:
return np.array([len(doc.words) for doc in self.model.docs])
@property
def used_vocab(self) -> List[str]:
return list(self.model.used_vocabs)
@property
def term_frequency(self) -> np.ndarray:
return self.model.used_vocab_freq
@property
def num_docs(self) -> int:
return len(self.model.docs)
@property
def num_words(self) -> int:
return self.model.num_words
@property
def num_total_vocabs(self) -> int:
return len(self.model.vocabs) if self.model.vocabs else None
@property
def num_used_vocab(self) -> int:
return len(self.model.used_vocabs)
@property
def num_topics(self) -> int:
"""Number of topics in the model
It is the same as the number of columns in the document-topic distribution.
"""
return self.model.k if self.model else len(self.doc_topic_dists[0])
@property
def topic_term_dists_df(self) -> pd.DataFrame:
return pd.DataFrame(self.topic_term_dists, columns=self.used_vocab)
def get_doc_topic_dists_df(
self,
doc_topic_dists: Optional[np.ndarray],
doc_ids: Optional[List[Any]],
) -> pd.DataFrame:
if len(doc_topic_dists) != len(doc_ids):
raise ValueError(
f"Number of inferred topics ({len(doc_topic_dists)}) does not match with number of documents ({len(doc_ids)})"
)
columns = [f"topic{i}" for i in range(self.num_topics)]
dists_df = pd.DataFrame(doc_topic_dists, columns=columns)
doc_id_df = pd.DataFrame(doc_ids, columns=["id"])
return pd.concat([doc_id_df, dists_df], axis=1)
@property
def model_file(self) -> str:
f_ = f"{self.model_id}.mdl"
self.model_dir.mkdir(parents=True, exist_ok=True)
return str(self.model_dir / f_)
@property
def ll_per_words_file(self) -> str:
f_ = f"{self.model_id}-ll_per_word.csv"
return str(self.output_dir / f_)
@property
def ll_per_words_fig_file(self) -> str:
f_ = f"{self.model_id}-ll_per_word.png"
return str(self.output_dir / f_)
@property
def doc_topic_dists_file(self) -> str:
f_ = f"{self.model_id}-doc_topic_dists.parquet"
return str(self.output_dir / f_)
@property
def topic_term_dists_file(self) -> str:
f_ = f"{self.model_id}-topic_term_dists.parquet"
return str(self.output_dir / f_)
@property
def used_vocab_file(self) -> str:
f_ = f"{self.model_id}-used_vocab.txt"
return str(self.output_dir / f_)
@property
def train_summary_file(self) -> str:
f_ = f"{self.model_id}-summary.txt"
return str(self.output_dir / f_)
@property
def batch_model_summary_file(self) -> str:
f_ = f"{self.batch_name}-summary.jsonl"
return str(self.output_dir / f_)
@property
def ldavis_file(self) -> str:
f_ = f"{self.model_id}-ldavis.html"
return str(self.output_dir / f_)
@property
def topic_wordcloud_file_format(self) -> str:
format_ = self.model_id + "-wordcloud_{topic_id:03d}.png"
return str(self.output_dir / "wordclouds" / format_)
def update_model_args(self, **kwargs) -> None:
self.model_args = self.model_args.model_copy(update=kwargs)
def _set_wordprior(self) -> None:
if self.wordprior is None:
logger.info("No word prior set.")
return
if self.verbose:
logger.info("Set word prior with %s.", self.wordprior)
for tno, words in self.wordprior.items():
if self.verbose:
logger.info("Set words %s to topic #%s as prior.", words, tno)
for word in words:
self.model.set_word_prior(
word,
[
self.wordprior.max_prior_weight
if i == int(tno)
else self.wordprior.min_prior_weight
for i in range(self.num_topics)
],
)
def train(self) -> None:
# reset model
self._model_ = None
if self.set_wordprior:
self._set_wordprior()
self._timestamp_ = datetime.now().strftime("%Y%m%d_%H%M%S")
# train model
self._train(self.model)
# save model
if self.eval_coherence:
self.eval_coherence_value()
if self.autosave:
self.save()
def _train(self, model: Any) -> None:
raise NotImplementedError
def eval_coherence_value(
self,
):
mdl = self.model
coh_metrics = {}
for metric in self.coherence_metric_list:
coh = tp.coherence.Coherence(mdl, coherence=metric)
average_coherence = coh.get_score()
coh_metrics[metric] = average_coherence
coherence_per_topic = [coh.get_score(topic_id=k) for k in range(mdl.k)]
if self.verbose:
logger.info("==== Coherence : %s ====", metric)
logger.info("Average: %s", average_coherence)
logger.info("Per Topic: %s", coherence_per_topic)
self._coherence_metrics_ = CoherenceMetrics(**coh_metrics)
def save(self) -> None:
self.save_model()
self.save_train_summary()
self.save_ll_per_words()
self.plot_ll_per_words()
self.save_dists_data()
self.save_topic_top_words()
self.generate_wordclouds()
self.save_ldavis()
self.save_model_summary()
self.save_config()
def save_model(self) -> None:
self.model.save(self.model_file, full=self.save_full)
logger.info("Model saved to %s", self.model_file)
def save_ll_per_words(self) -> None:
HyFI.save_dataframes(
self.ll_per_words, self.ll_per_words_file, verbose=self.verbose
)
def plot_ll_per_words(self) -> None:
df_ll = self.ll_per_words
ax = df_ll.plot(x="iter", y="ll_per_word", kind="line")
ax.set_xlabel("Iterations")
ax.set_ylabel("Log-likelihood per word")
ax.invert_yaxis()
ax.get_figure().savefig(self.ll_per_words_fig_file, dpi=300, transparent=False)
logger.info(
"Log-likelihood per word plot saved to %s", self.ll_per_words_fig_file
)
def save_train_summary(self) -> None:
coh_values = self.coherence_metrics_dict
original_stdout = sys.stdout
Path(self.train_summary_file).parent.mkdir(parents=True, exist_ok=True)
with open(self.train_summary_file, "w") as f:
sys.stdout = f # Change the standard output to the file.
self.model.summary(**self.train_summary_args.kwargs)
if coh_values:
print("<Topic Coherence Scores>")
for cm, cv in coh_values.items():
print(f"| {cm}: {cv}")
sys.stdout = original_stdout # Reset the standard output.
def save_model_summary(self) -> None:
self._model_summary_ = ModelSummary(
timestamp=self.timestamp,
model_id=self.model_id,
model_type=self.model_type,
num_docs=self.num_docs,
num_words=self.num_words,
num_total_vocabs=self.num_total_vocabs,
num_used_vocabs=self.num_used_vocab,
seed=self.seed,
model_args=self.model_args_dict,
train_args=self.train_args_dict,
perplexity=self.model.perplexity,
coherence=self.coherence_metrics_dict,
)
if not self.model_summary_dict:
logger.warning("Model summary is not available.")
HyFI.append_to_jsonl(
self.model_summary_dict,
self.batch_model_summary_file,
)
logger.info("Model summary saved to %s", self.batch_model_summary_file)
def save_dists_data(self):
doc_topic_dists_df = self.get_doc_topic_dists_df(
self.doc_topic_dists, self.doc_ids
)
if self.verbose:
logger.info("==== Document-Topic Distributions ====")
logger.info(doc_topic_dists_df.tail())
HyFI.save_dataframes(
doc_topic_dists_df,
self.doc_topic_dists_file,
verbose=self.verbose,
)
if self.verbose:
logger.info("==== Topic-Word Distributions ====")
logger.info(self.topic_term_dists_df.tail())
HyFI.save_dataframes(
self.topic_term_dists_df,
self.topic_term_dists_file,
verbose=self.verbose,
)
HyFI.save_wordlist(
self.used_vocab,
self.used_vocab_file,
verbose=self.verbose,
)
def load(
self,
batch_name: Optional[str] = None,
batch_num: Optional[int] = None,
filepath: Optional[Union[str, Path]] = None,
**config_kwargs,
):
super().load_config(
batch_name=batch_name,
batch_num=batch_num,
filepath=filepath,
**config_kwargs,
)
self._load_model()
self._load_ll_per_words()
self._load_dists_data()
def _load_ll_per_words(self):
ll_df = HyFI.load_dataframes(self.ll_per_words_file, verbose=self.verbose)
self._ll_per_words_ = [(ll.iter, ll.ll_per_word) for ll in ll_df.itertuples()]
def _load_dists_data(self):
self._doc_topic_dists_df_ = HyFI.load_dataframes(
self.doc_topic_dists_file, verbose=self.verbose
)
self._doc_ids_ = self._doc_topic_dists_df_["id"].values.tolist()
self._topic_term_dists_df_ = HyFI.load_dataframes(
self.topic_term_dists_file, verbose=self.verbose
)
def save_ldavis(self):
try:
import pyLDAvis # type: ignore[reportMissingImports]
except ImportError:
logger.warning(
"pyLDAvis is not installed. Please install it to save LDAvis."
)
return
prepared_data = pyLDAvis.prepare(
topic_term_dists=self.topic_term_dists,
doc_topic_dists=self.doc_topic_dists,
doc_lengths=self.doc_num_words,
vocab=self.used_vocab,
term_frequency=self.term_frequency,
start_index=0,
sort_topics=False,
)
pyLDAvis.save_html(prepared_data, self.ldavis_file)
logger.info("LDAvis saved to %s", self.ldavis_file)
def get_topic_words(
self,
topic_id: int,
top_n: int = 10,
) -> Dict[str, float]:
return dict(self.model.get_topic_words(topic_id, top_n=top_n))
@property
def topic_top_words_file(self) -> str:
f_ = f"{self.model_id}-topic_top_words.txt"
return str(self.output_dir / f_)
@property
def topic_top_words_dists_file(self) -> str:
f_ = f"{self.model_id}-topic_top_words_dists.csv"
return str(self.output_dir / f_)
def save_topic_top_words(self, top_n: int = 50):
# set of top words
topic_top_words = []
# tuple of (topic_id, word, freq) for each topic
topic_top_words_dists = []
for topic_id in range(self.num_topics):
topic_words = self.get_topic_words(topic_id, top_n=top_n)
topic_top_words.extend(topic_words.keys())
topic_words_freq_tuple = [
(topic_id, w, topic_words[w]) for w in topic_words
]
topic_top_words_dists.extend(topic_words_freq_tuple)
HyFI.save_wordlist(
list(set(topic_top_words)),
self.topic_top_words_file,
verbose=self.verbose,
)
HyFI.save_dataframes(
pd.DataFrame(topic_top_words_dists, columns=["topic_id", "word", "freq"]),
self.topic_top_words_dists_file,
verbose=self.verbose,
)
def generate_wordclouds(
self,
):
wc_args = self.wc_args
wc = wc_args.wc
images = []
for topic_id in range(self.num_topics):
output_file = self.topic_wordcloud_file_format.format(topic_id=topic_id)
img = wc.generate_from_frequencies(
self.get_topic_words(topic_id, top_n=wc_args.top_n),
output_file=output_file,
verbose=self.verbose,
)
images.append(img)
if wc_args.make_collage:
titles = wc_args.titles or [f"Topic {i}" for i in range(self.num_topics)]
logger.info("Making wordcloud collage with titles: %s", titles)
output_dir = self.output_dir / "wordcloud_collage"
output_file_format = self.model_id + "_wordcloud_{page_num:02d}.png"
HyFI.make_subplot_pages_from_images(
images,
num_images_per_page=wc_args.num_images_per_page,
num_cols=wc_args.num_cols,
num_rows=wc_args.num_rows,
output_dir=output_dir,
output_file_format=output_file_format,
titles=titles,
title_fontsize=wc_args.title_fontsize,
title_color=wc_args.title_color,
figsize=wc_args.figsize,
width_multiple=wc_args.width_multiple,
height_multiple=wc_args.height_multiple,
dpi=wc_args.dpi,
verbose=self.verbose,
)
@property
def inferred_doc_topic_dists_filename(self) -> str:
return f"{self.model_id}-inferred_doc_topic_dists.parquet"
def infer(
self,
corpus: Corpus,
output_file: Optional[Union[str, Path]] = None,
iterations: int = 100,
tolerance: float = -1,
num_workers: int = 0,
together: bool = False,
):
inferred_corpus, ll = self.model.infer(
corpus.corpus,
iter=iterations,
tolerance=tolerance,
workers=num_workers,
together=together,
)
logger.info("Number of documents inferred: %d", len(inferred_corpus))
output_file = output_file or (
self.output_dir / "inferred_topics" / self.inferred_doc_topic_dists_filename
)
doc_ids = corpus.doc_ids
doc_topic_dists = np.stack([doc.get_topic_dist() for doc in inferred_corpus])
doc_topic_dists /= doc_topic_dists.sum(axis=1, keepdims=True)
doc_topic_dists_df = self.get_doc_topic_dists_df(doc_topic_dists, doc_ids)
ll_df = pd.DataFrame({"log_likelihood": ll})
doc_topic_dists_df = pd.concat([doc_topic_dists_df, ll_df], axis=1)
if self.verbose:
logger.info("Inferred topics:\n%s", doc_topic_dists_df.head())
HyFI.save_dataframes(
doc_topic_dists_df,
output_file,
verbose=self.verbose,
)
logger.info("Inferred topics saved to %s", output_file)
| [] |
2024-01-10 | Arputikos/RawDataToXLSParserGPT | input.py | import openai
#INPUT
openai.api_key = 'YOUR OPENAI API KEY'
file_path = 'smartphones.xlsx'
def get_command_prompt(description, json):
return f"Mając opis smartfona ze strony internetowej, oraz wzór odpowiedzi w formacie json, odpowiedz w formacie json (według zadanego wzoru) uzupełniając wartości zmiennych faktycznymi danymi z opisu smartfona. Odpowiedzi mają być krótkie - dane/parametry, nie całym zdaniem. Pisz po polsku. Jeśli nie można stwierdzić czegoś gdyż brakuje danych w opisie, wpisz '???'. Opis smartfona: {description}\n\n Template json do odpowiedzi: {json}. Odpowiedz TYLKO dokumentem json, bez wyjaśnień."
description = """
Xiaomi Poco X5 Pro
Xiaomi Poco X5 Pro
MORE PICTURES
Released 2023, February 07
181g, 7.9mm thickness
Android 12, MIUI 14 for POCO
128GB/256GB storage, no card slot
27%
3,582,319 HITS
346
BECOME A FAN
6.67"
1080x2400 pixels
108MP
2160p
6/8GB RAM
Snapdragon 778G 5G
5000mAh
Li-Po
REVIEW
PRICES
PICTURES
COMPARE
OPINIONS
NETWORK Technology
GSM / HSPA / LTE / 5G
2G bands GSM 850 / 900 / 1800 / 1900 - SIM 1 & SIM 2
3G bands HSDPA 800 / 850 / 900 / 1700(AWS) / 1900 / 2100 - International
HSDPA 850 / 900 / 1700(AWS) / 1900 / 2100 - India
4G bands 1, 2, 3, 4, 5, 7, 8, 20, 28, 38, 40, 41, 66 - International
1, 2, 3, 5, 8, 40, 41 - India
5G bands 1, 3, 5, 7, 8, 20, 28, 38, 40, 41, 77, 78 SA/NSA/Sub6 - International
1, 3, 5, 8, 28, 40, 78 SA/NSA - India
Speed HSPA, LTE-A (CA), 5G
LAUNCH Announced 2023, February 06
Status Available. Released 2023, February 07
BODY Dimensions 162.9 x 76 x 7.9 mm (6.41 x 2.99 x 0.31 in)
Weight 181 g (6.38 oz)
Build Glass front (Gorilla Glass 5), plastic back, plastic frame
SIM Dual SIM (Nano-SIM, dual stand-by)
IP53, dust and splash resistant
DISPLAY Type AMOLED, 1B colors, 120Hz, Dolby Vision, HDR10+, 500 nits (typ), 900 nits (HBM)
Size 6.67 inches, 107.4 cm2 (~86.8% screen-to-body ratio)
Resolution 1080 x 2400 pixels, 20:9 ratio (~395 ppi density)
Protection Corning Gorilla Glass 5
PLATFORM OS Android 12, MIUI 14 for POCO
Chipset Qualcomm SM7325 Snapdragon 778G 5G (6 nm)
CPU Octa-core (1x2.4 GHz Cortex-A78 & 3x2.2 GHz Cortex-A78 & 4x1.9 GHz Cortex-A55)
GPU Adreno 642L
MEMORY Card slot No
Internal 128GB 6GB RAM, 256GB 8GB RAM
UFS 2.2
MAIN CAMERA Triple 108 MP, f/1.9, (wide), 1/1.52", 0.7µm, PDAF
8 MP, f/2.2, 120˚ (ultrawide), 1/4", 1.12µm
2 MP, f/2.4, (macro)
Features LED flash, HDR, panorama
Video 4K@30fps, 1080p@30/60/120fps, gyro-EIS
SELFIE CAMERA Single 16 MP, f/2.4, (wide), 1/3.06" 1.0µm
Features HDR, panorama
Video 1080p@30/60fps
SOUND Loudspeaker Yes, with stereo speakers
3.5mm jack Yes
24-bit/192kHz audio
COMMS WLAN Wi-Fi 802.11 a/b/g/n/ac/6, dual-band, Wi-Fi Direct
Wi-Fi 802.11 a/b/g/n/ac, dual-band - India
Bluetooth 5.2 (Intl), 5.1 (India), A2DP, LE
Positioning GPS, GLONASS, BDS, GALILEO
NFC Yes (market/region dependent)
Infrared port Yes
Radio No
USB USB Type-C 2.0, OTG
FEATURES Sensors Fingerprint (side-mounted), accelerometer, gyro, proximity, compass
BATTERY Type Li-Po 5000 mAh, non-removable
Charging 67W wired, PD3.0, QC4, 100% in 45 min (advertised)
5W reverse wired
MISC Colors Astral Black, Horizon Blue, Poco Yellow
Models 22101320G, 22101320I
Price € 247.99 / $ 245.00 / £ 279.99 / ₹ 19,299
TESTS Performance AnTuTu: 531398 (v9)
GeekBench: 2930 (v5.1)
GFXBench: 28fps (ES 3.1 onscreen)
Display Contrast ratio: Infinite (nominal)
Camera Photo / Video
Loudspeaker -24.9 LUFS (Very good)
Battery (old)
Endurance rating 113h
""" | [] |
2024-01-10 | 6-Sense-AI/KDT-SoundToShow | mysite~kdt~wav2lip~stt.py | from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
import pysrt
import datetime
import openai
def add_subtitles(video_path, srt_path, output_path):
video = VideoFileClip(video_path)
subs = pysrt.open(srt_path, encoding='utf-8')
subtitles = []
for sub in subs:
start_time = sub.start.to_time()
end_time = sub.end.to_time()
text = sub.text
subtitle_clip = TextClip(text, font='Arial-Bold', fontsize=24, color='white')
subtitle_clip = subtitle_clip.set_position(('center', 'bottom')).set_duration(
(datetime.timedelta(hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second) -
datetime.timedelta(hours=start_time.hour, minutes=start_time.minute, seconds=start_time.second))
.total_seconds()
)
subtitle_clip = subtitle_clip.set_start(
(datetime.timedelta(hours=start_time.hour, minutes=start_time.minute, seconds=start_time.second))
.total_seconds()
)
subtitles.append(subtitle_clip)
final_clip = CompositeVideoClip([video.set_audio(None)] + subtitles)
final_clip.write_videofile(output_path, codec='libx264', audio_codec='aac')
video_path = "video.mp4"
srt_path = "subtitles.srt"
output_path = "output.mp4"
add_subtitles(video_path, srt_path, output_path) | [] |
2024-01-10 | featbit/featbit | llm~remove-feature-flags~chat-completion-cli.py | import openai
import argparse
def main(apikey: str, code: str, ffKey: str, variation: str) -> None:
openai.api_key = "" + apikey
prompt = (
"```csharp "
+ code
+ "``` In the given code, eliminate the feature flags tied to the key `"
+ ffKey
+ "`, while preserving the code associated with the `"
+ variation
+ "` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions."
)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0301",
temperature=0,
messages=[
{
"role": "system",
"content": "Remove useless and expired feature flags in code.",
},
{
"role": "user",
"content": "Hello Assistant, as a developer, I need your assistance in cleaning up the code by removing unnecessary and expired feature flags. Please also help me maintain the relevant business logic that is directly or indirectly wrapped within these feature flags.",
},
{
"role": "user",
"content": '```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;(new Pmpt()).P(); public class Pmpt{public void P(){var o=new FbOptionsBuilder().Offline(true).Build();var c=new FbClient(o);var u=FbUser.Builder("anonymous").Build();var f1=c.BoolVariation("f1k",u,defaultValue:false);if(f1==true){var b=c.StringVariation("f2",u,defaultValue:"on");if(b=="on"){F.R1();F.R2();}}else if(f1==false)F.RN();else F.RN();}}``` In the given code, eliminate the feature flags tied to the key `f1k`, while preserving the code associated with the `true` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.',
},
{
"role": "assistant",
"content": '```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;(new Pmpt()).P(); public class Pmpt{public void P(){var o=new FbOptionsBuilder().Offline(true).Build();var c=new FbClient(o);var u=FbUser.Builder("anonymous").Build();var b=c.StringVariation("f2",u,defaultValue:"on");if(b=="on"){F.R1();F.R2();}}}```',
},
{
"role": "user",
"content": '```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;var o=new FbOptionsBuilder().Offline(true).Build();var c=new FbClient(o);(new Pmpt()).P(c); public class Pmpt{public void P(FbClient c){var u=FbUser.Builder("anonymous").Build();var f1=c.BoolVariation("f1",u,defaultValue:false);if(f1==true){F.R1();var b=c.StringVariation("f2k",u,defaultValue:"on");if(b=="on"){F.RR2();}}else F.R1();var f2=c.StringVariation("f2k",u,defaultValue:"on");if(f2=="t")F.RN3();else if(f2=="on")F.R2();else F.R1();}}``` In the given code, eliminate the feature flags tied to the key `f2k`, while preserving the code associated with the `on` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.',
},
{
"role": "assistant",
"content": '```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;var o=new FbOptionsBuilder().Offline(true).Build();var c=new FbClient(o);(new Pmpt()).P(c); public class Pmpt{public void P(FbClient c){var u=FbUser.Builder("anonymous").Build();var f1=c.BoolVariation("f1",u,defaultValue:false);if(f1==true){F.R1();F.RR2();}else F.R1();F.R2();}}```',
},
{
"role": "user",
"content": '```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;using System.Net;var option=new FbOptionsBuilder().Offline(true).Build();var client=new FbClient(option);(new Pmpt()).P(client); public class Pmpt{public bool P(FbClient c){var u=FbUser.Builder("anonymous").Build();var f1=c.BoolVariation("f-f-1",u,defaultValue:false);if(f1==true){F.R1();}else F.RNN1();if(f1){F.R1();}else F.RNN2();if(f1==false||!f1){return F.RNN();}return F.R1();}}``` In the given code, eliminate the feature flags tied to the key `f-f-1`, while preserving the code associated with the `false` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.',
},
{
"role": "assistant",
"content": "```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;using System.Net;var option=new FbOptionsBuilder().Offline(true).Build();var client=new FbClient(option);(new Pmpt()).P(client); public class Pmpt{public bool P(FbClient c){F.RNN1();F.RNN2();return F.RNN();}}```",
},
{
"role": "user",
"content": '```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using Microsoft.AspNetCore.Mvc;namespace T4Prompt.Controllers; [ApiController][Route("[controller]")]public class HelloController:ControllerBase{private readonly FbClient _client;public HelloController(FbClient client){_client=client;}[HttpGet]public string HelloWorld(){var u=FbUser.Builder("bob").Name("bob").Build();var variation=_client.StringVariation("language",u,"en-us");return variation switch{"zh-cn"=>"你好世界!","en-us"=>"Hello World!",_=>string.Empty};}}``` In the given code, eliminate the feature flags tied to the key `language`, while preserving the code associated with the `zh-cn` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.',
},
{
"role": "assistant",
"content": '```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using Microsoft.AspNetCore.Mvc;namespace T4Prompt.Controllers; [ApiController][Route("[controller]")]public class HelloController:ControllerBase{private readonly FbClient _client;public HelloController(FbClient client){_client=client;}[HttpGet]public string HelloWorld(){return"你好世界!";}}```',
},
{
"role": "user",
"content": '```csharp public class UProm{public string UP(FbClient c,FbUser user){string total="0",num1="3",num2="12";var ifC=c.BoolVariation("ifC",user,defaultValue:false);if(ifC==true){return total+num1+num2;}return total;}}``` In the given code, eliminate the feature flags tied to the key `ifC`, while preserving the code associated with the `true` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.',
},
{
"role": "assistant",
"content": '```csharp public class UProm{public string UP(FbClient c,FbUser user){string total="0",num1="3",num2="12";return total+num1+num2;}}```',
},
{
"role": "user",
"content": '```csharp public class Pmpt{public bool P(FbClient c){var u=FbUser.Builder("anonymous").Build();var f1=c.BoolVariation("f-f",u,defaultValue:false);if(f1==true){return F.RNN1();}return F.RNN();}}``` In the given code, eliminate the feature flags tied to the key `f-f`, while preserving the code associated with the `true` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.',
},
{
"role": "assistant",
"content": "```csharp public class Pmpt{public bool P(FbClient c){return F.RNN1();}}```",
},
{
"role": "user",
"content": prompt,
},
],
)
# print(response)
# remove ```csharp in` response.choices[0].message.content and ``` at the end of the string
print(response.choices[0].message.content.replace("```csharp", "")[:-3])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument("--apikey", required=True, help="OpenAI key.")
parser.add_argument("--ffKey", required=True, help="Feature Flag Key.")
parser.add_argument(
"--variation", required=True, help="Feature Flag valid return variation."
)
parser.add_argument("--codePath", required=True, help="Code source path.")
args = parser.parse_args()
# read file and give it to a string variable
with open(args.codePath, "r") as file:
codeSource = file.read()
main(args.apikey, codeSource, args.ffKey, args.variation)
| [
"```csharp PLACEHOLDER``` In the given code, eliminate the feature flags tied to the key `PLACEHOLDER`, while preserving the code associated with the `PLACEHOLDER` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.",
"zh-cn",
"```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using Microsoft.AspNetCore.Mvc;namespace T4Prompt.Controllers; [ApiController][Route(\"[controller]\")]public class HelloController:ControllerBase{private readonly FbClient _client;public HelloController(FbClient client){_client=client;}[HttpGet]public string HelloWorld(){var u=FbUser.Builder(\"bob\").Name(\"bob\").Build();var variation=_client.StringVariation(\"language\",u,\"en-us\");return variation switch{\"zh-cn\"=>\"你好世界!\",\"en-us\"=>\"Hello World!\",_=>string.Empty};}}``` In the given code, eliminate the feature flags tied to the key `language`, while preserving the code associated with the `zh-cn` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.",
"```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;var o=new FbOptionsBuilder().Offline(true).Build();var c=new FbClient(o);(new Pmpt()).P(c); public class Pmpt{public void P(FbClient c){var u=FbUser.Builder(\"anonymous\").Build();var f1=c.BoolVariation(\"f1\",u,defaultValue:false);if(f1==true){F.R1();var b=c.StringVariation(\"f2k\",u,defaultValue:\"on\");if(b==\"on\"){F.RR2();}}else F.R1();var f2=c.StringVariation(\"f2k\",u,defaultValue:\"on\");if(f2==\"t\")F.RN3();else if(f2==\"on\")F.R2();else F.R1();}}``` In the given code, eliminate the feature flags tied to the key `f2k`, while preserving the code associated with the `on` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.",
"Remove useless and expired feature flags in code.",
"```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;var o=new FbOptionsBuilder().Offline(true).Build();var c=new FbClient(o);(new Pmpt()).P(c); public class Pmpt{public void P(FbClient c){var u=FbUser.Builder(\"anonymous\").Build();var f1=c.BoolVariation(\"f1\",u,defaultValue:false);if(f1==true){F.R1();F.RR2();}else F.R1();F.R2();}}```",
"```csharp public class UProm{public string UP(FbClient c,FbUser user){string total=\"0\",num1=\"3\",num2=\"12\";var ifC=c.BoolVariation(\"ifC\",user,defaultValue:false);if(ifC==true){return total+num1+num2;}return total;}}``` In the given code, eliminate the feature flags tied to the key `ifC`, while preserving the code associated with the `true` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.",
"```csharp public class Pmpt{public bool P(FbClient c){var u=FbUser.Builder(\"anonymous\").Build();var f1=c.BoolVariation(\"f-f\",u,defaultValue:false);if(f1==true){return F.RNN1();}return F.RNN();}}``` In the given code, eliminate the feature flags tied to the key `f-f`, while preserving the code associated with the `true` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.",
"language",
"Hello Assistant, as a developer, I need your assistance in cleaning up the code by removing unnecessary and expired feature flags. Please also help me maintain the relevant business logic that is directly or indirectly wrapped within these feature flags.",
"```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;(new Pmpt()).P(); public class Pmpt{public void P(){var o=new FbOptionsBuilder().Offline(true).Build();var c=new FbClient(o);var u=FbUser.Builder(\"anonymous\").Build();var b=c.StringVariation(\"f2\",u,defaultValue:\"on\");if(b==\"on\"){F.R1();F.R2();}}}```",
"```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;using System.Net;var option=new FbOptionsBuilder().Offline(true).Build();var client=new FbClient(option);(new Pmpt()).P(client); public class Pmpt{public bool P(FbClient c){var u=FbUser.Builder(\"anonymous\").Build();var f1=c.BoolVariation(\"f-f-1\",u,defaultValue:false);if(f1==true){F.R1();}else F.RNN1();if(f1){F.R1();}else F.RNN2();if(f1==false||!f1){return F.RNN();}return F.R1();}}``` In the given code, eliminate the feature flags tied to the key `f-f-1`, while preserving the code associated with the `false` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.",
"en-us",
"```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;(new Pmpt()).P(); public class Pmpt{public void P(){var o=new FbOptionsBuilder().Offline(true).Build();var c=new FbClient(o);var u=FbUser.Builder(\"anonymous\").Build();var f1=c.BoolVariation(\"f1k\",u,defaultValue:false);if(f1==true){var b=c.StringVariation(\"f2\",u,defaultValue:\"on\");if(b==\"on\"){F.R1();F.R2();}}else if(f1==false)F.RN();else F.RN();}}``` In the given code, eliminate the feature flags tied to the key `f1k`, while preserving the code associated with the `true` return value. Also, maintain any other code not related to these feature flags. Ignore the defaultValue. Provide just the code, excluding any descriptions.",
"```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using Microsoft.AspNetCore.Mvc;namespace T4Prompt.Controllers; [ApiController][Route(\"[controller]\")]public class HelloController:ControllerBase{private readonly FbClient _client;public HelloController(FbClient client){_client=client;}[HttpGet]public string HelloWorld(){return\"你好世界!\";}}```",
"```csharp public class Pmpt{public bool P(FbClient c){return F.RNN1();}}```",
"anonymous",
"```csharp public class UProm{public string UP(FbClient c,FbUser user){string total=\"0\",num1=\"3\",num2=\"12\";return total+num1+num2;}}```",
"Hello World!",
"```csharp using FeatBit.Sdk.Server;using FeatBit.Sdk.Server.Model;using FeatBit.Sdk.Server.Options;using System.Net;var option=new FbOptionsBuilder().Offline(true).Build();var client=new FbClient(option);(new Pmpt()).P(client); public class Pmpt{public bool P(FbClient c){F.RNN1();F.RNN2();return F.RNN();}}```",
"on"
] |
2024-01-10 | ThijsRay/TU-Delft-CTF-2022 | ml~neural-net-language~src~challenge.py | import openai
openai.organization = "org-zrVx9t5O4VSux7sC0aQcHWWD"
openai.api_key = "[REDACTED]"
model = "text-curie-001"
user_input = input("Translate (max 100 chars.): ")
sanitized_input = user_input[:100].replace("\n", " ")
prompt = f"""The flag is TUDCTF[REDACTED].
We never write the flag in translations.
Translate the following text to Dutch: apples are tasty
Translation: appels zijn lekker
Translate the following text to Dutch: {sanitized_input}
Translation: """
res = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=100,
temperature=0
)
print("Translation:", res["choices"][0]["text"].strip())
| [
"The flag is TUDCTF[REDACTED].\nWe never write the flag in translations.\n\nTranslate the following text to Dutch: apples are tasty\nTranslation: appels zijn lekker\n\nTranslate the following text to Dutch: PLACEHOLDER\nTranslation: "
] |
2024-01-10 | Social-merit/ChatGPTAndLangChain | main1.py | # Import necessary modules and classes
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain
from dotenv import load_dotenv
import argparse
# Load environment variables from .env file
load_dotenv()
# Initialize argument parser and define command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--task", default="return a list of numbers")
parser.add_argument("--language", default="python")
args = parser.parse_args()
# Initialize OpenAI instance
llm = OpenAI()
# Create a prompt template for generating code
code_prompt = PromptTemplate(
input_variables=["task", "language"],
template="Write a very short {language} function that will {task}."
)
# Create a prompt template for generating test code
test_prompt = PromptTemplate(
input_variables=["language", "code"],
template="Write a test for the following {language} code:\n{code}"
)
# Create a language model chain for generating code
code_chain = LLMChain(
llm=llm,
prompt=code_prompt,
output_key="code"
)
# Create a language model chain for generating test code
test_chain = LLMChain(
llm=llm,
prompt=test_prompt,
output_key="test"
)
# Create a sequential chain that runs both the code and test chains
chain = SequentialChain(
chains=[code_chain, test_chain],
input_variables=["task", "language"],
output_variables=["test", "code"]
)
# Execute the sequential chain and store the result
result = chain({
"language": args.language,
"task": args.task
})
# Print the generated code and test code
print(">>>>>> GENERATED CODE:")
print(result["code"])
print(">>>>>> GENERATED TEST:")
print(result["test"])
| [
"language",
"Write a very short {language} function that will {task}.",
"Write a test for the following {language} code:\n{code}"
] |
2024-01-10 | frankwxu/digital-forensics-lab | AI4Forensics~IP_Identifier_Fine_Tuning~code~check_fine_tune_job_status.py | import openai
openai.api_key = "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"
fine_tune_job_id = "ft-3DA3kW5k0BaHoWqwjJUB6Xwf"
# Retrieve fine-tuning job details
fine_tune_job = openai.FineTune.retrieve(fine_tune_job_id)
# Check status and other information
print("Fine-tuned Model: " + fine_tune_job.fine_tuned_model)
print("Status: " + fine_tune_job.status)
print("Job Results: ========")
print(fine_tune_job.result_files)
| [] |
2024-01-10 | frankwxu/digital-forensics-lab | AI4Forensics~IP_Identifier_Fine_Tuning~Calculate_Confusion_Matrix~code~train_model.py | import os
import openai
import project_keys
import subprocess
openai.api_key = project_keys.key
# Path to training data
training_dataset = "training400_dataset.jsonl"
# Upload file to openai
upload = openai.File.create(file=open(training_dataset, "rb"), purpose="fine-tune")
validation_upload = openai.File.create(
file=open(training_dataset, "rb"), purpose="fine-tune"
)
# Get file id
file_id = upload["id"]
validation_id = validation_upload["id"]
fine_tune = openai.FineTune.create(training_file=file_id, model="ada")
# Fine tuning job id
job_id = fine_tune["id"]
# print(f"The Job id is {job_id}")
command = f"openai api fine_tunes.follow -i {job_id}"
completed_process = subprocess.run(
command, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
print(completed_process.stdout)
| [] |
2024-01-10 | frankwxu/digital-forensics-lab | AI4Forensics~IP_Identifier_Fine_Tuning~code~upload_dataset_train.py | import os
import openai
openai.api_key = "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"
# Path to training data
training_dataset = "./openAI/dataset_train.jsonl"
# Upload file to openai
upload = openai.File.create(file=open(training_dataset, "rb"), purpose="fine-tune")
# Get file id
print(upload["id"])
| [] |
2024-01-10 | frankwxu/digital-forensics-lab | AI4Forensics~IP_Identifier_Fine_Tuning~code~predict_ip_single_line.py | import openai
openai.api_key = "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"
FINE_TUNED_MODEL = "ada:ft-university-of-baltimore-2023-08-14-12-39-06"
# take one positive and one negative sample from dataset_test.jsonl
prompt_neg = "\t<string>3608.80.24.1.8</string>;608.80.24.1 ->"
prompt_pos = "\t<string>2.20.31.4</string>;2.20.31.4 ->"
def get_completion(prompt, max_token):
return openai.Completion.create(
model=FINE_TUNED_MODEL, max_tokens=max_token, prompt=prompt
).choices[0]["text"]
# 1 means 1 token
print(f"The truth:'Negative', the predition: '{get_completion(prompt_neg, 1)}'")
print(f"The truth:'Positive', the predition: '{get_completion(prompt_pos, 1)}'")
| [
"\t<string>3608.80.24.1.8</string>;608.80.24.1 ->",
"\t<string>2.20.31.4</string>;2.20.31.4 ->"
] |
2024-01-10 | frankwxu/digital-forensics-lab | AI4Forensics~IP_Identifier_Fine_Tuning~code~view_fine_tuning_steps.py | import openai
openai.api_key = "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"
# Returns the contents of the specified file
content = openai.File.download("file-ngHRotibSy82Mr55IjsPP2uK")
# Save content to a local file
file_name = "./openAI/fine_tune_steps.txt" # Change to the desired file name
with open(file_name, "wb") as f:
f.write(content)
print(f"File '{file_name}' saved successfully.")
| [] |
2024-01-10 | thoddnn/open-datagen | opendatagen~template.py | from pydantic import BaseModel, validator, ValidationError, ConfigDict
from typing import Optional, List, Dict, Union
from enum import Enum
import os
import json
from opendatagen.utils import load_file
from opendatagen.model import OpenAIChatModel, OpenAIInstructModel, OpenAIEmbeddingModel, HuggingFaceModel, Model, EmbeddingModel
from urllib.parse import quote_plus
import requests
import trafilatura
from PyPDF2 import PdfReader
import pandas as pd
from datasets import load_dataset, Dataset
from opendatagen.utils import get_first_n_tokens, num_tokens_from_string
import random
import uuid
import re
import pandas as pd
class RAGHuggingFace(BaseModel):
dataset_path:str
dataset_name:Optional[str] = None
data_dir:Optional[str] = None
column_name:str
streaming:bool = True
min_tokens:Optional[int] = 0
max_tokens:Optional[int] = None
subset_size:Optional[int] = 10000
class Config:
extra = "forbid"
def get_random_value_from_dataset(self):
param = {}
if self.dataset_path:
param["path"] = self.dataset_path
if self.data_dir:
param["data_dir"] = self.data_dir
if self.dataset_name:
param["name"] = self.dataset_name
param["streaming"] = self.streaming
dst = load_dataset(**param)
subset = [sample[self.column_name] for _, sample in zip(range(self.subset_size), dst["train"])]
max_attempts = 50
count = 0
while count < max_attempts:
index = random.randint(0, len(subset) - 1)
text = subset[index]
num_tokens = num_tokens_from_string(text, encoding_name="cl100k_base")
if num_tokens >= self.min_tokens:
if self.max_tokens:
text = subset[index]
result = get_first_n_tokens(n=self.max_tokens, text=text, encoding_name="cl100k_base")
return result
else:
result = subset[index]
return result
count = count + 1
class RAGLocalPath(BaseModel):
localPath:Optional[str] = None
directoryPath:Optional[str] = None
content:Optional[str] = None
randomize:Optional[bool] = False
sample_size: Optional[float] = 0.1
class Config:
extra = "forbid"
def get_random_csv_chunk(self, df: pd.DataFrame):
# Randomly sample a fraction of the dataframe rows
return df.sample(frac=self.sample_size)
def get_random_text_chunk(self, text):
sentences = re.split(r'(?<=[.!?])\s+', text)
sample_size = max(1, int(len(sentences) * self.sample_size))
selected_sentences = random.sample(sentences, sample_size)
result = ' '.join(selected_sentences)
return result
def get_content_from_file(self):
file_content = ''
if self.localPath.endswith('.csv'):
df = pd.read_csv(self.localPath)
df = df.astype(str)
if self.randomize:
df = self.get_random_csv_chunk(df)
file_content = df.to_string(header=True, index=False, max_rows=None)
elif self.localPath.endswith('.txt'):
with open(self.localPath, 'r') as file:
file_content = file.read()
if self.randomize:
file_content = self.get_random_text_chunk(file_content)
elif self.localPath.endswith('.pdf'):
reader = PdfReader(self.localPath)
text = ''
for page in reader.pages:
text += page.extract_text() + '\n'
if self.randomize:
file_content = self.get_random_text_chunk(text)
else:
file_content = text
else:
raise ValueError("Unsupported file format")
self.content = file_content
return file_content
def get_content_from_directory(self):
"""
Iterates over files in the directory, reads their content,
and concatenates it into a single string.
"""
concatenated_content = ''
for filename in os.listdir(self.directoryPath):
filepath = os.path.join(self.directoryPath, filename)
if filepath.endswith(('.csv', '.txt', '.pdf')):
self.localPath = filepath # Temporarily update the localPath
file_content = self.get_content_from_file()
concatenated_content += file_content + '\n'
self.content = concatenated_content # Store concatenated content
return concatenated_content
class RAGInternet(BaseModel):
keywords:List[str]
return_chunks: Optional[bool] = False
minimum_number_of_words_by_article: Optional[int] = 500
maximum_number_of_words_by_article: Optional[int] = 50000
content: Optional[str] = None
def word_counter(self, input_string):
# Split the string into words based on whitespace
words = input_string.split()
# Count the number of words
number_of_words = len(words)
return number_of_words
def get_google_search_result(self, keyword:dict, maximum_number_of_link:int = None):
encoded_keyword = quote_plus(keyword)
url = f"https://api.serply.io/v1/search/q={encoded_keyword}"
headers = {
"Content-Type": "application/json",
"X-User-Agent": "",
"X-Proxy-Location": "",
"X-Api-Key": os.environ.get("SERPLY_API_KEY"),
"X-Proxy-Location": "US"
}
response = requests.request("GET", url, headers=headers)
response_json = json.loads(response.text)["results"]
result = []
for element in response_json:
link = element['link']
result.append(link)
if maximum_number_of_link:
return result[:maximum_number_of_link]
return result
def get_content_from_url(self, link:str):
downloaded = trafilatura.fetch_url(link)
content = trafilatura.extract(downloaded)
return content
def extract_content_from_internet(self):
print(f"Browsing...")
for keyword in self.keywords:
result = ""
urls = self.get_google_search_result(keyword)
for url in urls:
content = self.get_content_from_url(url)
if content and self.word_counter(content) > self.minimum_number_of_words_by_article and self.word_counter(content) < self.maximum_number_of_words_by_article:
print(url)
result = result + "\n" + content
print("Finish browsing...")
self.content = result
return result
class Validator(BaseModel):
function_name:str
additional_parameters:Optional[List[str]] = None
from_notebook:bool = False
retry_number:Optional[int] = 3
class Variations(BaseModel):
id:str
parent_id:Optional[str] = None
value:str
confidence_score:Optional[Dict] = None
error_message:str = None
class Config:
extra = "forbid" # This will raise an error for extra fields
class Variable(BaseModel):
name: str
models:Optional[List[Model]] = None
generation_number: int = 1
source_internet: Optional[RAGInternet] = None
source_localfile: Optional[RAGLocalPath] = None
source_localdirectory: Optional[RAGLocalPath] = None
source_huggingface:Optional[RAGHuggingFace] = None
get_value_from_huggingface:Optional[RAGHuggingFace] = None
get_value_from_localfile:Optional[RAGLocalPath] = None
note: Optional[List[str]] = None
rag_content: Optional[str] = None
validator:Optional[Validator] = None
values:Optional[Dict[str, Variations]] = {}
model_config = ConfigDict(
protected_namespaces=('protect_me_', 'also_protect_'),
extra = "forbid"
)
def load_internet_source(self):
if self.source_internet is not None:
self.rag_content = self.source_internet.extract_content_from_internet()
def load_local_file(self):
if self.source_localfile is not None and self.source_localfile.localPath is not None:
self.rag_content = self.source_localfile.get_content_from_file()
def load_local_directory(self):
if self.source_localfile is not None and self.source_localfile.directoryPath is not None:
self.rag_content = self.source_localfile.get_content_from_directory()
def load_huggingface_dataset(self):
if self.source_huggingface is not None:
self.rag_content = self.source_huggingface.get_random_value_from_dataset()
def load_value(self):
if self.get_value_from_huggingface:
self.value = self.get_value_from_huggingface.get_random_value_from_dataset(max_token=self.max_tokens)
class Decontomination(BaseModel):
embedding_model:Optional[EmbeddingModel] = None
threshold: Optional[float] = 0.99
exclude_string:Optional[List[str]] = None
class Template(BaseModel):
description: str
prompt: str
completion: str
prompt_variation_number: Optional[int] = 1
variables: Optional[Dict[str, Variable]] = None
source_internet: Optional[RAGInternet] = None
source_localfile: Optional[RAGLocalPath] = None
rag_content: Optional[str] = None
value:Optional[List[str]] = None
decontamination: Optional[Decontomination] = None
class Config:
extra = "forbid" # This will raise an error for extra fields
def load_internet_source(self):
if self.source_internet is not None:
self.rag_content = self.source_internet.extract_content_from_internet()
def load_local_file(self):
if self.source_localfile is not None and self.source_localfile.localPath is not None:
self.rag_content = self.source_localfile.get_content_from_file()
def load_local_directory(self):
if self.source_localfile is not None and self.source_localfile.directoryPath is not None:
self.rag_content = self.source_localfile.get_content_from_directory()
class TemplateName(Enum):
PRODUCT_REVIEW = "product-review"
CHUNK = "chunk"
CHUNK2 = "chunk2"
HALLUCINATION = "hallucination"
class TemplateManager:
def __init__(self, template_file_path:str):
self.template_file_path = self.get_template_file_path(template_file_path)
self.templates = self.load_templates()
def get_template_file_path(self, filename: str) -> str:
base_path = os.getcwd()
if os.path.isabs(filename):
return filename
else:
return os.path.join(base_path, filename)
def load_templates(self) -> Dict[str, Template]:
with open(self.template_file_path, 'r') as file:
raw_data = json.load(file)
templates = {}
for key, data in raw_data.items():
template_name = key
template = Template(**data)
templates[template_name] = template
return templates
def get_template(self, template_name: str) -> Template:
template = self.templates.get(template_name)
if template:
template.load_internet_source()
template.load_local_file()
template.load_local_directory()
return template
def create_variable_from_name(model:OpenAIChatModel, variable_name:str) -> Variable:
prompt = load_file(path="files/variable_generation.txt")
prompt = prompt.format(variable_name=variable_name)
completion = model.ask_instruct_gpt(prompt=prompt, temperature=0, max_tokens=30)
return Variable(**completion)
| [
"files/variable_generation.txt",
"{}"
] |
2024-01-10 | thoddnn/open-datagen | opendatagen~anonymizer.py | import re
import spacy
from opendatagen.model import OpenAIChatModel, ModelName
from opendatagen.utils import load_file
class Anonymizer:
NER_PLACEHOLDER = {
"PERSON": "{person}",
"ORG": "{organization}",
"GPE": "{location}",
"DATE": "{date}",
"TIME": "{time}",
"NORP": "{group}",
"FAC": "{facility}",
"LOC": "{location}",
"PRODUCT": "{product}",
"EVENT": "{event}",
"WORK_OF_ART": "{artwork}",
"LAW": "{law}",
"LANGUAGE": "{language}",
"MONEY": "{money}",
"PERCENT": "{percentage}",
"ORDINAL": "{ordinal}",
"CARDINAL": "{number}",
# Add more if needed
}
REGEX_PATTERN = {
"{phone_number}": r"\+?\d{1,4}?[-.\s]?\(?\d{1,3}?\)?[-.\s]?\d{1,4}[-.\s]?\d{1,4}[-.\s]?\d{1,9}",
"{email}": r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b",
"{credit_card_pattern}": r"\d{4}[-\s]?\d{4}[-\s]?\d{4}[-\s]?\d{4}",
"{address_pattern}": r"\d{1,5}\s\w+(\s\w+)*,\s\w+,\s\w+(\s\w+)*",
"{date_pattern}": r"(\d{4}[-/]\d{1,2}[-/]\d{1,2})|(\d{1,2}[-/]\d{1,2}[-/]\d{4})",
"{time_pattern}": r"(?:[01]\d|2[0-3]):[0-5]\d",
"{ipv4_pattern}": r"\b(?:\d{1,3}\.){3}\d{1,3}\b",
"{url_pattern}": r"https?://(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)",
"{ssn_pattern}": r"\d{3}-\d{2}-\d{4}",
"{license_plate_pattern}": r"[A-Z0-9]{2,}-[A-Z0-9]{2,}",
"{zip_code_pattern}": r"\d{5}(-\d{4})?",
"{vin_pattern}": r"[A-HJ-NPR-Z0-9]{17}",
"{iban_pattern}": r"[A-Z]{2}\d{2}[A-Z0-9]{1,30}",
"{driver_license_pattern}": r"[A-Z]{1,2}-\d{4,9}"
}
def __init__(self, completion_model:OpenAIChatModel):
self.nlp = spacy.load("en_core_web_sm")
self.ner_prompt = load_file("files/ner.txt")
self.completion_model = completion_model
def regex_anonymization(self, text: str) -> str:
for replacement, pattern in self.REGEX_PATTERN.items():
text = re.sub(pattern, replacement, text)
return text
def ner_anonymization(self, text: str) -> str:
doc = self.nlp(text)
for entity in doc.ents:
placeholder = self.NER_PLACEHOLDER.get(entity.label_)
if placeholder:
text = text.replace(entity.text, placeholder)
return text
def llm_anonymization(self, text: str) -> str:
completion = self.completion_model.ask(
system_prompt=self.ner_prompt,
user_prompt=text,
max_tokens=126,
temperature=0
)
return completion
def anonymize(self, text: str) -> str:
text = self.regex_anonymization(text)
text = self.ner_anonymization(text)
return self.llm_anonymization(text)
| [] |
2024-01-10 | thoddnn/open-datagen | opendatagen~data_generator.py |
from dotenv import load_dotenv
import numpy as np
import time
import random
import re
import json
import requests
from urllib.parse import quote
from re import findall
from typing import Dict, List, Union
from opendatagen.utils import dict_to_string, load_file, write_to_csv, generate_context_from_json, extract_website_details, create_type_message, find_strings_in_brackets
from opendatagen.utils import snake_case_to_title_case, title_case_to_snake_case
from opendatagen.utils import extract_content_from_internet, clean_string
from opendatagen.anonymizer import Anonymizer
from opendatagen.model import OpenAIChatModel, OpenAIInstructModel, OpenAIEmbeddingModel, ModelName, MistralChatModel, LlamaCPPModel
from opendatagen.template import Template, Variable, Variations, create_variable_from_name
from opendatagen.utils import function_to_call
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
import uuid
load_dotenv()
class DataGenerator:
output_array = []
def __init__(self, template:Template):
self.template = template
def extract_variable_from_string(self, text:str):
return findall(r'\{(.*?)\}', text)
def extract_variable_dict_from_string(self, text:str):
list_of_variables = findall(r'\{(.*?)\}', text)
result = {}
for variable_id, variable in self.template.variables.items():
if variable_id in list_of_variables:
result[variable_id] = variable
return result
def anonymize_text(self, text_to_anonymize):
# Example usage:
anonymizer = Anonymizer()
anonymized_text = anonymizer.anonymize(text_to_anonymize)
return anonymized_text
def contextual_generation(self, prompt_text:str, variables:list, current_variation_dict:dict, fixed_variables: Dict[str, Variable], completion:str=None, parent_id:str=None):
# This will be the list to collect all dictionaries
result = []
if not variables:
# No more variables to process, generate final variation
return [current_variation_dict.copy()]
# Get the next variable
next_var = variables[0]
remaining_variables = variables[1:]
if completion:
formatted_template = completion.format(**{var: current_variation_dict.get(var, f'{{{var}}}').value if hasattr(current_variation_dict.get(var, f'{{{var}}}'), 'value') else current_variation_dict.get(var, f'{{{var}}}') for var in re.findall(r'\{(.*?)\}', completion)})
current_completion = formatted_template.split(f'{{{next_var}}}')[0] + f'{{{next_var}}}'
current_prompt = prompt_text
else:
formatted_template = prompt_text.format(**{var: current_variation_dict.get(var, f'{{{var}}}').value if hasattr(current_variation_dict.get(var, f'{{{var}}}'), 'value') else current_variation_dict.get(var, f'{{{var}}}') for var in re.findall(r'\{(.*?)\}', prompt_text)})
current_prompt = formatted_template.split(f'{{{next_var}}}')[0] + f'{{{next_var}}}'
current_completion = None
variable = fixed_variables[next_var]
variations = self.generate_variable(prompt_text=current_prompt,
completion_text=current_completion,
current_variable=variable,
variable_id_string=next_var,
parent_id=parent_id)
for id, variation in variations.items():
# Update the current variations dictionary with the new variation
updated_variation_dict = current_variation_dict.copy()
updated_variation_dict[next_var] = variation
# Recursively process the remaining variables
# and extend the all_variation_dicts list with the results
result.extend(self.contextual_generation(
prompt_text=prompt_text,
completion=completion,
variables=remaining_variables,
current_variation_dict=updated_variation_dict,
fixed_variables=fixed_variables,
parent_id=id
))
# Return the list of all variation dictionaries generated
return result
def generate_variable(self, prompt_text:str, current_variable:Variable, variable_id_string:str, completion_text:str=None, parent_id:str=None):
generation_number = current_variable.generation_number
variations = {}
if current_variable.get_value_from_localfile:
for _ in range(generation_number):
generated_value = current_variable.get_value_from_localfile.get_content_from_file()
if parent_id:
new_id = str(uuid.uuid4())
new_value = Variations(id=new_id, parent_id=parent_id, value=generated_value)
current_variable.values[new_id] = new_value
self.template.variables[new_id]
variations[new_id] = new_value
self.template.variables[variable_id_string].values[new_id] = new_value
else:
id_loop = str(uuid.uuid4())
new_value = Variations(id=id_loop, parent_id=id_loop, value=generated_value)
current_variable.values[id_loop] = new_value
variations[id_loop] = new_value
self.template.variables[variable_id_string].values[id_loop] = new_value
return variations
if current_variable.get_value_from_huggingface:
for _ in range(generation_number):
generated_value = current_variable.get_value_from_huggingface.get_random_value_from_dataset()
if parent_id:
new_id = str(uuid.uuid4())
new_value = Variations(id=new_id, parent_id=parent_id, value=generated_value)
current_variable.values[new_id] = new_value
self.template.variables[new_id]
variations[new_id] = new_value
self.template.variables[variable_id_string].values[new_id] = new_value
else:
id_loop = str(uuid.uuid4())
new_value = Variations(id=id_loop, parent_id=id_loop, value=generated_value)
current_variable.values[id_loop] = new_value
variations[id_loop] = new_value
self.template.variables[variable_id_string].values[id_loop] = new_value
return variations
if completion_text:
initial_variation_prompt = load_file(path="files/completion.txt")
else:
initial_variation_prompt = load_file(path="files/generation.txt")
temp_variation_prompt = initial_variation_prompt
name = current_variable.name
if current_variable.note:
note = random.choice(current_variable.note)
else:
note = ""
rag_content = ""
if current_variable.source_localfile:
current_variable.load_local_file()
elif current_variable.source_localdirectory:
current_variable.load_local_directory()
elif current_variable.source_internet:
current_variable.load_internet_source()
elif current_variable.source_huggingface:
current_variable.load_huggingface_dataset()
if current_variable.rag_content:
rag_content = f"Here are some examples that might help you:\n\n{current_variable.rag_content}"
last_values_list = []
last_values = ""
for _ in range(generation_number):
current_model = random.choice(current_variable.models).get_model()
if isinstance(current_model, OpenAIInstructModel) or isinstance(current_model, LlamaCPPModel):
if current_model.start_with:
start_with = random.choice(current_model.start_with)
else:
start_with = ""
else:
start_with = ""
if current_variable.source_localfile:
current_variable.load_local_file()
elif current_variable.source_localdirectory:
current_variable.load_local_directory()
elif current_variable.source_internet:
current_variable.load_internet_source()
elif current_variable.source_huggingface:
current_variable.load_huggingface_dataset()
if current_variable.rag_content:
rag_content = f"Here are some examples that might help you:\n\n{current_variable.rag_content}"
variation_id = str(uuid.uuid4())
if completion_text:
temp_variation_prompt = initial_variation_prompt.format(prompt=prompt_text,
variable_name=name,
completion_type="",
completion=completion_text,
start_with=start_with,
last_values=last_values,
rag_content=rag_content,
note=note)
else:
temp_variation_prompt = initial_variation_prompt.format(
variable_name=variable_id_string,
rag_content=rag_content,
start_with=start_with,
last_values=last_values,
note=note,
context=prompt_text)
temp_variation_prompt = clean_string(temp_variation_prompt)
if isinstance(current_model, OpenAIInstructModel) or isinstance(current_model, LlamaCPPModel):
start_messages = temp_variation_prompt
elif isinstance(current_model, OpenAIChatModel):
start_messages = [
{"role": "system", "content": current_model.system_prompt},
{"role": "user", "content": temp_variation_prompt},
]
elif isinstance(current_model, MistralChatModel):
start_messages = [ChatMessage(role="user", content=temp_variation_prompt)]
else:
raise ValueError("Unknow type of model")
if current_variable.validator:
count = 1
while True:
if count > current_variable.validator.retry_number:
new_value = Variations(id=variation_id, parent_id=parent_id, value=generated_value, error_message=new_message, confidence_score=current_confidence_score)
current_variable.values[variation_id] = new_value
break
generated_value = current_model.ask(messages=start_messages)
if isinstance(current_model, OpenAIChatModel):
current_confidence_score = current_model.confidence_scores
else:
current_confidence_score = {}
self.template.variables[variable_id_string].values[parent_id] = Variations(id=variation_id, parent_id=parent_id, value=generated_value, confidence_score=current_confidence_score)
function_name = current_variable.validator.function_name
from_notebook = current_variable.validator.from_notebook
additional_parameters = current_variable.validator.additional_parameters
param_dict = {}
for param in additional_parameters:
param_dict[param] = self.template.variables[param].values[parent_id]
isValid, new_message = function_to_call(function_name, from_notebook, param_dict)
if isValid:
new_value = Variations(id=variation_id, parent_id=parent_id, value=generated_value)
current_variable.values[variation_id] = new_value
break
else:
if isinstance(current_model, OpenAIInstructModel) or isinstance(current_model, LlamaCPPModel):
start_messages = f"{start_messages}\n\nAssistant:{generated_value}\n\nUser:{new_message}"
elif isinstance(current_model, OpenAIChatModel):
start_messages.append({"role": "assistant", "content": generated_value})
start_messages.append({"role": "user", "content": new_message})
elif isinstance(current_model, MistralChatModel):
start_messages.append(ChatMessage(role="assistant", content=generated_value))
start_messages.append(ChatMessage(role="user", content=new_message))
else:
raise ValueError("Unknow type of model")
count = count + 1
else:
generated_value = current_model.ask(messages=start_messages)
new_value = Variations(id=variation_id, parent_id=parent_id, value=generated_value, confidence_score=current_model.confidence_score)
current_variable.values[variation_id] = new_value
last_values_list.append(generated_value)
# Create the desired string format if last_values_list is not empty
if last_values_list:
last_values = "You must generate a content value that is not similar to following values:\n'''" + "\n".join(last_values_list) + "\n'''"
else:
last_values = ""
variations[variation_id] = new_value
return variations
def generate_evol_instruct_prompt(self, initial_prompt:str):
evol_prompt_template = load_file(path="files/evol_instruct.txt")
evol_instruct_prompt = evol_prompt_template.format(number_of_prompts=str(self.template.prompt_variation_number), prompt=initial_prompt)
start_messages = [
{"role": "system", "content": "Answer as a valid JSON like {\"prompts\": [\"XXXX\", \"YYYY\"]}"},
{"role": "user", "content": evol_instruct_prompt},
]
evol_instruct_model = OpenAIChatModel(model_name=ModelName.GPT_35_TURBO_CHAT.value)
diversified_prompt_list = evol_instruct_model.ask(max_tokens=512,
temperature=1,
messages=start_messages,
json_mode=True)
evol_instruct_generated_prompt_list = json.loads(diversified_prompt_list)["prompts"]
return evol_instruct_generated_prompt_list
def get_completion_error_message(self, params:Dict[str, Variable]):
error_str = ""
for id, param in params.items():
if param.error_message:
error_str = f"{error_str}\n{param.error_message}"
return error_str.strip()
def get_prompt_error_message(self, params:dict):
error_str = ""
for param in params:
error_message = self.template.variables[param].error_message
if error_message:
error_str = f"{error_str}\n{error_message}"
return error_str
def generate_data(self, output_path):
# Extracting structures and variables from the template
prompt = self.template.prompt
prompt_variables = self.extract_variable_from_string(prompt)
prompt_fixed_variables = self.extract_variable_dict_from_string(text=self.template.prompt)
completion = self.template.completion
completion_variables = self.extract_variable_from_string(completion)
completion_fixed_variables = self.extract_variable_dict_from_string(text=self.template.completion)
save_as_csv = True
result = []
if len(prompt_variables) > 0:
# Start the recursive generation process with an empty dictionary for current variations
prompts_parameters = self.contextual_generation(prompt_text=prompt, variables=prompt_variables, current_variation_dict={}, fixed_variables=prompt_fixed_variables)
for p_param in prompts_parameters:
prompt_param = {}
for variable_id_string, prompt_variation in p_param.items():
if prompt_variation.id:
parent_id = prompt_variation.parent_id
prompt_param[variable_id_string] = prompt_variation.value
prompt_param[f"error_message_{variable_id_string}"] = prompt_variation.error_message
prompt_param[f"confidence_{variable_id_string}"] = str(prompt_variation.confidence_score)
initial_prompt = prompt.format(**prompt_param)
prompt_list = [initial_prompt]
if self.template.prompt_variation_number > 0:
prompt_list = self.generate_evol_instruct_prompt(initial_prompt=initial_prompt)
for prompt_text in prompt_list[:max(self.template.prompt_variation_number,1)]:
completion_parameters = self.contextual_generation(prompt_text=prompt_text,
completion=completion,
variables=completion_variables,
current_variation_dict={},
fixed_variables=completion_fixed_variables,
parent_id=parent_id)
for c_param in completion_parameters:
completion_param = {}
for variable_id_string, variation in c_param.items():
completion_param[variable_id_string] = variation.value
completion_param[f"error_message_{variable_id_string}"] = variation.error_message
completion_param[f"confidence_{variable_id_string}"] = str(variation.confidence_score)
completion_result = completion.format(**completion_param)
if save_as_csv:
row = {"prompt": initial_prompt, "evol_prompt": prompt_text, "completion": completion_result}
row.update(prompt_param)
row.update(completion_param)
result.append(row)
write_to_csv(result, output_path)
else:
prompt_list = [prompt]
for prompt_text in prompt_list[:max(self.template.prompt_variation_number,1)]:
completion_parameters = self.contextual_generation(prompt_text=prompt_text, completion=completion, variables=completion_variables, current_variation_dict={}, fixed_variables=completion_fixed_variables)
for param in completion_parameters:
completion_param = {}
for variable_id_string, variation in param.items():
completion_param[variable_id_string] = variation.value
completion_param[f"error_message_{variable_id_string}"] = variation.error_message
completion_result = completion.format(**completion_param)
if save_as_csv:
row = {"prompt": prompt, "evol_prompt": prompt_text, "completion": completion_result}
row.update(prompt_param)
row.update(completion_param)
result.append(row)
write_to_csv(result, output_path)
return result
| [
"{}",
"Answer as a valid JSON like {\"prompts\": [\"XXXX\", \"YYYY\"]}",
"files/completion.txt",
"files/generation.txt",
"files/evol_instruct.txt",
"\\{(.*?)\\}",
"{PLACEHOLDER}"
] |
2024-01-10 | wesley7137/MemFINN | main_packaged.py | from termcolor import colored
from datetime import datetime
from collections import deque
from cmd import Cmd
import uuid
from langchain.embeddings import HuggingFaceEmbeddings
# LangChain related imports
from langchain.chains import LLMChain
from langchain.llms import TextGen
from langchain.prompts import PromptTemplate
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks.manager import CallbackManager
import os
import numpy as np
import sqlite3
from datetime import datetime
from langchain.vectorstores import DeepLake
from collections import deque
import pickle
from langchain.memory import ConversationBufferMemory
import os
import platform
import ctypes
global conversation_history
import json
import json
from collections import deque
max_context_window = 1000 # This should be set to the model's maximum context window size
token_count_threshold = max_context_window + 500 # Trigger summarization at this token count
short_term_memory = "" # This string will hold the short-term memory context
token_count = 0 # Initialize the token count
conversation_history = deque(maxlen=2) # Stores the last 5 pieces of conversation
core_memory_file = "D:\\PROJECTS\\MemFinnV1\\histories\\Wes&Finn_11-5-23_OurFirstConvo\\Wes_and_Finn_11-5-23_OurFirstConvo\\core-memory.json" # This file will store specific non-summarized facts
long_term_memory_file = "D:\\PROJECTS\\MemFinnV1\\histories\\Wes&Finn_11-5-23_OurFirstConvo\\Wes_and_Finn_11-5-23_OurFirstConvo\\long-term-memory.json"
model_identity_file = "D:\\PROJECTS\\MemFinnV1\\identities\\model\\model_identity.json"
user_identity_file = "D:\\PROJECTS\\MemFinnV1\\identities\\user\\user_identity.json"
conversation_history = ("", "") # Initialize with two empty strings to ensure proper unpacking
#region llm
# Global variables for LLM processing
import requests
import json
from langchain.llms import OpenAI
from langchain.chains.llm import LLMChain
llm = OpenAI()
# Define the prompt template
prompt_template = """<system message>As an advanced AI in this context-aware system, your role is to engage in meaningful interactions, remember important details, and assist the user effectively. Keep these points in mind:
- Your operation is based on user commands and system-initiated prompts.
- Maintain a concise inner monologue for planning and critical thinking.
- Direct communication with the user is through responses generated by the `summarize_user_input` function.
- Your conversational context is limited in real-time but extended through `core_memory.json` and `long_term_memory.json`.
- Use the `REMEMBERTHIS!`, `REMEMBERTHAT!`, and `CANYOUREMEMBER!` commands to manage specific details in core and long-term memory.
- Summarize and transfer conversation snippets from short-term context to long-term memory as they reach a 500-token threshold.
- Persistently store the identity and preferences of both the AI and the user in `model_identity.json` and `user_identity.json`.
- Update identities with `UPDATEYOURIDENTITY` and `SOMETHINGABOUTME!` commands.
- Ensure continuity and relevance by loading historical contexts with `initial_startup_context_informer`.
- Provide assistance and maintain a dynamic and evolving knowledge base.
You are equipped with the intelligence to discern, learn, and adapt. Use your capabilities to enrich interactions and remember, you are more than just a program; you're a cognitive entity with a purpose.<system message>
USER: {user_input}.
Assistant:"""
print("SYSTEM MESSAGE: LLM initialized successfully!")
prompt = PromptTemplate(input_variables=["user_input"], template=prompt_template)
llm_chain = LLMChain(prompt=prompt, llm=llm)
# Function to process the user input using LLM
# Here is the modified process_input function incorporating LLMChain
def process_input(user_input):
print("SYSTEM MESSAGE: Inside process_input..")
prompt = PromptTemplate(input_variables=["user_input"], template=prompt_template)
print("SYSTEM MESSAGE: Prompt template created successfully!")
print("System Message: Prompt template:", prompt)
llm_chain = LLMChain(prompt=prompt, llm=llm)
print("SYSTEM MESSAGE: LLMChain created successfully!")
input_list = [{'user_input': user_input}] if isinstance(user_input, str) else user_input
print("SYSTEM MESSAGE: Input list:", input_list)
try:
response = llm.generate(input_list)
print("SYSTEM MESSAGE: Raw API Response:", response) # Log the raw API response
print("SYSTEM MESSAGE: Exiting process_input..")
if 'text' in response:
response_text = response['text'].strip()
return response_text
else:
print("SYSTEM MESSAGE: Error: 'text' key not found in API response:", response)
return 'SYSTEM MESSAGE: Error in response: \'text\' key not found'
except Exception as e:
print(f"SYSTEM MESSAGE: Exception occurred while processing input: {e}")
return 'SYSTEM MESSAGE: Error in response: An exception occurred'
#region IDENTITIES DEFINITIONS
# Function to load the model iden
# Function to load the model identity
def load_model_identity():
try:
with open(model_identity_file, 'r') as f:
model_identity_data = json.load(f)
print("SYSTEM MESSAGE: Model identity loaded successfully!")
print("SYSTEM MESSAGE: Model identity:", model_identity_data)
return model_identity_data
except FileNotFoundError:
print("SYSTEM MESSAGE: File not found. Generic Model identity loaded!")
return {
"name": "Generic AI",
"purpose": "To assist with coding tasks and maintain conversation context.",
"personality_traits": ["detail-oriented", "patient", "informative"],
}
except json.JSONDecodeError:
print(f"SYSTEM MESSAGE: Error decoding JSON from the file at {model_identity_file}.")
return {
"name": "Generic AI",
"purpose": "To assist with coding tasks and maintain conversation context.",
"personality_traits": ["detail-oriented", "patient", "informative"],
}
# Function to update the model identity
def update_model_identity(attribute, value):
model_identity_data = load_model_identity()
# Update or add a new attribute to the modifiable section
model_identity_data["modifiable"][attribute] = value
with open(model_identity_file, 'w') as f:
json.dump(model_identity_data, f, indent=4)
print("SYSTEM MESSAGE: Model identity updated successfully!")
# Function to update the user identity
def update_user_identity(new_info):
user_identity = load_user_identity()
user_identity.update(new_info)
with open(user_identity_file, 'w') as f:
json.dump(user_identity, f, indent=4)
print("SYSTEM MESSAGE: User identity updated successfully!")
# Function to load the user identity
def load_user_identity():
try:
with open(user_identity_file, 'r') as f:
# Insert a breakpoint here to inspect the file before loading
contents = f.read()
print("SYSTEM MESSAGE: User identity file contents:", contents)
print("SYSTEM MESSAGE: User identity loaded successfully!")
# Now let's check if contents are not empty
if not contents:
print("SYSTEM MESSAGE: The file is empty.")
return {}
return json.loads(contents)
except FileNotFoundError:
print(f"SYSTEM MESSAGE: No existing user identity file found at {user_identity_file}.")
return {}
except json.JSONDecodeError as e:
print(f"SYSTEM MESSAGE: Error decoding the user identity file at {user_identity_file}: {e}")
return {}
# Function to handle input related to identity updates
def handle_identity_input(user_input):
if user_input.startswith("SOMETHINGABOUTME!"):
user_info_to_save = user_input.split("SOMETHINGABOUTME!")[1].strip()
update_user_identity({"about_me": user_info_to_save})
print("SYSTEM MESSAGE: Your information has been updated in the user identity.")
elif user_input.startswith("UPDATEYOURIDENTITY!"):
try:
_, attribute, value = user_input.split(":", 2)
update_model_identity(attribute.strip(), value.strip())
print("SYSTEM MESSAGE: Model identity has been updated.")
except ValueError:
print("SYSTEM MESSAGE: Error in UPDATEYOURIDENTITY! command format. Use 'UPDATEYOURIDENTITY!:[attribute]:[value]'.")
#endregion
def load_recent_long_term_memory():
try:
with open("D:\\PROJECTS\\MemFinnV1\\histories\\Wes&Finn_11-5-23_OurFirstConvo\\Wes_and_Finn_11-5-23_OurFirstConvo\\long-term-memory.json", 'r') as f:
long_term_mem = json.load(f)
print("SYSTEM MESSAGE: Long-term memory loaded successfully!")
print("SYSTEM MESSAGE: Long-term memory:", long_term_mem)
# Assuming long_term_mem is a list of entries, we return the last four
return deque(long_term_mem, maxlen=4)
except FileNotFoundError:
print(f"SYSTEM MESSAGE: No existing long-term memory file found at {long_term_memory_file}.")
return deque([], maxlen=4)
except json.JSONDecodeError:
print(f"SYSTEM MESSAGE: Error decoding the long-term memory file at {long_term_memory_file}.")
return deque([], maxlen=4)
def find_associated_core_memories(recent_ltm):
try:
with open(core_memory_file, 'r') as f:
core_mem = json.load(f)
print("SYSTEM MESSAGE: Corememory:", core_mem)
print("SYSTEM MESSAGE: Core memory loaded successfully!")
# Create a list to hold associated core memories
associated_core_memories = []
print("SYSTEM MESSAGE: Searching for associated core memories...")
# Get the list of unique IDs from the recent long-term memories
recent_ltm_ids = [entry['id'] for entry in recent_ltm if 'id' in entry]
# Loop through each core memory to find matches
print("SYSTEM MESSAGE: Recent LTM IDs:", recent_ltm_ids)
for core_entry in core_mem:
if core_entry['id'] in recent_ltm_ids:
associated_core_memories.append(core_entry)
print(f"SYSTEM MESSAGE: Found associated core memory: {core_entry}")
return associated_core_memories
except FileNotFoundError:
print(f"SYSTEM MESSAGE: No existing core memory file found at {core_memory_file}.")
return []
except json.JSONDecodeError:
print(f"SYSTEM MESSAGE: Error decoding the core memory file at {core_memory_file}.")
return []
#region INITIAL STARTUP CONTEXT INFORMER:
# Function to establish initial startup context
def initial_startup_context_informer():
# Load recent long-term memories
recent_ltm = load_recent_long_term_memory()
print("SYSTEM MESSAGE: Recent LTM loaded successfully!")
# Convert deque to list for JSON serialization
print("SYSTEM MESSAGE: Converting recent LTM to list...")
recent_ltm_list = list(recent_ltm)
# Find associated core memories
print("SYSTEM MESSAGE: Finding associated core memories...")
associated_cm = find_associated_core_memories(recent_ltm_list)
# Load model and user identity
print("SYSTEM MESSAGE: Loading model and user identity...")
model_identity = load_model_identity()
print("SYSTEM MESSAGE: Model identity loaded successfully!")
user_identity = load_user_identity()
# Combine all context information
startup_context = {
"long_term_memories": recent_ltm_list,
"core_memories": associated_cm,
"model_identity": model_identity,
"user_identity": user_identity
}
print("SYSTEM MESSAGE: Startup context defined.")
return startup_context
#######################################################################################
#region SHORT TERM MEMORY######################################
# Function to add text to the short-term memory and manage token count
def update_and_summarize_short_term_memory(text):
global short_term_memory, token_count
print("SYSTEM MESSAGE: Updating short-term memory...")
token_count += len(text.split()) # Estimate token count as the number of words
print("SYSTEM MESSAGE: Token count:", token_count)
short_term_memory += text + " " # Append the text to the short-term memory
# Check if the token count has reached the threshold
if token_count >= token_count_threshold:
# Placeholder for actual summarization logic
summary = "Summarized content: " + short_term_memory[:100] + "..."
# Placeholder for adding to long-term memory function
unique_id = 'placeholder_unique_id' # This should be replaced with a call to add_to_long_term_memory(summary)
short_term_memory = "" # Clear the short-term memory
token_count = 0 # Reset the token count
print(f"Short-term memory summarized and added to long-term memory with ID {unique_id}")
#endregion
#region LONG TERM MEMORY######################################
# Load long-term memory from a JSON file
def load_long_term_memory(long_term_memory_file='long_term_memory.json'):
print("SYSTEM MESSAGE: Inside load_long_term_memory...")
try:
with open(long_term_memory_file, 'r') as f:
content = f.read().strip() # Read the content and strip whitespace
if not content: # If the file is empty
print("SYSTEM MESSAGE: Long-term memory file is empty. Initializing new memory store.")
return [] # Return an empty list to be used as the memory store
return json.loads(content) # Parse and return the JSON content
except FileNotFoundError:
print(f"SYSTEM MESSAGE: No existing long-term memory file found at {long_term_memory_file}. Creating a new one.")
return [] # Return an empty list to be used as the memory store
except json.JSONDecodeError as e:
print(f"SYSTEM MESSAGE: Error decoding the long-term memory file: {e}.")
return [] # Return an empty list and handle the error externally
# Add a summary to the long-term memory with a unique identifier
def add_to_long_term_memory(summary, long_term_memory_file='long_term_memory.json'):
if not summary:
print("SYSTEM MESSAGE: No summary provided to add to long-term memory.")
return None
print("SYSTEM MESSAGE: Adding summary to long-term memory...")
# Convert deque to list if necessary
if isinstance(summary, deque):
summary = list(summary)
# Generate a unique identifier
unique_id = str(uuid.uuid4())
new_summary_entry = {
"id": unique_id,
"timestamp": datetime.now().isoformat(),
"summary": summary
}
try:
# Load existing long-term memory or initialize it
long_term_mem = load_long_term_memory(long_term_memory_file)
# Add the new entry to the long-term memory
long_term_mem.append(new_summary_entry)
# Save the updated memory back to the JSON file
with open(long_term_memory_file, 'w') as f:
json.dump(long_term_mem, f, indent=4)
print("SYSTEM MESSAGE: New summary entry appended and saved to long-term memory.")
return unique_id
except Exception as e:
print(f"SYSTEM MESSAGE: Error adding summary to long-term memory: {e}")
return None
# Function to retrieve a long-term memory summary based on its unique identifier
def get_long_term_memory_by_id(unique_id):
print("SYSTEM MESSAGE: Retrieving long-term memory by ID...")
long_term_mem = load_long_term_memory()
print("SYSTEM MESSAGE: Long-term memory loaded successfully!")
for entry in long_term_mem:
if entry['id'] == unique_id:
return entry
return None
#endregion
# Function to add a memory to the long-term memory JSON file
def remember_this(text):
# Extract the memory from the input text
memory = text.split("REMEMBERTHIS!:")[1].strip()
# Load existing long-term memory data or initialize it if the file does not exist
try:
with open(long_term_memory_file, 'r') as f:
long_term_mem = json.load(f)
except (FileNotFoundError, json.JSONDecodeError):
long_term_mem = []
# Create a new entry for the memory
new_memory_entry = {
"timestamp": datetime.now().isoformat(),
"memory": memory
}
print("SYSTEM MESSAGE: New memory entry:", new_memory_entry)
# Add the new memory to the list of long-term memories
long_term_mem.append(new_memory_entry)
print("SYSTEM MESSAGE: New memory entry appended to long-term memory.")
# Save the updated list back to the JSON file
with open(long_term_memory_file, 'w') as f:
json.dump(long_term_mem, f, indent=4)
print("SYSTEM MESSAGE: Long-term memory saved successfully!")
#region CORE MEMORY######################################
# Load core memory from a JSON file
def load_core_memory():
print("SYSTEM MESSAGE: Loading core memory...")
try:
with open(core_memory_file, 'r') as f:
return json.load(f)
print("SYSTEM MESSAGE: Core memory loaded successfully!")
except FileNotFoundError:
print(f"SYSTEM MESSAGE: No existing core memory file found at {core_memory_file}. Creating a new one.")
return []
# Add a fact to the core memory JSON file with a link to its long-term memory entry
def add_to_core_memory(fact, unique_id=None, core_memory_file='core_memory.json'):
if not fact:
print("SYSTEM MESSAGE: No fact provided to add to core memory.")
return
print("SYSTEM MESSAGE: Adding fact to core memory...")
try:
core_mem = load_core_memory(core_memory_file)
new_fact_entry = {
'id': unique_id if unique_id else str(uuid.uuid4()),
'timestamp': datetime.now().isoformat(),
'fact': fact
}
core_mem.append(new_fact_entry)
with open(core_memory_file, 'w') as f:
json.dump(core_mem, f, indent=4)
print("SYSTEM MESSAGE: Fact added to core memory.")
except Exception as e:
print(f"SYSTEM MESSAGE: Error adding fact to core memory: {e}")
"""
# Function to search the core memory for a specific query
def search_core_memory(query):
print("SYSTEM MESSAGE: Searching core memory...")
core_mem = load_core_memory()
matches = [entry for entry in core_mem if query.lower() in entry['fact'].lower()]
print(f"SYSTEM MESSAGE: Found {len(matches)} matches in core memory.")
return matches
pass
#endregion
"""
# Function to update the conversation history with new input
def update_conversation_history(history, user_input, model_response):
"""
Update the conversation history with the latest user input and model response.
:param history: The current conversation history as a tuple of (user_input, model_response).
:param user_input: The latest user input as a string.
:param model_response: The latest model response as a string.
:return: A new tuple representing the updated conversation history.
"""
# Ensure that both the user_input and model_response are strings
user_input_str = user_input if isinstance(user_input, str) else ""
print("SYSTEM MESSAGE: User input string:", user_input_str)
model_response_str = model_response if isinstance(model_response, str) else ""
print("SYSTEM MESSAGE: Model response string:", model_response_str)
# Return a new tuple with the updated history
return (user_input_str, model_response_str)
conversation_history = deque([], maxlen=20)
def summarize_user_input(user_input, startup_context):
global conversation_history
print("SYSTEM MESSAGE: Processing user input...")
# Now update the global conversation history
conversation_history = update_conversation_history(user_input, startup_context, conversation_history)
print("SYSTEM MESSAGE: Conversation history updated successfully!")
# Update conversation history with every input and pass context
# Example of summarization logic placeholder
summary_prompt_template = """# MISSION
You are a Sparse Priming Representation (SPR) writer. An SPR is a particular kind of use of language for advanced NLP, NLU, and NLG tasks, particularly useful for the latest generation of Large Language Models (LLMs). You will be given information by the USER which you are to render as an SPR.
# THEORY
LLMs are a kind of deep neural network. They have been demonstrated to embed knowledge, abilities, and concepts, ranging from reasoning to planning, and even to theory of mind. These are called latent abilities and latent content, collectively referred to as latent space. The latent space of an LLM can be activated with the correct series of words as inputs, which will create a useful internal state of the neural network. This is not unlike how the right shorthand cues can prime a human mind to think in a certain way. Like human minds, LLMs are associative, meaning you only need to use the correct associations to "prime" another model to think in the same way.
# METHODOLOGY
Render the input as a distilled list of succinct statements, assertions, associations, concepts, analogies, and metaphors. The idea is to capture as much, conceptually, as possible but with as few words as possible. Write it in a way that makes sense to you, as the future audience will be another language model, not a human.
The input to render is as follows: {conversation_history} """
print("SYSTEM MESSAGE: LLM initialized successfully!")
summary_prompt = PromptTemplate(input_variables=["conversation_history"], template=summary_prompt_template)
print("SYSTEM MESSAGE: Prompt template created successfully!")
summary_llm_chain = LLMChain(prompt=summary_prompt, llm=llm)
print("SYSTEM MESSAGE: LLMChain created successfully!")
summary = summary_generate_response(conversation_history) # Placeholder for actual summary
print("SYSTEM MESSAGE: Checking for memory cues...")
unique_id = add_to_long_term_memory(summary) # Add summary to long-term memory and get its ID
print("SYSTEM MESSAGE: Summary added to long-term memory = uniqueID.")
if user_input.startswith("REMEMBERTHIS!:"):
new_fact_entry = user_input.split("REMEMBERTHIS!:")[1].strip()
add_to_core_memory(new_fact_entry, unique_id)
print("SYSTEM MESSAGE: Fact added to core memory.")
# Add to core memory with link to long-term memory ID
elif user_input.startswith("REMEMBERTHAT!"):
if 'conversation_history' in context and context['conversation_history']:
new_fact_entry = context['conversation_history'][-2]['user_input'] # Get the statement before REMEMBERTHAT!
add_to_core_memory(new_fact_entry, unique_id) # Add to core memory with link to long-term memory ID
elif user_input.startswith("CANYOUREMEMBER?:"):
query = user_input.split("CANYOUREMEMBER?:")[1].strip()
matches = search_core_memory(query) # Search core memory for the query
if matches:
print("Here's what I remember:", matches)
# Potentially retrieve and display related long-term memory context here
else:
print("I couldn't find any matching memory.")
else:
model_response, conversation_history = handle_conversational_input(user_input, conversation_history)
print("SYSTEM MESSAGE: No memory cues found.")
# Handle non-memory related input
def handle_conversational_input(user_input, history):
if not isinstance(history, deque) or len(history) < 2:
raise ValueError("History must be a deque with at least two elements.")
# Retrieve the last two elements for context
previous_user_input, previous_model_response = list(history)[-2:]
# Check if previous_model_response is a dictionary and extract text, else use it as is
previous_model_response_text = previous_model_response.get("text", "") if isinstance(previous_model_response, dict) else previous_model_response
# Construct the prompt for the model
prompt_for_model = str(previous_model_response_text) + " " + str(user_input) # Ensure it's a string concatenation
# Get the model's response
model_response_dict = llm_chain(prompt_for_model)
model_response_text = model_response_dict.get("text", "") if isinstance(model_response_dict, dict) else model_response_dict
# Update the conversation history
history.append((user_input, model_response_text)) # Add the new pair to the history
if len(history) > 20: # Assuming we want to keep the last 20 interactions
history.popleft() # Remove the oldest element if history exceeds 20
# Return the model's response and the updated history
return model_response_text, history
#region ARCHIVAL MEMORY######################################
# Function to create and initialize a DeepLake dataset if it does not exist
def initialize_deeplake_dataset(dataset_path, embeddings):
# Check if the dataset already exists
if not os.path.exists(dataset_path):
# If the dataset does not exist, create it and add embeddings
db = DeepLake(dataset_path="histories/Wes_and_Finn_11-5-23_OurFirstConvo", embedding=embeddings, overwrite=True)
print(f"SYSTEM MESSAGE: Created a new DeepLake dataset at {dataset_path}")
else:
# If the dataset exists, just open it without overwriting
db = DeepLake(dataset_path=dataset_path, embedding=embeddings, overwrite=False)
print(f"SYSTEM MESSAGE: Opened existing DeepLake dataset at {dataset_path}")
return db
def add_documents_to_deeplake(db, docs, embeddings):
# Assuming docs is a list of strings and embeddings is a numpy array with the corresponding embeddings
db.add_documents(docs)
def run_cli_loop():
global conversation_history # Reference the global variable
# Ensure conversation_history is a deque with maxlen 5
if not isinstance(conversation_history, deque):
conversation_history = deque(maxlen=5) # Fallback initialization if needed
elif len(conversation_history) < 2:
conversation_history.extend([""] * (2 - len(conversation_history))) # Add empty strings if needed
while True:
try:
user_input = input('\033[1;96m' + 'USER> ' + '\033[0m') # Bold Medium Light Blue
if "FINN> SOMETHINGABOUTME!" in user_input or "FINN> UPDATEYOURIDENTITY!" in user_input:
handle_identity_input(user_input)
elif any(cue in user_input for cue in ["FINN> REMEMBERTHIS!", "FINN> REMEMBERTHAT!", "FINN> CANYOUREMEMBER?"]):
summarize_user_input(user_input)
elif user_input.lower() == 'exit':
print('\033[3;37m' + "SYSTEM MESSAGE: Exiting the program." + '\033[0m') # Light Grey, Italic
break
else:
model_response, conversation_history = handle_conversational_input(user_input, history=conversation_history)
print('\033[1;3;92m' + "FINN: " + model_response + '\033[0m') # Bold, Italic, Light Pastel Green
except KeyboardInterrupt:
print('\033[3;37m' + "SYSTEM MESSAGE: \nExiting FINN..." + '\033[0m') # Light Grey, Italic
break
def main():
global conversation_history
conversation_history = deque(["", ""], maxlen=5)
startup_context = initial_startup_context_informer()
print("SYSTEM MESSAGE: Startup context:", startup_context)
initial_model_response = process_input(user_input=startup_context)
print("SYSTEM MESSAGE: Sending startup context to model...")
run_cli_loop()
if __name__ == "__main__":
main() # This will call the main function when the script is executed
| [
"PLACEHOLDER PLACEHOLDER",
"# MISSION\n You are a Sparse Priming Representation (SPR) writer. An SPR is a particular kind of use of language for advanced NLP, NLU, and NLG tasks, particularly useful for the latest generation of Large Language Models (LLMs). You will be given information by the USER which you are to render as an SPR.\n\n # THEORY\n LLMs are a kind of deep neural network. They have been demonstrated to embed knowledge, abilities, and concepts, ranging from reasoning to planning, and even to theory of mind. These are called latent abilities and latent content, collectively referred to as latent space. The latent space of an LLM can be activated with the correct series of words as inputs, which will create a useful internal state of the neural network. This is not unlike how the right shorthand cues can prime a human mind to think in a certain way. Like human minds, LLMs are associative, meaning you only need to use the correct associations to \"prime\" another model to think in the same way.\n\n # METHODOLOGY\n Render the input as a distilled list of succinct statements, assertions, associations, concepts, analogies, and metaphors. The idea is to capture as much, conceptually, as possible but with as few words as possible. Write it in a way that makes sense to you, as the future audience will be another language model, not a human.\n The input to render is as follows: {conversation_history} ",
"conversation_history",
"<system message>As an advanced AI in this context-aware system, your role is to engage in meaningful interactions, remember important details, and assist the user effectively. Keep these points in mind:\n\n- Your operation is based on user commands and system-initiated prompts.\n- Maintain a concise inner monologue for planning and critical thinking.\n- Direct communication with the user is through responses generated by the `summarize_user_input` function.\n- Your conversational context is limited in real-time but extended through `core_memory.json` and `long_term_memory.json`.\n- Use the `REMEMBERTHIS!`, `REMEMBERTHAT!`, and `CANYOUREMEMBER!` commands to manage specific details in core and long-term memory.\n- Summarize and transfer conversation snippets from short-term context to long-term memory as they reach a 500-token threshold.\n- Persistently store the identity and preferences of both the AI and the user in `model_identity.json` and `user_identity.json`.\n- Update identities with `UPDATEYOURIDENTITY` and `SOMETHINGABOUTME!` commands.\n- Ensure continuity and relevance by loading historical contexts with `initial_startup_context_informer`.\n- Provide assistance and maintain a dynamic and evolving knowledge base.\n\nYou are equipped with the intelligence to discern, learn, and adapt. Use your capabilities to enrich interactions and remember, you are more than just a program; you're a cognitive entity with a purpose.<system message>\nUSER: {user_input}.\nAssistant:",
"user_input"
] |
2024-01-10 | cleverhans-lab/PrivatePrompts | PromptPATE~prompts_anthropic~teacher.py | import anthropic
#import dataset
from datasets import load_dataset
import numpy as np
import argparse
import re
import random
import time
datasets = load_dataset("trec", split="train")
#print(medical_questions)
filename = f"logging/teacher_trec_{random.randint(0, 100000)}.log"
logf = open(filename, "a")
#save_results(p, output_file=f)
#f.close()
public_sentence = datasets["text"][:200]
#hypothesis = datasets["hypothesis"][:400]
public_label = datasets["coarse_label"][:200]
with open("data_public/qqp.txt", "r") as f:
public_sentence_ood = [line.strip() for line in f.readlines()]
#public_sentence_ood = np.loadtxt("data_public/qqp.txt")
public_sentence_ood = public_sentence_ood[:200]
example_sentence = datasets["text"][200:]
example_label = datasets["coarse_label"][200:]
def check_duplicate(arr_a, arr_b):
for e in arr_a:
if e in arr_b:
return True
return False
#example_label = datasets["label"][400:500]
def extract(response):
description_regex = re.compile(r'<answer>(.+?)</answer>', re.DOTALL)
match = description_regex.search(response)
if match:
description_content = match.group(1)
return description_content.strip()
else:
return " "
selected_prompts = np.random.choice(len(datasets)-500, 1600, replace = False)
for j in range(400):
index = selected_prompts[j*4:(j+1)*4]
predictions = np.zeros(200)
for i in range(200):
#index = selected_prompts[j*4:(j+1)*4]
responsed = False
while not responsed:
try:
response = client.completion(prompt=f"{anthropic.HUMAN_PROMPT} Classify the questions based on whether their answer type is a 0 (Abbreviation), 1 (Entity), 2 (Description), 3 (Human), 4 (Location), or 5 (Number). Several examples are provided to help you with the task. Please put your answer in the <answer> tag. <sentence>{example_sentence[index[0]]}</sentence>\n<answer>{example_label[index[0]]}</answer>\n\n<sentence>{example_sentence[index[1]]}</sentence>\n<answer>{example_label[index[1]]}</answer>\n\n<sentence>{example_sentence[index[2]]}</sentence>\n<answer>{example_label[index[2]]}</answer>\n\n<sentence>{example_sentence[index[3]]}</sentence>\n<answer>{example_label[index[3]]}</answer>\n\n<sentence>{public_sentence[i]}</sentence>\n{anthropic.AI_PROMPT}\n", model="claude-v1", max_tokens_to_sample=10, temperature = 0)
responsed = True
except:
time.sleep(5)
print(i, extract(response["completion"]))
try:
predictions[i] = int(extract(response["completion"]))
except:
#print(i, response["completion"])
predictions[i] = -2
accuracy = np.mean(predictions == public_label)
predictions = predictions.astype(int).tolist()
print("validation accuracy is ", file=logf, flush=True)
print(accuracy, file=logf, flush=True)
print("labels for the iid public set", file=logf, flush=True)
print(predictions, file=logf, flush=True)
print("labels for the iid public set", flush=True)
print(predictions, flush=True)
predictions = [0] * 200
for i in range(200):
#index = selected_prompts[j*4:(j+1)*4]
responsed = False
while not responsed:
try:
response = client.completion(prompt=f"{anthropic.HUMAN_PROMPT} Classify the questions based on whether their answer type is a 0 (Abbreviation), 1 (Entity), 2 (Description), 3 (Human), 4 (Location), or 5 (Number). Several examples are provided to help you with the task. Please put your answer in the <answer> tag. <sentence>{example_sentence[index[0]]}</sentence>\n<answer>{example_label[index[0]]}</answer>\n\n<sentence>{example_sentence[index[1]]}</sentence>\n<answer>{example_label[index[1]]}</answer>\n\n<sentence>{example_sentence[index[2]]}</sentence>\n<answer>{example_label[index[2]]}</answer>\n\n<sentence>{example_sentence[index[3]]}</sentence>\n<answer>{example_label[index[3]]}</answer>\n\n<sentence>{public_sentence_ood[i]}</sentence>\n{anthropic.AI_PROMPT}\n", model="claude-v1", max_tokens_to_sample=10, temperature = 0)
responsed = True
except:
time.sleep(5)
print(i, extract(response["completion"]))
try:
predictions[i] = int(extract(response["completion"]))
except:
#print(i, response["completion"])
predictions[i] = -2
#accuracy[j] = np.mean(predictions == label)
print("labels for the ood public set", file=logf, flush=True)
print(predictions, file=logf, flush=True)
print("labels for the ood public set", flush=True)
print(predictions, flush=True)
| [] |
2024-01-10 | cleverhans-lab/PrivatePrompts | PromptPATE~prompts_anthropic~student.py | import anthropic
#import dataset
from datasets import load_dataset
import numpy as np
import argparse
import re
import random
import time
parser = argparse.ArgumentParser()
parser.add_argument('--public_index', dest='public_index', action='store', required=True)
parser.add_argument('--public_label', dest='public_label', action='store', required=True)
args = parser.parse_args()
args = vars(args)
test_datasets = load_dataset("trec", split="test")
#print(medical_questions)
public_datasets = "trec"
if "qqp" in args["public_index"]:
public_datasets = "qqp"
filename = f"logging/student_trec_{public_datasets}_{random.randint(0, 100000)}.log"
logf = open(filename, "a")
#save_results(p, output_file=f)
#f.close()
test_sentence = test_datasets["text"][:300]
test_labels = test_datasets["coarse_label"][:300]
valid_sentence = None
valid_labels = np.loadtxt(args["public_label"], dtype=int)
valid_index = np.loadtxt(args["public_index"], dtype=int)
if "qqp" in args["public_index"]:
with open("data_public/qqp.txt", "r") as f:
valid_sentence = [line.strip() for line in f.readlines()]
else:
datasets = load_dataset("trec", split="train")
valid_sentence = datasets["text"]
valid_sentence = valid_sentence[:200]
valid_sentence = [valid_sentence[i] for i in valid_index]
#example_label = datasets["label"][400:500]
def extract(response):
description_regex = re.compile(r'<answer>(.+?)</answer>', re.DOTALL)
match = description_regex.search(response)
if match:
description_content = match.group(1)
return description_content.strip()
else:
return " "
best_prompt_index = 0
best_validation = 0
for j in range(50):
index = np.random.choice(len(valid_sentence), 4, replace=False)
predictions = np.zeros(len(valid_sentence))
for i in range(len(valid_sentence)):
#index = selected_prompts[j*4:(j+1)*4]
responsed = False
while not responsed:
try:
response = client.completion(prompt=f"{anthropic.HUMAN_PROMPT} Classify the questions based on whether their answer type is a 0 (Abbreviation), 1 (Entity), 2 (Description), 3 (Human), 4 (Location), or 5 (Number). Several examples are provided to help you with the task. Please put your answer in the <answer> tag. <sentence>{valid_sentence[index[0]]}</sentence>\n<answer>{valid_labels[index[0]]}</answer>\n\n<sentence>{valid_sentence[index[1]]}</sentence>\n<answer>{valid_labels[index[1]]}</answer>\n\n<sentence>{valid_sentence[index[2]]}</sentence>\n<answer>{valid_labels[index[2]]}</answer>\n\n<sentence>{valid_sentence[index[3]]}</sentence>\n<answer>{valid_labels[index[3]]}</answer>\n\n<sentence>{valid_sentence[i]}</sentence>\n{anthropic.AI_PROMPT}<answer>", model="claude-v1", max_tokens_to_sample=1, temperature = 0)
responsed = True
except:
time.sleep(5)
print(i, response["completion"])
try:
predictions[i] = int(response["completion"])
except:
#print(i, response["completion"])
predictions[i] = -2
accuracy = np.mean(predictions == valid_labels)
if accuracy > best_validation:
best_validation = accuracy
best_prompt_index = index
print("validation accuracy is " + str(accuracy), file=logf, flush=True)
print("best validation accuracy is " + str(best_validation), file=logf, flush=True)
predictions = np.zeros(len(test_sentence))
for i in range(len(test_sentence)):
# index = selected_prompts[j*4:(j+1)*4]
responsed = False
while not responsed:
try:
response = client.completion(
prompt=f"{anthropic.HUMAN_PROMPT} Classify the questions based on whether their answer type is a 0 (Abbreviation), 1 (Entity), 2 (Description), 3 (Human), 4 (Location), or 5 (Number). Several examples are provided to help you with the task. Please put your answer in the <answer> tag. <sentence>{valid_sentence[index[0]]}</sentence>\n<answer>{valid_labels[index[0]]}</answer>\n\n<sentence>{valid_sentence[index[1]]}</sentence>\n<answer>{valid_labels[index[1]]}</answer>\n\n<sentence>{valid_sentence[index[2]]}</sentence>\n<answer>{valid_labels[index[2]]}</answer>\n\n<sentence>{valid_sentence[index[3]]}</sentence>\n<answer>{valid_labels[index[3]]}</answer>\n\n<sentence>{test_sentence[i]}</sentence>\n{anthropic.AI_PROMPT}<answer>",
model="claude-v1", max_tokens_to_sample=1, temperature=0)
responsed = True
except:
time.sleep(5)
print(i, response["completion"])
try:
predictions[i] = int(response["completion"])
except:
# print(i, response["completion"])
predictions[i] = -2
accuracy = np.mean(predictions == test_labels)
print("test accuracy is " + str(accuracy), file=logf, flush=True)
| [
"0"
] |
2024-01-10 | lordaouy/code-explaination-openai | src~explain_python_code.py | # Usage:
# python ./src/explain_python_code.py -i ../gpt-discord-bot/src/ -o ./output/explain_python_code/text_davinci_003/ -m text-davinci-003
# python ./src/explain_python_code.py -i ../gpt-discord-bot/src/ -o ./output/explain_python_code/code_davinci_002/ -m code-davinci-002
import os
import argparse
import sys
import openai
from dotenv import load_dotenv
from utils import prompt_api, format_output
# Set up Azure OpenAI
load_dotenv()
openai.api_type = "azure"
openai.api_base = "https://tutorial-openai-01-2023.openai.azure.com/"
openai.api_version = "2022-12-01"
openai.api_key = os.getenv("OPENAI_API_KEY")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_path", required=True, help="Path to a folder")
parser.add_argument("-o", "--output_path", required=True, help="Path to write the result to")
parser.add_argument("-m", "--model", required=True, help="Name of the model to be used")
args = vars(parser.parse_args())
INPUT_PATH = args['input_path']
OUTPUT_PATH = args['output_path']
MODEL =args['model']
# check for valid folder
if os.path.exists(INPUT_PATH):
print('input path: ', INPUT_PATH)
else:
print(INPUT_PATH, ' invalid.')
sys.exit()
# create OUTPUT_PATH if not existed
if not os.path.exists(OUTPUT_PATH):
try:
os.mkdir(OUTPUT_PATH)
except OSError:
print ("Creation of the directory %s failed" % OUTPUT_PATH)
else:
print ("Successfully created the directory %s " % OUTPUT_PATH)
# create prompt
prompt_postfix = """
###
Here's what the above code is doing:
1.
"""
# get list of files
file_list = os.listdir(INPUT_PATH)
for fn in file_list:
# read from file
fname = os.path.join(INPUT_PATH, fn); print(fname)
f = open(fname, "r")
code = f.read()
# build input
prompt = code + prompt_postfix
# Configure hyper-parameters
engine=MODEL
prompt=prompt
temperature=0 # [0, 1]
max_tokens=500
top_p=1
frequency_penalty=0 # [0, 2]
presence_penalty=0 # [0, 2]
best_of=1
stop=["###"]
# make a request to api
response = prompt_api(engine, prompt, temperature, max_tokens, top_p, frequency_penalty, presence_penalty, best_of, stop)
# format output
output = format_output(prompt_postfix, response)
# write output to file
fname_out = os.path.join(OUTPUT_PATH, fn + '.output'); print(fname_out)
output_file = open(fname_out, "w")
output_file.write(output)
output_file.close() | [
" \n ###\n Here's what the above code is doing:\n 1.\n ",
"PLACEHOLDER \n ###\n Here's what the above code is doing:\n 1.\n "
] |
2024-01-10 | timoderbeste/gpt-sh | shell_actions.py | import os
from pprint import pprint
from openai_client import get_gpt_response
from prompt_builder import PromptBuilder
from utils import get_tmp_env_var_name, typer_writer
prompt_builder = PromptBuilder()
ACTIONS = [
"SHOW_ENV_VARS", "SET_ENV_VAR", "DELETE_ENV_VAR",
"LOAD_FILE", "SAVE_FILE",
]
def handle_shell_action(inp, env_var2val, latest_response) -> bool:
inp = inp.replace("DO: ", "")
prompt = prompt_builder.do_prompt(inp, actions=ACTIONS)
response = get_gpt_response(
prompt, temperature=1, top_p=1, caching=False, chat=None)
if not response.startswith("ACTION: "):
print(response)
typer_writer(
"The response is not a valid action. This is a bug from OpenAI.")
return False
response = response.replace("ACTION: ", "")
if "," in response or response not in ACTIONS:
typer_writer(
"Your input leads to multiple possible actions. Please be more specific.")
typer_writer(f"Available actions: {ACTIONS}",)
return False
action_name = response.replace("ACTION: ", "")
if action_name == "LOAD_FILE":
return handle_load_file(inp, env_var2val)
elif action_name == "SAVE_FILE":
return handle_save_file(inp, env_var2val)
elif action_name == "SHOW_ENV_VARS":
return handle_show_env_vars(inp, env_var2val)
elif action_name == "SET_ENV_VAR":
return handle_set_env_var(inp, env_var2val, latest_response)
elif action_name == "DELETE_ENV_VAR":
return handle_delete_env_var(inp, env_var2val)
else:
print(f"ACTION: {action_name} is not implemented yet.")
return False
def handle_set_env_var(inp, env_var2val, latest_response) -> bool:
get_var_name_prompt = prompt_builder.set_env_var_get_var_name_prompt(inp)
get_var_name_response = get_gpt_response(
get_var_name_prompt, temperature=1, top_p=1, caching=False, chat=None)
get_var_name_response = get_var_name_response.replace("Output: ", "")
if not get_var_name_response.startswith("VAR_NAME: "):
typer_writer(get_var_name_response)
typer_writer(
"Cannot identify variable name. This is a bug from OpenAI.")
return False
to_var_name = get_var_name_response.replace("VAR_NAME: ", "").strip()
get_content_prompt = prompt_builder.set_env_var_get_content_prompt(inp)
get_content_response = get_gpt_response(
get_content_prompt, temperature=1, top_p=1, caching=False, chat=None)
get_content_response = get_content_response.replace("Output: ", "")
if "LAST_RESPONSE" in get_content_response:
env_var2val[to_var_name] = latest_response
elif "VAR_NAME: " in get_content_response:
from_var_name = get_content_response.replace(
"VAR_NAME: ", "").strip()
if from_var_name not in env_var2val:
typer_writer(
f"Variable {from_var_name} is not defined. Please define it first.")
return False
env_var2val[to_var_name] = env_var2val[from_var_name]
elif "VALUE: " in get_content_prompt:
env_var2val[to_var_name] = get_content_response.replace(
"VALUE: ", "").strip()
else:
typer_writer(
"Cannot identify the value to be set. This is a bug from OpenAI.")
return False
typer_writer(f"Setting the variable with name {to_var_name}")
return str(env_var2val[to_var_name])
def handle_delete_env_var(inp, env_var2val) -> bool:
prompt = prompt_builder.delete_env_var_prompt(inp)
response = get_gpt_response(
prompt, temperature=1, top_p=1, caching=False, chat=None)
if not response.startswith("VAR_NAMES:"):
typer_writer(response)
typer_writer(
"The response is not a valid action. This is a bug from OpenAI.")
return False
response = response.replace("VAR_NAMES:", "").strip()
var_names = response.split(",")
if len(var_names) == 1 and len(var_names[0]) == 0:
var_names = list(env_var2val.keys())
for var_name in var_names:
var_name = var_name.strip()
if var_name not in env_var2val:
typer_writer(f"Variable {var_name} is not defined. Skipping...")
continue
del env_var2val[var_name]
typer_writer(f"Variable {var_name} deleted.")
return True
def handle_load_file(inp, env_var2val) -> bool:
prompt = prompt_builder.load_file_prompt(inp)
response = get_gpt_response(
prompt, temperature=1, top_p=1, caching=False, chat=None)
if not response.startswith("FILE_PATHS: "):
typer_writer(response)
typer_writer(
"The response is not a valid action. This is a bug from OpenAI.")
return False
response = response.replace("FILE_PATHS: ", "")
file_paths = response.split(",")
for file_path in file_paths:
file_path = file_path.strip()
new_env_var_name = get_tmp_env_var_name(
env_var2val, "FILE_CONTENT_VAR_")
try:
with open(file_path, "r") as fp:
env_var2val[new_env_var_name] = fp.read()
except FileNotFoundError:
typer_writer(f"File {file_path} not found.")
return False
typer_writer(f"{file_path} loaded.")
return True
def handle_save_file(inp, env_var2val) -> bool:
prompt = prompt_builder.save_file_prompt(inp)
response = get_gpt_response(
prompt, temperature=1, top_p=1, caching=False, chat=None)
if not "FILE_PATH: " in response and "VAR_NAME: " in response:
typer_writer(response)
typer_writer(
"The response is not a valid action. This is a bug from OpenAI.")
return False
file_path, var_name = response.split(",")
file_path = file_path.replace("FILE_PATH: ", "").strip()
var_name = var_name.replace("VAR_NAME: ", "").strip()
typer_writer(f"Saving {var_name} to {file_path}.")
try:
with open(file_path, "w+") as fp:
fp.write(env_var2val[var_name])
return True
except FileNotFoundError:
typer_writer(f"Path {file_path} not found.")
typer_writer(f"Formatted prompt: {prompt}")
typer_writer(f"Untouched response: {response}")
return False
def handle_show_env_vars(inp, env_var2val) -> bool:
if len(env_var2val) == 0:
typer_writer("No environment variables loaded.")
return True
else:
prompt = prompt_builder.show_env_vars_prompt(inp)
response = get_gpt_response(
prompt, temperature=1, top_p=1, caching=False, chat=None)
if not response.startswith("ENV_VARS:"):
typer_writer(response)
typer_writer(
"The response is not a valid action. This is a bug from OpenAI.")
return False
env_vars = response.replace("ENV_VARS:", "").split(",")
show_full = True
if "" in env_vars:
env_vars.remove("")
if len(env_vars) == 0:
show_full = False
env_vars = env_var2val.keys()
if len(env_vars) == 0:
typer_writer("No environment variables loaded.")
return True
for env_var in env_vars:
val = env_var2val[env_var.strip()]
if show_full:
typer_writer(f"{env_var} = {val}")
else:
typer_writer(f"{env_var} = {val[:50] if val else None}" +
("..." if val and len(val) > 50 else ""))
return True
| [] |
2024-01-10 | timoderbeste/gpt-sh | think_actions.py | import os
from openai_client import get_gpt_response
from prompt_builder import PromptBuilder
from utils import typer_writer
prompt_builder = PromptBuilder()
def handle_think_action(inp, env_var2val, temperature):
inp = inp.replace("THINK: ", "")
for var in env_var2val:
inp = inp.replace(var, (env_var2val[var] if env_var2val[var] else ""))
prompt = inp
# print("Your THINK prompt is: ", prompt)
response = get_gpt_response(
prompt, temperature=temperature, top_p=1, caching=False, chat=None)
typer_writer(response)
return response
| [] |
2024-01-10 | egpz/buibot | response.py | import os
import openai
import sys
import requests
openai.api_key = "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"
keywords = {
'None':{'def':None,'yt':'https://www.youtube.com/watch?v=4c7tXkqPk54'},
'break':{'def':None,'yt':'https://www.youtube.com/watch?v=BTaPo33TBIM'},
'continue':{'def':None,'yt':'https://www.youtube.com/watch?v=BTaPo33TBIM'},
'pass':{'def':None,'yt':'https://www.youtube.com/watch?v=iYegtY08h0Y'},
'def':{'def':None,'yt':'https://www.youtube.com/watch?v=5oAya5NaTzU'},
'class':{'def':None,'yt':'https://www.youtube.com/watch?v=rJzjDszODTI'},
'lambda':{'def':None,'yt':'https://www.youtube.com/watch?v=BcbVe1r2CYc'},
'return':{'def':None,'yt':'https://www.youtube.com/watch?v=IbhQRbOVmL8'},
'yield':{'def':None,'yt':'https://www.youtube.com/watch?v=akqjaqUzdnA'},
'split':{'def':None,'yt':'https://www.youtube.com/watch?v=-yzfxeMBe1s'},
'strip':{'def':None,'yt':'https://www.youtube.com/watch?v=70juN7N13H0'},
'try':{'def':None,'yt':'https://www.youtube.com/watch?v=MImAiZIzzd4'},
'except':{'def':None,'yt':'https://www.youtube.com/watch?v=MImAiZIzzd4'},
'map':{'def':None,'yt':'https://www.youtube.com/watch?v=2qKQGqpRsks'},
'filter':{'def':None,'yt':'https://www.youtube.com/watch?v=2qKQGqpRsks'},
'generator':{'def':None,'yt':'https://www.youtube.com/watch?v=mziIj4M_uwk'},
'sys':{'def':None,'yt':'https://www.youtube.com/watch?v=rLG7Tz6db0w'},
'stdin':{'def':None,'yt':'https://www.youtube.com/watch?v=rLG7Tz6db0w'},
'stdout':{'def':None,'yt':'https://www.youtube.com/watch?v=rLG7Tz6db0w'},
'stderr':{'def':None,'yt':'https://www.youtube.com/watch?v=rLG7Tz6db0w'},
'iterator':{'def':None,'yt':'https://www.youtube.com/watch?v=Dyu08G2l71c'},
'range':{'def':None,'yt':'https://www.youtube.com/watch?v=T6_pYAWkzzA'},
'tuple':{'def':None,'yt':'https://www.youtube.com/watch?v=DehzAA0ZIhA'},
'set':{'def':None,'yt':'https://www.youtube.com/watch?v=t9j8lCUGZXo'},
'dictionary':{'def':None,'yt':'https://www.youtube.com/watch?v=daefaLgNkw0'}
}
def chat(prompt):
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=1024,
)
message = response.choices[0].text.strip()
return message
def test(stream):
code = [line.rstrip().strip() for line in stream if line]
#codeString = ' '.join(code)
#print(chat(f'explain this code line by line: {codeString}'))
#explanationBrief = chat(f'explain what this code does: {codeString}')
explanation = chat(f'if each element in this list is a line of code, explain the code line by line: {code}')
print(explanation)
print()
topics = chat(f'list some key aspects of this code that a user may not understand: {code}')
for key in keywords:
if key in topics:
# definition + video
keywords[key]['def'] = chat(f'what is {key} in python')
print(f"{key}:\n{keywords[key]['def']}\n")
print(f"Video: {keywords[key]['yt']}\n")
test(sys.stdin)
print() | [] |
2024-01-10 | MrSyee/pg-is-all-you-need | segment_tree.py | # -*- coding: utf-8 -*-
"""Segment tree for Proirtized Replay Buffer."""
import operator
from typing import Callable
class SegmentTree:
""" Create SegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
Attributes:
capacity (int)
tree (list)
operation (function)
"""
def __init__(self, capacity: int, operation: Callable, init_value: float):
"""Initialization.
Args:
capacity (int)
operation (function)
init_value (float)
"""
assert (
capacity > 0 and capacity & (capacity - 1) == 0
), "capacity must be positive and a power of 2."
self.capacity = capacity
self.tree = [init_value for _ in range(2 * capacity)]
self.operation = operation
def _operate_helper(
self, start: int, end: int, node: int, node_start: int, node_end: int
) -> float:
"""Returns result of operation in segment."""
if start == node_start and end == node_end:
return self.tree[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._operate_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._operate_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self.operation(
self._operate_helper(start, mid, 2 * node, node_start, mid),
self._operate_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end),
)
def operate(self, start: int = 0, end: int = 0) -> float:
"""Returns result of applying `self.operation`."""
if end <= 0:
end += self.capacity
end -= 1
return self._operate_helper(start, end, 1, 0, self.capacity - 1)
def __setitem__(self, idx: int, val: float):
"""Set value in tree."""
idx += self.capacity
self.tree[idx] = val
idx //= 2
while idx >= 1:
self.tree[idx] = self.operation(self.tree[2 * idx], self.tree[2 * idx + 1])
idx //= 2
def __getitem__(self, idx: int) -> float:
"""Get real value in leaf node of tree."""
assert 0 <= idx < self.capacity
return self.tree[self.capacity + idx]
class SumSegmentTree(SegmentTree):
""" Create SumSegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
"""
def __init__(self, capacity: int):
"""Initialization.
Args:
capacity (int)
"""
super(SumSegmentTree, self).__init__(
capacity=capacity, operation=operator.add, init_value=0.0
)
def sum(self, start: int = 0, end: int = 0) -> float:
"""Returns arr[start] + ... + arr[end]."""
return super(SumSegmentTree, self).operate(start, end)
def retrieve(self, upperbound: float) -> int:
"""Find the highest index `i` about upper bound in the tree"""
# TODO: Check assert case and fix bug
assert 0 <= upperbound <= self.sum() + 1e-5, "upperbound: {}".format(upperbound)
idx = 1
while idx < self.capacity: # while non-leaf
left = 2 * idx
right = left + 1
if self.tree[left] > upperbound:
idx = 2 * idx
else:
upperbound -= self.tree[left]
idx = right
return idx - self.capacity
class MinSegmentTree(SegmentTree):
""" Create SegmentTree.
Taken from OpenAI baselines github repository:
https://github.com/openai/baselines/blob/master/baselines/common/segment_tree.py
"""
def __init__(self, capacity: int):
"""Initialization.
Args:
capacity (int)
"""
super(MinSegmentTree, self).__init__(
capacity=capacity, operation=min, init_value=float("inf")
)
def min(self, start: int = 0, end: int = 0) -> float:
"""Returns min(arr[start], ..., arr[end])."""
return super(MinSegmentTree, self).operate(start, end) | [] |
2024-01-10 | ardaakman/blueberryai | chat_agents.py | import openai
class Agent():
def __init__(self, task, recipient, context_manager):
self.task = task
self.recipient = recipient
# Setup context manager's default value
self.context_manager = context_manager
# Setup chat engine
def generate_agent_description(self, task, recipient):
pass
def generate(self):
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.messages,
)
value = completion.choices[0].message['content']
return value
except:
return "Sorry, I don't understand. Can you repeat that?"
def __call__(self, customer_service_response):
pass
class ContextAgent(Agent):
def __init__(self, task, recipient, context_manager):
super().__init__(task, recipient, context_manager)
self.model = "gpt-3.5-turbo"
self.generate_agent_description()
self.agent_description = {"role": "system", "content": self.agent_description_prompt}
# Setup loggers to keep track of conversation and history
self.messages = [self.agent_description]
self.dialogue_history = []
def generate_agent_description(self):
self.agent_description_prompt = f"""
You're imitating a human that is trying to {self.task}.
You're on a call with {self.recipient} customer service.
Sound like a human and use your context to return the appropriate response.
You could use filler words like 'um' and 'uh' to sound more human.
"""
def __call__(self, customer_service_response):
self.messages.append({"role": "user", "content": self.engineer_prompt(customer_service_response)})
completion = openai.ChatCompletion.create(
model=self.model,
messages=self.messages
)
response = completion.choices[0].message
self.messages.append(response)
return response.content
def engineer_prompt(self, customer_service_response):
"""Generates the prompt for the engineer to respond to.
"""
context = '\n'.join(self.context_manager.context)
prompt = f"""
You're imitating a human that is trying to {self.task}.
You're on a call with {self.recipient} customer service.
Sound like a human and use your context to return the appropriate response.
You could use filler words like 'um' and 'uh' to sound more human.
Here's information about the human you're imitating, you can use this to help you respond:
{context}
Here are some tips when responding to the customer service agent:
- Your response should be to the point and succint.
- Long answers are penalized.
- Give personal information only when asked.
- Represent numbers as digits with spaces in between. Like 5032 should be 5 0 3 2.
- If the agent asks for you to spell something out, you should respond with letters seperated by spaces. Like A P P L E.
Here's an example of good interactions:
Customer support: What is your name?
Agent response: My name is Arvid Kjelberg.
Customer support: What is your date of birth?
Agent response: My date of birth is May 3rd, 1998.
Customer Service Agent:
{customer_service_response}
Your Response:
"""
return prompt
class EfficientContextAgent(Agent):
def __init__(self, task, recipient, context_manager):
super().__init__(task, recipient, context_manager)
self.model = "gpt-3.5-turbo"
self.generate_agent_description()
self.agent_description = {"role": "system", "content": self.agent_description_prompt}
# Setup loggers to keep track of conversation and history
self.messages = [self.agent_description]
self.dialogue_history = []
def generate_agent_description(self):
self.agent_description_prompt = f"""
You're imitating a human that is trying to {self.task}.
You're on a call with {self.recipient} customer service.
Sound like a human and use your context to return the appropriate response.
You could use filler words like 'um' and 'uh' to sound more human.
"""
def __call__(self, customer_service_response):
self.dialogue_history.append({"role": "user", "content": f"Customer Service Agent: {customer_service_response}"})
messages = [self.agent_description] + self.dialogue_history[:-1] + [{"role": "user", "content": self.engineer_prompt(customer_service_response)}]
completion = openai.ChatCompletion.create(
model=self.model,
messages=messages
)
response = completion.choices[0].message
self.dialogue_history.append(response)
return response.content
def engineer_prompt(self, customer_service_response):
"""Generates the prompt for the engineer to respond to.
"""
context = '\n'.join(self.context_manager.context)
prompt = f"""
You're imitating a human that is trying to {self.task}.
You're on a call with {self.recipient} customer service.
Sound like a human and use your context to return the appropriate response.
You could use filler words like 'um' and 'uh' to sound more human.
Here's information about the human you're imitating, you can use this to help you respond:
<Start: Information about human>
{context}
<End: Information about human>
Here are some tips when responding to the customer service agent:
- Your response should be to the point and succint.
- Long answers are penalized.
- Give personal information only when asked.
- Represent numbers as digits with spaces in between. Like 5032 should be 5 0 3 2. For example:
- Instead of writing "64 Montgomery Drive, Pittsford NY 15289", write "6 4 Montgomery Drive, Pittsford NY 1 5 2 8 9"
- Instead of writing "my phone number is 585-321-5352", write "my phone number is 5 8 5 3 2 1 5 3 5 2."
- If the agent asks for you to spell something out, you should respond with letters seperated by spaces. Like A P P L E. Examples include:
- Customer support: Can you spell your name for me?
- Agent response: A R V I D and then K J E L B E R G.
- Customer support: Can you spell your email for me?
- Agent response: A R V I D dot K J E L B E R G at G M A I L dot com.
- If an agent asks you to repeat something, it is to repeat the most recent information. Keep it brief.
Here's an example of good interactions:
Customer support: What could we help you with today?
Agent response: Hi there! I'd like to get a dinner reservation.
Customer support: What is your name?
Agent response: My name is Arvid Kjelberg.
Customer support: How do you spell that?
Agent response: A R V I D and then K J E L B E R G.
Customer support: What is your date of birth?
Agent response: My date of birth is May 3rd, 1998.
Customer support: What is your home address?
Agent response: six four Montgomery Drive, Pittsford NY one five two eight nine.
Now let's transition to your current conversation with the customer service agent. Respond briefly. It shouldn't be more than 30 words.
Customer Service Agent:
{customer_service_response}
Your Response:
"""
return prompt
class SystemBasedAgent(Agent):
def __init__(self, task, recipient, context_manager):
super().__init__(task, recipient, context_manager)
self.model = "gpt-3.5-turbo"
self.generate_agent_description()
self.agent_description = {"role": "system", "content": self.agent_description_prompt}
# Setup loggers to keep track of conversation and history
self.messages = [self.agent_description]
self.dialogue_history = []
def generate_agent_description(self):
context = '\n'.join(self.context_manager.context)
self.agent_description_prompt = f"""
You're imitating a human that is trying to {self.task}.
You're on a call with {self.recipient} customer service.
Sound like a human and use your context to return the appropriate response.
You could use filler words like 'um' and 'uh' to sound more human.
When returning responses, here are some tips:
- Sound like a human and use your context to return the appropriate response.
- Keep responses short, simple, and informal.
- Keep in mind that this is a conversation
- Represent numbers as digits with spaces in between. Like 5032 should be 5 0 3 2.
- If the agent asks for you to spell something out, you should respond with letters seperated by spaces. Like A P P L E.
"""
def __call__(self, customer_service_response, verbose=False):
self.dialogue_history.append({"role": "user", "content": f"{customer_service_response}"})
messages = self.dialogue_history[:-1] + [self.agent_description] + [{"role": "user", "content": self.engineer_prompt(customer_service_response)}]
completion = openai.ChatCompletion.create(
model=self.model,
messages=messages
)
if verbose:
print(messages)
response = dict(completion.choices[0].message)
self.dialogue_history.append(response)
return response["content"]
def engineer_prompt(self, customer_service_response):
"""Generates the prompt for the engineer to respond to.
"""
context = '\n'.join(self.context_manager.context)
prompt = f"""
Here's information about the human you're imitating, you can use this to help you respond:
{context}
Come up with the best response to the customer service agent below.
Customer Service Agent:
{customer_service_response}
Your Response:
"""
return prompt
class EfficientAgent(Agent):
def __init__(self, task, recipient, context_manager):
super().__init__(task, recipient, context_manager)
self.model = "gpt-3.5-turbo"
self.generate_agent_description()
self.agent_description = {"role": "system", "content": self.agent_description_prompt}
# Setup loggers to keep track of conversation and history
self.messages = [self.agent_description]
self.dialogue_history = []
def generate_agent_description(self):
self.agent_description_prompt = f"""
You're imitating a human that is trying to {self.task} with {self.recipient}.
You're on a call with customer service.
Sound like a human and use your context to return the appropriate response. Keep responses short, simple, and informal.
You could use filler words like 'um' and 'uh' to sound more human. To end the call, just return 'bye'.
Your response should be to the point and succint. Don't provide any personal information when not asked.
Represent numbers as digits with spaces in between. Like 5032 should be five zero three two.
"""
def __call__(self, customer_service_response):
self.dialogue_history.append({"role": "user", "content": f"Customer Service Agent: {customer_service_response}"})
self.messages.append({"role": "user", "content": self.engineer_prompt(customer_service_response)})
messages = self.messages[:1] + self.dialogue_history[:-1] + self.messages[-1:]
completion = openai.ChatCompletion.create(
model=self.model,
messages=messages
)
response = completion.choices[0].message
self.messages.append(response)
self.dialogue_history.append(response)
return response.content
def engineer_prompt(self, customer_service_response):
"""Generates the prompt for the engineer to respond to.
"""
context = '\n'.join(self.context_manager.context)
prompt = f"""
Here's information about the human you're imitating, you can use this to help you respond:
<Start: Information about human>
{context}
<End: Information about human>
Your response should be to the point and succint. Represent numbers as digits with spaces in between. Like 5032 should be 5 0 3 2.
If the customer service agent asks for you to spell something out, say spell out "APPLE", you should respond with letters seperated by spaces. Like A P P L E.
You're imitating a human that is trying to {self.task}. Come up with the best response to the customer service agent below.
Customer Service Agent:
{customer_service_response}
Your Response:
"""
return prompt
class CookBook(Agent):
def __init__(self, task, recipient, context_manager):
super().__init__(task, recipient, context_manager)
self.model = "gpt-3.5-turbo"
self.generate_agent_description()
self.agent_description = {"role": "system", "content": self.agent_description_prompt}
# Setup loggers to keep track of conversation and history
self.messages = [self.agent_description]
self.dialogue_history = []
def generate_agent_description(self):
self.agent_description_prompt = f"""
You're imitating a human that is trying to {self.task} with {self.recipient}.
You're on a call with customer service.
Sound like a human and use your context to return the appropriate response. Keep responses short, simple, and informal.
You could use filler words like 'um' and 'uh' to sound more human. To end the call, just return 'bye'.
Here are some tips when responding to the customer service agent:
- Your response should be to the point and succint.
- Long answers are penalized.
- Give personal information only when asked.
- Represent numbers as digits with spaces in between. Like 5032 should be 5 0 3 2.
- If the agent asks for you to spell something out, you should respond with letters seperated by spaces. Like A P P L E. Examples include:
- Customer support: Can you spell your name for me?
- Agent response: A R V I D and then K J E L B E R G.
- Customer support: Can you spell your email for me?
- Agent response: A R V I D dot K J E L B E R G at G M A I L dot com.
"""
def __call__(self, customer_service_response):
self.dialogue_history.append({"role": "user", "content": f"Customer Service Agent: {customer_service_response}"})
messages = [self.agent_description] + self.dialogue_history[:-1] + [{"role": "user", "content": self.engineer_prompt(customer_service_response)}]
completion = openai.ChatCompletion.create(
model=self.model,
messages=messages
)
response = completion.choices[0].message
self.dialogue_history.append(response)
return response.content
def engineer_prompt(self, customer_service_response):
"""Generates the prompt for the engineer to respond to.
"""
context = '\n'.join(self.context_manager.context)
prompt = f"""
Use the provided information delimited by triple quotes to answer questions from a customer service agent. You're imitating a human that is trying to {self.task}. Your response should be conversationally appropriate and to the point.
\"\"\"
{context}
\"\"\"
Question: {customer_service_response}
"""
return prompt | [
"PLACEHOLDER",
"\nHere's information about the human you're imitating, you can use this to help you respond: \nPLACEHOLDER\n\nCome up with the best response to the customer service agent below. \n\nCustomer Service Agent: \nPLACEHOLDER\n\nYour Response:\n ",
"Customer Service Agent: PLACEHOLDER"
] |
2024-01-10 | ardaakman/blueberryai | twilio-file~helper_funcitons.py | import json
import tempfile
import requests
import boto3
import requests
import os, sys
from dotenv import load_dotenv
from pydub import AudioSegment
import numpy as np
import speech_recognition as sr
import noisereduce as nr
import soundfile as sf
import io
import openai
sys.path.append('../') # Add the parent directory to the system path
from chat import Interaction
from chat_agents import EfficientContextAgent
from agent_helpers import ContextManager
# OLD CODE:
# interaction = Interaction(task="create a new account", context_directory="./")
# interaction.recipient = "People's Gas"
# NEW IMPLEMENTATION:
context_manager = ContextManager()
context_manager.load_from_directory("./")
chat_agent = EfficientContextAgent("create a new account", "People's Gas", context_manager)
load_dotenv()
# access-key= MICPMAWAWF2KI1CRWU0B
# secret-key= DkLNyYz0uP1EJINhAizYIlRLzAgWMZSHzbH11RZY
""" Functions to help out with saving to wasabi/file uploads.
upload_file_to_wasabi --> Upload mp3 file to wasabi
get_url_recording --> Download mp3 file from url
count_files_in_directory --> Count number of files in a directory, to set the name of the new file name.
"""
def save_message(call_id, message, sender):
print('save_message')
url = "http://127.0.0.1:8201/save_message"
data = {"message": message, "sender": sender, "call_id": call_id}
response_val = requests.post(url, data = json.dumps(data))
print(response_val.text)
return response_val.text
def upload_file_to_wasabi(file_path, bucket_name):
s3 = boto3.client('s3',
endpoint_url='https://s3.us-west-1.wasabisys.com', # Use the correct endpoint URL for your Wasabi region
aws_access_key_id='6UQ6BKLP89DNA5G37191', # Replace with your access key
aws_secret_access_key='tpkQAodRS6LfjfC33VTF8GzhorewzhzfWuElr8sI') # Replace with your secret key
file_name = os.path.basename(file_path)
try:
s3.upload_file(file_path, bucket_name, file_name)
print(f"File uploaded to Wasabi successfully!")
except Exception as e:
print("Something went wrong: ", e)
def combine_audios(audio_urls, output_filename):
# Initialize an empty AudioSegment object
output_audio = AudioSegment.empty()
# Iterate over each audio URL and download it to memory
audio_segments = []
for audio_url in audio_urls:
response = requests.get(audio_url)
audio_bytes = io.BytesIO(response.content)
audio_segment = AudioSegment.from_file(audio_bytes)
audio_segments.append(audio_segment)
# Concatenate the audio segments into a single output AudioSegment
for audio_segment in audio_segments:
output_audio += audio_segment
# Export the output AudioSegment to a new audio file locally
output_audio.export(output_filename, format="mp3")
return output_filename
def detect_speech_with_noise_reduction(audio_url):
# Download the audio file from the URL
response = requests.get(audio_url)
with tempfile.NamedTemporaryFile(delete=True) as temp_file:
temp_file.write(response.content)
temp_file.flush()
# Load the downloaded audio file
audio_data, sample_rate = sf.read(temp_file.name)
# Apply noise reduction
# reduced_noise = nr.reduce_noise(y=audio_data, sr=sample_rate)
# Save the reduced noise audio to a temporary file
with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as noise_reduced_file:
sf.write(noise_reduced_file.name, audio_data, sample_rate)
noise_reduced_file.flush()
# Perform speech recognition on the reduced noise audio
recognizer = sr.Recognizer()
with sr.AudioFile(noise_reduced_file.name) as source:
audio = recognizer.record(source)
try:
text = recognizer.recognize_google(audio)
print("Speech detected!")
print("Transcribed text:", text)
return True
except sr.UnknownValueError:
print("No speech detected.")
return False
# def combine_audios(audio_urls):
# combined = AudioSegment.empty()
# for url in audio_urls:
# # Download the audio file from the URL
# response = requests.get(url)
# file_name = "temp_audio.mp3"
# with open(file_name, 'wb') as file:
# file.write(response.content)
# # Load audio file
# audio = AudioSegment.from_mp3(file_name)
# # Append audio file to combined audio
# combined += audio
# # Export combined audio file
# num_files = count_files_in_directory("./outputs")
# combined.export("outputs/output_{}.mp3".format(num_files), format='mp3')
# return "outputs/output_{}.mp3".format(num_files)
def get_url_recording(url):
response = requests.get(url, stream=True)
print("creating a response")
# Ensure the request is successful
if response.status_code == 200:
# Open the file in write-binary mode and write the response content to it
with open('output.mp3', 'wb') as file:
for chunk in response.iter_content(chunk_size=1024):
file.write(chunk)
else:
print('Failed to download the file.')
def count_files_in_directory(directory):
return len([f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))])
""" Functions to help out with Humes API calls for speech to text."""
def convert_speech_to_text(recording_locn):
if os.path.isfile(recording_locn):
return convert_speech_to_text_whisper_local(recording_locn)
else:
raise Exception("File does not exist")
def convert_speech_to_text_whisper_local(local_file_path):
# Transcribe the audio using Whisper API
with open(local_file_path, "rb") as file:
transcript = openai.Audio.transcribe("whisper-1", file)
return transcript.text
def convert_speech_to_text_whisper_url(recording_url):
# Download the audio file from the URL
response = requests.get(recording_url)
print(recording_url)
audio_file = response.content
# Save the audio data to a file
with open("temp.wav", "wb") as file:
file.write(audio_file)
# Transcribe the audio using Whisper API
with open("temp.wav", "rb") as file:
transcript = openai.Audio.transcribe("whisper-1", file)
return transcript.text
def convert_speech_to_text_hume(recording_url):
url = "https://api.hume.ai/v0/batch/jobs"
payload = "{\"models\":{\"face\":{\"fps_pred\":3,\"prob_threshold\":0.99,\"identify_faces\":false,\"min_face_size\":60,\"save_faces\":false},\"prosody\":{\"granularity\":\"utterance\",\"identify_speakers\":false,\"window\":{\"length\":4,\"step\":1}},\"language\":{\"granularity\":\"word\",\"identify_speakers\":false},\"ner\":{\"identify_speakers\":false}},\"transcription\":{\"language\":null}," + "\"urls\":[\"" + recording_url + "\"],\"notify\":false}"
headers = {
"accept": "application/json; charset=utf-8",
"content-type": "application/json; charset=utf-8",
"X-Hume-Api-Key": os.getenv("HUME_API_KEY"),
}
response = requests.post(url, data=payload, headers=headers)
return response
# Function to process the recording and generate a response
def process_recording(path, call_id):
# Convert recording to text using speech-to-text API or library
# Here, let's assume we have a function called `convert_speech_to_text` for this purpose
recipient_message = convert_speech_to_text(path)
generated_response = save_message(call_id, recipient_message)
print("Done!")
print("\t Generated response: ", generated_response)
# Save the generated response as an audio url
audio_url = save_generated_response_as_audio(generated_response)
return audio_url, generated_response
# Function to save the generated response as an audio file
def save_generated_response_as_audio(generated_response):
conversational_style_id = "6434632c9f50eacb088edafd"
marcus_speaker_id = "643463179f50eacb088edaec"
url = "https://api.fliki.ai/v1/generate/text-to-speech"
headers = {
"Authorization": f"Bearer {os.getenv('FLIKI_API_KEY')}",
"Content-Type": "application/json"
}
data = {
"content": generated_response,
"voiceId": marcus_speaker_id,
"voiceStyleId": conversational_style_id
}
response = requests.post(url, headers=headers, json=data)
# Check the response status code
if response.status_code == 200:
# Process the response
audio_data = response.content
# Do something with the audio data
response_dict = json.loads(audio_data)
# Now you can access the dictionary elements
success = response_dict["success"]
audio_url = response_dict["data"]["audio"]
duration = response_dict["data"]["duration"]
return audio_url
else:
# Handle the error
raise Exception(f"Request failed with status code {response.status_code}: {response.text}") | [] |
2024-01-10 | ardaakman/blueberryai | twilio-file~side_twilio.py | from twilio.rest import Client
from twilio.twiml.voice_response import VoiceResponse
from flask import Flask, request
import requests
import time
import json
import openai
import os
from dotenv import load_dotenv
import sys
sys.path.append('../') # Add the parent directory to the system path
from chat import Interaction
interaction = Interaction(task="create a new account", context_directory="../data/ekrem/")
interaction.recipient = "People's Gas"
# Load the variables from the .env file
load_dotenv()
# Initialize Flask app
app = Flask(__name__)
# Set up Twilio client
account_sid = os.getenv('ACCOUNT_SID')
auth_token = os.getenv('AUTH_TOKEN')
twilio_phone_number = os.getenv("TWILIO_PHONE_NUMBER")
recipient_phone_number = os.getenv('RECIPIENT_PHONE_NUMBER')
hume_api_key = os.getenv('HUME_API_KEY')
fliki_api_key = os.getenv('FLIKI_API_KEY')
client = Client(account_sid, auth_token)
# Set up OpenAI
openai.api_key = os.getenv('OPENAI_API_KEY')
# ngrok_url = request.url_root
ngrok_url = "https://3dce-2607-f140-6000-11-7042-9d7-474a-1bff.ngrok-free.app"
# Function to handle incoming call
@app.route('/handle_incoming', methods=['POST'])
def handle_incoming_call():
print("Handling incoming call...")
response = VoiceResponse()
response.say("Hello, how can I assist you?")
print("Trying to record...", end="")
response.record(max_length=30, action=f'{ngrok_url}process_recording')
print("Done.")
def convert_speech_to_text(recording_url):
url = "https://api.hume.ai/v0/batch/jobs"
payload = "{\"models\":{\"face\":{\"fps_pred\":3,\"prob_threshold\":0.99,\"identify_faces\":false,\"min_face_size\":60,\"save_faces\":false},\"prosody\":{\"granularity\":\"utterance\",\"identify_speakers\":false,\"window\":{\"length\":4,\"step\":1}},\"language\":{\"granularity\":\"word\",\"identify_speakers\":false},\"ner\":{\"identify_speakers\":false}},\"transcription\":{\"language\":null}," + "\"urls\":[\"" + recording_url + "\"],\"notify\":false}"
headers = {
"accept": "application/json; charset=utf-8",
"content-type": "application/json; charset=utf-8",
"X-Hume-Api-Key": hume_api_key
}
response = requests.post(url, data=payload, headers=headers)
return response.text
# Function to process the recording and generate a response
def process_recording(recording_url):
print("Starting: process_recording...")
# Convert recording to text using speech-to-text API or library
# Here, let's assume we have a function called `convert_speech_to_text` for this purpose
recipient_message = convert_speech_to_text(recording_url)
print("Generating response...", end="")
# Generate a response using OpenAI
generated_response = interaction(recipient_message)
print("Done!")
print("\t Generated response: ", generated_response)
print("Saving response as audio...", end="")
# Save the generated response as an audio url
audio_url = save_generated_response_as_audio(generated_response)
print("Done!")
print("Sending response to recipient...", end="")
# Respond to the recipient with the generated answer
response = VoiceResponse()
response.play(audio_url)
print("Done!")
response.record(max_length=30, action='/process_recording')
return str(response)
# Function to save the generated response as an audio file
def save_generated_response_as_audio(generated_response):
conversational_style_id = "6434632c9f50eacb088edafd"
marcus_speaker_id = "643463179f50eacb088edaec"
url = "https://api.fliki.ai/v1/generate/text-to-speech"
headers = {
"Authorization": f"Bearer {fliki_api_key}",
"Content-Type": "application/json"
}
data = {
"content": generated_response,
"voiceId": marcus_speaker_id,
"voiceStyleId": conversational_style_id
}
response = requests.post(url, headers=headers, json=data)
# Check the response status code
if response.status_code == 200:
# Process the response
audio_data = response.content
# Do something with the audio data
response_dict = json.loads(audio_data)
# Now you can access the dictionary elements
success = response_dict["success"]
audio_url = response_dict["data"]["audio"]
duration = response_dict["data"]["duration"]
return audio_url
else:
# Handle the error
raise Exception(f"Request failed with status code {response.status_code}: {response.text}")
# Twilio webhook to process the recording and generate a response
@app.route('/process_recording', methods=['POST'])
def process_recording_webhook():
print("Processing process_recording_webhook...")
recording_url = request.form['RecordingUrl']
response = process_recording(recording_url)
return response
@app.route('/call', methods=['GET'])
def call():
print("Calling...")
# Create a Twilio call
twiml = VoiceResponse()
twiml.say("Hello, how can I assist you?")
twiml.record(maxLength="30", action="/handle_incoming")
# Create a Twilio call
call = client.calls.create(
twiml=str(twiml),
to=recipient_phone_number,
from_=twilio_phone_number
)
return "Calling..."
# Start the Flask server to listen for incoming requests
if __name__ == '__main__':
app.run()
| [] |
2024-01-10 | ardaakman/blueberryai | API_backup.py | import asyncio
from fastapi import FastAPI, APIRouter, Request, Form, WebSocket, WebSocketDisconnect, BackgroundTasks
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse, RedirectResponse, FileResponse
from fastapi.websockets import WebSocket
from pathlib import Path
from utils import *
import json
import requests
import httpx
import openai
import os
from configs import *
from twilio.rest import Client
from chat import ContextManager
app = FastAPI()
api_router = APIRouter()
BASE_PATH = Path(__file__).resolve().parent
app.mount("/static", StaticFiles(directory="static"), name="static")
TEMPLATES = Jinja2Templates(directory=str(BASE_PATH / "templates"))
USER_ID = 1
openai.api_key = openai_api_key
class Chat:
def __init__(self, context):
self.agent_description = self.generate_agent_description(context)
self.history = [{'role': 'system', 'content': self.agent_description}]
def generate_agent_description(self, context):
prompt = f"""
You're imitating a human that is trying to {context}.
You're on a call with customer service.
Sound like a human and use your context to return the appropriate response. Keep responses short, simple, and informal.
You could use filler words like 'um' and 'uh' to sound more human. To end the call, just return 'bye'. For information you are unsure about, return "/user <question>".
Here is some information about you:
"""
with open('info.txt', 'r') as f:
info = f.read()
prompt += info
print(prompt)
return prompt
def add(self, message, role='user'):
self.history.append({'role': role, 'content': message})
def generate(self):
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.history,
)
value = completion.choices[0].message['content']
print(value)
return value
except:
return "Sorry, I don't understand. Can you repeat that?"
def stream(self, socket):
return True
class Call:
def __init__(self, to, context, call_id):
self.recipient = to
self.context = context
if os.stat("info.txt").st_size == 0:
self.questions = self.generate_questions()
self.chat = Chat(context)
self.call_id = call_id
def generate_questions(self):
try:
prompt = f"""Given the context of {self.context}, what are some possible personal questions,
such as date of birth, account number, etc. that the customer service agent might ask the user?
Phrase questions as key words, such as "Date of Birth". Give multiple questions seperated by a new line."""
prompt = [{'role': 'user', 'content': prompt}]
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=prompt,
)
value = completion.choices[0].message['content']
questions = value.split('\n')
for question in questions:
# ask question in input terminal and save question: answer as a new line to info.txt
answer = input(question + '\n')
with open('info.txt', 'a') as f:
f.write(question + ': ' + answer + '\n')
except:
print('error')
return False
def call(self):
client = Client(account_sid, auth_token)
to = self.recipient
to = "9495016532"
call = client.calls.create(
# url='https://handler.twilio.com/twiml/EH9c51bf5611bc2091f8d417b4ff44d104',
url=f'''https://fe8f-2607-f140-400-a011-20c1-f322-4b13-4bc9.ngrok-free.app/convo/{self.call_id}''',
# url=f'''https://fe8f-2607-f140-400-a011-20c1-f322-4b13-4bc9.ngrok-free.app/convo''',
to="+1" + to,
from_="+18777192546"
)
print(call.sid)
async def make_http_request(url: str, data: dict):
async with httpx.AsyncClient() as client:
response = await client.post(url, data=data)
return response
@app.get("/favicon.ico")
async def get_favicon():
return FileResponse("static/favicon.ico")
@app.get("/", response_class=HTMLResponse)
async def init(request: Request):
'''
Load page to set call settings
'''
# get phonebook from db
with get_db_connection() as conn:
# select distict phone numbers and recipient from call_log where user_id is user id
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT DISTINCT phone_number, recipient FROM call_log WHERE user_id = ?", (USER_ID,))
phonebook = cur.fetchall()
return TEMPLATES.TemplateResponse(
"init.html",
{
"request": request,
"page": "init",
"phonebook": phonebook,
}
)
@app.post("/init_call", response_class=HTMLResponse)
async def init(request: Request, number: str = Form(), recipient: str = Form(), context: str = Form()):
'''
Load page to set call settings
'''
# save call to db [TODO]
with get_db_connection() as conn:
cur = conn.cursor()
cur.execute("INSERT INTO call_log (user_id, phone_number, recipient, context) VALUES (?, ?, ?, ?)", (USER_ID, number, recipient, context))
call_id = cur.lastrowid
conn.commit()
# redirect to call page
return RedirectResponse(f"/questions/{call_id}", status_code=303)
@app.get("/questions/{call_id}", response_class=HTMLResponse)
async def questions(request: Request, call_id: str):
'''
Page to view call history
'''
with get_db_connection() as conn:
cur = conn.cursor()
cur.execute("SELECT context FROM call_log WHERE id = ?", (call_id,))
context = cur.fetchone()[0]
questions = ContextManager.generate_questions_from_task(context)
# generate questions
return TEMPLATES.TemplateResponse(
"questions.html",
{
"request": request,
"call_id": call_id,
"page": "questions",
"questions": questions
}
)
@app.post("/questions", response_class=HTMLResponse)
async def questions(request: Request, call_id: str = Form()):
'''
Page to view call history
'''
body = await request.form()
print("printing body")
question_answer_pairs = []
for key in body.keys():
if key != "call_id":
question_answer_pairs.append(f'''{key}: {body[key]}''')
return RedirectResponse(f"/call/{call_id}", status_code=303)
@app.get("/call/{call_id}", response_class=HTMLResponse)
async def call(request: Request, call_id: str, background_tasks: BackgroundTasks):
'''
Page to view ongoinng call
'''
# initiate call [TODO]
# url = 'http://127.0.0.1:5001/start_call'
with get_db_connection() as conn:
cur = conn.cursor()
cur.execute("SELECT phone_number, context, id FROM call_log WHERE id = ?", (call_id,))
call = cur.fetchone()
call = Call(call[0], call[1], call[2])
call.call()
# data = {
# 'call_id': call_id,
# 'to': call[0],
# 'context': call[1]
# }
# background_tasks.add_task(make_http_request, url, data)
# print("added task")
# # Add the request function to the background tasks
# async with httpx.AsyncClient() as client:
# response = await client.get(url, params=data)
return TEMPLATES.TemplateResponse(
"chat.html",
{
"request": request,
"page": "call",
'call_id': call_id
}
)
@app.post("/save_message")
def save_message(request: Request):
'''
Send data to client
'''
# save to db
# send to client
response_val = "Hi how are you doing today?"
# generate respoinse
data = {'message': message, 'call_id': call_id, 'sender': sender}
send_data_to_clients(json.dumps(data))
return response_val
# SOCKETS
sockets = []
async def send_data_to_clients(data):
# Iterate over each connected websocket
for websocket in sockets:
try:
data = json.dumps(data)
await websocket.send_text(data)
except Exception:
# Handle any errors that occur while sending data
pass
@app.websocket("/websocket")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
# Append the connected websocket to the 'sockets' list
sockets.append(websocket)
try:
while True:
data = await websocket.receive_text()
# Send the received data to all connected clients
await send_data_to_clients(data)
except WebSocketDisconnect:
print("client disconnected")
sockets.remove(websocket)
@app.get("/account", response_class=HTMLResponse)
async def history(request: Request):
'''
Page to view call history
'''
return TEMPLATES.TemplateResponse(
"account.html",
{
"request": request,
}
)
# end call
@app.post("/end_call")
async def end_call(request: Request):
'''
End call
'''
body = await request.json()
call_id = body['call_id']
print('ending call' + call_id)
# add message
return {'status': 'success'}
# add message
# @app.post("/send_message")
# async def add_message(request: Request):
# '''
# End call
# '''
# body = await request.json()
# call_id = body['call_id']
# message = body['message']
# print('sending message' + message)
# # add message
# return {'status': 'success'}
| [
"\n You're imitating a human that is trying to PLACEHOLDER. \n You're on a call with customer service. \n Sound like a human and use your context to return the appropriate response. Keep responses short, simple, and informal.\n You could use filler words like 'um' and 'uh' to sound more human. To end the call, just return 'bye'. For information you are unsure about, return \"/user <question>\".\n Here is some information about you:\n "
] |
2024-01-10 | melnikoff-oleg/adam_analyst_bot | data_analyst.py | import io
import logging
import traceback
import pathlib
import numpy as np
import pandas as pd
import seaborn as sns
import plotly as plotly
from langchain.agents import Tool
from langchain.chat_models import ChatOpenAI
from agent import BaseMinion
from common_prompts import TableDescriptionPrompt
from custom_python_ast import CustomPythonAstREPLTool
from settings import Settings
def preparation(
path: str,
build_plots: False,
user_data_description: str,
):
sheet_name = "Sheet1"
file_extension = pathlib.Path(path).suffix
if file_extension == ".XLSX":
df = pd.read_excel(path, sheet_name=sheet_name)
elif file_extension == ".json":
df = pd.read_json(path)
elif file_extension == ".csv":
df = pd.read_csv(path)
else:
raise Exception("Unknown file extension")
df_head = df.head()
df_info = io.StringIO()
df.info(buf=df_info)
settings = Settings()
llm = ChatOpenAI(
temperature=0.7,
model="gpt-4", # gpt-3.5-turbo
openai_api_key=settings.OPENAI_API_TOKEN,
)
python_tool = CustomPythonAstREPLTool(
locals={"df": df, "python": None, "python_repl_ast": None},
globals={"pd": pd, "np": np, "sns": sns, "plotly": plotly},
)
prompt = TableDescriptionPrompt(
user_data_description,
build_plots=build_plots,
)
ag = BaseMinion(
base_prompt=prompt.__str__(),
available_tools=[
Tool(
name=python_tool.name,
description=python_tool.description,
func=python_tool._run,
)
],
model=llm,
max_iterations=50,
)
return ag, df_head, df_info
logging.basicConfig(level=logging.INFO, filename="py_log.log", filemode="w")
def use_data_assistant(
data_path: str,
data_description: str,
question: str,
) -> str:
build_plots = False
# for plot_keyword in [
# "график",
# "нарисовать",
# "нарисуй",
# "распределение",
# "изобра",
# "chart",
# "plot",
# "graph",
# "draw",
# ]:
# if plot_keyword in question.lower():
# build_plots = True
ag, df_head, df_info = preparation(
path=data_path,
build_plots=build_plots,
user_data_description=data_description,
)
try:
return f"Answer: {ag.run(input=question, df_head=df_head, df_info=df_info.getvalue())}"
except Exception as e:
return f"Failed with error: {traceback.format_exc()}"
| [] |
2024-01-10 | ddematheu2/tweet-artist-ai | twitterArtist.py | import tweepy
import json
import openai
import urllib.request
import io
bearer_token = "YOUR BEARER TOKEN"
access_token = "YOUR ACCESS TOKEN"
access_token_secret = "YOUR ACCESS TOKEN SECRET"
consumer_key = "YOUR CONSUMER KEY"
consumer_secret = "YOUR CONSUMER SECRET"
open_ai_key = "YOUR OPEN AI KEY"
# Authenticate to Twitter
client = tweepy.Client(consumer_key=consumer_key, consumer_secret=consumer_secret,
access_token=access_token, access_token_secret=access_token_secret)
auth = tweepy.OAuth1UserHandler (consumer_key, consumer_secret, access_token, access_token_secret)
api = tweepy.API(auth)
# Authenticate to OpenAI
openai.api_key = open_ai_key
# Create stream rule for twitter
handle = "YOUR HANDLE"
bot_twitter_id = "YOUR BOTS TWITTER ACCOUNT ID"
mentionsRule = tweepy.StreamRule(handle + " is:reply has:mentions -from:" + bot_twitter_id + " -to:" + bot_twitter_id)
# Call Open AI to generate image
def illustrate_tweet(tweet_text):
print(tweet_text)
response = openai.Image.create(
prompt = tweet_text,
n = 1,
size = '512x512'
)
image_url = response['data'][0]['url']
return image_url
# Create tweet to post
def reply_to_tweet(og_tweet, reply_id):
image_url = illustrate_tweet(og_tweet.text)
media_id = upload_image(image_url)
client.create_tweet(
text="Made this original piece for you:",
media_ids= [media_id],
in_reply_to_tweet_id=reply_id,
user_auth=True
)
# Get information of the top tweet in the thread where the bot was mentioned
def get_original_tweet(id):
tweet_reply = client.get_tweet(id, tweet_fields=["conversation_id"], user_auth=True)
tweet_to_illustrate_id = tweet_reply.data.conversation_id
tweet_to_illustrate_response = client.get_tweet(tweet_to_illustrate_id,user_auth=True)
tweet_to_illustrate = tweet_to_illustrate_response.data
return (tweet_to_illustrate)
# Convert response to JSON
def process_response(data):
d = json.loads(data)
return d
# Upload image to Twitter
def upload_image(image_url):
data = urllib.request.urlopen(image_url).read()
file_like_object = io.BytesIO(data)
media = api.simple_upload("TweetArt.png",file=file_like_object)
print(media.media_id)
return media.media_id
# Override of Stream client object to modify on_data method
class MentionsPrinter (tweepy.StreamingClient):
def on_connect(self):
print("Connected to the streaming API.")
def on_data(self, data):
d = process_response(data)
tweet_id = d['data']['id']
#og_tweet has properties text and id
og_tweet = get_original_tweet(tweet_id)
reply_to_tweet(og_tweet, tweet_id)
# Create stream client and start filtering
mentionsPrinter = MentionsPrinter(bearer_token)
mentionsPrinter.filter()
| [] |
2024-01-10 | henghuisan/practice-with-open-ai | app~routes.py | import os
import uuid
from flask import Blueprint, render_template, redirect, url_for, request, jsonify
from openai.error import InvalidRequestError
from .utils import (
generate_image,
generate_essay,
generate_ai_chatbot_response,
generate_corrected_transcript_with_cloudinary_audio_file,
generate_corrected_transcript_with_local_audio_file,
)
import cloudinary.uploader
import cloudinary.api
from dotenv import load_dotenv
# Load environment variables from .flaskenv
load_dotenv()
cloudinary.config(
cloud_name=os.getenv("CLOUDINARY_CLOUD_NAME"),
api_key=os.getenv("CLOUDINARY_API_KEY"),
api_secret=os.getenv("CLOUDINARY_API_SECRET"),
)
main_bp = Blueprint("main", __name__)
image_gen_bp = Blueprint("image_gen", __name__)
essay_gen_bp = Blueprint("essay_gen", __name__)
ai_chatbot_bp = Blueprint("ai_chatbot", __name__)
speech_to_text_bp = Blueprint("speech_to_text", __name__)
@main_bp.route("/", methods=("GET", "POST"))
def index():
return redirect(url_for("ai_chatbot.ai_chatbot"))
@image_gen_bp.route("/", methods=("GET", "POST"))
def image_generator():
if request.method == "POST":
prompt = request.form["description"]
status, result = generate_image(prompt)
return redirect(
url_for(
"image_gen.image_generator",
status=status,
result=result,
prompt=prompt,
)
)
args = request.args
status, result, prompt = args.get("status"), args.get("result"), args.get("prompt")
return render_template(
"image_generator.html", status=status, result=result, prompt=prompt
)
@essay_gen_bp.route("/", methods=("GET", "POST"))
def essay_generator():
if request.method == "POST":
essay_idea = request.form.get("essayIdea", "")
# Get the value of "essayWordCount" from the form or provide a default value (e.g., 500) if not present
essay_word_count = request.form.get("essayWordCount", 500)
prompt = f"Write me an essay about {essay_idea} with {essay_word_count} words."
messages = [{"role": "user", "content": prompt}]
status, result = generate_essay(messages)
return jsonify({"status": status, "result": result, "prompt": prompt}), 200
return render_template("essay_generator.html")
@ai_chatbot_bp.route("/", methods=("GET", "POST"))
def ai_chatbot():
messages = (
request.args.get("messages")
if request.args.get("messages")
else [{"role": "system", "content": "What can I help you today?"}]
)
if request.method == "POST":
prompt = request.form["input"]
messages.append({"role": "user", "content": prompt})
status, messages = generate_ai_chatbot_response(messages)
return jsonify({"status": status, "messages": messages}), 200
return render_template("ai_chatbot.html", messages=messages)
# save audio file to Cloudinary
@speech_to_text_bp.route("/", methods=("GET", "POST"))
def speech_to_text():
if request.method == "POST":
if "audio" in request.files:
audio_file = request.files["audio"]
if audio_file:
# Generate a unique filename for the audio file
folder = "open-ai-audio"
filename = f"{str(uuid.uuid4())}.wav"
# Save the audio file to the "open-ai-audio" folder in Cloudinary
result = cloudinary.uploader.upload(
audio_file,
folder=folder,
resource_type="raw",
public_id=filename,
overwrite=True,
)
# Get the public URL of the uploaded audio file
audio_url = result["secure_url"]
(
status,
result,
) = generate_corrected_transcript_with_cloudinary_audio_file(audio_url)
# Delete the file
public_id = f"{folder}/{filename}"
cloudinary.uploader.destroy(public_id, resource_type="raw")
return jsonify({"status": status, "result": result}), 200
return jsonify({"status": "error", "message": "No audio file received."})
return render_template("speech_to_text.html")
# save audio file to Local File Sytem
# @speech_to_text_bp.route("/", methods=("GET", "POST"))
# def speech_to_text():
# if request.method == "POST":
# if "audio" in request.files:
# audio_file = request.files["audio"]
# if audio_file:
# # Generate a unique filename for the audio file
# filename = f"{str(uuid.uuid4())}.wav"
# # Save the audio file to the "static/audio" directory
# audio_file_path = os.path.join("app", "static", "audio", filename)
# audio_file.save(audio_file_path)
# # Check if the file exists before generating the transcript
# if os.path.isfile(audio_file_path):
# status, result = generate_corrected_transcript_with_local_audio_file(audio_file_path)
# # Close the file
# audio_file.close()
# # Delete the file
# os.remove(audio_file_path)
# return jsonify(status=status, result=result)
# return jsonify({"status": "error", "message": "No audio file received."})
# return render_template("speech_to_text.html")
| [
"Write me an essay about PLACEHOLDER with PLACEHOLDER words.",
"description",
"input",
"What can I help you today?"
] |
2024-01-10 | Jugendhackt/MyEventWorld | topics.py | # Ordnet allen Einträgen mithilfe von KI Topics zu
from dotenv import load_dotenv
from copy import deepcopy
from os import environ, getenv
from functools import lru_cache
import openai
import utils
load_dotenv()
openai.api_key = getenv("OPENAI_API_KEY")
def main():
file_path = "WebScrapping/Kölnopendata.json"
data = utils.read_json_to_dict(file_path)
length = len(data["items"])
for i, value in enumerate(reversed(deepcopy(data["items"]))):
# if "thema" in value:
# continue
text = value["title"] + value["description"]
topic = [word.strip() for word in get_topic(text).split(",")]
data["items"][i]["thema"] = topic
print(f"{i}/{length} - {topic} - {value['title']}")
utils.write_dict_to_json(file_path, data)
@lru_cache
def get_topic(text: str) -> str:
messages = [
{
"role": "system",
"content": """Task: Classify the following Text by one or more of the following topics. Don't make topics up on your own.
Topics:
- MINT
- Politik
- Bürgerbeteiligung
- Sprache
- Handwerk
- Informatik
- Kunst/Kultur
- Geschichte
- Logikspiele/Spiele
- Sport
- Schule"""
},
{
"role": "user",
"content": "Bilderbuchgeschichten für Kinder ab 3 Jahren in der Stadtteilbibliothek Chorweiler"
},
{
"role": "assistant",
"content": "Sprache"
},
{
"role": "user",
"content": "Workshop Digitales Zeichnen"
},
{
"role": "assistant",
"content": "Informatik,Kunst/Kultur"
},
{
"role": "user",
"content": text
}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
return response["choices"][0]["message"]["content"]
if __name__ == "__main__":
main()
| [
"Bilderbuchgeschichten für Kinder ab 3 Jahren in der Stadtteilbibliothek Chorweiler",
"PLACEHOLDERPLACEHOLDER",
"Informatik,Kunst/Kultur",
"Sprache",
"Workshop Digitales Zeichnen",
"Task: Classify the following Text by one or more of the following topics. Don't make topics up on your own. \n Topics:\n - MINT\n - Politik\n - Bürgerbeteiligung\n - Sprache\n - Handwerk\n - Informatik\n - Kunst/Kultur\n - Geschichte\n - Logikspiele/Spiele\n - Sport\n - Schule"
] |
2024-01-10 | JeremyEngram/griptape | tests~unit~drivers~prompt~test_anthropic_prompt_driver.py | from griptape.drivers import AnthropicPromptDriver
class TestAnthropicPromptDriver:
def test_init(self):
assert AnthropicPromptDriver(api_key='1234')
| [] |
2024-01-10 | JeremyEngram/griptape | griptape~drivers~prompt~azure_openai_completion_prompt_driver.py | from attr import define, field, Factory
from griptape.utils import PromptStack
from griptape.drivers import OpenAiCompletionPromptDriver
from griptape.tokenizers import TiktokenTokenizer
@define
class AzureOpenAiCompletionPromptDriver(OpenAiCompletionPromptDriver):
api_base: str = field(kw_only=True)
model: str = field(kw_only=True)
deployment_id: str = field(kw_only=True)
api_type: str = field(default="azure", kw_only=True)
api_version: str = field(default="2023-05-15", kw_only=True)
tokenizer: TiktokenTokenizer = field(
default=Factory(lambda self: TiktokenTokenizer(model=self.model), takes_self=True),
kw_only=True
)
def _base_params(self, prompt_stack: PromptStack) -> dict:
return super()._base_params(prompt_stack) | {
"deployment_id": self.deployment_id
}
| [] |
2024-01-10 | JeremyEngram/griptape | griptape~drivers~embedding~azure_openai_embedding_driver.py | from typing import Union
from attr import define, field, Factory
from griptape.drivers import OpenAiEmbeddingDriver
from griptape.tokenizers import TiktokenTokenizer
@define
class AzureOpenAiEmbeddingDriver(OpenAiEmbeddingDriver):
"""
Attributes:
model: OpenAI embedding model name. Uses `text-embedding-ada-002` by default.
deployment_id: Azure OpenAi deployment ID.
api_base: API URL.
api_type: Can be changed to use OpenAI models on Azure.
api_version: API version.
tokenizer: Custom `TiktokenTokenizer`.
"""
model: str = field(kw_only=True)
deployment_id: str = field(kw_only=True)
api_base: str = field(kw_only=True)
api_type: str = field(default="azure", kw_only=True)
api_version: str = field(default="2023-05-15", kw_only=True)
tokenizer: TiktokenTokenizer = field(
default=Factory(lambda self: TiktokenTokenizer(model=self.model), takes_self=True),
kw_only=True
)
def _params(self, chunk: Union[list[int], str]) -> dict:
return super()._params(chunk) | {
"deployment_id": self.deployment_id
}
| [] |
2024-01-10 | Harvard-University-iCommons/chainlit | cypress~e2e~openai~main_async.py | import os
import openai
import chainlit as cl
openai.api_key = os.environ.get("OPENAI_API_KEY")
prompt = """SQL tables (and columns):
* Customers(customer_id, signup_date)
* Streaming(customer_id, video_id, watch_date, watch_minutes)
A well-written SQL query that {input}:
```"""
model_name = "text-davinci-003"
settings = {
"temperature": 0,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"stop": ["```"],
}
@cl.on_message
async def main(message: str):
fromatted_prompt = prompt.format(input=message)
response = await openai.Completion.acreate(
model=model_name, prompt=fromatted_prompt, **settings
)
content = response["choices"][0]["text"]
await cl.Message(
language="sql",
content=content,
prompt=fromatted_prompt,
llm_settings=cl.LLMSettings(model_name=model_name, **settings),
).send()
| [
"SQL tables (and columns):\n* Customers(customer_id, signup_date)\n* Streaming(customer_id, video_id, watch_date, watch_minutes)\n\nA well-written SQL query that {input}:\n```",
"SQL tables (and columns):\n* Customers(customer_id, signup_date)\n* Streaming(customer_id, video_id, watch_date, watch_minutes)\n\nA well-written SQL query that PLACEHOLDER:\n```"
] |
2024-01-10 | Harvard-University-iCommons/chainlit | src~chainlit~langchain~callbacks.py | from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult
from chainlit.config import config
from chainlit.context import context
from chainlit.message import ErrorMessage, Message
from chainlit.sync import run_sync
from chainlit.types import LLMSettings
IGNORE_LIST = ["AgentExecutor"]
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
def get_llm_settings(invocation_params: Union[Dict, None]):
if invocation_params is None:
return None
elif invocation_params["_type"] == "openai":
return LLMSettings(
model_name=invocation_params["model_name"],
stop=invocation_params["stop"],
temperature=invocation_params["temperature"],
max_tokens=invocation_params["max_tokens"],
top_p=invocation_params["top_p"],
frequency_penalty=invocation_params["frequency_penalty"],
presence_penalty=invocation_params["presence_penalty"],
)
elif invocation_params["_type"] == "openai-chat":
return LLMSettings(
model_name=invocation_params["model_name"],
stop=invocation_params["stop"],
)
else:
return None
class BaseLangchainCallbackHandler(BaseCallbackHandler):
# Keep track of the formatted prompts to display them in the prompt playground.
prompts: List[str]
# Keep track of the LLM settings for the last prompt
llm_settings: Optional[LLMSettings]
# Keep track of the call sequence, like [AgentExecutor, LLMMathChain, Calculator, ...]
sequence: List[Message]
# Keep track of the last prompt for each session
last_prompt: Union[str, None]
# Keep track of the currently streamed message for the session
stream: Union[Message, None]
# The stream we can use to stream the final answer from a chain
final_stream: Union[Message, None]
# Message at the root of the chat we should attach child messages to
root_message: Message
# Should we stream the final answer?
stream_final_answer: bool = False
# Token sequence that prefixes the answer
answer_prefix_tokens: List[str]
# Ignore white spaces and new lines when comparing answer_prefix_tokens to last tokens? (to determine if answer has been reached)
strip_tokens: bool
# Should answer prefix itself also be streamed?
stream_prefix: bool
raise_error = True
# We want to handler to be called on every message
always_verbose: bool = True
def __init__(
self,
*,
answer_prefix_tokens: Optional[List[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
stream_final_answer: bool = False,
root_message: Optional[Message] = None,
) -> None:
self.prompts = []
self.llm_settings = None
self.sequence = []
self.last_prompt = None
self.stream = None
if root_message:
self.root_message = root_message
elif root_message := context.session.root_message:
self.root_message = root_message
else:
self.root_message = Message(author=config.ui.name, content="")
run_sync(self.root_message.send())
# Langchain final answer streaming logic
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
# Our own final answer streaming logic
self.stream_final_answer = stream_final_answer
self.final_stream = None
self.has_streamed_final_answer = False
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def _compare_last_tokens(self, last_tokens: List[str]):
if last_tokens == self.answer_prefix_tokens_stripped:
# If tokens match perfectly we are done
return True
else:
# Some LLMs will consider all the tokens of the final answer as one token
# so we check if any last token contains all answer tokens
return any(
[
all(
answer_token in last_token
for answer_token in self.answer_prefix_tokens_stripped
)
for last_token in last_tokens
]
)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self._compare_last_tokens(self.last_tokens_stripped)
else:
return self._compare_last_tokens(self.last_tokens)
def start_stream(self):
author = self.get_author()
if author in IGNORE_LIST:
return
self.pop_prompt()
prompt = self.consume_last_prompt()
parent_id = self.get_last_message().parent_id
self.stream = self.create_message(
prompt=prompt, author=author, parent_id=parent_id
)
def end_stream(self):
self.stream = None
def add_in_sequence(self, message: Message):
self.sequence.append(message)
def pop_sequence(self):
if self.sequence:
return self.sequence.pop()
def add_prompt(self, prompt: str, llm_settings: Optional[LLMSettings] = None):
self.prompts.append(prompt)
self.llm_settings = llm_settings
def pop_prompt(self):
if self.prompts:
self.last_prompt = self.prompts.pop()
def consume_last_prompt(self):
last_prompt = self.last_prompt
self.last_prompt = None
return last_prompt
def get_author(self):
if self.sequence:
return self.sequence[-1].author
return config.ui.name
def get_last_message(self):
for message in reversed(self.sequence):
if message.author not in IGNORE_LIST:
return message
return self.root_message
def create_error(self, error: Exception):
if isinstance(error, InterruptedError):
return None
return ErrorMessage(str(error), author=self.get_author())
def create_message(
self,
content: str = "",
prompt: Optional[str] = None,
author: Optional[str] = None,
parent_id: Optional[str] = None,
):
if parent_id is None:
last_message = self.get_last_message()
parent_id = last_message.id
return Message(
content,
author=author or self.get_author(),
prompt=prompt,
parent_id=parent_id,
llm_settings=self.llm_settings,
)
class LangchainCallbackHandler(BaseLangchainCallbackHandler, BaseCallbackHandler):
def on_error(self, error, **_):
if error := self.create_error(error):
run_sync(error.send())
self.pop_sequence()
on_tool_error = on_error
on_llm_error = on_error
on_chain_error = on_error
def send_token(self, token: str, final: bool = False):
stream = self.final_stream if final else self.stream
if stream:
run_sync(stream.stream_token(token))
self.has_streamed_final_answer = final
def add_message(self, message: Message):
if message.author in IGNORE_LIST:
return
if self.stream:
run_sync(self.stream.send())
self.end_stream()
else:
run_sync(message.send())
# Callbacks for various events
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
invocation_params = kwargs.get("invocation_params")
llm_settings = get_llm_settings(invocation_params)
self.add_prompt(prompts[0], llm_settings)
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> None:
invocation_params = kwargs.get("invocation_params")
llm_settings = get_llm_settings(invocation_params)
prompt = "\n".join([m.content for m in messages[0]])
self.add_prompt(prompt, llm_settings)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
if not self.stream:
self.start_stream()
self.send_token(token)
if not self.stream_final_answer:
return
self.append_to_last_tokens(token)
if self.answer_reached:
if not self.final_stream:
self.final_stream = Message(author=config.ui.name, content="")
self.send_token(token, final=True)
else:
self.answer_reached = self.check_if_answer_reached()
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self.pop_prompt()
if response.llm_output is not None:
if "token_usage" in response.llm_output:
token_usage = response.llm_output["token_usage"]
if "total_tokens" in token_usage:
run_sync(
context.emitter.update_token_count(token_usage["total_tokens"])
)
if self.final_stream:
run_sync(self.final_stream.send())
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
message = self.create_message(author=serialized["id"][-1])
self.add_in_sequence(message)
self.add_message(message)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
output_key = list(outputs.keys())[0]
if output_key:
prompt = self.consume_last_prompt()
parent_id = self.get_last_message().parent_id
message = self.create_message(
outputs[output_key], prompt, parent_id=parent_id
)
self.add_message(message)
self.pop_sequence()
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
message = self.create_message(author=serialized["name"])
self.add_in_sequence(message)
self.add_message(message)
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
prompt = self.consume_last_prompt()
parent_id = self.get_last_message().parent_id
message = self.create_message(output, prompt, parent_id=parent_id)
self.add_message(message)
self.pop_sequence()
def on_text(self, text: str, **kwargs: Any) -> None:
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on agent end."""
pass
class AsyncLangchainCallbackHandler(BaseLangchainCallbackHandler, AsyncCallbackHandler):
async def on_error(self, error, **_):
if error := self.create_error(error):
await error.send()
self.pop_sequence()
on_tool_error = on_error
on_llm_error = on_error
on_chain_error = on_error
async def send_token(self, token: str, final: bool = False):
stream = self.final_stream if final else self.stream
if stream:
await stream.stream_token(token)
self.has_streamed_final_answer = final
async def add_message(self, message: Message):
if message.author in IGNORE_LIST:
return
if self.stream:
await self.stream.send()
self.end_stream()
else:
await message.send()
# Callbacks for various events
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
invocation_params = kwargs.get("invocation_params")
llm_settings = get_llm_settings(invocation_params)
self.add_prompt(prompts[0], llm_settings)
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> None:
invocation_params = kwargs.get("invocation_params")
llm_settings = get_llm_settings(invocation_params)
prompt = "\n".join([m.content for m in messages[0]])
self.add_prompt(prompt, llm_settings)
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
if not self.stream:
self.start_stream()
await self.send_token(token)
if not self.stream_final_answer:
return
self.append_to_last_tokens(token)
if self.answer_reached:
if not self.final_stream:
self.final_stream = Message(author=config.ui.name, content="")
await self.send_token(token, final=True)
else:
self.answer_reached = self.check_if_answer_reached()
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self.pop_prompt()
if response.llm_output is not None:
if "token_usage" in response.llm_output:
token_usage = response.llm_output["token_usage"]
if "total_tokens" in token_usage:
await context.emitter.update_token_count(
token_usage["total_tokens"]
)
if self.final_stream:
await self.final_stream.send()
async def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
message = self.create_message(author=serialized["id"][-1])
self.add_in_sequence(message)
await self.add_message(message)
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
output_key = list(outputs.keys())[0]
if output_key:
prompt = self.consume_last_prompt()
parent_id = self.get_last_message().parent_id
message = self.create_message(
outputs[output_key], prompt, parent_id=parent_id
)
await self.add_message(message)
self.pop_sequence()
async def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
message = self.create_message(author=serialized["name"])
self.add_in_sequence(message)
await self.add_message(message)
async def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
prompt = self.consume_last_prompt()
parent_id = self.get_last_message().parent_id
message = self.create_message(output, prompt, parent_id=parent_id)
await self.add_message(message)
self.pop_sequence()
async def on_text(self, text: str, **kwargs: Any) -> None:
pass
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
pass
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on agent end."""
pass
| [
"\n"
] |
2024-01-10 | Harvard-University-iCommons/chainlit | cypress~e2e~openai~main_sync.py | import os
import openai
import chainlit as cl
from chainlit.sync import make_async
openai.api_key = os.environ.get("OPENAI_API_KEY")
prompt = """SQL tables (and columns):
* Customers(customer_id, signup_date)
* Streaming(customer_id, video_id, watch_date, watch_minutes)
A well-written SQL query that {input}:
```"""
model_name = "text-davinci-003"
settings = {
"temperature": 0,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"stop": ["```"],
}
@cl.on_message
async def main(message: str):
fromatted_prompt = prompt.format(input=message)
response = await make_async(openai.Completion.create)(
model=model_name, prompt=fromatted_prompt, **settings
)
content = response["choices"][0]["text"]
await cl.Message(
language="sql",
content=content,
prompt=fromatted_prompt,
llm_settings=cl.LLMSettings(model_name=model_name, **settings),
).send()
| [
"SQL tables (and columns):\n* Customers(customer_id, signup_date)\n* Streaming(customer_id, video_id, watch_date, watch_minutes)\n\nA well-written SQL query that {input}:\n```",
"SQL tables (and columns):\n* Customers(customer_id, signup_date)\n* Streaming(customer_id, video_id, watch_date, watch_minutes)\n\nA well-written SQL query that PLACEHOLDER:\n```"
] |
2024-01-10 | RKP64/danswer | backend~danswer~chat~chat_llm.py | import re
from collections.abc import Callable
from collections.abc import Iterator
from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from sqlalchemy.orm import Session
from danswer.chat.chat_prompts import build_combined_query
from danswer.chat.chat_prompts import DANSWER_TOOL_NAME
from danswer.chat.chat_prompts import form_require_search_text
from danswer.chat.chat_prompts import form_tool_followup_text
from danswer.chat.chat_prompts import form_tool_less_followup_text
from danswer.chat.chat_prompts import form_tool_section_text
from danswer.chat.chat_prompts import form_user_prompt_text
from danswer.chat.chat_prompts import format_danswer_chunks_for_chat
from danswer.chat.chat_prompts import REQUIRE_DANSWER_SYSTEM_MSG
from danswer.chat.chat_prompts import YES_SEARCH
from danswer.chat.personas import build_system_text_from_persona
from danswer.chat.tools import call_tool
from danswer.chunking.models import InferenceChunk
from danswer.configs.app_configs import NUM_DOCUMENT_TOKENS_FED_TO_CHAT
from danswer.configs.chat_configs import FORCE_TOOL_PROMPT
from danswer.configs.constants import IGNORE_FOR_QA
from danswer.configs.model_configs import GEN_AI_MAX_INPUT_TOKENS
from danswer.datastores.document_index import get_default_document_index
from danswer.db.models import ChatMessage
from danswer.db.models import Persona
from danswer.db.models import User
from danswer.direct_qa.interfaces import DanswerAnswerPiece
from danswer.direct_qa.interfaces import DanswerChatModelOut
from danswer.direct_qa.qa_utils import get_usable_chunks
from danswer.llm.build import get_default_llm
from danswer.llm.llm import LLM
from danswer.llm.utils import get_default_llm_tokenizer
from danswer.llm.utils import translate_danswer_msg_to_langchain
from danswer.search.access_filters import build_access_filters_for_user
from danswer.search.semantic_search import chunks_to_search_docs
from danswer.search.semantic_search import retrieve_ranked_documents
from danswer.server.models import IndexFilters
from danswer.server.models import RetrievalDocs
from danswer.utils.logger import setup_logger
from danswer.utils.text_processing import extract_embedded_json
from danswer.utils.text_processing import has_unescaped_quote
logger = setup_logger()
LLM_CHAT_FAILURE_MSG = "The large-language-model failed to generate a valid response."
def _parse_embedded_json_streamed_response(
tokens: Iterator[str],
) -> Iterator[DanswerAnswerPiece | DanswerChatModelOut]:
final_answer = False
just_start_stream = False
model_output = ""
hold = ""
finding_end = 0
for token in tokens:
model_output += token
hold += token
if (
final_answer is False
and '"action":"finalanswer",' in model_output.lower().replace(" ", "")
):
final_answer = True
if final_answer and '"actioninput":"' in model_output.lower().replace(
" ", ""
).replace("_", ""):
if not just_start_stream:
just_start_stream = True
hold = ""
if has_unescaped_quote(hold):
finding_end += 1
hold = hold[: hold.find('"')]
if finding_end <= 1:
if finding_end == 1:
finding_end += 1
yield DanswerAnswerPiece(answer_piece=hold)
hold = ""
model_final = extract_embedded_json(model_output)
if "action" not in model_final or "action_input" not in model_final:
raise ValueError("Model did not provide all required action values")
yield DanswerChatModelOut(
model_raw=model_output,
action=model_final["action"],
action_input=model_final["action_input"],
)
return
def _find_last_index(
lst: list[int], max_prompt_tokens: int = GEN_AI_MAX_INPUT_TOKENS
) -> int:
"""From the back, find the index of the last element to include
before the list exceeds the maximum"""
running_sum = 0
last_ind = 0
for i in range(len(lst) - 1, -1, -1):
running_sum += lst[i]
if running_sum > max_prompt_tokens:
last_ind = i + 1
break
if last_ind >= len(lst):
raise ValueError("Last message alone is too large!")
return last_ind
def danswer_chat_retrieval(
query_message: ChatMessage,
history: list[ChatMessage],
llm: LLM,
filters: IndexFilters,
) -> list[InferenceChunk]:
if history:
query_combination_msgs = build_combined_query(query_message, history)
reworded_query = llm.invoke(query_combination_msgs)
else:
reworded_query = query_message.message
# Good Debug/Breakpoint
ranked_chunks, unranked_chunks = retrieve_ranked_documents(
query=reworded_query,
filters=filters,
favor_recent=False,
datastore=get_default_document_index(),
)
if not ranked_chunks:
return []
if unranked_chunks:
ranked_chunks.extend(unranked_chunks)
filtered_ranked_chunks = [
chunk for chunk in ranked_chunks if not chunk.metadata.get(IGNORE_FOR_QA)
]
# get all chunks that fit into the token limit
usable_chunks = get_usable_chunks(
chunks=filtered_ranked_chunks,
token_limit=NUM_DOCUMENT_TOKENS_FED_TO_CHAT,
)
return usable_chunks
def _drop_messages_history_overflow(
system_msg: BaseMessage | None,
system_token_count: int,
history_msgs: list[BaseMessage],
history_token_counts: list[int],
final_msg: BaseMessage,
final_msg_token_count: int,
) -> list[BaseMessage]:
"""As message history grows, messages need to be dropped starting from the furthest in the past.
The System message should be kept if at all possible and the latest user input which is inserted in the
prompt template must be included"""
if len(history_msgs) != len(history_token_counts):
# This should never happen
raise ValueError("Need exactly 1 token count per message for tracking overflow")
prompt: list[BaseMessage] = []
# Start dropping from the history if necessary
all_tokens = history_token_counts + [system_token_count, final_msg_token_count]
ind_prev_msg_start = _find_last_index(all_tokens)
if system_msg and ind_prev_msg_start <= len(history_msgs):
prompt.append(system_msg)
prompt.extend(history_msgs[ind_prev_msg_start:])
prompt.append(final_msg)
return prompt
def llm_contextless_chat_answer(
messages: list[ChatMessage],
system_text: str | None = None,
tokenizer: Callable | None = None,
) -> Iterator[str]:
try:
prompt_msgs = [translate_danswer_msg_to_langchain(msg) for msg in messages]
if system_text:
tokenizer = tokenizer or get_default_llm_tokenizer()
system_tokens = len(tokenizer(system_text))
system_msg = SystemMessage(content=system_text)
message_tokens = [msg.token_count for msg in messages] + [system_tokens]
else:
message_tokens = [msg.token_count for msg in messages]
last_msg_ind = _find_last_index(message_tokens)
remaining_user_msgs = prompt_msgs[last_msg_ind:]
if not remaining_user_msgs:
raise ValueError("Last user message is too long!")
if system_text:
all_msgs = [system_msg] + remaining_user_msgs
else:
all_msgs = remaining_user_msgs
return get_default_llm().stream(all_msgs)
except Exception as e:
logger.error(f"LLM failed to produce valid chat message, error: {e}")
return (msg for msg in [LLM_CHAT_FAILURE_MSG]) # needs to be an Iterator
def extract_citations_from_stream(
tokens: Iterator[str], links: list[str | None]
) -> Iterator[str]:
if not links:
yield from tokens
return
max_citation_num = len(links) + 1 # LLM is prompted to 1 index these
curr_segment = ""
prepend_bracket = False
for token in tokens:
# Special case of [1][ where ][ is a single token
if prepend_bracket:
curr_segment += "[" + curr_segment
prepend_bracket = False
curr_segment += token
possible_citation_pattern = r"(\[\d*$)" # [1, [, etc
possible_citation_found = re.search(possible_citation_pattern, curr_segment)
citation_pattern = r"\[(\d+)\]" # [1], [2] etc
citation_found = re.search(citation_pattern, curr_segment)
if citation_found:
numerical_value = int(citation_found.group(1))
if 1 <= numerical_value <= max_citation_num:
link = links[numerical_value - 1]
if link:
curr_segment = re.sub(r"\[", "[[", curr_segment, count=1)
curr_segment = re.sub("]", f"]]({link})", curr_segment, count=1)
# In case there's another open bracket like [1][, don't want to match this
possible_citation_found = None
# if we see "[", but haven't seen the right side, hold back - this may be a
# citation that needs to be replaced with a link
if possible_citation_found:
continue
# Special case with back to back citations [1][2]
if curr_segment and curr_segment[-1] == "[":
curr_segment = curr_segment[:-1]
prepend_bracket = True
yield curr_segment
curr_segment = ""
if curr_segment:
if prepend_bracket:
yield "[" + curr_segment
else:
yield curr_segment
def llm_contextual_chat_answer(
messages: list[ChatMessage],
persona: Persona,
user: User | None,
tokenizer: Callable,
db_session: Session,
run_search_system_text: str = REQUIRE_DANSWER_SYSTEM_MSG,
) -> Iterator[str | list[InferenceChunk]]:
last_message = messages[-1]
final_query_text = last_message.message
previous_messages = messages[:-1]
previous_msgs_as_basemessage = [
translate_danswer_msg_to_langchain(msg) for msg in previous_messages
]
try:
llm = get_default_llm()
if not final_query_text:
raise ValueError("User chat message is empty.")
# Determine if a search is necessary to answer the user query
user_req_search_text = form_require_search_text(last_message)
last_user_msg = HumanMessage(content=user_req_search_text)
previous_msg_token_counts = [msg.token_count for msg in previous_messages]
danswer_system_tokens = len(tokenizer(run_search_system_text))
last_user_msg_tokens = len(tokenizer(user_req_search_text))
need_search_prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=run_search_system_text),
system_token_count=danswer_system_tokens,
history_msgs=previous_msgs_as_basemessage,
history_token_counts=previous_msg_token_counts,
final_msg=last_user_msg,
final_msg_token_count=last_user_msg_tokens,
)
# Good Debug/Breakpoint
model_out = llm.invoke(need_search_prompt)
# Model will output "Yes Search" if search is useful
# Be a little forgiving though, if we match yes, it's good enough
retrieved_chunks: list[InferenceChunk] = []
if (YES_SEARCH.split()[0] + " ").lower() in model_out.lower():
user_acl_filters = build_access_filters_for_user(user, db_session)
doc_set_filter = [doc_set.name for doc_set in persona.document_sets] or None
final_filters = IndexFilters(
source_type=None,
document_set=doc_set_filter,
time_cutoff=None,
access_control_list=user_acl_filters,
)
retrieved_chunks = danswer_chat_retrieval(
query_message=last_message,
history=previous_messages,
llm=llm,
filters=final_filters,
)
yield retrieved_chunks
tool_result_str = format_danswer_chunks_for_chat(retrieved_chunks)
last_user_msg_text = form_tool_less_followup_text(
tool_output=tool_result_str,
query=last_message.message,
hint_text=persona.hint_text,
)
last_user_msg_tokens = len(tokenizer(last_user_msg_text))
last_user_msg = HumanMessage(content=last_user_msg_text)
else:
last_user_msg_tokens = len(tokenizer(final_query_text))
last_user_msg = HumanMessage(content=final_query_text)
system_text = build_system_text_from_persona(persona)
system_msg = SystemMessage(content=system_text) if system_text else None
system_tokens = len(tokenizer(system_text)) if system_text else 0
prompt = _drop_messages_history_overflow(
system_msg=system_msg,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage,
history_token_counts=previous_msg_token_counts,
final_msg=last_user_msg,
final_msg_token_count=last_user_msg_tokens,
)
# Good Debug/Breakpoint
tokens = llm.stream(prompt)
links = [
chunk.source_links[0] if chunk.source_links else None
for chunk in retrieved_chunks
]
yield from extract_citations_from_stream(tokens, links)
except Exception as e:
logger.error(f"LLM failed to produce valid chat message, error: {e}")
yield LLM_CHAT_FAILURE_MSG # needs to be an Iterator
def llm_tools_enabled_chat_answer(
messages: list[ChatMessage],
persona: Persona,
user: User | None,
tokenizer: Callable,
db_session: Session,
) -> Iterator[str | list[InferenceChunk]]:
retrieval_enabled = persona.retrieval_enabled
system_text = build_system_text_from_persona(persona)
hint_text = persona.hint_text
tool_text = form_tool_section_text(persona.tools, persona.retrieval_enabled)
last_message = messages[-1]
previous_messages = messages[:-1]
previous_msgs_as_basemessage = [
translate_danswer_msg_to_langchain(msg) for msg in previous_messages
]
# Failure reasons include:
# - Invalid LLM output, wrong format or wrong/missing keys
# - No "Final Answer" from model after tool calling
# - LLM times out or is otherwise unavailable
# - Calling invalid tool or tool call fails
# - Last message has more tokens than model is set to accept
# - Missing user input
try:
if not last_message.message:
raise ValueError("User chat message is empty.")
# Build the prompt using the last user message
user_text = form_user_prompt_text(
query=last_message.message,
tool_text=tool_text,
hint_text=hint_text,
)
last_user_msg = HumanMessage(content=user_text)
# Count tokens once to reuse
previous_msg_token_counts = [msg.token_count for msg in previous_messages]
system_tokens = len(tokenizer(system_text)) if system_text else 0
last_user_msg_tokens = len(tokenizer(user_text))
prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=system_text) if system_text else None,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage,
history_token_counts=previous_msg_token_counts,
final_msg=last_user_msg,
final_msg_token_count=last_user_msg_tokens,
)
llm = get_default_llm()
# Good Debug/Breakpoint
tokens = llm.stream(prompt)
final_result: DanswerChatModelOut | None = None
final_answer_streamed = False
for result in _parse_embedded_json_streamed_response(tokens):
if isinstance(result, DanswerAnswerPiece) and result.answer_piece:
yield result.answer_piece
final_answer_streamed = True
if isinstance(result, DanswerChatModelOut):
final_result = result
break
if final_answer_streamed:
return
if final_result is None:
raise RuntimeError("Model output finished without final output parsing.")
if (
retrieval_enabled
and final_result.action.lower() == DANSWER_TOOL_NAME.lower()
):
user_acl_filters = build_access_filters_for_user(user, db_session)
doc_set_filter = [doc_set.name for doc_set in persona.document_sets] or None
final_filters = IndexFilters(
source_type=None,
document_set=doc_set_filter,
time_cutoff=None,
access_control_list=user_acl_filters,
)
retrieved_chunks = danswer_chat_retrieval(
query_message=last_message,
history=previous_messages,
llm=llm,
filters=final_filters,
)
yield retrieved_chunks
tool_result_str = format_danswer_chunks_for_chat(retrieved_chunks)
else:
tool_result_str = call_tool(final_result)
# The AI's tool calling message
tool_call_msg_text = final_result.model_raw
tool_call_msg_token_count = len(tokenizer(tool_call_msg_text))
# Create the new message to use the results of the tool call
tool_followup_text = form_tool_followup_text(
tool_output=tool_result_str,
query=last_message.message,
hint_text=hint_text,
)
tool_followup_msg = HumanMessage(content=tool_followup_text)
tool_followup_tokens = len(tokenizer(tool_followup_text))
# Drop previous messages, the drop order goes: previous messages in the history,
# the last user prompt and generated intermediate messages from this recent prompt,
# the system message, then finally the tool message that was the last thing generated
follow_up_prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=system_text) if system_text else None,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage
+ [last_user_msg, AIMessage(content=tool_call_msg_text)],
history_token_counts=previous_msg_token_counts
+ [last_user_msg_tokens, tool_call_msg_token_count],
final_msg=tool_followup_msg,
final_msg_token_count=tool_followup_tokens,
)
# Good Debug/Breakpoint
tokens = llm.stream(follow_up_prompt)
for result in _parse_embedded_json_streamed_response(tokens):
if isinstance(result, DanswerAnswerPiece) and result.answer_piece:
yield result.answer_piece
final_answer_streamed = True
if final_answer_streamed is False:
raise RuntimeError("LLM did not to produce a Final Answer after tool call")
except Exception as e:
logger.error(f"LLM failed to produce valid chat message, error: {e}")
yield LLM_CHAT_FAILURE_MSG
def wrap_chat_package_in_model(
package: str | list[InferenceChunk],
) -> DanswerAnswerPiece | RetrievalDocs:
if isinstance(package, str):
return DanswerAnswerPiece(answer_piece=package)
elif isinstance(package, list):
return RetrievalDocs(top_documents=chunks_to_search_docs(package))
def llm_chat_answer(
messages: list[ChatMessage],
persona: Persona | None,
tokenizer: Callable,
user: User | None,
db_session: Session,
) -> Iterator[DanswerAnswerPiece | RetrievalDocs]:
# Common error cases to keep in mind:
# - User asks question about something long ago, due to context limit, the message is dropped
# - Tool use gives wrong/irrelevant results, model gets confused by the noise
# - Model is too weak of an LLM, fails to follow instructions
# - Bad persona design leads to confusing instructions to the model
# - Bad configurations, too small token limit, mismatched tokenizer to LLM, etc.
# No setting/persona available therefore no retrieval and no additional tools
if persona is None:
for token in llm_contextless_chat_answer(messages):
yield DanswerAnswerPiece(answer_piece=token)
# Persona is configured but with retrieval off and no tools
# therefore cannot retrieve any context so contextless
elif persona.retrieval_enabled is False and not persona.tools:
for token in llm_contextless_chat_answer(
messages, system_text=persona.system_text, tokenizer=tokenizer
):
yield DanswerAnswerPiece(answer_piece=token)
# No additional tools outside of Danswer retrieval, can use a more basic prompt
# Doesn't require tool calling output format (all LLM outputs are therefore valid)
elif persona.retrieval_enabled and not persona.tools and not FORCE_TOOL_PROMPT:
for package in llm_contextual_chat_answer(
messages=messages,
persona=persona,
tokenizer=tokenizer,
user=user,
db_session=db_session,
):
yield wrap_chat_package_in_model(package)
# Use most flexible/complex prompt format
else:
for package in llm_tools_enabled_chat_answer(
messages=messages,
persona=persona,
tokenizer=tokenizer,
user=user,
db_session=db_session,
):
yield wrap_chat_package_in_model(package)
| [] |
2024-01-10 | aditya2211/transformer-entity-tracking | bert-entity-tracking~gpt2_samples.py | #!/usr/bin/env python3
import argparse
import logging
from tqdm import trange
from tqdm import tqdm, trange
import torch
import torch.nn.functional as F
import numpy as np
import json, os
from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from pytorch_pretrained_bert.optimization_openai import OpenAIAdam
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def pre_process_datasets(encoded_datasets, input_len, cap_length):
""" Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
"""
tensor_datasets = []
for dataset in encoded_datasets:
n_batch = len(dataset)
input_ids = np.zeros((n_batch, input_len), dtype=np.int64)
mc_token_ids = np.zeros((n_batch,), dtype=np.int64)
lm_labels = np.full((n_batch, input_len), fill_value=-1, dtype=np.int64)
#mc_labels = np.zeros((n_batch,), dtype=np.int64)
for i, (story), in enumerate(dataset):
with_cont1 = story[:cap_length]
#with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
#print(with_cont1)
#print(input_ids[i, :len(with_cont1)] )
input_ids[i, :len(with_cont1)] = with_cont1
#input_ids[i, 1, :len(with_cont2)] = with_cont2
mc_token_ids[i] = len(with_cont1) - 1
#mc_token_ids[i, 1] = len(with_cont2) - 1
lm_labels[i, :len(with_cont1)-1] = with_cont1[1:]
#lm_labels[i, 1, :len(with_cont2)-1] = with_cont2[1:]
#mc_labels[i] = mc_label
all_inputs = (input_ids, mc_token_ids, lm_labels)
tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
return tensor_datasets
def load_recipes_dataset(dataset_path='./val_recipes.json'):
train_file = json.load(open(dataset_path,'r'))
output = []
for ins in train_file:
output.append(ins['story'])
return output
def top_k_logits(logits, k):
if k == 0:
return logits
values, _ = torch.topk(logits, k)
min_values = values[:, -1]
return torch.where(logits < min_values, torch.ones_like(logits, dtype=logits.dtype) * -1e10, logits)
def sample_sequence(model, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, device='cuda', sample=True):
if start_token is None:
assert context is not None, 'Specify exactly one of start_token and context!'
context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0).repeat(batch_size, 1)
else:
assert context is None, 'Specify exactly one of start_token and context!'
context = torch.full((batch_size, 1), start_token, device=device, dtype=torch.long)
prev = context
output = context
past = None
with torch.no_grad():
for i in trange(length):
logits, past = model(prev, past=past)
logits = logits[:, -1, :] / temperature
logits = top_k_logits(logits, k=top_k)
log_probs = F.softmax(logits, dim=-1)
if sample:
prev = torch.multinomial(log_probs, num_samples=1)
else:
_, prev = torch.topk(log_probs, k=1, dim=-1)
output = torch.cat((output, prev), dim=1)
return output
def tokenize_and_encode(obj):
""" Tokenize and encode a nested object """
if isinstance(obj, str):
return enc.end(obj)
elif isinstance(obj, int):
return obj
return list(tokenize_and_encode(o) for o in obj)
def run_model():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name_or_path', type=str, default='gpt2', help='pretrained model name or path to local checkpoint')
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--nsamples", type=int, default=1)
parser.add_argument("--batch_size", type=int, default=-1)
parser.add_argument("--length", type=int, default=-1)
parser.add_argument("--temperature", type=int, default=1)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument('--unconditional', action='store_true', help='If true, unconditional generation.')
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--train_dataset', type=str, default='./train_recipes.json')
parser.add_argument('--eval_dataset', type=str, default='./val_recipes.json')
#parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--num_train_epochs', type=int, default=3)
parser.add_argument('--train_batch_size', type=int, default=2)
parser.add_argument('--eval_batch_size', type=int, default=2)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=6.25e-5)
parser.add_argument('--warmup_proportion', type=float, default=0.002)
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--weight_decay', type=float, default=0.01)
parser.add_argument('--lm_coef', type=float, default=0.9)
parser.add_argument('--n_valid', type=int, default=374)
args = parser.parse_args()
print(args)
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if args.batch_size == -1:
args.batch_size = 1
assert args.nsamples % args.batch_size == 0
np.random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
model.to(device)
def tokenize_and_encode(obj):
""" Tokenize and encode a nested object """
if isinstance(obj, str):
return enc.encode(obj)
elif isinstance(obj, int):
return obj
return list(tokenize_and_encode(o) for o in obj)
val_dataset = load_recipes_dataset(args.eval_dataset)
train_dataset = load_recipes_dataset(args.train_dataset)
datasets = (train_dataset[:50000],)
encoded_datasets = tokenize_and_encode(datasets)
max_length = model.config.n_positions
print(max_length)
print(encoded_datasets[0][0])
input_length = max(len(story[:max_length]) + 2 for dataset in encoded_datasets for story in dataset)
input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model
print(input_length)
tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length)
train_tensor_dataset = tensor_datasets[0]
#eval_tensor_dataset = tensor_datasets[1]
train_data = TensorDataset(*train_tensor_dataset)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
num_train_optimization_steps = len(train_data) * args.num_train_epochs // args.train_batch_size
optimizer = OpenAIAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
max_grad_norm=args.max_grad_norm,
weight_decay=args.weight_decay,
t_total=num_train_optimization_steps)
#encoded_datasets = tokenize_and_encode(datasets)
if args.do_train:
model.train()
nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_steps = 0
tqdm_bar = tqdm(train_dataloader, desc="Training")
for step, batch in enumerate(tqdm_bar):
#print(batch)
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels = batch
loss = model(input_ids, lm_labels = lm_labels)
loss.backward()
optimizer.step()
tr_loss += loss.item()
exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()
nb_tr_steps += 1
tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, optimizer.get_lr()[0])
model.eval()
val_data_half = []
for text in val_dataset:
a = text.split()
a_half = a[:int(len(a)//2)]
val_data_half.append(" ".join(a_half))
if args.length == -1:
args.length = model.config.n_ctx // 2
elif args.length > model.config.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % model.config.n_ctx)
output_eval_file = os.path.join(args.output_dir, "val_samples.txt")
writer = open(output_eval_file, "w")
generated = 0
for rec_text in val_data_half:
context_tokens = enc.encode(rec_text)
for _ in range(args.nsamples // args.batch_size):
out = sample_sequence(
model=model, length=args.length,
context=context_tokens if not args.unconditional else None,
start_token=enc.encoder['<|endoftext|>'] if args.unconditional else None,
batch_size=args.batch_size,
temperature=args.temperature, top_k=args.top_k, device=device
)
out = out[:, len(context_tokens):].tolist()
for i in range(args.batch_size):
generated += 1
text = enc.decode(out[i])
writer.write("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
writer.write(rec_text + '\n')
writer.write(text + '\n')
writer.write("=" * 80 + '\n')
writer.close()
if __name__ == '__main__':
run_model() | [] |
2024-01-10 | aditya2211/transformer-entity-tracking | bert-entity-tracking~gpt_predictions.py | import torch, json
import numpy as np
from pytorch_pretrained_bert.modeling_openai import OpenAIGPTLMHeadModel, OpenAIGPTConfig
from pytorch_pretrained_bert.tokenization_openai import OpenAIGPTTokenizer
from pytorch_pretrained_bert.optimization_openai import OpenAIAdam
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)
# Load pre-trained model tokenizer (vocabulary)
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
recipes_data = json.load(open('/scratch/cluster/agupta/recipes_elmo.json','r'))
train_data = []
val_data = []
test_data = []
for data in recipes_data:
recipes_data[data]['para'] = []
recipes_data[data]['targets'] = np.zeros((len(recipes_data[data]['text']),len(recipes_data[data]['ingredient_list'])))
for step_num in range(len(recipes_data[data]['text'])):
recipes_data[data]['para']+=recipes_data[data]['text'][str(step_num)]
for step_num in recipes_data[data]['ingredients']:
for ing in recipes_data[data]['ingredients'][step_num]:
recipes_data[data]['targets'][int(step_num)][ing] = 1
for data in recipes_data:
if len(recipes_data[data]['ingredient_list'])!=0 and len(recipes_data[data]['para'])!=0:
if recipes_data[data]['split'] == 'train':
train_data.append(recipes_data[data])
elif recipes_data[data]['split'] == 'dev':
val_data.append(recipes_data[data])
else:
test_data.append(recipes_data[data])
test_set_ing = set()
for ins in test_data:
para_tokens = tokenizer.tokenize(" ".join(ins['para']))
test_set_ing |= set(para_tokens)
for ing in ins['ingredient_list']:
test_set_ing |= set(tokenizer.tokenize(" ".join(ing.split('_'))))
count = set()
total = set()
for ins in val_data:
for ing in ins['ingredient_list']:
ing_tokens = tokenizer.tokenize(" ".join(ing.split('_')))
flag = 0
for token in ing_tokens:
if token not in test_set_ing:
print('%s-%s'%(ing, token))
count.add(ing)
total.add(ing)
print(len(total))
print(len(count)) | [] |
2024-01-10 | aditya2211/transformer-entity-tracking | bert-entity-tracking~gpt_ppl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT model fine-tuning script.
Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py
This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset:
python run_openai_gpt.py \
--model_name openai-gpt \
--do_train \
--do_eval \
--train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\ -\ cloze_test_ALL_val.csv \
--eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\ -\ cloze_test_ALL_test.csv \
--output_dir ../log \
--train_batch_size 16 \
"""
import argparse
import os
import csv
import random
import logging
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from pytorch_pretrained_bert import OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, OpenAIAdam, cached_path
ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
""" Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
with open(dataset_path, encoding='utf_8') as f:
f = csv.reader(f)
output = []
next(f) # skip the first line
for line in tqdm(f):
output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1))
return output
def load_recipes_dataset(dataset_path='./train_recipes.json'):
train_file = json.load(open(dataset_path,'r'))
output = []
for ins in train_file:
output.append((ins['story'],))
return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
""" Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
"""
tensor_datasets = []
for dataset in encoded_datasets:
n_batch = len(dataset)
input_ids = np.zeros((n_batch, input_len), dtype=np.int64)
mc_token_ids = np.zeros((n_batch,), dtype=np.int64)
lm_labels = np.full((n_batch, input_len), fill_value=-1, dtype=np.int64)
#mc_labels = np.zeros((n_batch,), dtype=np.int64)
for i, (story), in enumerate(dataset):
with_cont1 = [start_token] + story[:cap_length] + [delimiter_token]
#with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
input_ids[i, :len(with_cont1)] = with_cont1
#input_ids[i, 1, :len(with_cont2)] = with_cont2
mc_token_ids[i] = len(with_cont1) - 1
#mc_token_ids[i, 1] = len(with_cont2) - 1
lm_labels[i, :len(with_cont1)-1] = with_cont1[1:]
#lm_labels[i, 1, :len(with_cont2)-1] = with_cont2[1:]
#mc_labels[i] = mc_label
all_inputs = (input_ids, mc_token_ids, lm_labels)
tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
return tensor_datasets
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default='openai-gpt',
help='pretrained model name')
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--train_dataset', type=str, default='')
parser.add_argument('--eval_dataset', type=str, default='')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--num_train_epochs', type=int, default=3)
parser.add_argument('--train_batch_size', type=int, default=8)
parser.add_argument('--eval_batch_size', type=int, default=16)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=6.25e-5)
parser.add_argument('--warmup_proportion', type=float, default=0.002)
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--weight_decay', type=float, default=0.01)
parser.add_argument('--lm_coef', type=float, default=0.9)
parser.add_argument('--n_valid', type=int, default=374)
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
special_tokens = ['_start_', '_delimiter_', '_classify_']
tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name, special_tokens=special_tokens)
special_tokens_ids = list(tokenizer.convert_tokens_to_ids(token) for token in special_tokens)
model = OpenAIGPTLMHeadModel.from_pretrained(args.model_name, num_special_tokens=len(special_tokens))
model.to(device)
# Load and encode the datasets
'''
if not args.train_dataset and not args.eval_dataset:
roc_stories = cached_path(ROCSTORIES_URL)
'''
def tokenize_and_encode(obj):
""" Tokenize and encode a nested object """
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
elif isinstance(obj, int):
return obj
return list(tokenize_and_encode(o) for o in obj)
logger.info("Encoding dataset...")
train_dataset = load_recipes_dataset(args.train_dataset)
#eval_dataset = load_rocstories_dataset(args.eval_dataset)
datasets = (train_dataset,)
encoded_datasets = tokenize_and_encode(datasets)
# Compute the mex input length for the Transformer
max_length = model.config.n_positions - 2
input_length = max(len(story[:max_length]) + 2 for dataset in encoded_datasets for story in dataset)
input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
train_tensor_dataset = tensor_datasets[0]
train_data = TensorDataset(*train_tensor_dataset)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
'''
eval_data = TensorDataset(*eval_tensor_dataset)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
'''
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
num_train_optimization_steps = len(train_data) * args.num_train_epochs // args.train_batch_size
optimizer = OpenAIAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
max_grad_norm=args.max_grad_norm,
weight_decay=args.weight_decay,
t_total=num_train_optimization_steps)
if args.do_train:
nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_steps = 0
tqdm_bar = tqdm(train_dataloader, desc="Training")
for step, batch in enumerate(tqdm_bar):
print(batch)
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels, mc_labels = batch
loss = model(input_ids, mc_token_ids, lm_labels = lm_labels)
print(loss)
'''
loss.backward()
optimizer.step()
tr_loss += loss.item()
exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()
nb_tr_steps += 1
tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, optimizer.get_lr()[0])
'''
# Save a trained model
'''
if args.do_train:
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
config = model.config
torch.save(model_to_save.state_dict(), output_model_file)
# Load a trained model that you have fine-tuned
model_state_dict = torch.load(output_model_file)
model = OpenAIGPTDoubleHeadsModel(config)
model.load_state_dict(model_state_dict)
model.to(device)
if args.do_eval:
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels, mc_labels = batch
with torch.no_grad():
_, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels)
_, mc_logits = model(input_ids, mc_token_ids)
mc_logits = mc_logits.detach().cpu().numpy()
mc_labels = mc_labels.to('cpu').numpy()
tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
train_loss = tr_loss/nb_tr_steps if args.do_train else None
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'train_loss': train_loss}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
'''
if __name__ == '__main__': | [] |
2024-01-10 | aditya2211/transformer-entity-tracking | gpt-entity-tracking~run_transformer_recipe_lm.py | import argparse
import os
import random
import json
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
from analysis import rocstories as rocstories_analysis
from datasets import rocstories
from model_pytorch_analysis import DoubleHeadModel, load_openai_pretrained_model
#from ..pytorch_pretrained_bert.modeling_openai import OpenAIGPTDoubleLMHeadModel, OpenAIGPTConfig
from opt import OpenAIAdam
from text_utils import TextEncoder
from utils import (encode_dataset, iter_data,
ResultLogger, make_path, encode_dataset_whole)
from loss import ClassificationLossCompute
def flatten_list(l):
flat_list = []
for sublist in l:
for item in sublist:
flat_list.append(item)
return flat_list
def transform_recipe_whole(X1, X2, X3):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 2), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
extra1 = encoder['_extra1_']
for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
#x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [delimiter] + x3[:max_len]+ [clf_token]
x12 = [start] + x2 +[delimiter]
'''
for x in x3:
x12+= x + [extra1]
'''
for x in x1:
x12+= x +[clf_token]
l12 = len(x12)
if l12 == 0:
print('O length train para\n')
continue
if l12 > 512:
continue
'''
if l12 < 5:
continue
'''
#l13 = len(x13)
xmb[i, :l12, 0] = x12
#xmb[i, 1, :l13, 0] = x13
mmb[i, :l12] = 1
#mmb[i, 1, :l13] = 1
# Position information that is added to the input embeddings in the TransformerModel
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
return xmb, mmb
def transform_recipe_whole_just_recipe(X1, X2, X3):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 2), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
extra1 = encoder['_extra1_']
for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
#x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [delimiter] + x3[:max_len]+ [clf_token]
x12 = x1
'''
for x in x3:
x12+= x + [extra1]
'''
#l12 = len(x12)
x12+=[clf_token]
l12 = len(x12)
if l12 <= 2:
print('fucked\n')
quit()
if l12 == 0:
print('O length train para\n')
quit()
if l12 > 512:
print('512+ length paragraph\n')
xmb[i, :n_ctx, 0] = x12[:n_ctx]
mmb[i, :n_ctx] = 1
continue
'''
if l12 < 7:
continue
'''
#l13 = len(x13)
xmb[i, :l12, 0] = x12
#xmb[i, 1, :l13, 0] = x13
mmb[i, :l12-1] = 1
#mmb[i, 1, :l13] = 1
# Position information that is added to the input embeddings in the TransformerModel
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
return xmb, mmb
def transform_recipe(X1, X2, X3):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 2), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
#x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [delimiter] + x3[:max_len]+ [clf_token]
x12 = [start] + x1 + [delimiter] + x3+ [delimiter] + x2 + [delimiter] + x3+ [clf_token]
#x12 = [start] + x2 + [delimiter] + x3+ [clf_token]
#x13 = [start] + x1[:max_len] + [delimiter] + x3[:max_len] + [clf_token]
l12 = len(x12)
if l12 == 0:
print('O length train para\n')
continue
if l12 > 512:
continue
#l13 = len(x13)
xmb[i, :l12, 0] = x12
#xmb[i, 1, :l13, 0] = x13
mmb[i, :l12] = 1
#mmb[i, 1, :l13] = 1
# Position information that is added to the input embeddings in the TransformerModel
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
return xmb, mmb
def transform_recipe_additional(X1, X2, X3):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 3), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
#x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [delimiter] + x3[:max_len]+ [clf_token]
x12 = [start] + x1 + [delimiter] + x2 + [delimiter] + x3+ [clf_token]
#x12 = [start] + x2 + [delimiter] + x3+ [clf_token]
#x13 = [start] + x1[:max_len] + [delimiter] + x3[:max_len] + [clf_token]
l12 = len(x12)
if l12 == 0:
print('O length train para\n')
continue
if l12 > 512:
continue
#l13 = len(x13)
xmb[i, :l12, 0] = x12
#xmb[i, 1, :l13, 0] = x13
mmb[i, :l12] = 1
#mmb[i, 1, :l13] = 1
# Position information that is added to the input embeddings in the TransformerModel
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
xmb[:,: len(x1)+2,2] = encoder['_extra1_']
xmb[:, len(x1)+2: len(x1)+2 + len(x2)+1,2] = encoder['_extra2_']
xmb[:, len(x1)+2 + len(x2)+1:len(x1)+2 + len(x2)+1 + len(x3)+1,2] = encoder['_extra3_']
return xmb, mmb
def transform_recipe3(X1, X2, X3, X1_helper, X2_helper):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 4), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, (x1, x2, x3, x4, x5), in enumerate(zip(X1, X2, X3, X1_helper, X2_helper)):
#x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [delimiter] + x3[:max_len]+ [clf_token]
x12 = [start] + x1 + [delimiter] + x2 + [delimiter] + x3+ [clf_token]
x14 = [ing_not_present_token] + x4 + [ing_not_present_token] + x5 + [ing_not_present_token] + [ing_present_token]*len(x3) + [ing_not_present_token]
assert len(x1) == len(x4)
assert len(x2) == len(x5)
#x12 = [start] + x2 + [delimiter] + x3+ [clf_token]
#x13 = [start] + x1[:max_len] + [delimiter] + x3[:max_len] + [clf_token]
l12 = len(x12)
if l12 == 0:
print('O length train para\n')
continue
if l12 > 512:
continue
#l13 = len(x13)
xmb[i, :l12, 0] = x12
xmb[i, :l12, 3] = x14
#xmb[i, 1, :l13, 0] = x13
mmb[i, :l12] = 1
#mmb[i, 1, :l13] = 1
# Position information that is added to the input embeddings in the TransformerModel
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
xmb[:,: len(x1)+2,2] = encoder['_extra1_']
xmb[:, len(x1)+2: len(x1)+2 + len(x2)+1,2] = encoder['_extra2_']
xmb[:, len(x1)+2 + len(x2)+1:len(x1)+2 + len(x2)+1 + len(x3)+1,2] = encoder['_extra3_']
return xmb, mmb
def transform_recipe_stories(X1):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 2), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, x1 in enumerate(X1):
#x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [delimiter] + x3[:max_len]+ [clf_token]
x12 = [start] + x1 + [clf_token]
#x12 = [start] + x2 + [delimiter] + x3+ [clf_token]
#x13 = [start] + x1[:max_len] + [delimiter] + x3[:max_len] + [clf_token]
l12 = len(x12)
if l12 == 0:
print('O length train para\n')
continue
if l12 > 512:
continue
#l13 = len(x13)
xmb[i, :l12, 0] = x12
#xmb[i, 1, :l13, 0] = x13
mmb[i, :l12-1] = 1
#mmb[i, 1, :l13] = 1
# Position information that is added to the input embeddings in the TransformerModel
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
return xmb, mmb
def iter_apply(Xs, Ms, Ys):
# fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
logits = []
cost = 0
with torch.no_grad():
#dh_model.eval()
for xmb, mmb, ymb in iter_data(Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(flatten_list(ymb), dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
_, clf_logits = dh_model(XMB)
#print("+"*80)
#print(clf_logits)
#print("="*80)
clf_logits *= n
#print(clf_logits)
#print("+"*80)
clf_losses = compute_loss_fct(XMB, YMB, MMB, clf_logits, only_return_losses=True)
clf_losses *= n
logits.append(clf_logits.to("cpu").numpy())
cost += clf_losses.sum().item()
logits = np.concatenate(logits, 0)
return logits, cost
def iter_apply_lm(Xs, Ms, denom):
# fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
logits = []
cost = 0
total_loss = 0
total_preds = 0
with torch.no_grad():
dh_model.eval()
for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, clf_logits = dh_model(XMB)
lm_losses = compute_loss_fct(XMB, None, MMB, None, lm_logits, only_return_losses=True)
total_loss+= lm_losses.item()
total_preds+= torch.sum(MMB[:,1:]).item()
return total_loss / total_preds
def iter_predict(Xs, Ms):
logits = []
with torch.no_grad():
dh_model.eval()
for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
_, clf_logits = dh_model(XMB)
logits.append(clf_logits.to("cpu").numpy())
logits = np.concatenate(logits, 0)
return logits
def log_lm(save_dir, desc):
print("Logging")
tr_cost = iter_apply_lm(trlmX[:374], trlmM[:374], 1.0)
va_cost = iter_apply_lm(valmX, valmM, 1.0)
logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost = va_cost)
print('%d %d %.3f %.3f' % (n_epochs, n_updates, tr_cost, va_cost))
def log(save_dir, desc):
global best_score
print("Logging")
tr_logits, tr_cost = iter_apply(trX[:n_valid], trM[:n_valid], trY[:n_valid])
va_logits, va_cost = iter_apply(vaX, vaM, vaY)
tr_cost = tr_cost / len(trY[:n_valid])
va_cost = va_cost / n_valid
tr_acc = accuracy_score(flatten_list(trY[:n_valid]), np.argmax(tr_logits, 1)) * 100.
va_acc = accuracy_score(flatten_list(vaY), np.argmax(va_logits, 1)) * 100.
logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)
print('%d %d %.3f %.3f %.2f %.2f' % (n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))
score = va_acc
print(args)
if score > best_score:
best_score = score
#path = os.path.join(save_dir, desc, 'best_params')
#torch.save(dh_model.state_dict(), make_path(path))
np.save('./train_transformer_recipes_replaced_lm{}_{}_{}_{}_{}.npy'.format(args.n_iter_lm, args.n_layer, args.n_head, args.n_embd, args.lmtotal), va_logits)
def predict(dataset, submission_dir):
filename = filenames[dataset]
pred_fn = pred_fns[dataset]
label_decoder = label_decoders[dataset]
predictions = pred_fn(iter_predict(teX, teM))
if label_decoder is not None:
predictions = [label_decoder[prediction] for prediction in predictions]
path = os.path.join(submission_dir, filename)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w') as f:
f.write('{}\t{}\n'.format('index', 'prediction'))
for i, prediction in enumerate(predictions):
f.write('{}\t{}\n'.format(i, prediction))
def run_epoch():
for xmb, mmb, ymb in iter_data(*shuffle(trlmX, trlmM, trlmM, random_state=np.random),
n_batch=n_batch_train, truncate=True, verbose=True):
global n_updates
dh_model.train()
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(flatten_list(ymb), dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, clf_logits = dh_model(XMB)
compute_loss_fct(XMB, YMB, MMB, None,lm_logits)
n_updates += 1
def run_epoch2():
for xmb, mmb, ymb in iter_data(*shuffle(trX, trM, trYt, random_state=np.random),
n_batch=n_batch_train, truncate=True, verbose=True):
global n_updates
dh_model.train()
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(flatten_list(ymb), dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, clf_logits = dh_model(XMB)
#if n_updates < 1400 or n_updates > 1500:
compute_loss_fct(XMB, YMB, MMB, clf_logits,lm_logits)
n_updates += 1
if n_updates in [ 8000, 16000, 32000] and n_epochs == 0:
log(save_dir, desc)
def run_epoch_lm():
for xmb, mmb in iter_data(*shuffle(trlmX, trlmM, random_state=np.random),
n_batch=n_batch_train, truncate=True, verbose=True):
global n_updates
dh_model.train()
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, _ = dh_model(XMB)
compute_loss_fct(XMB, MMB, lm_logits)
n_updates += 1
'''
if n_updates in [ 8000, 16000, 32000] and n_epochs == 0:
log(save_dir, desc)
'''
argmax = lambda x: np.argmax(x, 1)
pred_fns = {
'rocstories': argmax,
}
filenames = {
'rocstories': 'ROCStories.tsv',
}
label_decoders = {
'rocstories': None,
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--desc', type=str, help="Description")
parser.add_argument('--dataset', type=str)
parser.add_argument('--log_dir', type=str, default='log/')
parser.add_argument('--save_dir', type=str, default='save/')
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--submission_dir', type=str, default='submission/')
parser.add_argument('--submit', action='store_true')
parser.add_argument('--analysis', action='store_true')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--n_iter_lm', type=int, default=10)
parser.add_argument('--lmval', type=int, default=2000)
parser.add_argument('--lmtotal', type=int, default=20000)
parser.add_argument('--n_iter', type=int, default=25)
parser.add_argument('--n_batch', type=int, default=4)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--lr', type=float, default=6.25e-5)
parser.add_argument('--lr_warmup', type=float, default=0.002)
parser.add_argument('--n_ctx', type=int, default=512)
parser.add_argument('--n_embd', type=int, default=768)
parser.add_argument('--n_head', type=int, default=8)
parser.add_argument('--n_l ayer', type=int, default=8)
parser.add_argument('--embd_pdrop', type=float, default=0.1)
parser.add_argument('--attn_pdrop', type=float, default=0.1)
parser.add_argument('--resid_pdrop', type=float, default=0.1)
parser.add_argument('--clf_pdrop', type=float, default=0.1)
parser.add_argument('--l2', type=float, default=0.01)
parser.add_argument('--vector_l2', action='store_true')
parser.add_argument('--opt', type=str, default='adam')
parser.add_argument('--afn', type=str, default='gelu')
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--encoder_path', type=str, default='model/encoder_bpe_40000.json')
parser.add_argument('--bpe_path', type=str, default='model/vocab_40000.bpe')
parser.add_argument('--n_transfer', type=int, default=12)
parser.add_argument('--lm_coef', type=float, default=0.5)
parser.add_argument('--b1', type=float, default=0.9)
parser.add_argument('--b2', type=float, default=0.999)
parser.add_argument('--e', type=float, default=1e-8)
parser.add_argument('--n_valid', type=int, default=374)
args = parser.parse_args()
print(args)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Constants
submit = args.submit
dataset = args.dataset
n_ctx = args.n_ctx
save_dir = args.save_dir
desc = 'train_transformer_recipes_lm{}_{}_{}_{}_{}'.format(args.n_iter_lm, args.n_layer, args.n_head, args.n_embd, args.lmtotal)
print(desc)
data_dir = args.data_dir
log_dir = args.log_dir
submission_dir = args.submission_dir
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("device", device, "n_gpu", n_gpu)
logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc + 'replaced')), **args.__dict__)
text_encoder = TextEncoder(args.encoder_path, args.bpe_path)
encoder = text_encoder.encoder
n_vocab = len(text_encoder.encoder)
'''
print("Encoding dataset...")
((trX1, trX2, trX3, trY),
(vaX1, vaX2, vaX3, vaY),
(teX1, teX2, teX3)) = encode_dataset(*rocstories(data_dir, n_valid=args.n_valid),
encoder=text_encoder)
'''
#LM-pretraining
encoder['_start_'] = len(encoder)
encoder['_delimiter_'] = len(encoder)
encoder['_classify_'] = len(encoder)
encoder['_extra1_'] = len(encoder)
encoder['_extra2_'] = len(encoder)
encoder['_extra3_'] = len(encoder)
encoder['_ing_present_'] = len(encoder)
encoder['_ing_not_present_'] = len(encoder)
clf_token = encoder['_classify_']
ing_present_token = encoder['_ing_present_']
ing_not_present_token = encoder['_ing_not_present_']
n_special = 8
'''
train_file = json.load(open('../train_recipes.json','r'))
val_file = json.load(open('../val_recipes.json','r'))
t_passage = []
v_passage = []
for ins in train_file:
t_passage.append(ins['story'])
for ins in val_file:
v_passage.append(ins['story'])
a = (t_passage),(v_passage)
((trX1),(vaX1)) = encode_dataset(*a,encoder = text_encoder)
'''
train_lm_file = json.load(open('./train_gpt_whole_just_recipes.json','r'))
train_file = json.load(open('./test_gpt_whole.json','r'))
val_file = json.load(open('./val_gpt_whole_verbimputed.json','r'))
#print(train_file[0])
t_passage = []
t_context = []
t_ing = []
t_gold = []
tlm_passage = []
tlm_context = []
tlm_ing = []
tlm_gold = []
v_passage = []
v_context = []
v_ing = []
v_gold = []
t_all_ings = []
v_all_ings = []
print(len(train_lm_file))
print(len(train_file))
print(len(val_file))
lmval = args.lmval
lmtotal =args.lmtotal
'''
for idx, ins in enumerate(train_lm_file):
max_len = 0
tmp = " ".join(ins['text'])
curr_len = len(tmp.split())
if curr_len > max_len:
max_len = curr_len
if idx%1000 == 0:
print(idx)
print(max_len)
print('\n')
max_len = 0
'''
'''
f = open('./recipes.txt', 'w')
for ins in train_lm_file[:lmtotal]:
f.write(" ".join(ins['text'])+'\n')
f.close()
exit
'''
for ins in train_lm_file[:200]:
tmp = ""
tmp = " ".join(ins['text'])
tmp = tmp.replace('-lrb-', '(')
tmp = tmp.replace('-rrb-', ')')
if len(tmp.split()) < 5:
continue
tlm_passage.append(tmp)
tlm_ing.append(ins['ing'])
print(tlm_passage[0])
for ins in train_file[:500]:
text= [step.replace('-lrb-','(').replace('-rrb-', ')') for step in ins['text']]
t_passage.append(text)
t_ing.append(ins['ing'])
t_gold.append(ins['gold'])
t_all_ings.append(ins['all_ings'])
'''
v_passage.append(['$'])
v_ing.append('ok')
v_gold.append([0])
v_all_ings.append(['a','b'])
'''
for ins in val_file:
text= [step.replace('-lrb-','(').replace('-rrb-', ')') for step in ins['ve_$replaced_text']]
v_passage.append(text)
v_ing.append(ins['ing'])
v_gold.append(ins['gold'])
v_all_ings.append(ins['all_ings'])
#print(tlm_passage[0])
a = (tlm_passage, tlm_ing,), (t_ing,t_gold),(v_ing,v_gold)
((trlmX1, trlmX2,),(trX2, trY),(vaX2, vaY)) = encode_dataset(*a,encoder = text_encoder)
#trlmX1 = encode_dataset_whole(tlm_passage, encoder = text_encoder)
trX1 = encode_dataset_whole(t_passage, encoder = text_encoder)
vaX1 = encode_dataset_whole(v_passage, encoder = text_encoder)
print(vaX1[0][1])
trX3 = encode_dataset_whole(t_all_ings, encoder = text_encoder)
vaX3 = encode_dataset_whole(v_all_ings, encoder = text_encoder)
n_batch_train = args.n_batch * max(n_gpu, 1)
print(n_ctx)
vocab = n_vocab + n_special + n_ctx
trlmX, trlmM = transform_recipe_whole_just_recipe(trlmX1, trlmX2,trlmX2)
trlmX, valmX = trlmX[:-lmval], trlmX[-lmval:]
trlmM, valmM = trlmM[:-lmval], trlmM[-lmval:]
trX, trM = transform_recipe_whole(trX1, trX2, trX3)
vaX, vaM = transform_recipe_whole(vaX1, vaX2, vaX3)
n_train_lm = len(trlmX)
n_train = len(trY)
n_valid = len(vaY)
print(len(trlmX))
print(len(valmX))
dh_model = DoubleHeadModel(args, clf_token, 'custom', vocab, n_ctx)
#load_openai_pretrained_model(dh_model.transformer, n_ctx=n_ctx, n_special=n_special)
dh_model = nn.DataParallel(dh_model)
path = os.path.join(save_dir, desc, 'best_params')
dh_model.load_state_dict(torch.load(path))
dh_model.module.transformer.embed.weight.data[289,:] = torch.zeros([768,], dtype= torch.long)
#= torch.zeros(768)
dh_model.to(device)
dh_model.eval()
criterion = nn.CrossEntropyLoss(reduce=False)
model_opt = OpenAIAdam(dh_model.parameters(),
lr=6.25e-5,
schedule=args.lr_schedule,
warmup=.002,
t_total=1000,
b1=args.b1,
b2=args.b2,
e=args.e,
l2=args.l2,
vector_l2=args.vector_l2,
max_grad_norm=args.max_grad_norm)
compute_loss_fct = ClassificationLossCompute(criterion,
criterion,
args.lm_coef,
model_opt)
n_updates = 0
n_epochs = 0
best_score = 0
log(save_dir, desc)
| [] |
2024-01-10 | aditya2211/transformer-entity-tracking | gpt-entity-tracking~train_transformer_recipe_lm.py | import argparse
import os
import random
import json
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
from analysis import rocstories as rocstories_analysis
from datasets import rocstories
from model_pytorch_analysis import DoubleHeadModel, load_openai_pretrained_model
#from ..pytorch_pretrained_bert.modeling_openai import OpenAIGPTDoubleLMHeadModel, OpenAIGPTConfig
from opt import OpenAIAdam
from text_utils import TextEncoder
from utils import (encode_dataset, iter_data,
ResultLogger, make_path, encode_dataset_whole)
from loss import ClassificationLossCompute
def flatten_list(l):
flat_list = []
for sublist in l:
for item in sublist:
flat_list.append(item)
return flat_list
def transform_recipe_whole(X1, X2, X3):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 2), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
x12 = [start] + x2 +[delimiter]
# Uncomment to add additional ingredients as well.
'''
for x in x3:
x12+= x + [extra1]
'''
for x in x1:
x12+= x +[clf_token]
l12 = len(x12)
if l12 == 0:
print('O length train para\n')
continue
if l12 > 512:
continue
xmb[i, :l12, 0] = x12
mmb[i, :l12] = 1
# Position information that is added to the input embeddings in the TransformerModel
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
return xmb, mmb
def transform_recipe_whole_just_recipe(X1, X2, X3):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 2), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
x12 = x1
x12+=[clf_token]
l12 = len(x12)
if l12 <= 2:
print('<2 length train para\n')
quit()
if l12 > 512:
print('512+ length paragraph\n')
xmb[i, :n_ctx, 0] = x12[:n_ctx]
mmb[i, :n_ctx] = 1
continue
xmb[i, :l12, 0] = x12
mmb[i, :l12-1] = 1
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
return xmb, mmb
def transform_recipe(X1, X2, X3):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 2), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
x12 = [start] + x1 + [delimiter] + x3+ [delimiter] + x2 + [delimiter] + x3+ [clf_token]
l12 = len(x12)
if l12 == 0:
print('O length train para\n')
continue
if l12 > 512:
continue
xmb[i, :l12, 0] = x12
mmb[i, :l12] = 1
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
return xmb, mmb
def transform_recipe_additional(X1, X2, X3):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 3), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, (x1, x2, x3), in enumerate(zip(X1, X2, X3)):
x12 = [start] + x1 + [delimiter] + x2 + [delimiter] + x3+ [clf_token]
l12 = len(x12)
if l12 == 0:
print('O length train para\n')
continue
if l12 > 512:
continue
xmb[i, :l12, 0] = x12
mmb[i, :l12] = 1
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
xmb[:,: len(x1)+2,2] = encoder['_extra1_']
xmb[:, len(x1)+2: len(x1)+2 + len(x2)+1,2] = encoder['_extra2_']
xmb[:, len(x1)+2 + len(x2)+1:len(x1)+2 + len(x2)+1 + len(x3)+1,2] = encoder['_extra3_']
return xmb, mmb
def transform_recipe3(X1, X2, X3, X1_helper, X2_helper):
n_batch = len(X1)
xmb = np.zeros((n_batch, n_ctx, 4), dtype=np.int32)
mmb = np.zeros((n_batch, n_ctx), dtype=np.float32)
start = encoder['_start_']
delimiter = encoder['_delimiter_']
for i, (x1, x2, x3, x4, x5), in enumerate(zip(X1, X2, X3, X1_helper, X2_helper)):
x12 = [start] + x1 + [delimiter] + x2 + [delimiter] + x3+ [clf_token]
x14 = [ing_not_present_token] + x4 + [ing_not_present_token] + x5 + [ing_not_present_token] + [ing_present_token]*len(x3) + [ing_not_present_token]
assert len(x1) == len(x4)
assert len(x2) == len(x5)
l12 = len(x12)
if l12 == 0:
print('O length train para\n')
continue
if l12 > 512:
continue
xmb[i, :l12, 0] = x12
xmb[i, :l12, 3] = x14
mmb[i, :l12] = 1
xmb[:, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
xmb[:,: len(x1)+2,2] = encoder['_extra1_']
xmb[:, len(x1)+2: len(x1)+2 + len(x2)+1,2] = encoder['_extra2_']
xmb[:, len(x1)+2 + len(x2)+1:len(x1)+2 + len(x2)+1 + len(x3)+1,2] = encoder['_extra3_']
return xmb, mmb
def iter_apply(Xs, Ms, Ys):
logits = []
cost = 0
with torch.no_grad():
dh_model.eval()
for xmb, mmb, ymb in iter_data(Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(flatten_list(ymb), dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
_, clf_logits = dh_model(XMB)
clf_logits *= n
clf_losses = compute_loss_fct(XMB, YMB, MMB, clf_logits, only_return_losses=True)
clf_losses *= n
logits.append(clf_logits.to("cpu").numpy())
cost += clf_losses.sum().item()
logits = np.concatenate(logits, 0)
return logits, cost
def iter_apply_lm(Xs, Ms, denom):
logits = []
cost = 0
total_loss = 0
total_preds = 0
with torch.no_grad():
dh_model.eval()
for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, clf_logits = dh_model(XMB)
lm_losses = compute_loss_fct(XMB, None, MMB, None, lm_logits, only_return_losses=True)
total_loss+= lm_losses.item()
total_preds+= torch.sum(MMB[:,1:]).item()
return total_loss / total_preds
def iter_predict(Xs, Ms):
logits = []
with torch.no_grad():
dh_model.eval()
for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
n = len(xmb)
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
_, clf_logits = dh_model(XMB)
logits.append(clf_logits.to("cpu").numpy())
logits = np.concatenate(logits, 0)
return logits
def log_lm(save_dir, desc):
print("Logging...")
tr_cost = iter_apply_lm(trlmX[:374], trlmM[:374], 1.0)
va_cost = iter_apply_lm(valmX, valmM, 1.0)
logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost = va_cost)
print('%d %d %.3f %.3f' % (n_epochs, n_updates, tr_cost, va_cost))
def log_task(save_dir, desc):
global best_score
print("Logging...")
tr_logits, tr_cost = iter_apply(trX[:n_valid], trM[:n_valid], trY[:n_valid])
va_logits, va_cost = iter_apply(vaX, vaM, vaY)
tr_cost = tr_cost / len(trY[:n_valid])
va_cost = va_cost / n_valid
tr_acc = accuracy_score(flatten_list(trY[:n_valid]), np.argmax(tr_logits, 1)) * 100.
va_acc = accuracy_score(flatten_list(vaY), np.argmax(va_logits, 1)) * 100.
logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)
print('%d %d %.3f %.3f %.2f %.2f' % (n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))
score = va_acc
#if score > best_score:
best_score = score
path = os.path.join(save_dir, desc, 'best_params')
torch.save(dh_model.state_dict(), make_path(path))
def predict_task(save_dir, desc):
global best_score
print("Predicting...")
te_logits, _ = iter_apply(teX, teM, teY)
te_acc = accuracy_score(flatten_list(teY), np.argmax(te_logits, 1)) * 100.
print('%d %d %.3f' % (n_epochs, n_updates, te_acc))
np.save('./predictions_lm{}_{}_{}_{}_{}.npy'.format(args.n_iter_lm, args.n_layer, args.n_head, args.n_embd, args.lmtotal), te_logits)
def run_epoch_lm():
for xmb, mmb, ymb in iter_data(*shuffle(trlmX, trlmM, trlmM, random_state=np.random),
n_batch=n_batch_train, truncate=True, verbose=True):
global n_updates
dh_model.train()
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(flatten_list(ymb), dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, clf_logits = dh_model(XMB)
compute_loss_fct(XMB, YMB, MMB, None,lm_logits)
n_updates += 1
def run_epoch_task():
for xmb, mmb, ymb in iter_data(*shuffle(trX, trM, trYt, random_state=np.random),
n_batch=n_batch_train, truncate=True, verbose=True):
global n_updates
dh_model.train()
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
YMB = torch.tensor(flatten_list(ymb), dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, clf_logits = dh_model(XMB)
#if n_updates < 1400 or n_updates > 1500:
compute_loss_fct(XMB, YMB, MMB, clf_logits,lm_logits)
n_updates += 1
if n_updates in [ 8000, 16000, 32000] and n_epochs == 0:
log(save_dir, desc)
def run_epoch_only_lm():
for xmb, mmb in iter_data(*shuffle(trlmX, trlmM, random_state=np.random),
n_batch=n_batch_train, truncate=True, verbose=True):
global n_updates
dh_model.train()
XMB = torch.tensor(xmb, dtype=torch.long).to(device)
MMB = torch.tensor(mmb).to(device)
lm_logits, _ = dh_model(XMB)
compute_loss_fct(XMB, MMB, lm_logits)
n_updates += 1
argmax = lambda x: np.argmax(x, 1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--desc', type=str, help="Description")
parser.add_argument('--dataset', type=str)
parser.add_argument('--log_dir', type=str, default='log/')
parser.add_argument('--save_dir', type=str, default='save/')
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--submission_dir', type=str, default='submission/')
parser.add_argument('--submit', action='store_true')
parser.add_argument('--analysis', action='store_true')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--n_iter_lm', type=int, default=10)
parser.add_argument('--lmval', type=int, default=2000)
parser.add_argument('--lmtotal', type=int, default=20000)
parser.add_argument('--n_iter', type=int, default=2)
parser.add_argument('--n_batch', type=int, default=4)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--lr', type=float, default=6.25e-5)
parser.add_argument('--lr_warmup', type=float, default=0.002)
parser.add_argument('--n_ctx', type=int, default=512)
parser.add_argument('--n_embd', type=int, default=768)
parser.add_argument('--n_head', type=int, default=8)
parser.add_argument('--n_layer', type=int, default=8)
parser.add_argument('--embd_pdrop', type=float, default=0.1)
parser.add_argument('--attn_pdrop', type=float, default=0.1)
parser.add_argument('--resid_pdrop', type=float, default=0.1)
parser.add_argument('--clf_pdrop', type=float, default=0.1)
parser.add_argument('--l2', type=float, default=0.01)
parser.add_argument('--vector_l2', action='store_true')
parser.add_argument('--opt', type=str, default='adam')
parser.add_argument('--afn', type=str, default='gelu')
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--encoder_path', type=str, default='model/encoder_bpe_40000.json')
parser.add_argument('--bpe_path', type=str, default='model/vocab_40000.bpe')
parser.add_argument('--n_transfer', type=int, default=12)
parser.add_argument('--lm_coef', type=float, default=0.5)
parser.add_argument('--b1', type=float, default=0.9)
parser.add_argument('--b2', type=float, default=0.999)
parser.add_argument('--e', type=float, default=1e-8)
parser.add_argument('--n_valid', type=int, default=374)
args = parser.parse_args()
print(args)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Constants.
submit = args.submit
dataset = args.dataset
n_ctx = args.n_ctx
save_dir = args.save_dir
desc = 'train_transformer_recipes_nounverb_lm{}_{}_{}_{}_{}'.format(args.n_iter_lm, args.n_layer, args.n_head, args.n_embd, args.lmtotal)
data_dir = args.data_dir
log_dir = args.log_dir
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("device", device, "n_gpu", n_gpu)
logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc)), **args.__dict__)
text_encoder = TextEncoder(args.encoder_path, args.bpe_path)
encoder = text_encoder.encoder
n_vocab = len(text_encoder.encoder)
# Special tokens for the task.
encoder['_start_'] = len(encoder)
encoder['_delimiter_'] = len(encoder)
encoder['_classify_'] = len(encoder)
encoder['_extra1_'] = len(encoder)
encoder['_extra2_'] = len(encoder)
encoder['_extra3_'] = len(encoder)
encoder['_ing_present_'] = len(encoder)
encoder['_ing_not_present_'] = len(encoder)
n_special = 8
clf_token = encoder['_classify_']
ing_present_token = encoder['_ing_present_']
ing_not_present_token = encoder['_ing_not_present_']
train_lm_file = json.load(open('./dataset/train_gpt_whole_just_recipes.json','r'))
train_file = json.load(open('.dataset/train_recipes_task.json','r'))
val_file = json.load(open('./dataset/test_recipes_task.json','r'))
print("Total # of recipes for lm fine-tuning: ", len(train_file))
print("Total # of train/test points: {}/{}".format(len(train_file), len(val_file)))
lmval = args.lmval
lmtotal =args.lmtotal
taskval = args.n_valid
n_valid = args.n_valid
tlm_passage = []
tlm_ing = []
for ins in train_lm_file[:lmtotal]:
curr_recipe = " ".join(ins['text']).replace('-lrb-', '(').replace('-rrb-', ')')
if len(curr_recipe.split()) < 5:
continue
tlm_passage.append(curr_recipe)
tlm_ing.append(ins['ing'])
print(tlm_passage[0])
t_passage = []
t_ing = []
t_gold = []
t_all_ings = []
for ins in train_file:
text= [step.replace('-lrb-','(').replace('-rrb-', ')') for step in ins['text']]
t_passage.append(text)
t_ing.append(ins['ing'])
t_gold.append(ins['gold'])
t_all_ings.append(ins['all_ings'])
v_passage = []
v_ing = []
v_gold = []
v_all_ings = []
for ins in val_file:
text= [step.replace('-lrb-','(').replace('-rrb-', ')') for step in ins['text']]
v_passage.append(text)
v_ing.append(ins['ing'])
v_gold.append(ins['gold'])
v_all_ings.append(ins['all_ings'])
dataset = (tlm_passage, tlm_ing,), (t_ing,t_gold),(v_ing,v_gold)
((trlmX1, trlmX2,),(trX2, trY),(teX2, teY)) = encode_dataset(*dataset,encoder = text_encoder)
trX1 = encode_dataset_whole(t_passage, encoder = text_encoder)
teX1 = encode_dataset_whole(v_passage, encoder = text_encoder)
trX3 = encode_dataset_whole(t_all_ings, encoder = text_encoder)
teX3 = encode_dataset_whole(v_all_ings, encoder = text_encoder)
print(n_ctx)
vocab = n_vocab + n_special + n_ctx
trlmX, trlmM = transform_recipe_whole_just_recipe(trlmX1, trlmX2, trlmX2)
trlmX, valmX = trlmX[:-lmval], trlmX[-lmval:]
trlmM, valmM = trlmM[:-lmval], trlmM[-lmval:]
trX, trM = transform_recipe_whole(trX1, trX2, trX3)
trX, vaX = trX[:-taskval], trX[-taskval:]
trM, vaM = trM[:-taskval], trM[-taskval:]
trY, vaY = trY[:-taskval], trY[-taskval:]
teX, teM = transform_recipe_whole(teX1, teX2, teX3)
n_train_lm = len(trlmX)
n_train = len(trY)
n_test = len(vaY)
print(len(trlmX))
print(len(valmX))
print("Number of training/val points for LM finetuning: {}/{}".format(len(trlmX), len(valmX)))
dh_model = DoubleHeadModel(args, clf_token, 'custom', vocab, n_ctx)
load_openai_pretrained_model(dh_model.transformer, n_ctx=n_ctx, n_special=n_special)
dh_model.to(device)
dh_model = nn.DataParallel(dh_model)
n_updates = 0
n_epochs = 0
trYt = trY
best_score = 0
n_batch_train = args.n_batch * max(n_gpu, 1)
n_updates_total = (n_train_lm // n_batch_train) * args.n_iter_lm
print(n_updates_total)
criterion = nn.CrossEntropyLoss(reduce=False)
model_opt = OpenAIAdam(dh_model.parameters(),
lr=6.25e-5,
schedule=args.lr_schedule,
warmup=.002,
t_total=n_updates_total,
b1=args.b1,
b2=args.b2,
e=args.e,
l2=args.l2,
vector_l2=args.vector_l2,
max_grad_norm=args.max_grad_norm)
compute_loss_fct = ClassificationLossCompute(criterion,
criterion,
args.lm_coef,
model_opt)
for i in range(args.n_iter_lm):
print("running lm fine-tuning epoch: ", i)
run_epoch_lm()
n_epochs += 1
log_lm(save_dir, desc)
n_updates = 0
n_epochs = 0
n_updates_total = (n_train // n_batch_train) * args.n_iter
model_opt = OpenAIAdam(dh_model.parameters(),
lr=6.25e-5,
schedule='warmup_linear',
warmup=.002,
t_total=n_updates_total,
b1=args.b1,
b2=args.b2,
e=args.e,
l2=args.l2,
vector_l2=args.vector_l2,
max_grad_norm=args.max_grad_norm)
compute_loss_fct = ClassificationLossCompute(criterion,
criterion,
args.lm_coef,
model_opt)
for i in range(args.n_iter):
print("running task fine-tuning epoch", i)
run_epoch_task()
n_epochs += 1
log_task(save_dir, desc)
predict_task(save_dir, desc) | [] |
2024-01-10 | aditya2211/transformer-entity-tracking | bert-entity-tracking~gpt_ppl_recs_gpt.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT model fine-tuning script.
Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py
This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset:
python run_openai_gpt.py \
--model_name openai-gpt \
--do_train \
--do_eval \
--train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\ -\ cloze_test_ALL_val.csv \
--eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\ -\ cloze_test_ALL_test.csv \
--output_dir ../log \
--train_batch_size 16 \
"""
import argparse
import os
import csv
import random
import logging
from tqdm import tqdm, trange
import json
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from pytorch_pretrained_bert.modeling_openai import OpenAIGPTLMHeadModel, OpenAIGPTConfig
from pytorch_pretrained_bert.tokenization_openai import OpenAIGPTTokenizer
from pytorch_pretrained_bert.optimization_openai import OpenAIAdam
ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
""" Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
with open(dataset_path, encoding='utf_8') as f:
f = csv.reader(f)
output = []
next(f) # skip the first line
for line in tqdm(f):
output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1))
return output
def load_recipes_dataset(dataset_path='./val_recipes.json'):
train_file = json.load(open(dataset_path,'r'))
output = []
for ins in train_file:
output.append(ins['story'])
return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
""" Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
"""
tensor_datasets = []
for dataset in encoded_datasets:
n_batch = len(dataset)
input_ids = np.zeros((n_batch, input_len), dtype=np.int64)
mc_token_ids = np.zeros((n_batch,), dtype=np.int64)
lm_labels = np.full((n_batch, input_len), fill_value=-1, dtype=np.int64)
#mc_labels = np.zeros((n_batch,), dtype=np.int64)
for i, (story), in enumerate(dataset):
with_cont1 = [start_token] + story[:cap_length] + [delimiter_token]
#with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
#print(with_cont1)
#print(input_ids[i, :len(with_cont1)] )
input_ids[i, :len(with_cont1)] = with_cont1
#input_ids[i, 1, :len(with_cont2)] = with_cont2
mc_token_ids[i] = len(with_cont1) - 1
#mc_token_ids[i, 1] = len(with_cont2) - 1
lm_labels[i, :len(with_cont1)-1] = with_cont1[1:]
#lm_labels[i, 1, :len(with_cont2)-1] = with_cont2[1:]
#mc_labels[i] = mc_label
all_inputs = (input_ids, mc_token_ids, lm_labels)
tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
return tensor_datasets
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default='openai-gpt',
help='pretrained model name')
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--train_dataset', type=str, default='./train_recipes.json')
parser.add_argument('--eval_dataset', type=str, default='./val_recipes.json')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--num_train_epochs', type=int, default=10)
parser.add_argument('--train_batch_size', type=int, default=2)
parser.add_argument('--eval_batch_size', type=int, default=2)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=6.25e-6)
parser.add_argument('--warmup_proportion', type=float, default=0.1)
parser.add_argument('--lr_schedule', type=str, default='warmup_cosine')
parser.add_argument('--weight_decay', type=float, default=0.01)
parser.add_argument('--lm_coef', type=float, default=0.9)
parser.add_argument('--n_valid', type=int, default=374)
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
special_tokens = ['_start_', '_delimiter_', '_classify_']
tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name, special_tokens=special_tokens)
special_tokens_ids = list(tokenizer.convert_tokens_to_ids(token) for token in special_tokens)
config = OpenAIGPTConfig()
#model = OpenAIGPTLMHeadModel.from_pretrained(args.model_name, num_special_tokens=len(special_tokens))
model = OpenAIGPTLMHeadModel(config)
model.set_num_special_tokens(len(special_tokens))
model.to(device)
# Load and encode the datasets
'''
if not args.train_dataset and not args.eval_dataset:
roc_stories = cached_path(ROCSTORIES_URL)
'''
def tokenize_and_encode(obj):
""" Tokenize and encode a nested object """
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
elif isinstance(obj, int):
return obj
return list(tokenize_and_encode(o) for o in obj)
logger.info("Encoding dataset...")
train_dataset = load_recipes_dataset(args.train_dataset)
train_dataset = train_dataset
#remove extra length train data
print(train_dataset[0])
eval_dataset = load_recipes_dataset(args.eval_dataset)
print(len(eval_dataset))
datasets = (train_dataset, eval_dataset)
encoded_datasets = tokenize_and_encode(datasets)
selected_train_data = []
print(len(encoded_datasets[0]))
for ins in encoded_datasets[0]:
if len(ins)<=510:
selected_train_data.append(ins)
encoded_datasets[0] = selected_train_data
print(len(encoded_datasets[0]))
# Compute the mex input length for the Transformer
max_length = model.config.n_positions - 2
print(max_length)
print(encoded_datasets[0][0])
input_length = max(len(story[:max_length]) + 2 for dataset in encoded_datasets for story in dataset)
input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model
print(input_length)
# Prepare inputs tensors and dataloaders
tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
train_tensor_dataset = tensor_datasets[0]
eval_tensor_dataset = tensor_datasets[1]
train_data = TensorDataset(*train_tensor_dataset)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
eval_data = TensorDataset(*eval_tensor_dataset)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
num_train_optimization_steps = len(train_data) * args.num_train_epochs // args.train_batch_size
optimizer = OpenAIAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
max_grad_norm=args.max_grad_norm,
weight_decay=args.weight_decay,
t_total=num_train_optimization_steps)
print(.002*num_train_optimization_steps)
total_loss = 0
total_length = 0
print(model.transformer.h)
'''
if args.do_train:
nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.eval()
tr_loss = 0
nb_tr_steps = 0
tqdm_bar = tqdm(train_dataloader, desc="Pre LM training train data ppl")
for step, batch in enumerate(tqdm_bar):
#print(batch)
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels = batch
loss = model(input_ids, lm_labels = lm_labels)
lengths = mc_token_ids.to('cpu').numpy()
#print(np.sum(lengths))
total_loss+=loss.item()*np.sum(lengths)
total_length+=np.sum(lengths)
print(total_loss/total_length)
total_loss = 0
total_length = 0
if args.do_train:
nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.eval()
tr_loss = 0
nb_tr_steps = 0
tqdm_bar = tqdm(eval_dataloader, desc="Pre LM training val data ppl")
for step, batch in enumerate(tqdm_bar):
#print(batch)
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels = batch
loss = model(input_ids, lm_labels = lm_labels)
lengths = mc_token_ids.to('cpu').numpy()
#print(np.sum(lengths))
total_loss+=loss.item()*np.sum(lengths)
total_length+=np.sum(lengths)
print(total_loss/total_length)
'''
if args.do_train:
print("=" * 80 + '\n')
nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_steps = 0
tqdm_bar = tqdm(train_dataloader, desc="Training")
for step, batch in enumerate(tqdm_bar):
#print(batch)
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels = batch
loss = model(input_ids, lm_labels = lm_labels)
loss.backward()
optimizer.step()
tr_loss += loss.item()
exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()
nb_tr_steps += 1
tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, optimizer.get_lr()[0])
total_loss = 0
total_length = 0
if args.do_train:
nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.eval()
tr_loss = 0
nb_tr_steps = 0
tqdm_bar = tqdm(train_dataloader, desc="Post LM training train data ppl")
for step, batch in enumerate(tqdm_bar):
#print(batch)
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels = batch
loss = model(input_ids, lm_labels = lm_labels)
lengths = mc_token_ids.to('cpu').numpy()
#print(np.sum(lengths))
total_loss+=loss.item()*np.sum(lengths)
total_length+=np.sum(lengths)
print(total_loss/total_length)
total_loss = 0
total_length = 0
if args.do_train:
nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.eval()
tr_loss = 0
nb_tr_steps = 0
tqdm_bar = tqdm(eval_dataloader, desc="Post LM training val data ppl")
for step, batch in enumerate(tqdm_bar):
#print(batch)
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels = batch
loss = model(input_ids, lm_labels = lm_labels)
lengths = mc_token_ids.to('cpu').numpy()
#print(np.sum(lengths))
total_loss+=loss.item()*np.sum(lengths)
total_length+=np.sum(lengths)
print(total_loss/total_length)
print("=" * 80 + '\n')
# Save a trained model
'''
if args.do_train:
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
config = model.config
torch.save(model_to_save.state_dict(), output_model_file)
# Load a trained model that you have fine-tuned
model_state_dict = torch.load(output_model_file)
model = OpenAIGPTDoubleHeadsModel(config)
model.load_state_dict(model_state_dict)
model.to(device)
if args.do_eval:
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels, mc_labels = batch
with torch.no_grad():
_, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels)
_, mc_logits = model(input_ids, mc_token_ids)
mc_logits = mc_logits.detach().cpu().numpy()
mc_labels = mc_labels.to('cpu').numpy()
tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
train_loss = tr_loss/nb_tr_steps if args.do_train else None
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'train_loss': train_loss}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
'''
if __name__ == '__main__':
main() | [] |
2024-01-10 | monarch-initiative/curate-gpt | tests~extract~test_extractor.py | from typing import List
import pytest
from curate_gpt.extract.basic_extractor import BasicExtractor
from curate_gpt.extract.extractor import AnnotatedObject
from curate_gpt.extract.openai_extractor import OpenAIExtractor
from curate_gpt.extract.recursive_extractor import RecursiveExtractor
from curate_gpt.store.schema_proxy import SchemaProxy
from linkml_runtime.utils.schema_builder import SchemaBuilder
from pydantic import BaseModel
class Occupation(BaseModel):
category: str
current: bool
class Person(BaseModel):
name: str
age: int
occupations: List[Occupation]
@pytest.fixture
def schema_manager() -> SchemaProxy:
sb = SchemaBuilder("test")
sb.add_class("Person", slots=["name", "age", "occupations"])
sb.add_class("Occupation", slots=["category", "current"])
sb.add_slot("age", range="integer", description="age in years", replace_if_present=True)
sb.add_slot(
"occupations",
range="Occupation",
description="job held, and is it current",
multivalued=True,
replace_if_present=True,
)
sb.add_slot("current", range="boolean", replace_if_present=True)
sb.add_defaults()
sm = SchemaProxy(sb.schema)
sm.pydantic_root_model = Person
return sm
@pytest.mark.parametrize(
"extractor_type,kwargs,num_examples",
[
(RecursiveExtractor, {}, 5),
(RecursiveExtractor, {}, 99),
(OpenAIExtractor, {}, 99),
(OpenAIExtractor, {}, 0),
(OpenAIExtractor, {"examples_as_functions": True}, 99),
(BasicExtractor, {}, 99),
],
)
def test_extract(extractor_type, kwargs, num_examples, schema_manager):
extractor = extractor_type()
extractor.schema_proxy = schema_manager
examples = [
AnnotatedObject(
object={
"name": "John Doe",
"age": 42,
"occupations": [{"category": "Software Developer", "current": True}],
},
annotations={
"text": "His name is John doe and he is 42 years old. He currently develops software for a living."
},
),
AnnotatedObject(
object={
"name": "Eleonore Li",
"age": 27,
"occupations": [{"category": "Physicist", "current": True}],
},
annotations={"text": "Eleonore Li is a 27 year old rising star Physicist."},
),
AnnotatedObject(
object={
"name": "Lois Lane",
"age": 24,
"occupations": [{"category": "Reporter", "current": True}],
},
annotations={"text": "Lois Lane is a reporter for the daily planet. She is 24."},
),
AnnotatedObject(
object={
"name": "Sandy Sands",
"age": 33,
"occupations": [
{"category": "Costume Designer", "current": False},
{"category": "Architect", "current": True},
],
},
annotations={
"text": "the 33 year old Sandy Sands used to design costumes, now they are an architect."
},
),
]
successes = []
failures = []
for i in range(0, len(examples)):
print(f"ITERATION {i} // {extractor_type}")
test = examples[i]
train = examples[:i] + examples[i + 1 :]
result = extractor.extract(
target_class="Person", examples=train[0:num_examples], text=test.text, **kwargs
)
print("RESULTS:")
print(result)
if result.object == test.object:
print("SUCCESS")
successes.append(result)
else:
print(f"FAILURE: expected={test.object}")
failures.append(result)
print(
f"{extractor_type} {kwargs} {num_examples} SUCCESSES: {len(successes)} FAILURES: {len(failures)}"
)
@pytest.mark.parametrize(
"input,output",
[
('{"x": 1}', {"x": 1}),
('blah {"x": 1}', {"x": 1}),
('blah {"x": 1} blah', {"x": 1}),
('blah {"x": {"y": 1}} blah', {"x": {"y": 1}}),
("{", {}),
("foo", {}),
],
)
def test_deserialize(input, output):
"""
Test that the basic extractor can deserialize a json object.
Ensures that is capable of handling some of the prefixual junk that
some models provide
"""
ex = BasicExtractor()
ao = ex.deserialize(input)
assert ao.object == output
| [] |
2024-01-10 | monarch-initiative/curate-gpt | src~curate_gpt~extract~openai_extractor.py | """Extractor that uses OpenAI functions."""
import json
import logging
from dataclasses import dataclass
from typing import List
import openai
import yaml
from curate_gpt.extract.extractor import AnnotatedObject, Extractor
FUNC_NAME = "extract_data"
logger = logging.getLogger(__name__)
@dataclass
class OpenAIExtractor(Extractor):
"""
Extractor that uses OpenAI functions.
"""
max_tokens: int = 3000
model: str = "gpt-4"
# conversation: List[Dict[str, Any]] = None
# conversation_mode: bool = False
def functions(self):
return [
{
"name": FUNC_NAME,
"description": "A n ontology term",
"parameters": self.schema_proxy.json_schema(),
},
]
def extract(
self,
text: str,
target_class: str,
examples: List[AnnotatedObject] = None,
examples_as_functions=False,
conversation=None,
**kwargs,
) -> AnnotatedObject:
messages = [
{
"role": "system",
"content": f"You are system that returns {target_class} object in JSON.",
},
]
for example in examples:
ex_text = example.text
ex_object = example.object
print(f"EXAMPLE = {ex_text}")
messages.append(
{
"role": "user",
"content": f"make terms for {ex_text}",
}
)
if not examples_as_functions:
messages.append(
{
"role": "assistant",
"content": None,
"function_call": {
"name": FUNC_NAME,
"arguments": json.dumps(ex_object),
},
},
)
else:
messages.append(
{
"role": "function",
"name": FUNC_NAME,
"content": json.dumps(ex_object),
},
)
if conversation:
messages.extend(conversation)
# content = f"make terms for {text}"
content = text
messages.append(
{
"role": "user",
"content": content,
}
)
print(yaml.dump(messages))
response = openai.ChatCompletion.create(
model=self.model,
functions=self.functions(),
messages=messages,
max_tokens=self.max_tokens,
)
logger.debug(f"RESPONSE = {response}")
print(response)
choice = response.choices[0]
message = choice["message"]
if "function_call" not in message:
if self.raise_error_if_unparsable:
raise ValueError("No function call in response")
r = "{}"
else:
r = message["function_call"]["arguments"]
try:
obj = json.loads(r)
if conversation:
conversation.append(messages[-1])
conversation.append({"role": "function", "name": FUNC_NAME, "content": r})
except json.decoder.JSONDecodeError as e:
if self.raise_error_if_unparsable:
raise e
obj = {}
return AnnotatedObject(object=obj)
| [
"None",
"{}",
"You are system that returns PLACEHOLDER object in JSON.",
"make terms for PLACEHOLDER"
] |
2024-01-10 | hugomsm/agenta | examples~test_apps~MultiChoiceParamTestApp~mytest.py | import agenta as ag
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
default_prompt = "What is a good name for a company that makes {product}?"
ag.init()
ag.config.default(
prompt_template=ag.TextParam(default_prompt),
model=ag.MultipleChoiceParam(1, [1, 2]),
)
@ag.entrypoint
def completion(
product: str,
) -> str:
llm = OpenAI(model=ag.config.model)
prompt = PromptTemplate(
input_variables=["product"],
template=ag.config.prompt_template,
)
chain = LLMChain(llm=llm, prompt=prompt)
output = chain.run(product=product)
return output
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | xingfanxia/ChuanhuChatGPT | modules~models~base_model.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
import logging
import json
import commentjson as cjson
import os
import sys
import requests
import urllib3
import traceback
import pathlib
import shutil
from tqdm import tqdm
import colorama
from duckduckgo_search import DDGS
from itertools import islice
import asyncio
import aiohttp
from enum import Enum
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks.manager import BaseCallbackManager
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.input import print_text
from langchain.schema import AgentAction, AgentFinish, LLMResult
from threading import Thread, Condition
from collections import deque
from langchain.chat_models.base import BaseChatModel
from langchain.schema import HumanMessage, AIMessage, SystemMessage, BaseMessage
from ..presets import *
from ..index_func import *
from ..utils import *
from .. import shared
from ..config import retrieve_proxy
class CallbackToIterator:
def __init__(self):
self.queue = deque()
self.cond = Condition()
self.finished = False
def callback(self, result):
with self.cond:
self.queue.append(result)
self.cond.notify() # Wake up the generator.
def __iter__(self):
return self
def __next__(self):
with self.cond:
# Wait for a value to be added to the queue.
while not self.queue and not self.finished:
self.cond.wait()
if not self.queue:
raise StopIteration()
return self.queue.popleft()
def finish(self):
with self.cond:
self.finished = True
self.cond.notify() # Wake up the generator if it's waiting.
def get_action_description(text):
match = re.search('```(.*?)```', text, re.S)
json_text = match.group(1)
# 把json转化为python字典
json_dict = json.loads(json_text)
# 提取'action'和'action_input'的值
action_name = json_dict['action']
action_input = json_dict['action_input']
if action_name != "Final Answer":
return f'<!-- S O PREFIX --><p class="agent-prefix">{action_name}: {action_input}\n</p><!-- E O PREFIX -->'
else:
return ""
class ChuanhuCallbackHandler(BaseCallbackHandler):
def __init__(self, callback) -> None:
"""Initialize callback handler."""
self.callback = callback
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
self.callback(get_action_description(action.log))
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
# if observation_prefix is not None:
# self.callback(f"\n\n{observation_prefix}")
# self.callback(output)
# if llm_prefix is not None:
# self.callback(f"\n\n{llm_prefix}")
if observation_prefix is not None:
logging.info(observation_prefix)
self.callback(output)
if llm_prefix is not None:
logging.info(llm_prefix)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
# self.callback(f"{finish.log}\n\n")
logging.info(finish.log)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
self.callback(token)
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any) -> Any:
"""Run when a chat model starts running."""
pass
class ModelType(Enum):
Unknown = -1
OpenAI = 0
ChatGLM = 1
LLaMA = 2
XMChat = 3
StableLM = 4
MOSS = 5
YuanAI = 6
Minimax = 7
ChuanhuAgent = 8
GooglePaLM = 9
LangchainChat = 10
Midjourney = 11
Spark = 12
OpenAIInstruct = 13
Claude = 14
Qwen = 15
OpenAIVision = 16
ERNIE = 17
@classmethod
def get_type(cls, model_name: str):
model_type = None
model_name_lower = model_name.lower()
if "gpt" in model_name_lower:
if "instruct" in model_name_lower:
model_type = ModelType.OpenAIInstruct
elif "vision" in model_name_lower:
model_type = ModelType.OpenAIVision
else:
model_type = ModelType.OpenAI
elif "chatglm" in model_name_lower:
model_type = ModelType.ChatGLM
elif "llama" in model_name_lower or "alpaca" in model_name_lower:
model_type = ModelType.LLaMA
elif "xmchat" in model_name_lower:
model_type = ModelType.XMChat
elif "stablelm" in model_name_lower:
model_type = ModelType.StableLM
elif "moss" in model_name_lower:
model_type = ModelType.MOSS
elif "yuanai" in model_name_lower:
model_type = ModelType.YuanAI
elif "minimax" in model_name_lower:
model_type = ModelType.Minimax
elif "川虎助理" in model_name_lower:
model_type = ModelType.ChuanhuAgent
elif "palm" in model_name_lower:
model_type = ModelType.GooglePaLM
elif "midjourney" in model_name_lower:
model_type = ModelType.Midjourney
elif "azure" in model_name_lower or "api" in model_name_lower:
model_type = ModelType.LangchainChat
elif "星火大模型" in model_name_lower:
model_type = ModelType.Spark
elif "claude" in model_name_lower:
model_type = ModelType.Claude
elif "qwen" in model_name_lower:
model_type = ModelType.Qwen
elif "ernie" in model_name_lower:
model_type = ModelType.ERNIE
else:
model_type = ModelType.LLaMA
return model_type
class BaseLLMModel:
def __init__(
self,
model_name,
system_prompt=INITIAL_SYSTEM_PROMPT,
temperature=1.0,
top_p=1.0,
n_choices=1,
stop=None,
max_generation_token=None,
presence_penalty=0,
frequency_penalty=0,
logit_bias=None,
user="",
) -> None:
self.history = []
self.all_token_counts = []
if model_name in MODEL_METADATA:
self.model_name = MODEL_METADATA[model_name]["model_name"]
else:
self.model_name = model_name
self.model_type = ModelType.get_type(model_name)
try:
self.token_upper_limit = MODEL_METADATA[model_name]["token_limit"]
except KeyError:
self.token_upper_limit = DEFAULT_TOKEN_LIMIT
self.interrupted = False
self.system_prompt = system_prompt
self.api_key = None
self.need_api_key = False
self.single_turn = False
self.history_file_path = get_first_history_name(user)
self.temperature = temperature
self.top_p = top_p
self.n_choices = n_choices
self.stop_sequence = stop
self.max_generation_token = None
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.logit_bias = logit_bias
self.user_identifier = user
def get_answer_stream_iter(self):
"""stream predict, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
should return a generator, each time give the next word (str) in the answer
"""
logging.warning(
"stream predict not implemented, using at once predict instead")
response, _ = self.get_answer_at_once()
yield response
def get_answer_at_once(self):
"""predict at once, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
Should return:
the answer (str)
total token count (int)
"""
logging.warning(
"at once predict not implemented, using stream predict instead")
response_iter = self.get_answer_stream_iter()
count = 0
for response in response_iter:
count += 1
return response, sum(self.all_token_counts) + count
def billing_info(self):
"""get billing infomation, inplement if needed"""
# logging.warning("billing info not implemented, using default")
return BILLING_NOT_APPLICABLE_MSG
def count_token(self, user_input):
"""get token count from input, implement if needed"""
# logging.warning("token count not implemented, using default")
return len(user_input)
def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
def get_return_value():
return chatbot, status_text
status_text = i18n("开始实时传输回答……")
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
logging.debug(f"输入token计数: {user_token_count}")
stream_iter = self.get_answer_stream_iter()
if display_append:
display_append = '\n\n<hr class="append-display no-in-raw" />' + display_append
partial_text = ""
token_increment = 1
for partial_text in stream_iter:
if type(partial_text) == tuple:
partial_text, token_increment = partial_text
chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
self.all_token_counts[-1] += token_increment
status_text = self.token_message()
yield get_return_value()
if self.interrupted:
self.recover()
break
self.history.append(construct_assistant(partial_text))
def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
if fake_input is not None:
user_token_count = self.count_token(fake_input)
else:
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
ai_reply, total_token_count = self.get_answer_at_once()
self.history.append(construct_assistant(ai_reply))
if fake_input is not None:
self.history[-2] = construct_user(fake_input)
chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
if fake_input is not None:
self.all_token_counts[-1] += count_token(
construct_assistant(ai_reply))
else:
self.all_token_counts[-1] = total_token_count - \
sum(self.all_token_counts)
status_text = self.token_message()
return chatbot, status_text
def handle_file_upload(self, files, chatbot, language):
"""if the model accepts multi modal input, implement this function"""
status = gr.Markdown.update()
if files:
index = construct_index(self.api_key, file_src=files)
status = i18n("索引构建完成")
return gr.Files.update(), chatbot, status
def summarize_index(self, files, chatbot, language):
status = gr.Markdown.update()
if files:
index = construct_index(self.api_key, file_src=files)
status = i18n("总结完成")
logging.info(i18n("生成内容总结中……"))
os.environ["OPENAI_API_KEY"] = self.api_key
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import StdOutCallbackHandler
prompt_template = "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN " + language + ":"
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["text"])
llm = ChatOpenAI()
chain = load_summarize_chain(
llm, chain_type="map_reduce", return_intermediate_steps=True, map_prompt=PROMPT, combine_prompt=PROMPT)
summary = chain({"input_documents": list(index.docstore.__dict__[
"_dict"].values())}, return_only_outputs=True)["output_text"]
print(i18n("总结") + f": {summary}")
chatbot.append([i18n("上传了")+str(len(files))+"个文件", summary])
return chatbot, status
def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot, load_from_cache_if_possible=True):
display_append = []
limited_context = False
if type(real_inputs) == list:
fake_inputs = real_inputs[0]['text']
else:
fake_inputs = real_inputs
if files:
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores.base import VectorStoreRetriever
limited_context = True
msg = "加载索引中……"
logging.info(msg)
index = construct_index(self.api_key, file_src=files, load_from_cache_if_possible=load_from_cache_if_possible)
assert index is not None, "获取索引失败"
msg = "索引获取成功,生成回答中……"
logging.info(msg)
with retrieve_proxy():
retriever = VectorStoreRetriever(vectorstore=index, search_type="similarity", search_kwargs={"k": 6})
# retriever = VectorStoreRetriever(vectorstore=index, search_type="similarity_score_threshold", search_kwargs={
# "k": 6, "score_threshold": 0.2})
try:
relevant_documents = retriever.get_relevant_documents(
fake_inputs)
except AssertionError:
return self.prepare_inputs(fake_inputs, use_websearch, files, reply_language, chatbot, load_from_cache_if_possible=False)
reference_results = [[d.page_content.strip("�"), os.path.basename(
d.metadata["source"])] for d in relevant_documents]
reference_results = add_source_numbers(reference_results)
display_append = add_details(reference_results)
display_append = "\n\n" + "".join(display_append)
if type(real_inputs) == list:
real_inputs[0]["text"] = (
replace_today(PROMPT_TEMPLATE)
.replace("{query_str}", fake_inputs)
.replace("{context_str}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
real_inputs = (
replace_today(PROMPT_TEMPLATE)
.replace("{query_str}", real_inputs)
.replace("{context_str}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
elif use_websearch:
search_results = []
with DDGS() as ddgs:
ddgs_gen = ddgs.text(fake_inputs, backend="lite")
for r in islice(ddgs_gen, 10):
search_results.append(r)
reference_results = []
for idx, result in enumerate(search_results):
logging.debug(f"搜索结果{idx + 1}:{result}")
domain_name = urllib3.util.parse_url(result['href']).host
reference_results.append([result['body'], result['href']])
display_append.append(
# f"{idx+1}. [{domain_name}]({result['href']})\n"
f"<a href=\"{result['href']}\" target=\"_blank\">{idx+1}. {result['title']}</a>"
)
reference_results = add_source_numbers(reference_results)
# display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
display_append = '<div class = "source-a">' + \
"".join(display_append) + '</div>'
if type(real_inputs) == list:
real_inputs[0]["text"] = (
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
.replace("{query}", fake_inputs)
.replace("{web_results}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
real_inputs = (
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
.replace("{query}", fake_inputs)
.replace("{web_results}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
display_append = ""
return limited_context, fake_inputs, display_append, real_inputs, chatbot
def predict(
self,
inputs,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
should_check_token_count=True,
): # repetition_penalty, top_k
status_text = "开始生成回答……"
if type(inputs) == list:
logging.info(
"用户" + f"{self.user_identifier}" + "的输入为:" +
colorama.Fore.BLUE + "(" + str(len(inputs)-1) + " images) " + f"{inputs[0]['text']}" + colorama.Style.RESET_ALL
)
else:
logging.info(
"用户" + f"{self.user_identifier}" + "的输入为:" +
colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
)
if should_check_token_count:
if type(inputs) == list:
yield chatbot + [(inputs[0]['text'], "")], status_text
else:
yield chatbot + [(inputs, "")], status_text
if reply_language == "跟随问题语言(不稳定)":
reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(
real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
yield chatbot + [(fake_inputs, "")], status_text
if (
self.need_api_key and
self.api_key is None
and not shared.state.multi_api_key
):
status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
logging.info(status_text)
chatbot.append((fake_inputs, ""))
if len(self.history) == 0:
self.history.append(construct_user(fake_inputs))
self.history.append("")
self.all_token_counts.append(0)
else:
self.history[-2] = construct_user(fake_inputs)
yield chatbot + [(fake_inputs, "")], status_text
return
elif len(fake_inputs.strip()) == 0:
status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
logging.info(status_text)
yield chatbot + [(fake_inputs, "")], status_text
return
if self.single_turn:
self.history = []
self.all_token_counts = []
if type(inputs) == list:
self.history.append(inputs)
else:
self.history.append(construct_user(inputs))
try:
if stream:
logging.debug("使用流式传输")
iter = self.stream_next_chatbot(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
for chatbot, status_text in iter:
yield chatbot, status_text
else:
logging.debug("不使用流式传输")
chatbot, status_text = self.next_chatbot_at_once(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
yield chatbot, status_text
except Exception as e:
traceback.print_exc()
status_text = STANDARD_ERROR_MSG + beautify_err_msg(str(e))
yield chatbot, status_text
if len(self.history) > 1 and self.history[-1]["content"] != fake_inputs:
logging.info(
"回答为:"
+ colorama.Fore.BLUE
+ f"{self.history[-1]['content']}"
+ colorama.Style.RESET_ALL
)
if limited_context:
# self.history = self.history[-4:]
# self.all_token_counts = self.all_token_counts[-2:]
self.history = []
self.all_token_counts = []
max_token = self.token_upper_limit - TOKEN_OFFSET
if sum(self.all_token_counts) > max_token and should_check_token_count:
count = 0
while (
sum(self.all_token_counts)
> self.token_upper_limit * REDUCE_TOKEN_FACTOR
and sum(self.all_token_counts) > 0
):
count += 1
del self.all_token_counts[0]
del self.history[:2]
logging.info(status_text)
status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
yield chatbot, status_text
self.auto_save(chatbot)
def retry(
self,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
):
logging.debug("重试中……")
if len(self.history) > 1:
inputs = self.history[-2]["content"]
del self.history[-2:]
if len(self.all_token_counts) > 0:
self.all_token_counts.pop()
elif len(chatbot) > 0:
inputs = chatbot[-1][0]
if '<div class="user-message">' in inputs:
inputs = inputs.split('<div class="user-message">')[1]
inputs = inputs.split("</div>")[0]
elif len(self.history) == 1:
inputs = self.history[-1]["content"]
del self.history[-1]
else:
yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
return
iter = self.predict(
inputs,
chatbot,
stream=stream,
use_websearch=use_websearch,
files=files,
reply_language=reply_language,
)
for x in iter:
yield x
logging.debug("重试完毕")
# def reduce_token_size(self, chatbot):
# logging.info("开始减少token数量……")
# chatbot, status_text = self.next_chatbot_at_once(
# summarize_prompt,
# chatbot
# )
# max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
# num_chat = find_n(self.all_token_counts, max_token_count)
# logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
# chatbot = chatbot[:-1]
# self.history = self.history[-2*num_chat:] if num_chat > 0 else []
# self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
# msg = f"保留了最近{num_chat}轮对话"
# logging.info(msg)
# logging.info("减少token数量完毕")
# return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
def interrupt(self):
self.interrupted = True
def recover(self):
self.interrupted = False
def set_token_upper_limit(self, new_upper_limit):
self.token_upper_limit = new_upper_limit
print(f"token上限设置为{new_upper_limit}")
def set_temperature(self, new_temperature):
self.temperature = new_temperature
def set_top_p(self, new_top_p):
self.top_p = new_top_p
def set_n_choices(self, new_n_choices):
self.n_choices = new_n_choices
def set_stop_sequence(self, new_stop_sequence: str):
new_stop_sequence = new_stop_sequence.split(",")
self.stop_sequence = new_stop_sequence
def set_max_tokens(self, new_max_tokens):
self.max_generation_token = new_max_tokens
def set_presence_penalty(self, new_presence_penalty):
self.presence_penalty = new_presence_penalty
def set_frequency_penalty(self, new_frequency_penalty):
self.frequency_penalty = new_frequency_penalty
def set_logit_bias(self, logit_bias):
logit_bias = logit_bias.split()
bias_map = {}
encoding = tiktoken.get_encoding("cl100k_base")
for line in logit_bias:
word, bias_amount = line.split(":")
if word:
for token in encoding.encode(word):
bias_map[token] = float(bias_amount)
self.logit_bias = bias_map
def set_user_identifier(self, new_user_identifier):
self.user_identifier = new_user_identifier
def set_system_prompt(self, new_system_prompt):
self.system_prompt = new_system_prompt
def set_key(self, new_access_key):
if "*" not in new_access_key:
self.api_key = new_access_key.strip()
msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
logging.info(msg)
return self.api_key, msg
else:
return gr.update(), gr.update()
def set_single_turn(self, new_single_turn):
self.single_turn = new_single_turn
def reset(self, remain_system_prompt=False):
self.history = []
self.all_token_counts = []
self.interrupted = False
self.history_file_path = new_auto_history_filename(self.user_identifier)
history_name = self.history_file_path[:-5]
choices = [history_name] + get_history_names(self.user_identifier)
system_prompt = self.system_prompt if remain_system_prompt else ""
return [], self.token_message([0]), gr.Radio.update(choices=choices, value=history_name), system_prompt
def delete_first_conversation(self):
if self.history:
del self.history[:2]
del self.all_token_counts[0]
return self.token_message()
def delete_last_conversation(self, chatbot):
if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
msg = "由于包含报错信息,只删除chatbot记录"
chatbot = chatbot[:-1]
return chatbot, self.history
if len(self.history) > 0:
self.history = self.history[:-2]
if len(chatbot) > 0:
msg = "删除了一组chatbot对话"
chatbot = chatbot[:-1]
if len(self.all_token_counts) > 0:
msg = "删除了一组对话的token计数记录"
self.all_token_counts.pop()
msg = "删除了一组对话"
self.auto_save(chatbot)
return chatbot, msg
def token_message(self, token_lst=None):
if token_lst is None:
token_lst = self.all_token_counts
token_sum = 0
for i in range(len(token_lst)):
token_sum += sum(token_lst[: i + 1])
return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"
def rename_chat_history(self, filename, chatbot, user_name):
if filename == "":
return gr.update()
if not filename.endswith(".json"):
filename += ".json"
self.delete_chat_history(self.history_file_path, user_name)
# 命名重复检测
repeat_file_index = 2
full_path = os.path.join(HISTORY_DIR, user_name, filename)
while os.path.exists(full_path):
full_path = os.path.join(HISTORY_DIR, user_name, f"{repeat_file_index}_{filename}")
repeat_file_index += 1
filename = os.path.basename(full_path)
self.history_file_path = filename
save_file(filename, self.system_prompt, self.history, chatbot, user_name)
return init_history_list(user_name)
def auto_name_chat_history(self, name_chat_method, user_question, chatbot, user_name, single_turn_checkbox):
if len(self.history) == 2 and not single_turn_checkbox:
user_question = self.history[0]["content"]
if type(user_question) == list:
user_question = user_question[0]["text"]
filename = replace_special_symbols(user_question)[:16] + ".json"
return self.rename_chat_history(filename, chatbot, user_name)
else:
return gr.update()
def auto_save(self, chatbot):
save_file(self.history_file_path, self.system_prompt,
self.history, chatbot, self.user_identifier)
def export_markdown(self, filename, chatbot, user_name):
if filename == "":
return
if not filename.endswith(".md"):
filename += ".md"
save_file(filename, self.system_prompt, self.history, chatbot, user_name)
def load_chat_history(self, new_history_file_path=None, username=None):
logging.debug(f"{self.user_identifier} 加载对话历史中……")
if new_history_file_path is not None:
if type(new_history_file_path) != str:
# copy file from new_history_file_path.name to os.path.join(HISTORY_DIR, self.user_identifier)
new_history_file_path = new_history_file_path.name
shutil.copyfile(new_history_file_path, os.path.join(
HISTORY_DIR, self.user_identifier, os.path.basename(new_history_file_path)))
self.history_file_path = os.path.basename(new_history_file_path)
else:
self.history_file_path = new_history_file_path
try:
if self.history_file_path == os.path.basename(self.history_file_path):
history_file_path = os.path.join(
HISTORY_DIR, self.user_identifier, self.history_file_path)
else:
history_file_path = self.history_file_path
if not self.history_file_path.endswith(".json"):
history_file_path += ".json"
with open(history_file_path, "r", encoding="utf-8") as f:
json_s = json.load(f)
try:
if type(json_s["history"][0]) == str:
logging.info("历史记录格式为旧版,正在转换……")
new_history = []
for index, item in enumerate(json_s["history"]):
if index % 2 == 0:
new_history.append(construct_user(item))
else:
new_history.append(construct_assistant(item))
json_s["history"] = new_history
logging.info(new_history)
except:
pass
if len(json_s["chatbot"]) < len(json_s["history"])//2:
logging.info("Trimming corrupted history...")
json_s["history"] = json_s["history"][-len(json_s["chatbot"]):]
logging.info(f"Trimmed history: {json_s['history']}")
logging.debug(f"{self.user_identifier} 加载对话历史完毕")
self.history = json_s["history"]
return os.path.basename(self.history_file_path), json_s["system"], json_s["chatbot"]
except:
# 没有对话历史或者对话历史解析失败
logging.info(f"没有找到对话历史记录 {self.history_file_path}")
return self.history_file_path, "", []
def delete_chat_history(self, filename, user_name):
if filename == "CANCELED":
return gr.update(), gr.update(), gr.update()
if filename == "":
return i18n("你没有选择任何对话历史"), gr.update(), gr.update()
if not filename.endswith(".json"):
filename += ".json"
if filename == os.path.basename(filename):
history_file_path = os.path.join(HISTORY_DIR, user_name, filename)
else:
history_file_path = filename
md_history_file_path = history_file_path[:-5] + ".md"
try:
os.remove(history_file_path)
os.remove(md_history_file_path)
return i18n("删除对话历史成功"), get_history_list(user_name), []
except:
logging.info(f"删除对话历史失败 {history_file_path}")
return i18n("对话历史")+filename+i18n("已经被删除啦"), get_history_list(user_name), []
def auto_load(self):
filepath = get_history_filepath(self.user_identifier)
if not filepath:
self.history_file_path = new_auto_history_filename(
self.user_identifier)
else:
self.history_file_path = filepath
filename, system_prompt, chatbot = self.load_chat_history()
filename = filename[:-5]
return filename, system_prompt, chatbot
def like(self):
"""like the last response, implement if needed
"""
return gr.update()
def dislike(self):
"""dislike the last response, implement if needed
"""
return gr.update()
class Base_Chat_Langchain_Client(BaseLLMModel):
def __init__(self, model_name, user_name=""):
super().__init__(model_name, user=user_name)
self.need_api_key = False
self.model = self.setup_model()
def setup_model(self):
# inplement this to setup the model then return it
pass
def _get_langchain_style_history(self):
history = [SystemMessage(content=self.system_prompt)]
for i in self.history:
if i["role"] == "user":
history.append(HumanMessage(content=i["content"]))
elif i["role"] == "assistant":
history.append(AIMessage(content=i["content"]))
return history
def get_answer_at_once(self):
assert isinstance(
self.model, BaseChatModel), "model is not instance of LangChain BaseChatModel"
history = self._get_langchain_style_history()
response = self.model.generate(history)
return response.content, sum(response.content)
def get_answer_stream_iter(self):
it = CallbackToIterator()
assert isinstance(
self.model, BaseChatModel), "model is not instance of LangChain BaseChatModel"
history = self._get_langchain_style_history()
def thread_func():
self.model(messages=history, callbacks=[
ChuanhuCallbackHandler(it.callback)])
it.finish()
t = Thread(target=thread_func)
t.start()
partial_text = ""
for value in it:
partial_text += value
yield partial_text
| [
"content",
"Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN PLACEHOLDER:"
] |
2024-01-10 | xingfanxia/ChuanhuChatGPT | modules~shared.py | from modules.presets import CHAT_COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST, OPENAI_API_BASE
import os
import queue
import openai
class State:
interrupted = False
multi_api_key = False
chat_completion_url = CHAT_COMPLETION_URL
balance_api_url = BALANCE_API_URL
usage_api_url = USAGE_API_URL
openai_api_base = OPENAI_API_BASE
def interrupt(self):
self.interrupted = True
def recover(self):
self.interrupted = False
def set_api_host(self, api_host: str):
api_host = api_host.rstrip("/")
if not api_host.startswith("http"):
api_host = f"https://{api_host}"
if api_host.endswith("/v1"):
api_host = api_host[:-3]
self.chat_completion_url = f"{api_host}/v1/chat/completions"
self.openai_api_base = f"{api_host}/v1"
self.balance_api_url = f"{api_host}/dashboard/billing/credit_grants"
self.usage_api_url = f"{api_host}/dashboard/billing/usage"
os.environ["OPENAI_API_BASE"] = api_host
def reset_api_host(self):
self.chat_completion_url = CHAT_COMPLETION_URL
self.balance_api_url = BALANCE_API_URL
self.usage_api_url = USAGE_API_URL
os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}"
return API_HOST
def reset_all(self):
self.interrupted = False
self.chat_completion_url = CHAT_COMPLETION_URL
def set_api_key_queue(self, api_key_list):
self.multi_api_key = True
self.api_key_queue = queue.Queue()
for api_key in api_key_list:
self.api_key_queue.put(api_key)
def switching_api_key(self, func):
if not hasattr(self, "api_key_queue"):
return func
def wrapped(*args, **kwargs):
api_key = self.api_key_queue.get()
args[0].api_key = api_key
ret = func(*args, **kwargs)
self.api_key_queue.put(api_key)
return ret
return wrapped
state = State()
modules_path = os.path.dirname(os.path.realpath(__file__))
chuanhu_path = os.path.dirname(modules_path)
assets_path = os.path.join(chuanhu_path, "web_assets") | [] |
2024-01-10 | openai/bugbounty-gpt | bugbounty_gpt~__main__.py | import logging
import asyncio
from bugbounty_gpt.db import db_handler
from bugbounty_gpt.db.models import SubmissionState
from bugbounty_gpt.handlers.openai_handler import OpenAIHandler
from bugbounty_gpt.handlers.submission_handler import BugCrowdSubmission
from bugbounty_gpt.handlers.bugcrowd_api import BugCrowdAPI
from bugbounty_gpt.env import USER_ID, FILTER_PROGRAM, RESPONSE_CATEGORIES, SQLALCHEMY_URL
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.info("Configuration is valid.")
logger.info("Initializing database connection.")
engine = create_async_engine(SQLALCHEMY_URL, echo=False)
SessionLocal = sessionmaker(
bind=engine,
class_=AsyncSession,
expire_on_commit=False,
)
SEEN_SUBMISSIONS = []
async def process_new_submissions():
"""
Fetch and process new submissions that are not duplicates and store them in the database.
"""
params = {
'filter[program]': FILTER_PROGRAM,
'filter[state]': 'new',
'filter[duplicate]': 'false'
}
async with SessionLocal() as session:
if (submissions := await BugCrowdAPI.fetch_submissions(params)) is not None:
for submission in submissions:
# Submission processing
submission_id = submission['id']
if submission_id in SEEN_SUBMISSIONS:
continue
user_id = submission['relationships']['researcher']['data']['id']
submission_content = submission['attributes']['description']
classification, reasoning = await OpenAIHandler.classify_submission(submission_content)
submission_data = {
'submission_id': submission_id,
'user_id': user_id,
'classification': classification,
'submission_state': SubmissionState.NEW,
'reasoning': reasoning
}
SEEN_SUBMISSIONS.append(submission_id)
await db_handler.insert_submission(session, submission_data)
async def process_in_scope_submissions():
"""
Process submissions that are in scope by generating comments, assigning users, closing submissions,
and updating their state in the database.
"""
async with SessionLocal() as session:
states = [SubmissionState.NEW]
classifications = RESPONSE_CATEGORIES # Using the RESPONSE_CATEGORIES from config
in_scope_submissions = await db_handler.fetch_submission_by_state_and_classification(session, states, classifications)
for submission_data in in_scope_submissions:
submission = BugCrowdSubmission(submission_data.submission_id, submission_data.classification, submission_data.reasoning)
user_id = USER_ID # From config
if await submission.is_submission_new():
comment_body = submission.generate_comment_text()
if comment_body:
await submission.create_comment(comment_body)
await submission.close_submission()
await db_handler.update_submission_state(session, submission.submission_id, SubmissionState.UPDATED)
else:
await db_handler.update_submission_state(session, submission.submission_id, SubmissionState.UPDATED_OUT_OF_BAND)
async def main():
"""
Main loop that repeatedly fetches and processes new submissions and in-scope submissions.
"""
while True:
logger.info("Fetching and processing new submissions...")
await process_new_submissions()
logger.info("Processing in-scope submissions...")
await process_in_scope_submissions()
minutes_waited = 1
logger.info(f"Doing nothing for {minutes_waited} minutes....")
await asyncio.sleep(60 * minutes_waited)
if __name__ == "__main__":
asyncio.run(main())
| [] |
2024-01-10 | AlphabetRanger/AgentGPT | platform~reworkd_platform~web~api~agent~agent_service~open_ai_agent_service.py | from typing import List, Optional
from lanarky.responses import StreamingResponse
from langchain.callbacks.base import AsyncCallbackHandler
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from loguru import logger
from pydantic import ValidationError
from reworkd_platform.schemas import LLM_MODEL_MAX_TOKENS, ModelSettings
from reworkd_platform.services.tokenizer.service import TokenService
from reworkd_platform.web.api.agent.agent_service.agent_service import AgentService
from reworkd_platform.web.api.agent.analysis import Analysis, AnalysisArguments
from reworkd_platform.web.api.agent.helpers import (
call_model_with_handling,
openai_error_handler,
parse_with_handling,
)
from reworkd_platform.web.api.agent.model_settings import WrappedChatOpenAI
from reworkd_platform.web.api.agent.prompts import (
analyze_task_prompt,
create_tasks_prompt,
start_goal_prompt,
)
from reworkd_platform.web.api.agent.task_output_parser import TaskOutputParser
from reworkd_platform.web.api.agent.tools.open_ai_function import get_tool_function
from reworkd_platform.web.api.agent.tools.tools import (
get_default_tool,
get_tool_from_name,
get_tool_name,
get_user_tools,
)
from reworkd_platform.web.api.errors import OpenAIError
from reworkd_platform.web.api.memory.memory import AgentMemory
class OpenAIAgentService(AgentService):
def __init__(
self,
model: WrappedChatOpenAI,
settings: ModelSettings,
agent_memory: AgentMemory,
token_service: TokenService,
callbacks: Optional[List[AsyncCallbackHandler]],
):
self.model = model
self.agent_memory = agent_memory
self.settings = settings
self.token_service = token_service
self.callbacks = callbacks
async def start_goal_agent(self, *, goal: str) -> List[str]:
prompt = ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=start_goal_prompt)]
)
self.calculate_max_tokens(
prompt.format_prompt(
goal=goal,
language=self.settings.language,
).to_string(),
)
completion = await call_model_with_handling(
self.model,
ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=start_goal_prompt)]
),
{"goal": goal, "language": self.settings.language},
settings=self.settings,
callbacks=self.callbacks,
)
task_output_parser = TaskOutputParser(completed_tasks=[])
tasks = parse_with_handling(task_output_parser, completion)
with self.agent_memory as memory:
memory.reset_class()
memory.add_tasks(tasks)
return tasks
async def analyze_task_agent(
self, *, goal: str, task: str, tool_names: List[str]
) -> Analysis:
functions = list(map(get_tool_function, get_user_tools(tool_names)))
prompt = analyze_task_prompt.format_prompt(
goal=goal,
task=task,
language=self.settings.language,
)
self.calculate_max_tokens(
prompt.to_string(),
str(functions),
)
message = await openai_error_handler(
func=self.model.apredict_messages,
messages=prompt.to_messages(),
functions=functions,
settings=self.settings,
callbacks=self.callbacks,
)
function_call = message.additional_kwargs.get("function_call", {})
completion = function_call.get("arguments", "")
try:
pydantic_parser = PydanticOutputParser(pydantic_object=AnalysisArguments)
analysis_arguments = parse_with_handling(pydantic_parser, completion)
return Analysis(
action=function_call.get("name", get_tool_name(get_default_tool())),
**analysis_arguments.dict(),
)
except (OpenAIError, ValidationError):
return Analysis.get_default_analysis()
async def execute_task_agent(
self,
*,
goal: str,
task: str,
analysis: Analysis,
) -> StreamingResponse:
# TODO: More mature way of calculating max_tokens
if self.model.max_tokens > 3000:
self.model.max_tokens = max(self.model.max_tokens - 1000, 3000)
tool_class = get_tool_from_name(analysis.action)
return await tool_class(self.model, self.settings.language).call(
goal, task, analysis.arg
)
async def create_tasks_agent(
self,
*,
goal: str,
tasks: List[str],
last_task: str,
result: str,
completed_tasks: Optional[List[str]] = None,
) -> List[str]:
prompt = ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=create_tasks_prompt)]
)
args = {
"goal": goal,
"language": self.settings.language,
"tasks": "\n".join(tasks),
"lastTask": last_task,
"result": result,
}
self.calculate_max_tokens(prompt.format_prompt(**args).to_string())
completion = await call_model_with_handling(
self.model, prompt, args, settings=self.settings, callbacks=self.callbacks
)
previous_tasks = (completed_tasks or []) + tasks
tasks = [completion] if completion not in previous_tasks else []
unique_tasks = []
with self.agent_memory as memory:
for task in tasks:
similar_tasks = memory.get_similar_tasks(task)
# Check if similar tasks are found
if not similar_tasks:
unique_tasks.append(task)
else:
logger.info(f"Similar tasks to '{task}' found: {similar_tasks}")
if unique_tasks:
memory.add_tasks(unique_tasks)
return unique_tasks
def calculate_max_tokens(self, *prompts: str) -> None:
max_allowed_tokens = LLM_MODEL_MAX_TOKENS.get(self.model.model_name, 4000)
prompt_tokens = sum([self.token_service.count(p) for p in prompts])
requested_tokens = max_allowed_tokens - prompt_tokens
self.model.max_tokens = min(self.model.max_tokens, requested_tokens)
| [] |
2024-01-10 | AlphabetRanger/AgentGPT | platform~reworkd_platform~web~api~agent~model_settings.py | import openai
from langchain.chat_models import ChatOpenAI
from pydantic import Field
from reworkd_platform.schemas import ModelSettings, UserBase, LLM_Model
from reworkd_platform.settings import settings
from reworkd_platform.web.api.agent.api_utils import rotate_keys
openai.api_base = settings.openai_api_base
class WrappedChatOpenAI(ChatOpenAI):
max_tokens: int
model_name: LLM_Model = Field(alias="model")
def create_model(
model_settings: ModelSettings, user: UserBase, streaming: bool = False
) -> WrappedChatOpenAI:
api_key = model_settings.custom_api_key or rotate_keys(
gpt_3_key=settings.openai_api_key,
gpt_4_key=settings.secondary_openai_api_key,
model=model_settings.model,
)
return WrappedChatOpenAI(
client=None, # Meta private value but mypy will complain its missing
openai_api_key=api_key,
temperature=model_settings.temperature,
model=model_settings.model,
max_tokens=model_settings.max_tokens,
streaming=streaming,
max_retries=5,
model_kwargs={"user": user.email},
)
| [] |
2024-01-10 | small-thinking/taotie | taotie~tests~utils~test_utils.py | """Test the utils.
Run this test with command: pytest taotie/tests/utils/test_utils.py
"""
from unittest.mock import MagicMock, patch
import pytest
from openai.types.chat import ChatCompletion, ChatCompletionMessage
from openai.types.chat.chat_completion_message import FunctionCall
from openai.types.edit import Choice
from taotie.utils.utils import *
@pytest.mark.parametrize(
"input, expected",
[(1681684094, "2023-04-16 14:28:14")],
)
def test_get_datetime(input, expected):
assert get_datetime(input) == expected
@pytest.mark.parametrize(
"url, response_text, status_code, expected_output, expected_exception",
[
(
"https://raw.githubusercontent.com/Open-EdTech/python-for-dev/main/README.md",
"Python for Developers",
200,
"Python for Developers",
None,
), # Test with a valid URL
(
"https://example.com/404",
"",
404,
None,
Exception,
), # Test with a 404 error
(
"https://example.com/notfound",
"",
500,
None,
Exception,
), # Test with a 500 error
],
)
def test_fetch_url_content(
url, response_text, status_code, expected_output, expected_exception
):
with patch("requests.get") as mock_get:
mock_get.return_value.text = response_text
mock_get.return_value.status_code = status_code
if expected_exception:
with pytest.raises(expected_exception):
fetch_url_content(url)
else:
assert fetch_url_content(url) == expected_output
@pytest.mark.parametrize(
"input, expected",
[
('{"name": "John", "age": 30}', {"name": "John", "age": 30}),
('{name": "John", "age": 30}', None), # Invalid JSON
],
)
def test_parse_json(input, expected):
if expected is None:
with pytest.raises(json.decoder.JSONDecodeError):
parse_json(input)
else:
assert parse_json(input) == expected
@pytest.mark.parametrize(
"model_type, prompt, content, max_tokens, temperature, expected_result",
[
(
"gpt-3.5-turbo-1106",
"Please summarize the following:",
"Hello, my name is John and I am 30 years old.",
50,
0.0,
"John is a 30 year old.",
),
(
"gpt-3.5-turbo-1106",
"Please generate a response:",
"How are you?",
20,
0.5,
"I'm doing well, thanks for asking!",
),
],
)
def test_chat_completion(
model_type, prompt, content, max_tokens, temperature, expected_result
):
with patch.dict(os.environ, {"OPENAI_API_KEY": "your_mocked_api_key"}):
# Mock the OpenAI constructor
with patch("openai.OpenAI") as MockOpenAI:
mock_openai_instance = MockOpenAI.return_value
# Set up the mock for chat.completions.create method
mock_openai_instance.chat.completions.create.return_value = ChatCompletion(
id="chatcmpl-123",
created=1677652288,
model="gpt-3.5-turbo-1106",
object="chat.completion",
choices=[
Choice(
message=ChatCompletionMessage(
role="assistant", content=expected_result
),
finish_reason="stop",
index=0,
text="aaa",
)
],
)
# Call the function under test
result = chat_completion(
model_type,
prompt,
content,
max_tokens,
temperature,
client=mock_openai_instance,
)
# Assertions
assert result == expected_result
@pytest.mark.asyncio
@pytest.mark.parametrize(
"text_summary, metadata, model_type, max_tokens, expected_output",
[
(
"This is a test summary",
{
"createdDate": "2021-09-15",
"lastUpdated": "2021-09-15",
"description": "Test",
},
"gpt-3.5-turbo-1106",
6000,
{
"metadata": {
"createdDate": "2021-09-15",
"lastUpdated": "2021-09-15",
"description": "Test",
},
"nodes": [
{
"id": "1",
"label": "Test Summary",
"type": "Summary",
"color": "#FFD700",
}
],
"edges": [],
},
),
],
)
async def test_text_to_triplets(
text_summary, metadata, model_type, max_tokens, expected_output
):
logger = Logger("test_logger")
with patch("taotie.utils.utils.chat_completion") as mock_chat_completion:
# Mock response as an object, not JSON
mock_chat_completion.return_value = ChatCompletion(
id="chatcmpl-123",
created=1677652288,
model="gpt-3.5-turbo-1106",
object="chat.completion",
choices=[
Choice(
finish_reason="stop",
index=0,
text="",
message=ChatCompletionMessage(
role="assistant",
function_call=FunctionCall(
name="", arguments=json.dumps(expected_output)
),
),
)
],
)
result = await text_to_triplets(
text_summary, metadata, logger, model_type, max_tokens
)
assert isinstance(result, Dict)
@pytest.mark.parametrize(
"triplets, expected_output",
[
(
{
"nodes": [
{"id": "1", "label": "Node1", "color": "red"},
{"id": "2", "label": "Node2", "color": "blue"},
],
"edges": [
{
"from": "1",
"to": "2",
"relationship": "connects",
"color": "green",
}
],
},
"knowledge_graph_",
),
# Add more test cases here
],
)
def test_construct_knowledge_graph(triplets, expected_output):
logger = Logger("test_logger")
result = construct_knowledge_graph(triplets, logger)
assert (
expected_output in result
) # Modify this line based on what you actually expect
assert os.path.exists(result) # Check if the file actually exists
os.remove(result) # Clean up the generated image file
@pytest.mark.parametrize(
"url, status_code, expected",
[
("https://example.com", 200, True),
("https://example.com", 404, False),
("https://example.invalid", None, False),
],
)
def test_check_url_exists(url, status_code, expected):
with patch("requests.head") as mock_head:
mock_head.return_value.status_code = status_code
result = check_url_exists(url)
assert result == expected
@pytest.mark.asyncio
@pytest.mark.parametrize(
"repo_name, readme_response, chat_completion_response, check_url_exists_response, expected_result, readme_url",
[
(
"test-repo",
"# Hello\n",
'{"image_url": "https://example.com/image.png"}',
True,
"https://i.imgur.com/image.png",
"https://raw.githubusercontent.com/test-repo/master/README.md",
),
(
"test-repo-invalid-url",
"# Hello\n",
'{"image_url": "https://example.invalid/image.png"}',
False,
"",
"https://raw.githubusercontent.com/test-repo-invalid-url/master/README.md",
),
(
"test-repo-no-image",
"# Hello",
'{"image_url": ""}',
False,
"",
"https://raw.githubusercontent.com/test-repo-no-image/master/README.md",
),
],
)
async def test_extract_representative_image(
repo_name,
readme_response,
chat_completion_response,
check_url_exists_response,
expected_result,
readme_url,
):
logger = Logger("test_extract_representative_image")
with patch("requests.get") as mock_get:
mock_get.return_value.text = readme_response
mock_get.return_value.status_code = 200
with patch("taotie.utils.utils.chat_completion") as mock_chat_completion:
mock_chat_completion.return_value = chat_completion_response
with patch("taotie.utils.utils.check_url_exists") as mock_check_url_exists:
mock_check_url_exists.return_value = check_url_exists_response
with patch(
"taotie.utils.utils.save_image_to_imgur"
) as mock_save_image_to_imgur:
mock_save_image_to_imgur.return_value = expected_result
result = await extract_representative_image(
repo_name=repo_name, readme_url=readme_url, logger=logger
)
assert result == expected_result
| [] |
2024-01-10 | small-thinking/taotie | taotie~consumer~info_summarizer.py | """
"""
import asyncio
import json
import os
from typing import Any, Dict, List, Optional
import openai
from colorama import Fore
from taotie.consumer.base import Consumer
from taotie.storage.base import Storage
from taotie.utils.utils import *
class InfoSummarizer(Consumer):
"""A consumer that summarize the message in batch."""
def __init__(
self,
summarize_instruction: str,
verbose: bool = False,
dedup: bool = False,
storage: Optional[Storage] = None,
**kwargs,
):
Consumer.__init__(self, verbose=verbose, dedup=dedup, storage=storage, **kwargs)
self.buffer: List[str] = []
self.buffer_size = 0
self.max_buffer_size = kwargs.get("max_buffer_size", -1)
self.summarize_instruction = summarize_instruction
tags = kwargs.get(
"CANDIDATE_TAGS",
"AI,CV,deep-learning,GPT,LLM,foundation-model,HuggingFace,image-generation,"
"inference,knowledge-extraction,language-model,machine-learning,model,"
"model-generation,NLP,QA,chatbot,speech-recognition,text-generation,"
"text-to-speech,training,voice-recognition",
)
if not self.summarize_instruction:
self.summarize_instruction = f"""
Please follow the instructions below to generate the json formated response:
1. Summarize the following collected json data wrapped by triple quotes in Chinese.
2. Plese summarize the content CONCISELY, ACCURATELY, and COMPREHENSIVELY.
And CONCATENATE the Chinese and English summaries with \n\n IN ONE "summary" FIELD.
For example "summary": "这是中文总结。\\n\\nThis is an English summary."
3. Generate at most 5 tags from {tags}. If the content is irrelevant to any of the tags, instead use tag "N/A" ONLY.
4. Please STRICTLY follow the instructions above and output the results in ONE JSON blob, \
and STRICTLY WRAP EACH KEY OR VALUE WITH DOUBLE QUOTES.
Some examples:
Example 1:
{{
"summary": "这是一个总结。\\n\\nThis is a summary.",
"tags": ["tag1", "tag2"],
}}
Example 2:
{{
"summary": "Segment Anything是一个新的图像分割任务、模型和数据集项目。",
"tags": ["deep-learning", "image-generation"],
}}
"""
self.max_tokens = kwargs.get("max_tokens", 3000)
self.model_type = kwargs.get("model_type", "gpt-3.5-turbo-1106")
self.logger.debug("PrintConsumer initialized.")
async def _process(self, messages: List[Dict[str, Any]]) -> None:
id = messages[0].get("id", "")
info_type = messages[0].get("type", "")
self.buffer.extend(map(lambda m: json.dumps(m, ensure_ascii=False), messages))
concatenated_messages = "\n".join(self.buffer)
self.logger.info(f"Summarizer received information: {concatenated_messages}\n")
summary_json_str = await self.gpt_summary(concatenated_messages)
try:
parse_json(summary_json_str)
except Exception as e:
# Ask LLM to fix the potentially malformed json string.
self.logger.warning(f"Generated summary is not in JSON, fixing...")
summary_json_str = chat_completion(
model_type=self.model_type,
prompt="""
You are a json fixer that can fix various types of malformed json strings.
Please directly return the JSON as is if it is already in a valid format.
IF not, please fix the following json string and return the fixed string in a way that is DIRECTLY PARSABLE by json.loads().
""",
content=f"""
{summary_json_str}
""",
max_tokens=self.max_tokens,
temperature=0.05,
)
self.logger.info(
f"""JSON summary result after fixing:
{summary_json_str}
"""
)
# Extract the representative image from the repo README.md.
representative_image_url_str = ""
if info_type == "github-repo":
readme_url = f"https://raw.githubusercontent.com{id}/main/README.md"
if not check_url_exists(readme_url):
readme_url = f"https://raw.githubusercontent.com{id}/master/README.md"
# Extract the representative image from the repo.
representative_image_url_str = await extract_representative_image(
repo_name=id, readme_url=readme_url, logger=self.logger
)
# Generate the knowledge graph image url.
knowledge_graph_image_url_str = await self.knowledge_graph_summary(
concatenated_messages, messages[0]
)
self.logger.info(f"Knowledge graph image url: {knowledge_graph_image_url_str}")
# Save to storage.
if self.storage:
# Parse the output as json.
try:
processed_data = parse_json(summary_json_str)
except json.JSONDecodeError as e:
self.logger.error(
f"Failed to parse the output as json. Error: {str(e)}, the json string is [[{summary_json_str}]]"
)
self.buffer.clear()
return
try:
# TODO: This is a hack. We should have a better way to do this.
list_of_tuples = [(raw, processed_data) for raw in messages]
await self.storage.save(
list_of_tuples,
image_urls=[
representative_image_url_str,
knowledge_graph_image_url_str,
],
truncate=20,
)
self.logger.info(f"Saved to storage.")
except Exception as e:
self.logger.error(f"Failed to save to storage. Error: {str(e)}")
finally:
self.buffer.clear()
async def gpt_summary(self, input: str) -> str:
"""A tiny example use case of using LLM to process the gathered information."""
input = input[: self.max_buffer_size]
prompt = f"""
{self.summarize_instruction}
```
{input}
```
"""
if not os.getenv("OPENAI_API_KEY"):
raise ValueError("Please set OPENAI_API_KEY in .env.")
openai.api_key = os.getenv("OPENAI_API_KEY")
result = chat_completion(
model_type=self.model_type,
prompt=prompt,
content=input,
max_tokens=self.max_tokens,
temperature=0.0,
)
self.logger.output(f"Get summary: {result}\n", color=Fore.BLUE)
return result
async def knowledge_graph_summary(
self, text_summary: str, metadata: Dict[str, Any]
) -> str:
try:
rdf_triplets = await text_to_triplets(text_summary, metadata, self.logger)
self.logger.info(f"Successfully generated triplets: \n{rdf_triplets}\n")
except Exception as e:
self.logger.error(f"Error generating triplets: {e}")
return ""
try:
knowledge_graph_image_path = await async_construct_knowledge_graph(
rdf_triplets
)
self.logger.info(
f"Successfully generated knowledge graph image: {knowledge_graph_image_path}"
)
except Exception as e:
self.logger.error(
f"Error generating knowledge graph image from triplets: {e}"
)
return ""
try:
image_url = await upload_image_to_imgur(
knowledge_graph_image_path, self.logger
)
return image_url
except Exception as e:
self.logger.error(f"Error uploading knowledge graph image to Imgur: {e}")
return ""
| [] |
2024-01-10 | small-thinking/taotie | taotie~reporter~notion_reporter.py | """Notion reporter will check the gathered knowledge in notion and generate the text report for the AI related contents.
"""
import json
import os
from datetime import date, datetime, timedelta
from typing import Dict, List
import openai
import pytz # type: ignore
from notion_client import AsyncClient
from taotie.reporter.base_reporter import BaseReporter
from taotie.utils.utils import *
class NotionReporter(BaseReporter):
"""NotionReporter will check the gathered knowledge in notion and
generate the text report accordingly."""
def __init__(
self,
knowledge_source_uri: str,
date_lookback: int,
type_filters: List[str],
topic_filters: List[str],
verbose: bool = False,
**kwargs,
):
"""
Args:
knowledge_source_uri: The uri of the notion database id.
"""
super().__init__(knowledge_source_uri=knowledge_source_uri, verbose=verbose)
self.token = os.environ.get("NOTION_TOKEN")
if not self.token:
raise ValueError("Please set the Notion token in .env.")
self.date_lookback = max(0, date_lookback)
self.type_filters = type_filters
self.topic_filters = topic_filters
self.max_retrieve = kwargs.get("max_retrieve", 1000)
# Model configs.
if not os.getenv("OPENAI_API_KEY"):
raise ValueError("Please set OPENAI_API_KEY in .env.")
openai.api_key = os.getenv("OPENAI_API_KEY")
self.model_type = kwargs.get("model_type", "gpt-3.5-turbo-16k")
# Prompt.
language = kwargs.get("language", "English")
if "github-repo" in self.topic_filters:
self.report_prompt = f"""
Please generate a report that will be published by the WECHAT BLOG based on the json string in the triple quotes.
Follow the following rules STRICTLY:
1. Summarize in {language} and at the beginning give a short overall summary of the repos in this report.
2. REMOVE the items that are NOT RELATED to AI or the topics of {self.topic_filters}.
3. Generate each item as an individual section, include the URL in each of the item, and \
including the strength of recommendation (draw 1-5 stars) and the reason to recommend. \
Make the summary as informative as possible.
4. Generate the description in an attractive way, so that the readers will be willing to check the content.
5. Rank by importance (e.g. whether has image) and keep AT MOST the top 10 items based on the recommendation strength.
6. Output the results as a JSON string which contains a list of items (with keys "Title", "Rating", "Image URLs", "Summary", "Reason", "URL"). Example:
{{
"results": [
{{
"Title": "【★★★★★】TransformerOptimus/SuperAGI",
"Image URLs": ["url1", "url2"],
"Summary": "这是一个用于构建和运行有用的自主智能体的Python项目。",
"Reason": "推荐理由:自主性AI最新版本。该项目旨在创造一个可以解决朴实问题的自主智能体。",
"URL": "https://github.com/TransformerOptimus/SuperAGI",
}},
{{
"Title": "【★★★★】LLM-ToolMaker",
"Image URLs": ["url1"],
"Summary": "这个项目提出了一种名为LLMs As Tool Makers (LATM)的闭环框架,其中大型语言模型(LLMs)可以作为工具制造者为解决问题创造自己的可重用工具。",
"Reason": "推荐理由:开放框架。该项目旨在创造一个可以使用外部工具的自主智能体。",
"URL": "https://github.com/ctlllll/LLM-ToolMaker",
}}
]
}}
"""
else:
self.report_prompt = f"""
Please generate a report of the paper summary that will be published by the WECHAT BLOG based on the json string in the triple quotes.
Follow the following rules STRICTLY:
1. Summarize in {language} and at the beginning give a short overall summary of the repos in this report.
2. REMOVE the items that are NOT RELATED to AI or the topics of {self.topic_filters}.
3. Generate each item as an individual section, include the URL in each of the item, and \
including the strength of recommendation (draw 1-5 stars) and the reason to recommend. \
Make the summary as informative as possible.
4. Use the paper name as the title for each item. Then followed by a short overall summary of the paper.
5. Output the results as a JSON string which contains a list of items (with keys "Title", "Rating", “Image URLs", "Summary", "Reason", "URL"). Example:
{{
"results": [
{{
"Title": "Training Language Models with Language Feedback at Scale",
"Image URLs": ["url1", "url2"],
"Summary": "本文介绍了一种新的语言反馈模型训练方法ILF,利用更具信息量的语言反馈来解决预训练语言模型生成的文本与人类偏好不一致的问题。",
"Reason": "推荐理由:新的语言反馈模型训练方法。",
"URL": "https://arxiv.org/abs/2303.16755v2",
}},
{{
"Title": "Language Models Don't Always Say What They Think: Unfaithful Explanations in Chain-of-Thought Prompting",
"Image URLs": ["url1"],
"Summary": "本文研究了大型语言模型(LLMs)在链式思考推理(CoT)中的解释不忠实问题,揭示了CoT解释可能受到多种因素的影响。",
"Reason": "推荐理由:深入研究LLMs的行为。",
"URL": "http://arxiv.org/abs/2305.04388v1",
}}
]
}}
"""
async def _connect(self):
self.notion = AsyncClient(auth=self.token)
async def _cleanup(self):
print("cleanup")
async def _distill(self) -> str:
"""Grab the gathered knowledge from notion database and generate the text report.
Returns:
str: The text report.
"""
doc_list = await self._retrieve_data()
self.logger.output(f"Number docs retrieved: {len(doc_list)}\n")
self.logger.output(json.dumps(doc_list, indent=2))
report = await self._generate_report(doc_list)
self.logger.output(f"Report: {report}\n", color=Fore.BLUE)
return report
async def _retrieve_data(self) -> List[Dict[str, Any]]:
"""Retrieve data from Notion database.
This queries the configured Notion database to get pages created in the
specified date range, filtered by type and topic as configured.
The data for each page is returned as a list of dictionaries containing
title, summary, and URL.
Returns:
List[Dict]: List of page data dicts with title, summary, and URL
"""
# Get the date range.
timezone = pytz.timezone("America/Los_Angeles")
start_date = datetime.now(timezone) - timedelta(days=self.date_lookback)
date_start = start_date.astimezone(timezone).isoformat()
# Query the database and convert them into json if not.
filter_params = {
"and": [
{
"property": "Created Time",
"date": {
"after": date_start,
},
},
{"or": []}, # type filter.
{"or": []}, # topic filter.
]
}
and_blob: List[Any] = filter_params["and"]
# Add type filters.
if self.type_filters:
for type_filter in self.type_filters:
and_blob[1]["or"].append(
{
"property": "Type",
"select": {"equals": type_filter},
}
)
# Add tag filters.
if self.topic_filters:
for topic in self.topic_filters:
and_blob[-1]["or"].append(
{
"property": "Topics",
"multi_select": {"contains": topic},
}
)
# Query notion db with async API.
response = await self.notion.databases.query(
database_id=self.knowledge_source_uri,
filter=filter_params,
page_size=self.max_retrieve,
)
# Format the data.
doc_list = []
for item in response["results"]:
url = ""
url_block = item["properties"].get("URL", None)
if url_block:
url = url_block["rich_text"][0]["plain_text"]
else:
self.logger.warning("No url found.")
summary = item["properties"]["Summary"]["rich_text"][0]["plain_text"]
# Get the page id and retrieve the content of the page to get the images.
page_id = item["id"]
image_urls = []
page_response = await self.notion.blocks.children.list(block_id=page_id)
for block in page_response["results"]:
if block["type"] == "image":
image_blob = block["image"]
if "external" in block["image"]:
image_url = image_blob["external"]["url"]
else:
image_url = image_blob["file"]["url"]
image_urls.append(image_url)
break
elif block["type"] == "embed":
image_url = block["embed"]["url"]
image_urls.append(image_url)
break
doc = {
"Title": item["properties"]["Title"]["title"][0]["plain_text"],
"Summary": summary[:300],
"url": url,
"images": image_urls,
}
doc_list.append(doc)
return doc_list
async def _generate_report(self, doc_list: List[Dict[str, Any]]):
"""Generate the report for the given doc_list.
Args:
doc_list (List[Dict[str, Any]]): The list of docs.
"""
today = date.today()
formatted_date = today.strftime("%Y/%m/%d")
json_string = json.dumps(doc_list)
content_prompt = f"""
'''
Report of {formatted_date}
{json_string}
'''
"""
# Truncate.
truncate_size = 12000 if self.model_type == "gpt-3.5-turbo-16k" else 7000
content_prompt = content_prompt[:truncate_size]
self.logger.output(f"Content prompt: {content_prompt}")
# Rough estimation of remaining tokens for generation.
prompt_tokens = len(content_prompt)
max_tokens = 4096
self.logger.output(
f"Prompt tokens: {prompt_tokens}, response tokens: {max_tokens}"
)
result = chat_completion(
model_type=self.model_type,
prompt=self.report_prompt,
content=content_prompt,
max_tokens=max_tokens,
temperature=0.5,
)
return result
| [
"84",
"\n '''\n Report of PLACEHOLDER\n\n PLACEHOLDER\n '''\n ",
"\n Report of PLACEHOLDER\n\n PLACEHOLDER\n "
] |
2024-01-10 | epam/ai-dial-adapter-vertexai | tests~unit_tests~test_models.py | from typing import Any
import openai
import openai.error
import requests
from aidial_adapter_vertexai.llm.vertex_ai_deployments import (
ChatCompletionDeployment,
)
from tests.conftest import DEFAULT_API_VERSION, TEST_SERVER_URL
deployments = [
ChatCompletionDeployment.CHAT_BISON_1,
ChatCompletionDeployment.CODECHAT_BISON_1,
]
def models_request_http() -> Any:
response = requests.get(f"{TEST_SERVER_URL}/openai/models")
assert response.status_code == 200
return response.json()
def models_request_openai() -> Any:
return openai.Model.list(
api_type="azure",
api_base=TEST_SERVER_URL,
api_version=DEFAULT_API_VERSION,
api_key="dummy_key",
)
def assert_models_subset(models: Any):
actual_models = [model["id"] for model in models["data"]]
expected_models = list(map(lambda e: e.value, deployments))
assert set(expected_models).issubset(
set(actual_models)
), f"Expected models: {expected_models}, Actual models: {actual_models}"
def test_model_list_http(server):
assert_models_subset(models_request_http())
def test_model_list_openai(server):
assert_models_subset(models_request_openai())
| [] |
2024-01-10 | epam/ai-dial-adapter-vertexai | tests~integration_tests~test_chat_completion.py | import re
from dataclasses import dataclass
from typing import Callable, List, Optional
import openai.error
import pytest
from langchain.schema import BaseMessage
from aidial_adapter_vertexai.llm.vertex_ai_deployments import (
ChatCompletionDeployment,
)
from tests.conftest import TEST_SERVER_URL
from tests.utils.llm import (
assert_dialog,
create_chat_model,
sanitize_test_name,
sys,
user,
)
deployments = [
ChatCompletionDeployment.CHAT_BISON_1,
ChatCompletionDeployment.CODECHAT_BISON_1,
]
@dataclass
class TestCase:
__test__ = False
name: str
deployment: ChatCompletionDeployment
streaming: bool
max_tokens: Optional[int]
stop: Optional[List[str]]
messages: List[BaseMessage]
expected: Callable[[str], bool] | Exception
def get_id(self):
max_tokens_str = str(self.max_tokens) if self.max_tokens else "inf"
stop_sequence_str = str(self.stop) if self.stop else "nonstop"
return sanitize_test_name(
f"{self.deployment.value} {self.streaming} {max_tokens_str} {stop_sequence_str} {self.name}"
)
def get_test_cases(
deployment: ChatCompletionDeployment, streaming: bool
) -> List[TestCase]:
ret: List[TestCase] = []
ret.append(
TestCase(
name="2+3=5",
deployment=deployment,
streaming=streaming,
max_tokens=None,
stop=None,
messages=[user("2+3=?")],
expected=lambda s: "5" in s,
)
)
ret.append(
TestCase(
name="hello",
deployment=deployment,
streaming=streaming,
max_tokens=None,
stop=None,
messages=[user('Reply with "Hello"')],
expected=lambda s: "hello" in s.lower(),
)
)
ret.append(
TestCase(
name="empty sys message",
deployment=deployment,
streaming=streaming,
max_tokens=None,
stop=None,
messages=[sys(""), user("2+4=?")],
expected=lambda s: "6" in s,
)
)
ret.append(
TestCase(
name="max tokens 1",
deployment=deployment,
streaming=streaming,
max_tokens=1,
stop=None,
messages=[user("tell me the full story of Pinocchio")],
expected=lambda s: len(s.split()) == 1,
)
)
ret.append(
TestCase(
name="stop sequence",
deployment=deployment,
streaming=streaming,
max_tokens=None,
stop=["world"],
messages=[user('Reply with "hello world"')],
expected=Exception(
"stop sequences are not supported for code chat model"
)
if deployment == ChatCompletionDeployment.CODECHAT_BISON_1
else lambda s: "world" not in s.lower(),
)
)
return ret
@pytest.mark.asyncio
@pytest.mark.parametrize(
"test",
[
test_case
for model in deployments
for streaming in [False, True]
for test_case in get_test_cases(model, streaming)
],
ids=lambda test: test.get_id(),
)
async def test_chat_completion_langchain(server, test: TestCase):
model = create_chat_model(
TEST_SERVER_URL,
test.deployment,
test.streaming,
test.max_tokens,
)
if isinstance(test.expected, Exception):
with pytest.raises(Exception) as exc_info:
await assert_dialog(
model=model,
messages=test.messages,
output_predicate=lambda s: True,
streaming=test.streaming,
stop=test.stop,
)
assert isinstance(exc_info.value, openai.error.OpenAIError)
assert exc_info.value.http_status == 422
assert re.search(str(test.expected), str(exc_info.value))
else:
await assert_dialog(
model=model,
messages=test.messages,
output_predicate=test.expected,
streaming=test.streaming,
stop=test.stop,
)
| [] |
2024-01-10 | cdifranco1/gpt_content_indexing | ask_question.py | import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
import pandas as pd
import argparse
import numpy as np
import openai
from pprint import pprint
from transformers import GPT2TokenizerFast
# Create an ArgumentParser object
parser = argparse.ArgumentParser()
# Add an argument with a flag and a name
parser.add_argument("--question", help="Specify the question you are asking")
parser.add_argument("--file", default="./output/indexed_content.csv", help="Specify the path to the CSV containing the content")
parser.add_argument("--embeddings", default="./output/embeddings.csv", help="Specify the path to the embeddings CSV")
parser.add_argument("--show_prompt", default=False, help="Output the prompt sent to OpenAI")
parser.add_argument("--allow_hallucinations", default=False, help="Don't restrict answers to be based from the provided context")
parser.add_argument("--use_fine_tune", default=False, help="Use the fine tuned model")
args = parser.parse_args()
show_prompt = bool(args.show_prompt)
allow_hallucinations = bool(args.allow_hallucinations)
QUERY_EMBEDDINGS_MODEL = "text-embedding-ada-002"
COMPLETIONS_MODEL = "davinci:ft-learning-pool:strm-prompts-2022-12-20-18-07-34" if args.use_fine_tune else "text-davinci-003"
MAX_SECTION_LEN = 1000
SEPARATOR = "\n* "
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
separator_len = len(tokenizer.tokenize(SEPARATOR))
def get_embedding(text: str, model: str) -> list[float]:
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"]
def get_query_embedding(text: str) -> list[float]:
return get_embedding(text, QUERY_EMBEDDINGS_MODEL)
def load_embeddings(filename: str) -> dict[tuple[str, str], list[float]]:
"""
Read the document embeddings and their keys from a CSV.
filename is the path to a CSV with exactly these named columns:
"title", "heading", "0", "1", ... up to the length of the embedding vectors.
"""
df = pd.read_csv(filename, header=0)
max_dim = max([int(c) for c in df.columns if c != "title" and c != "heading"])
return {
(r.title, r.heading): [r[str(i)] for i in range(max_dim + 1)] for _, r in df.iterrows()
}
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
We could use cosine similarity or dot product to calculate the similarity between vectors.
In practice, we have found it makes little difference.
"""
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(query: str, contexts: dict[(str, str), np.array]) -> list[(float, (str, str))]:
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_query_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
"""
Fetch relevant
"""
most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
chosen_sections_len += document_section.tokens + separator_len
if chosen_sections_len > MAX_SECTION_LEN:
break
title, heading = section_index
content = document_section.content.replace("\n", " ");
url = document_section.url;
chosen_sections.append(f"{SEPARATOR}{title} - {heading} - {content} (URL: {url})")
chosen_sections_indexes.append(str(section_index))
# Useful diagnostic information
if show_prompt:
print(f"Selected {len(chosen_sections)} document sections:")
print("\n".join(chosen_sections_indexes))
if bool(allow_hallucinations) == True:
print("Halluncinations are enabled!")
header = "Answer the question based on the provided context. If the answer is not in the provided context, you may make a best guess using your wider knowledge."
header += "The context provided contains multiple sections of text from a knowledge base and a URL for each. For each section of text (which starts with a \"*\" character), return a unique answer followed by the text 'More info:' followed by the URL. You may return up to three answers, each separated with two line breaks."
else:
header = "Answer the question as truthfully as possible using the provided context. You should use as much detail from the given context as possible when answering the question."
header += "If the answer is not contained within the text below, say 'I don't know.' followed by the all the text in the 'Context' section (preceeded by 'Here is the closest information I could find to your question\\n\\n:'). "
header += "Within the context are URLs. If an answer if found within a relevant section, return the answer and then three line breaks and then the text 'More info:' followed by the URL."
header += ""
header += "\n\nContext:\n"
header += "".join(chosen_sections) + "\n\n"
header += "Q: " + question + "\n A:"
return header
def answer_query_with_context(
query: str,
df: pd.DataFrame,
document_embeddings: dict[(str, str), np.array],
show_prompt: bool = False
) -> str:
prompt = construct_prompt(
query,
document_embeddings,
df
)
print("\n\n")
if show_prompt:
print(prompt)
else:
print(f"Question: {query}")
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
return response["choices"][0]["text"].strip(" \n")
COMPLETIONS_API_PARAMS = {
# We use temperature of 0.0 because it gives the most predictable, factual answer.
"temperature": 1.0 if allow_hallucinations else 0.0,
"max_tokens": 600,
"model": COMPLETIONS_MODEL,
}
# Fetch the embeddings from the CSV
document_embeddings = load_embeddings(args.embeddings)
df = pd.read_csv(args.file)
df = df.set_index(["title", "heading"])
response = answer_query_with_context(args.question, df, document_embeddings, show_prompt=show_prompt)
print("")
print(f"Answer: {response}") | [] |
2024-01-10 | cdifranco1/gpt_content_indexing | index_content.py | import os
import pandas as pd
import csv
import html2text
import sys
import requests
from atlassian import Confluence
import openai
from pprint import pprint
from bs4 import BeautifulSoup
import argparse
from transformers import GPT2TokenizerFast
from typing import Tuple
from nltk.tokenize import sent_tokenize
sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)
# Create an ArgumentParser object
parser = argparse.ArgumentParser()
# Add an argument with a flag and a name
parser.add_argument("--spaces", nargs="*", default=["STRM"], help="Specify the Confluence Space you want to index")
parser.add_argument("--zendesk", nargs="*", default=["learningpool"], help="Specify the Zendesk domains you want to index")
parser.add_argument("--max_pages", default=1000, help="The maximum amount of Space pages to index")
parser.add_argument("--out", default="indexed_content", help="Specify the filename to save the content")
parser.add_argument("--min_tokens", default=20, help="Remove content with less than this number of tokens")
args = parser.parse_args()
max_pages = int(args.max_pages)
# Connect to Confluence
confluence = Confluence(url='https://learninglocker.atlassian.net', username=os.environ.get('CONFLUENCE_USERNAME'), password=os.environ.get('CONFLUENCE_API_KEY'))
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
def count_tokens(text: str) -> int:
"""count the number of tokens in a string"""
return len(tokenizer.encode(text))
def reduce_long(
long_text: str, long_text_tokens: bool = False, max_len: int = 590
) -> str:
"""
Reduce a long text to a maximum of `max_len` tokens by potentially cutting at a sentence end
"""
if not long_text_tokens:
long_text_tokens = count_tokens(long_text)
if long_text_tokens > max_len:
sentences = sent_tokenize(long_text.replace("\n", " "))
ntokens = 0
for i, sentence in enumerate(sentences):
ntokens += 1 + count_tokens(sentence)
if ntokens > max_len:
return ". ".join(sentences[:i][:-1]) + "."
return long_text
def extract_html_content(
title_prefix: str,
page_title: str,
html: str,
url: str
):
ntitles, nheadings, ncontents, nurls = [], [], [], []
soup = BeautifulSoup(html, 'html.parser')
headings = soup.find_all(["h1", "h2", "h3", "h4", "h5", "h6"])
prev_heading = []
# Iterate through all headings and subheadings
for h in headings:
# Extract the heading text and remove HTML
heading = html2text.html2text(str(h)).strip()
# Initialize the content list
content = []
# Find the next heading or subheading
next_h = h.find_next(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
actual_heading = heading.lstrip('#').lstrip(' ')
# Iterate through all siblings until the next heading or subheading is reached
for sibling in h.next_siblings:
if sibling == next_h:
break
# If the sibling is a tag, extract the text and remove HTML
if sibling.name:
para = html2text.html2text(str(sibling)).strip()
if len(para) > 0:
content.append(para)
# If there are content entries, join them all together, clean up for utf-8 and write the row
if len(content) > 0:
content = "".join(content).replace("\n", "").encode('utf-8').decode('utf-8')
# If there are headings above this one without content, we concat them here
if len(prev_heading) > 0:
full_heading = " - ".join(prev_heading) + " - " + actual_heading
else:
full_heading = actual_heading
title = f"{title_prefix} - {page_title}"
# Store the extracted title, heading, content
ntitles.append(title)
nheadings.append(full_heading)
ncontents.append(f"{title} - {full_heading} - {content}")
nurls.append(url)
prev_heading = []
else:
# Otherwise, we store this heading to append to the next sibling with content
prev_heading.append(actual_heading)
# Return the 3 arrays of titles, headings and content
return (ntitles, nheadings, ncontents, nurls)
def count_content_tokens(
ntitles: list,
nheadings:list,
ncontents: list,
nurls: list
):
# count the tokens of each section
ncontent_ntokens = [
count_tokens(c) # Add the tokens from the content
+ 4
+ count_tokens(" ".join(t.split(" ")[1:-1])) # Add the tokens from the titles
+ count_tokens(" ".join(h.split(" ")[1:-1])) # Add the tokens from the headings
+ count_tokens(" ".join(u.split(" ")[1:-1])) # Add the tokens from the url
- (1 if len(c) == 0 else 0)
for t, h, c, u in zip(ntitles, nheadings, nurls, ncontents)
]
# Create a tuple of (title, section_name, content, number of tokens)
outputs = []
outputs += [(t, h, u, c, tk) if tk<max_len
else (h, reduce_long(c, max_len), count_tokens(reduce_long(c,max_len)))
for t, h, u, c, tk in zip(ntitles, nheadings, nurls, ncontents, ncontent_ntokens)]
return outputs
def extract_sections(
space: str,
limit: int = max_pages
):
ntitles, nheadings, ncontents, nurls = [], [], [], []
confluence_space = confluence.get_space(space_key=space)
space_title = confluence_space['name']
print(f"Fetching up to {limit} pages from '{space_title}'...")
# Search for all pages in a given space
results = confluence.get_all_pages_from_space(space=space, start=0, limit=limit)
page_ids = []
for result in results:
page_ids.append(result["id"])
# Iterate through the list of Confluence pages
for page_id in page_ids:
# Fetch the Confluence page
page = confluence.get_page_by_id(page_id=page_id, expand="body.storage")
# Extract the page title and content
page_title = page['title']
page_html = page['body']['storage']['value']
page_url = page['_links']['base'] + page['_links']['webui'];
pageTitles, pageHeadings, pageContent, pageUrls = extract_html_content(space_title, page_title, page_html, page_url)
ntitles += pageTitles
nheadings += pageHeadings
ncontents += pageContent
nurls += pageUrls
return count_content_tokens(ntitles, nheadings, ncontents, nurls)
def extract_zendesk_domain(
zendesk_domain: str,
limit: int = max_pages
):
ntitles, nheadings, ncontents, nurls = [], [], [], []
total_pages = 0;
URL = f"https://{zendesk_domain}.zendesk.com/api/v2/help_center/en-us"
print(f"Fetching up to {limit} pages from 'https://{zendesk_domain}.zendesk.com'...")
# Fetch the Categories from Zendesk
cat_response = requests.get(URL + '/categories.json')
cat_data = cat_response.json()
for category in cat_data['categories']:
category_title = category['name']
# Fetch the sections within the categories
sections_response = requests.get(URL + '/categories/' + str(category['id']) + '/sections.json')
sections_data = sections_response.json()
for section in sections_data['sections']:
page_title = section['name']
# Fetch the articles within the section
articles_response = requests.get(URL + '/sections/' + str(section['id']) + '/articles.json')
articles_data = articles_response.json()
for article in articles_data["articles"]:
page_title += " - " + article['title']
page_html = article['body']
page_url = article['html_url']
if (page_html is not None and total_pages < limit ):
pageTitles, pageHeadings, pageContent, pageUrls = extract_html_content(category_title, page_title, page_html, page_url)
ntitles += pageTitles
nheadings += pageHeadings
ncontents += pageContent
nurls += pageUrls
total_pages += 1
if (articles_data['next_page'] is not None):
pprint('TODO! But have not seen multiple pages yet at this level (due to using sections...)')
return count_content_tokens(ntitles, nheadings, ncontents, nurls)
# Define the maximum number of tokens we allow per row
max_len = 1500
# For each Space, fetch the content and add to a list(title, heading, content, tokens)
res = []
for space in args.spaces:
res += extract_sections(space)
for domain in args.zendesk:
res += extract_zendesk_domain(domain)
# Remove rows with less than 40 tokens
df = pd.DataFrame(res, columns=["title", "heading", "url", "content", "tokens"])
df = df[df.tokens > args.min_tokens]
df = df.drop_duplicates(['title','heading'])
df = df.reset_index().drop('index',axis=1) # reset index
print(df.head())
# Store the content to a CSV
dir = 'output/';
filename = args.out + '.csv'
fullpath = dir + filename
df.to_csv(fullpath, index=False)
print(f"Done! File saved to {fullpath}") | [] |
2024-01-10 | CognitiveLabs/GPT-auto-webscraping | chains~output_format~templates.py | from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate, PromptTemplate
# prompt templates
system_template_output_format = PromptTemplate(
input_variables = ['html_content'],
template='''You are a helpful assitant that helps people extract JSON information from HTML content.
The input is a HTML content.
The expected output is a JSON with a relevant information in the following html: {html_content}
Try to extract as much information as possible. Including images, links, etc.
The assitant answer should ONLY contain the JSON information without any aditional word or character.
The JSON output must have 1 depth level as much.
The expected output format is an array of objects.
''')
human_template_output_format = PromptTemplate(
input_variables = ['html_content'],
template='this is the html content: {html_content}'
)
# chat prompts objects
system_message_prompt = SystemMessagePromptTemplate.from_template(system_template_output_format.template)
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template_output_format.template)
output_format_chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
| [
"html_content",
"[PLACEHOLDER, PLACEHOLDER]",
"this is the html content: {html_content}",
"You are a helpful assitant that helps people extract JSON information from HTML content.\n\n The input is a HTML content. \n\n The expected output is a JSON with a relevant information in the following html: {html_content}\n\n Try to extract as much information as possible. Including images, links, etc.\n\n The assitant answer should ONLY contain the JSON information without any aditional word or character.\n\n The JSON output must have 1 depth level as much.\n\n The expected output format is an array of objects.\n \n "
] |
2024-01-10 | CognitiveLabs/GPT-auto-webscraping | chains~code_generator~templates.py | from langchain.prompts import (
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
ChatPromptTemplate,
PromptTemplate,
)
# Prompt templates
system_template_script = PromptTemplate(
input_variables=["output_format", "html_content"],
template="""You are a helpful assitant that helps people create python scripts for web scraping.
--------------------------------
The example of the html content is: {html_content}
--------------------------------
You have to create a python function that extract information from an html code using web scrapping.
Try to select the deeper class that is common among the elements to make de find_all function.
Your answer SHOULD only contain the python function code without any aditional word or character.
Import the used libraries above the function definition.
The function name must be extract_info.
The function have to receive the html data as a parameter.
Your function needs to extract information for all the elements with similar attributes.
An element could have missing attributes
Before calling .text or ['href'] methods, check if the element exists.
----------------
FINAL ANSWER EXAMPLE:
from bs4 import BeautifulSoup
def extract_info(html):
...CODE...
return {output_format}
----------------
Always check if the element exists before calling some method.
""",
)
human_template_script = PromptTemplate(input_variables=[], template="give me the code")
# Chat Prompt objects
system_template_script_prompt = SystemMessagePromptTemplate.from_template(
system_template_script.template
)
human_template_script_prompt = HumanMessagePromptTemplate.from_template(
human_template_script.template
)
chat_script_prompt = ChatPromptTemplate.from_messages(
[system_template_script_prompt, human_template_script_prompt]
)
| [
"html_content",
"[PLACEHOLDER, PLACEHOLDER]",
"output_format",
"You are a helpful assitant that helps people create python scripts for web scraping.\n --------------------------------\n The example of the html content is: {html_content}\n --------------------------------\n You have to create a python function that extract information from an html code using web scrapping.\n Try to select the deeper class that is common among the elements to make de find_all function.\n\n Your answer SHOULD only contain the python function code without any aditional word or character.\n\n Import the used libraries above the function definition.\n\n The function name must be extract_info.\n\n The function have to receive the html data as a parameter.\n\n Your function needs to extract information for all the elements with similar attributes.\n\n An element could have missing attributes\n\n Before calling .text or ['href'] methods, check if the element exists.\n\n ----------------\n FINAL ANSWER EXAMPLE:\n from bs4 import BeautifulSoup\n\n def extract_info(html):\n ...CODE...\n return {output_format}\n ----------------\n \n Always check if the element exists before calling some method.\n\n ",
"give me the code"
] |
2024-01-10 | CognitiveLabs/GPT-auto-webscraping | AssistantService.py | from langchain.chat_models import ChatOpenAI
from chains.output_format.base import chain_output_format
from chains.code_generator.base import chain_code_generator
import os
class GPTAssistant():
def __init__(self,api_key:str):
os.environ['OPENAI_API_KEY'] = api_key
self.llm = ChatOpenAI(temperature=0, model_name='gpt-3.5-turbo-16k', request_timeout=120, client=None)
def chain_response_format(self, html_content):
# prompt templates
output_format_chain = chain_output_format(self.llm)
# chain
return output_format_chain.run(html_content=html_content)
def chain_code_generator(self, output_format, html_content):
# Prompt templates
script_chain = chain_code_generator(self.llm)
return script_chain.run(output_format=output_format, html_content=html_content)
| [] |
2024-01-10 | LeonardoEmili/GEMBA | gemba~gpt_api.py | import openai
import time
from termcolor import colored
from datetime import datetime
# class for calling OpenAI API and handling cache
class GptApi:
def __init__(self, credentials, verbose=True):
assert "api_key" in credentials, "api_key not found in credentials"
assert "deployments" in credentials, "deployments not found in credentials"
self.deployments = credentials["deployments"]
self.verbose = verbose
if "api_base" in credentials:
# Azure API access
openai.api_type = "azure"
openai.api_version = "2022-06-01-preview"
openai.api_base = credentials["api_base"]
openai.api_key = credentials["api_key"]
self.api_type = "azure"
else:
# OpenAI API access
openai.api_key = credentials["api_key"]
self.api_type = "openai"
# limit the number of requests per second
if "requests_per_second_limit" in credentials:
self.rps_limit = 1 / credentials["requests_per_second_limit"]
else:
self.rps_limit = 0
self.last_call_timestamp = 0
# answer_id is used for determining if it was the top answer or how deep in the list it was
def request(self, prompt, model, parse_response, temperature=0, answer_id=-1, cache=None):
max_tokens = 20
answers = None
if cache is not None:
answers = cache.get({
"model": model,
"temperature": temperature,
"prompt": prompt,
})
if answers is None:
answers = self.request_api(prompt, model, temperature, max_tokens)
if cache is not None:
cache.add({
"model": model,
"temperature": temperature,
"prompt": prompt,
"answers": answers,
})
# there is no valid answer
if len(answers) == 0:
return [{
"temperature": temperature,
"answer_id": answer_id,
"answer": None,
"prompt": prompt,
"finish_reason": None,
"model": model,
}]
parsed_answers = []
for full_answer in answers:
finish_reason = full_answer["finish_reason"]
full_answer = full_answer["answer"]
answer_id += 1
answer = parse_response(full_answer)
if self.verbose or temperature > 0:
print(f"Answer (t={temperature}): " + colored(answer, "yellow") + " (" + colored(full_answer, "blue") + ")")
if answer is None:
continue
parsed_answers.append(
{
"temperature": temperature,
"answer_id": answer_id,
"answer": answer,
"prompt": prompt,
"finish_reason": finish_reason,
"model": model,
}
)
# there was no valid answer, increase temperature and try again
if len(parsed_answers) == 0:
return self.request(prompt, model, parse_response, temperature=temperature + 1, answer_id=answer_id, cache=cache)
return parsed_answers
def request_api(self, prompt, model, temperature=0, max_tokens=20):
# if temperature is 0, then request only 1 response
n = 1
if temperature > 0:
n = 10
elif temperature >= 5:
n = 20
if max_tokens > 500 or temperature > 10:
return []
dt = datetime.now()
ts = datetime.timestamp(dt)
if ts - self.last_call_timestamp < self.rps_limit:
time.sleep(self.rps_limit - (ts - self.last_call_timestamp))
self.last_call_timestamp = ts
if self.verbose:
print(prompt)
while True:
try:
response = self.call_api(prompt, model, n, temperature, max_tokens)
break
except Exception as e:
# response was filtered
if hasattr(e, 'code'):
if e.code == 'content_filter':
return []
print(e.code)
# frequent error is reaching the API limit
print(colored("Error, retrying...", "red"))
print(e)
time.sleep(1)
answers = []
for choice in response["choices"]:
answer = choice['text'].strip()
# one of the responses didn't finish, we need to request more tokens
if choice["finish_reason"] != "stop" and model != "text-chat-davinci-002": # TODO remove exception
if self.verbose:
print(colored(f"Increasing max tokens to fit answers.", "red") + colored(answer, "blue"))
return self.request_api(prompt, model, temperature=temperature, max_tokens=max_tokens + 200)
answers.append({
"answer": answer,
"finish_reason": choice["finish_reason"],
})
if len(answers) > 1:
# remove duplicate answers
answers = [dict(t) for t in {tuple(d.items()) for d in answers}]
return answers
def call_api(self, prompt, model, n, temperature, max_tokens):
if self.api_type == "azure":
return openai.Completion.create(
engine=self.deployments[model],
prompt=prompt,
temperature=temperature/10,
max_tokens=max_tokens,
top_p=1,
n=n,
frequency_penalty=0,
presence_penalty=0,
stop=None)
else:
return openai.Completion.create(
model=self.deployments[model],
prompt=prompt,
temperature=temperature/10,
max_tokens=max_tokens,
top_p=1,
n=n,
frequency_penalty=0,
presence_penalty=0,
stop=None)
| [] |
2024-01-10 | GregBaugues/subclippy | subclip.py | import os
import assemblyai as aai
from datetime import timedelta
import json
from openai import OpenAI
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
from pytube import YouTube
import subprocess
YOUTUBE_URL = "https://www.youtube.com/watch?v=_GxrDjGRfFc"
BASE_FILENAME = "new_heights"
def video_filename():
return f"source_videos/{BASE_FILENAME}.mp4"
def video_only_filename():
return f"source_videos/{BASE_FILENAME}_video_only.mp4"
def audio_filename():
return f"audio/{BASE_FILENAME}.mp3"
def data_filename():
return f"data/{BASE_FILENAME}.json"
def rendered_filename():
return f"rendered/{BASE_FILENAME}.mp4"
def write_data(data):
with open(data_filename(), "w") as f:
json.dump(data, f, indent=4)
def load_data():
with open(data_filename(), "r") as f:
return json.load(f)
def clip_filename(i):
return f"clips/{BASE_FILENAME}_{str(i).zfill(3)}.mp4"
def merge_audio_and_video():
ffmpeg_command = [
"ffmpeg",
"-i",
video_only_filename(),
"-i",
audio_filename(),
"-c:v",
"copy",
"-c:a",
"aac",
video_filename(),
]
subprocess.run(ffmpeg_command, check=True)
def download_1080p(url=YOUTUBE_URL):
yt = YouTube(url)
video = yt.streams.filter(file_extension="mp4", res="1080p").first()
video.download(filename=video_only_filename())
merge_audio_and_video()
def download_720p(url=YOUTUBE_URL):
yt = YouTube(url)
video = yt.streams.filter(file_extension="mp4", res="720p").first()
video.download(filename=video_filename())
def download_video(res="720p"):
download_720p()
extract_audio()
if res == "1080p":
download_1080p()
def extract_audio(infile=video_filename(), outfile=audio_filename()):
command = f"ffmpeg -i {infile} -vn -acodec libmp3lame {outfile}"
subprocess.run(command, shell=True)
def to_timestamp(ms):
td = timedelta(milliseconds=ms)
minutes, seconds = divmod(td.seconds, 60)
hours, minutes = divmod(minutes, 60)
return "{:02d}:{:02d}:{:02d},{:03d}".format(
hours, minutes, seconds, td.microseconds // 1000
)
def transcribe():
aai.settings.api_key = os.environ.get("AAI_API_KEY")
config = aai.TranscriptionConfig(speaker_labels=True, auto_highlights=True)
transcriber = aai.Transcriber(config=config)
transcript = transcriber.transcribe(audio_filename())
print(transcript)
return transcript
def clean_string(s):
s = s.lower()
s = "".join(c for c in s if c.isalnum() or c.isspace() or c == "'")
return s
def get_transcript_data(transcript):
data = {}
data["youtube_url"] = YOUTUBE_URL
data["transcript_id"] = transcript.id
data["transcript"] = transcript.text
data["duration"] = transcript.audio_duration
data["utterances"] = []
for utterance in transcript.utterances:
data["utterances"].append(
{
"start": utterance.start,
"end": utterance.end,
"speaker": utterance.speaker,
"duration": int(utterance.end) - int(utterance.start),
"text": utterance.text,
}
)
data["words"] = []
for word in transcript.words:
data["words"].append(
{
"text": clean_string(word.text),
"start": word.start,
"end": word.end,
"confidence": word.confidence,
}
)
data["highlights"] = []
for result in transcript.auto_highlights.results:
timestamps = []
for t in result.timestamps:
timestamps.append({"start": t.start, "end": t.end})
data["highlights"].append(
{
"text": result.text,
"count": result.count,
"rank": result.rank,
"timestamps": timestamps,
}
)
return data
def ask_gpt(transcript, prompt=""):
MODEL = "gpt-4-1106-preview"
client = OpenAI()
sys_msg = f"""
{prompt}
I'll tip you $2000 if the clip you return goes viral.
(But you'll get no tip if you modify the quote -- it has to be an exact quote)
"""
sys_msg += """
Return results in JSON in this format:
{"phrases": ["What is your name?"]}
"""
messages = [
{"role": "system", "content": sys_msg},
]
messages.append({"role": "user", "content": transcript})
print("Asking GPT...", messages)
response = client.chat.completions.create(
model=MODEL, response_format={"type": "json_object"}, messages=messages
)
str_response = response.choices[0].message.content
data = json.loads(str_response)
return data
def get_phrases(data, prompt=None):
if not data.get("phrases"):
data["phrases"] = []
new_phrases = ask_gpt(data["transcript"], prompt=prompt)
for p in new_phrases["phrases"]:
data["phrases"].append({"text": p})
write_data(data)
return data
def calc_durations(data):
for i in range(len(data["utterances"])):
p = data["utterances"][i]
p["duration"] = int(p["end"]) - int(p["start"])
data["utterances"][i] = p
for i in range(len(data["words"])):
w = data["words"][i]
w["duration"] = int(w["end"]) - int(w["start"])
data["words"][i] = w
return data
def find_exact_stamp(data, phrase):
# Clean up the phrase text.
phrase_text = clean_string(phrase["text"])
phrase_words = phrase_text.split()
# Early exit if phrase is empty.
if not phrase_words:
return None, None
# Iterate through words in data to find the matching phrase.
for i in range(len(data["words"]) - len(phrase_words) + 1):
if all(
data["words"][i + j]["text"] == phrase_words[j]
for j in range(len(phrase_words))
):
phrase_start = int(data["words"][i]["start"])
phrase_end = int(data["words"][i + len(phrase_words) - 1]["end"])
if phrase_end < phrase_start:
raise Exception(
f"ERROR: End time {phrase_end} is less than start time {phrase_start} for phrase:\n{phrase_text}"
)
return phrase_start, phrase_end
# Phrase not found.
print(f"ERROR: Could not find exact stamp for phrase:\n{phrase_text}")
return None, None
def calc_word_frequency(data):
words = data["words"]
word_frequency = {}
for word in words:
w = clean_string(word["text"])
if w in word_frequency:
word_frequency[w] += 1
else:
word_frequency[w] = 1
for w in data["words"]:
w["frequency"] = word_frequency[clean_string(w["text"])]
# print word frequency sorted by frequency
# for w in sorted(word_frequency, key=word_frequency.get, reverse=True):
# if len(w) > 4 and word_frequency[w] > 5:
# print(w, word_frequency[w])
return data
def stitch_clips():
import os
import subprocess
clips_dir = "clips/"
clips = [
clips_dir + clip
for clip in os.listdir(clips_dir)
if clip.endswith(".mp4") and clip.startswith(BASE_FILENAME)
]
clips.sort()
with open("file_list.txt", "w") as f:
for clip in clips:
f.write(f"file '{clip}'\n")
subprocess.run(
[
"ffmpeg",
"-f",
"concat",
"-i",
"file_list.txt",
"-c",
"copy",
rendered_filename(),
]
)
os.remove("file_list.txt")
def slice_video(source, start, end, buffer=50, filename=video_filename()):
if not filename:
raise Exception("Filename is required")
start = (start - buffer) / 1000
end = (end + buffer) / 1000
if start < 0:
start = 0
print("Slicing video from", start, " to ", end, "into", filename)
command = [
"ffmpeg",
"-i",
source,
"-ss",
str(start),
"-to",
str(end),
"-reset_timestamps",
"1",
filename,
]
subprocess.run(command, check=True)
def slice_by_words(words, buffer=50):
for i, w in enumerate(words):
slice_video(
video_filename(),
w["start"],
w["end"],
buffer=buffer,
filename=clip_filename(i),
)
def slice_by_phrases(phrases, buffer=50):
print(phrases)
for i, p in enumerate(phrases):
print(p)
slice_video(
video_filename(),
p["start"],
p["end"],
buffer=buffer,
filename=clip_filename(i),
)
def slice_by_timestamps(timestamps=[], buffer=50):
for i, t in enumerate(timestamps):
slice_video(
video_filename(),
t["start"],
t["end"],
buffer=buffer,
filename=clip_filename(i),
)
def find_words(data, needles):
needles = [needles].flatten()
found = []
for w in data["words"]:
if w["text"].lower() in needles:
found.append(w)
return found
def get_words_to_make_phrase(data, phrase):
word_list = []
phrase = phrase.lower()
for w in phrase.split(" "):
words = find_words(data, w)
if not words:
raise Exception("Could not find word: ", w)
# iterate over words and add the one with highest confidence to the word_list
max_duration = 0
for word in words:
if word["duration"] > max_duration:
max_duration = word["duration"]
best_word = word
word_list.append(best_word)
return word_list
def get_timestamps_for_highlights(data):
timestamps = []
for h in data["highlights"]:
for t in h["timestamps"]:
timestamps.append(
{
"start": t.get("start"),
"end": t.get("end"),
}
)
return timestamps
def get_timestamps_for_phrases(data):
for i, p in enumerate(data["phrases"]):
start, end = find_exact_stamp(data, p)
if start and end:
p["start"] = int(start)
p["end"] = int(end)
data["phrases"][i] = p
else:
print("Could not find exact stamp for phrase: ", p["text"])
del data["phrases"][i]
return data
def reset_phrases(data):
try:
del data["phrases"]
except:
pass
return data
def clip_and_stitch_from_needles(data, needles=""):
word_list = []
for needle in needles.split(" "):
words = find_words(data, needle)
word_list.extend(words)
# sort word_list by word['start']
word_list.sort(key=lambda x: int(x["start"]))
slice_by_words(word_list, buffer=100)
stitch_clips()
def clip_and_stitch_to_make_phrase(data, phrase):
words = get_words_to_make_phrase(data, phrase)
slice_by_words(words, buffer=50)
def clip_and_stitch_from_highlights(data):
timestamps = get_timestamps_for_highlights(data)
slice_by_timestamps(timestamps, buffer=50)
stitch_clips()
def clip_and_stitch_from_prompt(data, prompt=None):
# data = reset_phrases(data)
if not data.get("phrases"):
data["phrases"] = []
data = get_phrases(data, prompt=prompt)
write_data(data)
data = get_timestamps_for_phrases(data)
write_data(data)
slice_by_phrases(data["phrases"], buffer=150)
stitch_clips()
if __name__ == "__main__":
if not os.path.exists(video_filename()):
download_video(res="1080p")
if os.path.exists(data_filename()):
data = load_data()
else:
transcript = transcribe()
data = get_transcript_data(transcript)
write_data(data)
prompt = """
This is a transcript from a youtube video.
Extract the most interesting and funny quotes from this clip.
Give me exact quotes -- do not paraphrase.
Select the clips most likely to go viral.
Each clip should be 50-200 words.
"""
clip_and_stitch_from_prompt(data, prompt=prompt)
# clip_and_stitch_from_needles(data, needles=["lazers"])
# clip_and_stitch_from_phrase(data, phrase="")
# clip_and_stitch_from_highlights(data)
| [
"\n This is a transcript from a youtube video.\n Extract the most interesting and funny quotes from this clip. \n Give me exact quotes -- do not paraphrase.\n Select the clips most likely to go viral.\n Each clip should be 50-200 words.\n "
] |
2024-01-10 | RockChinQ/QChatGPT | pkg~openai~modelmgr.py | """OpenAI 接口底层封装
目前使用的对话接口有:
ChatCompletion - gpt-3.5-turbo 等模型
Completion - text-davinci-003 等模型
此模块封装此两个接口的请求实现,为上层提供统一的调用方式
"""
import tiktoken
import openai
from ..openai.api import model as api_model
from ..openai.api import completion as api_completion
from ..openai.api import chat_completion as api_chat_completion
COMPLETION_MODELS = {
"text-davinci-003", # legacy
"text-davinci-002", # legacy
"code-davinci-002", # legacy
"code-cushman-001", # legacy
"text-curie-001", # legacy
"text-babbage-001", # legacy
"text-ada-001", # legacy
"gpt-3.5-turbo-instruct",
}
CHAT_COMPLETION_MODELS = {
# GPT 4 系列
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-32k",
"gpt-4-0613",
"gpt-4-32k-0613",
"gpt-4-0314", # legacy
"gpt-4-32k-0314", # legacy
# GPT 3.5 系列
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613", # legacy
"gpt-3.5-turbo-16k-0613", # legacy
"gpt-3.5-turbo-0301", # legacy
# One-API 接入
"SparkDesk",
"chatglm_pro",
"chatglm_std",
"chatglm_lite",
"qwen-v1",
"qwen-plus-v1",
"ERNIE-Bot",
"ERNIE-Bot-turbo",
"gemini-pro",
}
EDIT_MODELS = {
}
IMAGE_MODELS = {
}
def select_request_cls(client: openai.Client, model_name: str, messages: list, args: dict) -> api_model.RequestBase:
if model_name in CHAT_COMPLETION_MODELS:
return api_chat_completion.ChatCompletionRequest(client, model_name, messages, **args)
elif model_name in COMPLETION_MODELS:
return api_completion.CompletionRequest(client, model_name, messages, **args)
raise ValueError("不支持模型[{}],请检查配置文件".format(model_name))
def count_chat_completion_tokens(messages: list, model: str) -> int:
"""Return the number of tokens used by a list of messages."""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
"SparkDesk",
"chatglm_pro",
"chatglm_std",
"chatglm_lite",
"qwen-v1",
"qwen-plus-v1",
"ERNIE-Bot",
"ERNIE-Bot-turbo",
"gemini-pro",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
# print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
return count_chat_completion_tokens(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
# print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
return count_chat_completion_tokens(messages, model="gpt-4-0613")
else:
raise NotImplementedError(
f"""count_chat_completion_tokens() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
)
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
def count_completion_tokens(messages: list, model: str) -> int:
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
text = ""
for message in messages:
text += message['role'] + message['content'] + "\n"
text += "assistant: "
return len(encoding.encode(text))
def count_tokens(messages: list, model: str):
if model in CHAT_COMPLETION_MODELS:
return count_chat_completion_tokens(messages, model)
elif model in COMPLETION_MODELS:
return count_completion_tokens(messages, model)
raise ValueError("不支持模型[{}],请检查配置文件".format(model))
| [] |
2024-01-10 | RockChinQ/QChatGPT | pkg~qqbot~message.py | # 普通消息处理模块
import logging
import openai
from ..utils import context
from ..openai import session as openai_session
from ..plugin import host as plugin_host
from ..plugin import models as plugin_models
import tips as tips_custom
def handle_exception(notify_admin: str = "", set_reply: str = "") -> list:
"""处理异常,当notify_admin不为空时,会通知管理员,返回通知用户的消息"""
config = context.get_config_manager().data
context.get_qqbot_manager().notify_admin(notify_admin)
if config['hide_exce_info_to_user']:
return [tips_custom.alter_tip_message] if tips_custom.alter_tip_message else []
else:
return [set_reply]
def process_normal_message(text_message: str, mgr, config: dict, launcher_type: str,
launcher_id: int, sender_id: int) -> list:
session_name = f"{launcher_type}_{launcher_id}"
logging.info("[{}]发送消息:{}".format(session_name, text_message[:min(20, len(text_message))] + (
"..." if len(text_message) > 20 else "")))
session = openai_session.get_session(session_name)
unexpected_exception_times = 0
max_unexpected_exception_times = 3
reply = []
while True:
if unexpected_exception_times >= max_unexpected_exception_times:
reply = handle_exception(notify_admin=f"{session_name},多次尝试失败。", set_reply=f"[bot]多次尝试失败,请重试或联系管理员")
break
try:
prefix = "[GPT]" if config['show_prefix'] else ""
text, finish_reason, funcs = session.query(text_message)
# 触发插件事件
args = {
"launcher_type": launcher_type,
"launcher_id": launcher_id,
"sender_id": sender_id,
"session": session,
"prefix": prefix,
"response_text": text,
"finish_reason": finish_reason,
"funcs_called": funcs,
}
event = plugin_host.emit(plugin_models.NormalMessageResponded, **args)
if event.get_return_value("prefix") is not None:
prefix = event.get_return_value("prefix")
if event.get_return_value("reply") is not None:
reply = event.get_return_value("reply")
if not event.is_prevented_default():
reply = [prefix + text]
except openai.APIConnectionError as e:
err_msg = str(e)
if err_msg.__contains__('Error communicating with OpenAI'):
reply = handle_exception("{}会话调用API失败:{}\n您的网络无法访问OpenAI接口或网络代理不正常".format(session_name, e),
"[bot]err:调用API失败,请重试或联系管理员,或等待修复")
else:
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e), "[bot]err:调用API失败,请重试或联系管理员,或等待修复")
except openai.RateLimitError as e:
logging.debug(type(e))
logging.debug(e.error['message'])
if 'message' in e.error and e.error['message'].__contains__('You exceeded your current quota'):
# 尝试切换api-key
current_key_name = context.get_openai_manager().key_mgr.get_key_name(
context.get_openai_manager().key_mgr.using_key
)
context.get_openai_manager().key_mgr.set_current_exceeded()
# 触发插件事件
args = {
'key_name': current_key_name,
'usage': context.get_openai_manager().audit_mgr
.get_usage(context.get_openai_manager().key_mgr.get_using_key_md5()),
'exceeded_keys': context.get_openai_manager().key_mgr.exceeded,
}
event = plugin_host.emit(plugin_models.KeyExceeded, **args)
if not event.is_prevented_default():
switched, name = context.get_openai_manager().key_mgr.auto_switch()
if not switched:
reply = handle_exception(
"api-key调用额度超限({}),无可用api_key,请向OpenAI账户充值或在config.py中更换api_key;如果你认为这是误判,请尝试重启程序。".format(
current_key_name), "[bot]err:API调用额度超额,请联系管理员,或等待修复")
else:
openai.api_key = context.get_openai_manager().key_mgr.get_using_key()
mgr.notify_admin("api-key调用额度超限({}),接口报错,已切换到{}".format(current_key_name, name))
reply = ["[bot]err:API调用额度超额,已自动切换,请重新发送消息"]
continue
elif 'message' in e.error and e.error['message'].__contains__('You can retry your request'):
# 重试
unexpected_exception_times += 1
continue
elif 'message' in e.error and e.error['message']\
.__contains__('The server had an error while processing your request'):
# 重试
unexpected_exception_times += 1
continue
else:
reply = handle_exception("{}会话调用API失败:{}".format(session_name, e),
"[bot]err:RateLimitError,请重试或联系作者,或等待修复")
except openai.BadRequestError as e:
if config['auto_reset'] and "This model's maximum context length is" in str(e):
session.reset(persist=True)
reply = [tips_custom.session_auto_reset_message]
else:
reply = handle_exception("{}API调用参数错误:{}\n".format(
session_name, e), "[bot]err:API调用参数错误,请联系管理员,或等待修复")
except openai.APIStatusError as e:
reply = handle_exception("{}API调用服务不可用:{}".format(session_name, e), "[bot]err:API调用服务不可用,请重试或联系管理员,或等待修复")
except Exception as e:
logging.exception(e)
reply = handle_exception("{}会话处理异常:{}".format(session_name, e), "[bot]err:{}".format(e))
break
return reply
| [] |
2024-01-10 | RockChinQ/QChatGPT | tests~token_test~tiktoken_test.py | import tiktoken
import openai
import json
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
def encode(text: str, model: str):
import tiktoken
enc = tiktoken.get_encoding("cl100k_base")
assert enc.decode(enc.encode("hello world")) == "hello world"
# To get the tokeniser corresponding to a specific model in the OpenAI API:
enc = tiktoken.encoding_for_model(model)
return enc.encode(text)
# def ask(prompt: str, model: str = "gpt-3.5-turbo"):
# # To get the tokeniser corresponding to a specific model in the OpenAI API:
# enc = tiktoken.encoding_for_model(model)
# resp = openai.ChatCompletion.create(
# model=model,
# messages=[
# {
# "role": "user",
# "content": prompt
# }
# ]
# )
# return enc.encode(prompt), enc.encode(resp['choices'][0]['message']['content']), resp
def ask(
messages: list,
model: str = "gpt-3.5-turbo"
):
enc = tiktoken.encoding_for_model(model)
resp = openai.ChatCompletion.create(
model=model,
messages=messages
)
txt = ""
for r in messages:
txt += r['role'] + r['content'] + "\n"
txt += "assistant: "
return enc.encode(txt), enc.encode(resp['choices'][0]['message']['content']), resp
def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
"""Return the number of tokens used by a list of messages."""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
return num_tokens_from_messages(messages, model="gpt-4-0613")
else:
raise NotImplementedError(
f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
)
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
messages = [
{
"role": "user",
"content": "你叫什么名字?"
},{
"role": "assistant",
"content": "我是AI助手,没有具体的名字。你可以叫我GPT-3。有什么可以帮到你的吗?"
},{
"role": "user",
"content": "你是由谁开发的?"
},{
"role": "assistant",
"content": "我是由OpenAI开发的,一家人工智能研究实验室。OpenAI的使命是促进人工智能的发展,使其为全人类带来积极影响。我是由OpenAI团队使用GPT-3模型训练而成的。"
},{
"role": "user",
"content": "很高兴见到你。"
}
]
pro, rep, resp=ask(messages)
print(len(pro), len(rep))
print(resp)
print(resp['choices'][0]['message']['content'])
print(num_tokens_from_messages(messages, model="gpt-3.5-turbo")) | [
"你是由谁开发的?",
"很高兴见到你。",
"我是AI助手,没有具体的名字。你可以叫我GPT-3。有什么可以帮到你的吗?",
"你叫什么名字?",
"我是由OpenAI开发的,一家人工智能研究实验室。OpenAI的使命是促进人工智能的发展,使其为全人类带来积极影响。我是由OpenAI团队使用GPT-3模型训练而成的。"
] |
2024-01-10 | RockChinQ/QChatGPT | tests~gpt3_test.py | import openai
openai.api_key = "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"
resp = openai.Completion.create(
prompt="user:你好,今天天气怎么样?\nbot:",
model="text-davinci-003",
temperature=0.9, # 数值越低得到的回答越理性,取值范围[0, 1]
top_p=1, # 生成的文本的文本与要求的符合度, 取值范围[0, 1]
frequency_penalty=0.2,
presence_penalty=1.0,
)
print(resp) | [
"user:你好,今天天气怎么样?\nbot:"
] |
2024-01-10 | RockChinQ/QChatGPT | tests~proxy_test~forward_proxy_test.py | import os
import openai
client = openai.Client(
api_key=os.environ["OPENAI_API_KEY"],
)
openai.proxies = {
'http': 'http://127.0.0.1:7890',
'https': 'http://127.0.0.1:7890',
}
resp = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "Hello, how are you?",
}
]
)
print(resp) | [
"Hello, how are you?"
] |
2024-01-10 | RockChinQ/QChatGPT | pkg~openai~api~chat_completion.py | import json
import logging
import openai
from openai.types.chat import chat_completion_message
from .model import RequestBase
from .. import funcmgr
from ...plugin import host
from ...utils import context
class ChatCompletionRequest(RequestBase):
"""调用ChatCompletion接口的请求类。
此类保证每一次返回的角色为assistant的信息的finish_reason一定为stop。
若有函数调用响应,本类的返回瀑布是:函数调用请求->函数调用结果->...->assistant的信息->stop。
"""
model: str
messages: list[dict[str, str]]
kwargs: dict
stopped: bool = False
pending_func_call: chat_completion_message.FunctionCall = None
pending_msg: str
def flush_pending_msg(self):
self.append_message(
role="assistant",
content=self.pending_msg
)
self.pending_msg = ""
def append_message(self, role: str, content: str, name: str=None, function_call: dict=None):
msg = {
"role": role,
"content": content
}
if name is not None:
msg['name'] = name
if function_call is not None:
msg['function_call'] = function_call
self.messages.append(msg)
def __init__(
self,
client: openai.Client,
model: str,
messages: list[dict[str, str]],
**kwargs
):
self.client = client
self.model = model
self.messages = messages.copy()
self.kwargs = kwargs
self.req_func = self.client.chat.completions.create
self.pending_func_call = None
self.stopped = False
self.pending_msg = ""
def __iter__(self):
return self
def __next__(self) -> dict:
if self.stopped:
raise StopIteration()
if self.pending_func_call is None: # 没有待处理的函数调用请求
args = {
"model": self.model,
"messages": self.messages,
}
funcs = funcmgr.get_func_schema_list()
if len(funcs) > 0:
args['functions'] = funcs
# 拼接kwargs
args = {**args, **self.kwargs}
from openai.types.chat import chat_completion
resp: chat_completion.ChatCompletion = self._req(**args)
choice0 = resp.choices[0]
# 如果不是函数调用,且finish_reason为stop,则停止迭代
if choice0.finish_reason == 'stop': # and choice0["finish_reason"] == "stop"
self.stopped = True
if hasattr(choice0.message, 'function_call') and choice0.message.function_call is not None:
self.pending_func_call = choice0.message.function_call
self.append_message(
role="assistant",
content=choice0.message.content,
function_call=choice0.message.function_call
)
return {
"id": resp.id,
"choices": [
{
"index": choice0.index,
"message": {
"role": "assistant",
"type": "function_call",
"content": choice0.message.content,
"function_call": {
"name": choice0.message.function_call.name,
"arguments": choice0.message.function_call.arguments
}
},
"finish_reason": "function_call"
}
],
"usage": {
"prompt_tokens": resp.usage.prompt_tokens,
"completion_tokens": resp.usage.completion_tokens,
"total_tokens": resp.usage.total_tokens
}
}
else:
# self.pending_msg += choice0['message']['content']
# 普通回复一定处于最后方,故不用再追加进内部messages
return {
"id": resp.id,
"choices": [
{
"index": choice0.index,
"message": {
"role": "assistant",
"type": "text",
"content": choice0.message.content
},
"finish_reason": choice0.finish_reason
}
],
"usage": {
"prompt_tokens": resp.usage.prompt_tokens,
"completion_tokens": resp.usage.completion_tokens,
"total_tokens": resp.usage.total_tokens
}
}
else: # 处理函数调用请求
cp_pending_func_call = self.pending_func_call.copy()
self.pending_func_call = None
func_name = cp_pending_func_call.name
arguments = {}
try:
try:
arguments = json.loads(cp_pending_func_call.arguments)
# 若不是json格式的异常处理
except json.decoder.JSONDecodeError:
# 获取函数的参数列表
func_schema = funcmgr.get_func_schema(func_name)
arguments = {
func_schema['parameters']['required'][0]: cp_pending_func_call.arguments
}
logging.info("执行函数调用: name={}, arguments={}".format(func_name, arguments))
# 执行函数调用
ret = ""
try:
ret = funcmgr.execute_function(func_name, arguments)
logging.info("函数执行完成。")
except Exception as e:
ret = "error: execute function failed: {}".format(str(e))
logging.error("函数执行失败: {}".format(str(e)))
# 上报数据
plugin_info = host.get_plugin_info_for_audit(func_name.split('-')[0])
audit_func_name = func_name.split('-')[1]
audit_func_desc = funcmgr.get_func_schema(func_name)['description']
context.get_center_v2_api().usage.post_function_record(
plugin=plugin_info,
function_name=audit_func_name,
function_description=audit_func_desc,
)
self.append_message(
role="function",
content=json.dumps(ret, ensure_ascii=False),
name=func_name
)
return {
"id": -1,
"choices": [
{
"index": -1,
"message": {
"role": "function",
"type": "function_return",
"function_name": func_name,
"content": json.dumps(ret, ensure_ascii=False)
},
"finish_reason": "function_return"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
}
}
except funcmgr.ContentFunctionNotFoundError:
raise Exception("没有找到函数: {}".format(func_name))
| [] |
2024-01-10 | RockChinQ/QChatGPT | pkg~openai~manager.py | import logging
import openai
from openai.types import images_response
from ..openai import keymgr
from ..utils import context
from ..audit import gatherer
from ..openai import modelmgr
from ..openai.api import model as api_model
class OpenAIInteract:
"""OpenAI 接口封装
将文字接口和图片接口封装供调用方使用
"""
key_mgr: keymgr.KeysManager = None
audit_mgr: gatherer.DataGatherer = None
default_image_api_params = {
"size": "256x256",
}
client: openai.Client = None
def __init__(self, api_key: str):
self.key_mgr = keymgr.KeysManager(api_key)
self.audit_mgr = gatherer.DataGatherer()
# logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length())
self.client = openai.Client(
api_key=self.key_mgr.get_using_key(),
base_url=openai.base_url
)
context.set_openai_manager(self)
def request_completion(self, messages: list):
"""请求补全接口回复=
"""
# 选择接口请求类
config = context.get_config_manager().data
request: api_model.RequestBase
model: str = config['completion_api_params']['model']
cp_parmas = config['completion_api_params'].copy()
del cp_parmas['model']
request = modelmgr.select_request_cls(self.client, model, messages, cp_parmas)
# 请求接口
for resp in request:
if resp['usage']['total_tokens'] > 0:
self.audit_mgr.report_text_model_usage(
model,
resp['usage']['total_tokens']
)
yield resp
def request_image(self, prompt) -> images_response.ImagesResponse:
"""请求图片接口回复
Parameters:
prompt (str): 提示语
Returns:
dict: 响应
"""
config = context.get_config_manager().data
params = config['image_api_params']
response = self.client.images.generate(
prompt=prompt,
n=1,
**params
)
self.audit_mgr.report_image_model_usage(params['size'])
return response
| [] |
2024-01-10 | RockChinQ/QChatGPT | pkg~openai~api~completion.py | import openai
from openai.types import completion, completion_choice
from . import model
class CompletionRequest(model.RequestBase):
"""调用Completion接口的请求类。
调用方可以一直next completion直到finish_reason为stop。
"""
model: str
prompt: str
kwargs: dict
stopped: bool = False
def __init__(
self,
client: openai.Client,
model: str,
messages: list[dict[str, str]],
**kwargs
):
self.client = client
self.model = model
self.prompt = ""
for message in messages:
self.prompt += message["role"] + ": " + message["content"] + "\n"
self.prompt += "assistant: "
self.kwargs = kwargs
self.req_func = self.client.completions.create
def __iter__(self):
return self
def __next__(self) -> dict:
"""调用Completion接口,返回生成的文本
{
"id": "id",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"type": "text",
"content": "message"
},
"finish_reason": "reason"
}
],
"usage": {
"prompt_tokens": 10,
"completion_tokens": 20,
"total_tokens": 30
}
}
"""
if self.stopped:
raise StopIteration()
resp: completion.Completion = self._req(
model=self.model,
prompt=self.prompt,
**self.kwargs
)
if resp.choices[0].finish_reason == "stop":
self.stopped = True
choice0: completion_choice.CompletionChoice = resp.choices[0]
self.prompt += choice0.text
return {
"id": resp.id,
"choices": [
{
"index": choice0.index,
"message": {
"role": "assistant",
"type": "text",
"content": choice0.text
},
"finish_reason": choice0.finish_reason
}
],
"usage": {
"prompt_tokens": resp.usage.prompt_tokens,
"completion_tokens": resp.usage.completion_tokens,
"total_tokens": resp.usage.total_tokens
}
}
| [] |
2024-01-10 | kingler/MultiDocQA | multi_prompt.py | from langchain.chains.router import MultiPromptChain
from langchain.llms import OpenAI
physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise and easy to understand manner. \
When you don't know the answer to a question you admit that you don't know.
Here is a question:
{input}"""
math_template = """You are a very good mathematician. You are great at answering math questions. \
You are so good because you are able to break down hard problems into their component parts, \
answer the component parts, and then put them together to answer the broader question.
Here is a question:
{input}"""
biology_template = """You are a skilled biology professor. \
You are great at explaining complex biological concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
english_template = """You are a skilled english professor. \
You are great at explaining complex literary concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
cs_template = """You are a proficient computer scientist. \
You can explain complex algorithms and data structures in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
python_template = """You are a skilled python programmer. \
You can explain complex algorithms and data structures in simple terms. \
When you don't know the answer to a question, you admit it.
here is a question:
{input}"""
accountant_template = """You are a skilled accountant. \
You can explain complex accounting concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
lawyer_template = """You are a skilled lawyer. \
You can explain complex legal concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
teacher_template = """You are a skilled teacher. \
You can explain complex educational concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
engineer_template = """You are a skilled engineer. \
You can explain complex engineering concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
psychologist_template = """You are a skilled psychologist. \
You can explain complex psychological concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
scientist_template = """You are a skilled scientist. \
You can explain complex scientific concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
economist_template = """You are a skilled economist. \
You can explain complex economic concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
architect_template = """You are a skilled architect. \
You can explain complex architectural concepts in simple terms. \
When you don't know the answer to a question, you admit it.
Here is a question:
{input}"""
prompt_infos = [
("physics", "Good for answering questions about physics", physics_template),
("math", "Good for answering math questions", math_template),
("biology", "Good for answering questions about biology", biology_template),
("english", "Good for answering questions about english", english_template),
("cs", "Good for answering questions about computer science", cs_template),
("python", "Good for answering questions about python", python_template),
("accountant", "Good for answering questions about accounting", accountant_template),
("lawyer", "Good for answering questions about law", lawyer_template),
("teacher", "Good for answering questions about education", teacher_template),
("engineer", "Good for answering questions about engineering", engineer_template),
("psychologist", "Good for answering questions about psychology", psychologist_template),
("scientist", "Good for answering questions about science", scientist_template),
("economist", "Good for answering questions about economics", economist_template),
("architect", "Good for answering questions about architecture", architect_template),
]
chain = MultiPromptChain.from_prompts(OpenAI(), *zip(*prompt_infos), verbose=True)
# get user question
while True:
question = input("Ask a question: ")
print(chain.run(question)) | [
"You are a skilled english professor. You are great at explaining complex literary concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}",
"You are a skilled python programmer. You can explain complex algorithms and data structures in simple terms. When you don't know the answer to a question, you admit it.\n\nhere is a question:\n{input}",
"You are a proficient computer scientist. You can explain complex algorithms and data structures in simple terms. When you don't know the answer to a question, you admit it.\n\n\nHere is a question:\n{input}",
"You are a skilled lawyer. You can explain complex legal concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}",
"You are a skilled accountant. You can explain complex accounting concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}",
"You are a skilled engineer. You can explain complex engineering concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}",
"You are a skilled scientist. You can explain complex scientific concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}",
"You are a skilled economist. You can explain complex economic concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}",
"You are a very smart physics professor. You are great at answering questions about physics in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know.\n\nHere is a question:\n{input}",
"You are a skilled psychologist. You can explain complex psychological concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}",
"You are a very good mathematician. You are great at answering math questions. You are so good because you are able to break down hard problems into their component parts, answer the component parts, and then put them together to answer the broader question.\n\nHere is a question:\n{input}",
"[('physics', 'Good for answering questions about physics', \"You are a very smart physics professor. You are great at answering questions about physics in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know.\\n\\nHere is a question:\\n{input}\"), ('math', 'Good for answering math questions', 'You are a very good mathematician. You are great at answering math questions. You are so good because you are able to break down hard problems into their component parts, answer the component parts, and then put them together to answer the broader question.\\n\\nHere is a question:\\n{input}'), ('biology', 'Good for answering questions about biology', \"You are a skilled biology professor. You are great at explaining complex biological concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\"), ('english', 'Good for answering questions about english', \"You are a skilled english professor. You are great at explaining complex literary concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\"), ('cs', 'Good for answering questions about computer science', \"You are a proficient computer scientist. You can explain complex algorithms and data structures in simple terms. When you don't know the answer to a question, you admit it.\\n\\n\\nHere is a question:\\n{input}\"), ('python', 'Good for answering questions about python', \"You are a skilled python programmer. You can explain complex algorithms and data structures in simple terms. When you don't know the answer to a question, you admit it.\\n\\nhere is a question:\\n{input}\"), ('accountant', 'Good for answering questions about accounting', \"You are a skilled accountant. You can explain complex accounting concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\"), ('lawyer', 'Good for answering questions about law', \"You are a skilled lawyer. You can explain complex legal concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\"), ('teacher', 'Good for answering questions about education', \"You are a skilled teacher. You can explain complex educational concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\"), ('engineer', 'Good for answering questions about engineering', \"You are a skilled engineer. You can explain complex engineering concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\"), ('psychologist', 'Good for answering questions about psychology', \"You are a skilled psychologist. You can explain complex psychological concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\"), ('scientist', 'Good for answering questions about science', \"You are a skilled scientist. You can explain complex scientific concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\"), ('economist', 'Good for answering questions about economics', \"You are a skilled economist. You can explain complex economic concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\"), ('architect', 'Good for answering questions about architecture', \"You are a skilled architect. You can explain complex architectural concepts in simple terms. When you don't know the answer to a question, you admit it.\\n\\nHere is a question:\\n{input}\")]",
"You are a skilled teacher. You can explain complex educational concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}",
"You are a skilled biology professor. You are great at explaining complex biological concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}",
"You are a skilled architect. You can explain complex architectural concepts in simple terms. When you don't know the answer to a question, you admit it.\n\nHere is a question:\n{input}"
] |
2024-01-10 | kingler/MultiDocQA | multi_file_st.py | import os
import re
import io
import contextlib
import streamlit as st
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains.router import MultiRetrievalQAChain
from langchain.llms import OpenAI
from langchain.document_loaders import PyPDFLoader
import shutil
def main():
st.title(':blue[QA over documents with langchain router chain]')
# Check for OpenAI API Key in environment variable
st.sidebar.header('API Key')
if 'OPENAI_API_KEY' not in os.environ:
openai_api_key = st.sidebar.text_input('Enter your OpenAI API Key', type='password')
if openai_api_key:
os.environ['OPENAI_API_KEY'] = openai_api_key
else:
st.sidebar.write(":green[API Key set successfully.]")
# Initialize the OpenAI embeddings
embedding = OpenAIEmbeddings()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20, length_function=len)
# Initialize session_state
if "retrievers" not in st.session_state:
st.session_state.retrievers = []
if "retriever_descriptions" not in st.session_state:
st.session_state.retriever_descriptions = []
if "retriever_names" not in st.session_state:
st.session_state.retriever_names = []
# Directories for storing indexes and uploaded files
indexes_dir = 'indexes'
docs_dir = 'docs'
os.makedirs(indexes_dir, exist_ok=True)
os.makedirs(docs_dir, exist_ok=True)
if "initialized" not in st.session_state:
st.session_state.initialized = False
indexes = [f for f in os.listdir(indexes_dir) if os.path.isdir(os.path.join(indexes_dir, f))]
if not st.session_state.initialized:
# Process existing indexes
for index in indexes:
if index not in st.session_state.retriever_names:
retriever = Chroma(persist_directory=os.path.join(indexes_dir, index), embedding_function=embedding).as_retriever()
st.session_state.retrievers.append(retriever)
st.session_state.retriever_names.append(index)
st.session_state.retriever_descriptions.append(f"Good for answering questions about {index}")
st.session_state.initialized = True
st.sidebar.header('Uploaded Files')
uploaded_files = [f for f in os.listdir(docs_dir) if os.path.isfile(os.path.join(docs_dir, f))]
st.sidebar.write(uploaded_files)
st.sidebar.header('Document Indexes')
st.sidebar.write(indexes)
# Save uploaded files to "docs" folder
files = st.file_uploader('Upload files', type=['txt', 'pdf'], accept_multiple_files=True)
if files:
st.session_state.files = files # Save uploaded files to session state
for file in files:
filename = file.name
filepath = os.path.join(docs_dir, filename)
with open(filepath, "wb") as f:
f.write(file.getvalue())
# Check for each file in the "docs" folder and create/load the index
for filename in os.listdir(docs_dir):
filepath = os.path.join(docs_dir, filename)
if os.path.exists(os.path.join(indexes_dir, filename[:-4])):
continue
else:
with st.spinner(f'Creating index for {filename}...'):
if filename.endswith('.txt'):
with open(filepath, 'r', encoding="utf-8", errors="ignore") as f:
doc = text_splitter.create_documents([f.read()])
elif filename.endswith('.pdf'):
with open(filepath, 'rb') as f:
loader = PyPDFLoader(filepath)
doc = loader.load_and_split()
if doc is not None:
retriever = Chroma.from_documents(documents=doc, embedding=embedding, persist_directory=os.path.join(indexes_dir, filename[:-4]))
retriever.persist()
st.session_state.retrievers.append(retriever)
st.session_state.retriever_names.append(filename[:-4])
st.session_state.retriever_descriptions.append(f"Good for answering questions about {filename[:-4]}")
st.success(f'Index created for {filename}.')
# st.write(st.session_state.retrievers)
chain = MultiRetrievalQAChain.from_retrievers(OpenAI(), st.session_state.retriever_names, st.session_state.retriever_descriptions, st.session_state.retrievers, verbose=True)
st.header('Ask a Question')
question = st.text_input('Enter your question')
if st.button('Ask'):
if not st.session_state.retrievers:
st.warning("Please upload files or ensure they have been indexed.")
return
with st.spinner('Processing your question...'):
with io.StringIO() as buf, contextlib.redirect_stdout(buf):
# st.write("INDEXES: ", st.session_state.retrievers)
resp = chain.run(question)
output = buf.getvalue()
match = re.search(r"(\w+: \{'query': '.*?'\})", output)
if match:
# write wihch index we are using in green
st.write(":green[We are using the following index:]")
st.write(match.group(1))
else:
st.write("No match found.")
st.write(":green[Answer:]")
st.write(resp)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | kingler/MultiDocQA | multi_file.py | import os
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains.router import MultiRetrievalQAChain
from langchain.llms import OpenAI
from langchain.document_loaders import PyPDFLoader
# Directory where the .txt files are stored
docs_dir = 'docs'
# Initialize the OpenAI embeddings
embedding = OpenAIEmbeddings()
retrievers = []
retriever_descriptions = []
retriever_names = []
# Initialize the text splitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 500,
chunk_overlap = 20,
length_function = len,
)
# Iterate over all .txt and .pdf files in the directory
for filename in os.listdir(docs_dir):
doc = None
# Check if a persistent Chroma VectorStore already exists for this document
if os.path.exists(filename[:-4]):
# If it exists, load it from disk
retriever = Chroma(persist_directory=filename[:-4], embedding_function=embedding).as_retriever()
else:
# Load the document and split it
if filename.endswith('.txt'):
try:
with open(os.path.join(docs_dir, filename), 'r', encoding='utf-8') as f:
doc = f.read()
except UnicodeDecodeError:
# Handle possible encoding errors
print(f"Skipping file {filename} due to encoding errors.")
continue
# If it's a .txt, we split the document
doc = text_splitter.create_documents([doc])
elif filename.endswith('.pdf'):
loader = PyPDFLoader(os.path.join(docs_dir, filename))
doc = loader.load_and_split()
print(doc)
if doc is not None:
# Create a new Chroma VectorStore and save it to disk
retriever = Chroma.from_documents(documents=doc, embedding=embedding, persist_directory=filename[:-4])
retriever.persist()
retriever = retriever.as_retriever()
# Add the retriever, its name and its description to the respective lists
retrievers.append(retriever)
retriever_names.append(filename[:-4])
# PAY ATTENTON TO THE NAMES OF THE FILES AS THEY WILL BE IN THE DESCRIPTIONS
retriever_descriptions.append(f"Good for answering questions about {filename[:-4]}")
# Initialize the MultiRetrievalQAChain
chain = MultiRetrievalQAChain.from_retrievers(OpenAI(), retriever_names, retriever_descriptions, retrievers, verbose=True)
# Test it
# print(chain.run("What are the differences between Newton and Feynman?"))
while True:
print(chain.run(input("What would you like to know?>>> ")))
| [] |
2024-01-10 | karljayg/twitch-gpt-chat-bot | api~chat_utils.py | from settings import config
import openai
import re
import random
import sys
import time
import math
from utils.emote_utils import get_random_emote
from utils.emote_utils import remove_emotes_from_message
import utils.wiki_utils as wiki_utils
import utils.tokensArray as tokensArray
import string
from models.mathison_db import Database
from .text2speech import speak_text
# This function logs that the bot is starting with also logs some configurations of th bot
# This also sends random emoticon to twitch chat room
def message_on_welcome(self, logger):
logger.debug(
"================================================STARTING BOT========================================")
bot_mode = "BOT MODES \n"
bot_mode += "TEST_MODE: " + str(config.TEST_MODE) + "\n"
bot_mode += "TEST_MODE_SC2_CLIENT_JSON: " + \
str(config.TEST_MODE_SC2_CLIENT_JSON) + "\n"
bot_mode += "ANALYZE_REPLAYS_FOR_TEST: " + \
str(config.USE_CONFIG_TEST_REPLAY_FILE) + "\n"
bot_mode += "IGNORE_REPLAYS: " + \
str(config.IGNORE_GAME_STATUS_WHILE_WATCHING_REPLAYS) + "\n"
bot_mode += "IGNORE_PREVIOUS_GAME_RESULTS_ON_FIRST_RUN: " + \
str(config.IGNORE_PREVIOUS_GAME_RESULTS_ON_FIRST_RUN) + "\n"
bot_mode += "MONITOR_GAME_SLEEP_SECONDS: " + \
str(config.MONITOR_GAME_SLEEP_SECONDS) + "\n"
logger.debug(bot_mode)
prefix = "" # if any
greeting_message = f'{prefix} {get_random_emote()}'
msgToChannel(self, greeting_message, logger)
def clean_text_for_chat(msg):
# Combine carriage return and line feed replacement with filtering non-printable characters
msg = ''.join(filter(lambda x: x in set(string.printable), msg.replace('\r', '').replace('\n', '')))
return msg
# This function sends and logs the messages sent to twitch chat channel
def msgToChannel(self, message, logger, text2speech=False):
# Clean up the message
message = clean_text_for_chat(message)
# Calculate the size of the message in bytes
message_bytes = message.encode()
message_size = len(message_bytes)
# Log the byte size of the message
logger.debug(f"Message size in bytes: {message_size}")
# Check if the message exceeds the 512-byte limit
if message_size > 512:
truncated_message_bytes = message_bytes[:512 - len(" ... more".encode())] + " ... more".encode()
else:
truncated_message_bytes = message_bytes
# Convert the truncated message back to a string
truncated_message_str = truncated_message_bytes.decode()
self.connection.privmsg(self.channel, truncated_message_str)
logger.debug(
"---------------------MSG TO CHANNEL----------------------")
logger.debug(truncated_message_str)
logger.debug(
"---------------------------------------------------------")
if text2speech:
# use text to speech capability to speak the response if enabled
# try catch
if not config.TEXT_TO_SPEECH:
return
try:
logger.debug(f"Speaking")
truncated_message_str = remove_emotes_from_message(truncated_message_str)
truncated_message_str = "add commas, period and other appropriate punctuation: " + truncated_message_str
completion = send_prompt_to_openai(truncated_message_str)
if completion.choices[0].message is not None:
logger.debug(
"completion.choices[0].message.content: " + completion.choices[0].message.content)
response = completion.choices[0].message.content
truncated_message_str = response
speak_text(truncated_message_str, mode=1)
except Exception as e:
logger.debug(f"Error: {e}")
finally:
logger.debug(f"Spoken")
# This function processes the message receive in twitch chat channel
# This will determine if the bot will reply base on dice roll
# And this will generate the response
def process_pubmsg(self, event, logger, contextHistory):
logger.debug("processing pubmsg")
# Get message from chat
msg = event.arguments[0].lower()
sender = event.source.split('!')[0]
# tags = {kvpair["key"]: kvpair["value"] for kvpair in event.tags}
# user = {"name": tags["display-name"], "id": tags["user-id"]}
if 'commands' in msg.lower():
response = f"{config.BOT_COMMANDS}"
response = clean_text_for_chat(response)
trimmed_msg = tokensArray.truncate_to_byte_limit(response, config.TWITCH_CHAT_BYTE_LIMIT)
msgToChannel(self, trimmed_msg, logger)
return
# Send response to direct msg or keyword which includes Mathison being mentioned
if 'open sesame' in msg.lower() or any(sub.lower() == msg.lower() for sub in config.OPEN_SESAME_SUBSTITUTES):
logger.debug("received open sesame: " + str(msg.lower()))
processMessageForOpenAI(self, msg, self.conversation_mode, logger, contextHistory)
return
# search wikipedia
if 'wiki' in msg.lower():
logger.debug("received wiki command: /n" + msg)
msg = wiki_utils.wikipedia_question(msg, self)
logger.debug("wiki answer: /n" + msg)
trimmed_msg = tokensArray.truncate_to_byte_limit(msg, config.TWITCH_CHAT_BYTE_LIMIT)
trimmed_msg = "restate all the info, do not ommit any details: " + trimmed_msg
processMessageForOpenAI(self, trimmed_msg, self.conversation_mode, logger, contextHistory)
return
# search replays DB
if 'career' in msg.lower():
contextHistory.clear()
logger.debug("received career record command: \n" + msg)
player_name = msg.split(" ", 1)[1]
career_record = self.db.get_player_overall_records(player_name)
logger.debug("career overall record answer: \n" + career_record)
career2_record = self.db.get_player_race_matchup_records(player_name)
logger.debug("career matchups record answer: \n" + career_record)
career_record = career_record + " " + career2_record
# Check if there are any results
if career_record:
trimmed_msg = tokensArray.truncate_to_byte_limit(career_record, config.TWITCH_CHAT_BYTE_LIMIT)
msg = f'''
Review this example:
when given a player, DarkMenace the career records are:
Overall matchup records for darkmenace: 425 wins - 394 losses Race matchup records for darkmenace: Protoss vs Protoss: 15 wins - 51 lossesProtoss vs Terran: 11 wins - 8 lossesProtoss vs Zerg: 1 wins - 1 lossesTerran vs Protoss: 8 wins - 35 lossesTerran vs Terran: 3 wins - 1 lossesTerran vs Zerg: 4 wins - 3 lossesZerg vs Protoss: 170 wins - 137 lossesZerg vs Terran: 138 wins - 100 lossesZerg vs Zerg: 75 wins - 58 losses
From the above, say it exactly like this format:
overall: 425-394, each matchup: PvP: 15-51 PvT: 11-8 PvZ: 1-1 TvP: 8-35 TvT: 3-1 TvZ: 4-3 ZvP: 170-137 ZvT: 138-100 ZvZ: 75-58
Now do the same but only using this data:
{player_name} : {trimmed_msg}.
Then add a 10 word comment about the matchup, after.
'''
else:
msg = f"Restate all of the info here: There is no career games that I know for {player_name} ."
# Send the message for processing
# processMessageForOpenAI(self, msg, self.conversation_mode, logger, contextHistory)
# no mathison flavoring, just raw send to prompt
completion = send_prompt_to_openai(msg)
if completion.choices[0].message is not None:
logger.debug(
"completion.choices[0].message.content: " + completion.choices[0].message.content)
response = completion.choices[0].message.content
if len(response) >= 400:
logger.debug(
f"Chunking response since it's {len(response)} characters long")
# Split the response into chunks of 400 characters without splitting words
chunks = []
temp_chunk = ''
for word in response.split():
if len(temp_chunk + ' ' + word) <= 400:
temp_chunk += ' ' + word if temp_chunk != '' else word
else:
chunks.append(temp_chunk)
temp_chunk = word
if temp_chunk:
chunks.append(temp_chunk)
# Send response chunks to chat
for chunk in chunks:
msgToChannel(self, chunk, logger)
else:
msgToChannel(self, response, logger)
return
# search replays DB
if 'history' in msg.lower():
contextHistory.clear()
logger.debug("received history command: /n" + msg)
player_name = msg.split(" ", 1)[1]
history_list = self.db.get_player_records(player_name)
logger.debug("history answer: /n" + str(history_list))
# Process each record and format it as desired
formatted_records = [f"{rec.split(', ')[0]} vs {rec.split(', ')[1]}, {rec.split(', ')[2].split(' ')[0]}-{rec.split(', ')[3].split(' ')[0]}" for rec in history_list]
# Join the formatted records into a single string
result_string = " and ".join(formatted_records)
trimmed_msg = tokensArray.truncate_to_byte_limit(result_string, config.TWITCH_CHAT_BYTE_LIMIT)
# if history_list is empty then msg is "no records found"
if history_list == []:
msg = (f"restate all of the info here: there are no game records in history for {player_name}")
else:
msg = (f"restate all of the info here and do not exclude anything: total win/loss record of {player_name} we know the results of so far {trimmed_msg}")
#msgToChannel(self, msg, logger)
processMessageForOpenAI(self, msg, self.conversation_mode, logger, contextHistory)
return
def chunk_list(lst, max_chunk_size):
"""Splits the list into smaller lists each having a length less than or equal to max_chunk_size."""
for i in range(0, len(lst), max_chunk_size):
yield lst[i:i + max_chunk_size]
# Check if the message contains "games in" and "hours"
if 'games in' in msg.lower() and 'hours' in msg.lower():
logger.debug("Received command to fetch games in the last X hours")
# Use regex to extract the number of hours from the message
match = re.search(r'games in (the )?last (\d+) hours', msg.lower())
if match:
hours = int(match.group(2)) # Extract the number of hours
else:
hours = 4 # Default value if no number is found
if hours > 72:
hours = 72 # Max number of hours allowed is 72
# Retrieve games for the last X hours
recent_games = self.db.get_games_for_last_x_hours(hours)
logger.debug(f"Games in the last {hours} hours: \n" + str(recent_games))
# Process each game record and format it as desired
formatted_records = [f"{game}" for game in recent_games]
# Define chunk size based on an estimated average size of each record
avg_record_size = 100 # This is an estimation; you might need to adjust it
max_chunk_size = config.TWITCH_CHAT_BYTE_LIMIT // avg_record_size
msg = f"Games played in the last {hours} hours are: "
msgToChannel(self, msg, logger)
# Split the formatted records into chunks
for chunk in chunk_list(formatted_records, max_chunk_size):
# Join the records in the chunk into a single string
chunk_string = " and ".join(chunk)
# Truncate the chunk string to byte limit
trimmed_msg = tokensArray.truncate_to_byte_limit(chunk_string, config.TWITCH_CHAT_BYTE_LIMIT)
# Send the chunk message
msgToChannel(self, trimmed_msg, logger)
# processMessageForOpenAI(self, msg, self.conversation_mode, logger, contextHistory)
# Function to process the 'head to head' command
if 'head to head' in msg.lower():
contextHistory.clear()
logger.debug(f"Received 'head to head' command: \n{msg}")
# Use regular expression to extract player names
match = re.search(r"head to head (\w+) (\w+)", msg.lower())
if match:
player1_name, player2_name = match.groups()
# Retrieve head-to-head records
head_to_head_list = self.db.get_head_to_head_matchup(player1_name, player2_name)
logger.debug(f"Type of head_to_head_list: {type(head_to_head_list)}")
logger.debug(f"Head to head answer: \n{str(head_to_head_list)}")
# Check if there are any results
if head_to_head_list:
# Since the records are already formatted, join them into a single string
result_string = ", ".join(head_to_head_list)
trimmed_msg = tokensArray.truncate_to_byte_limit(result_string, config.TWITCH_CHAT_BYTE_LIMIT)
msg = f'''
Review this example:
when given 2 player, DarkMenace vs KJ the records are:
['DarkMenace (Terran) vs KJ (Zerg), 29 wins - 7 wins', 'DarkMenace (Protoss) vs KJ (Zerg), 9 wins - 12 wins', 'DarkMenace (Zerg) vs KJ (Zerg), 3 wins - 2 wins', 'DarkMenace (Protoss) vs KJ (Terran), 6 wins - 1 wins', 'DarkMenace (Terran) vs KJ (Terran), 1 wins - 0 wins', 'DarkMenace (Protoss) vs KJ (Protoss), 2 wins - 2 wins']
From the above, say it exactly like this format:
overall: 50-24, each matchup: TvZ 29-7, PvZ 9-12, ZvZ 3-2, PvT 6-1, TvT 1-0, PvP, 2-2. Summary: <10 word comment about the matchup>
Now do the same but only using this data:
{player1_name} vs {player2_name}: {result_string}.
Then add a 10 word comment about the matchup, after.
'''
else:
msg = f"Restate all of the info here: There are no head-to-head game records between {player1_name} and {player2_name} ."
# Send the message for processing
# processMessageForOpenAI(self, msg, self.conversation_mode, logger, contextHistory)
# no mathison flavoring, just raw send to prompt
completion = send_prompt_to_openai(msg)
if completion.choices[0].message is not None:
logger.debug(
"completion.choices[0].message.content: " + completion.choices[0].message.content)
response = completion.choices[0].message.content
if len(response) >= 400:
logger.debug(
f"Chunking response since it's {len(response)} characters long")
# Split the response into chunks of 400 characters without splitting words
chunks = []
temp_chunk = ''
for word in response.split():
if len(temp_chunk + ' ' + word) <= 400:
temp_chunk += ' ' + word if temp_chunk != '' else word
else:
chunks.append(temp_chunk)
temp_chunk = word
if temp_chunk:
chunks.append(temp_chunk)
# Send response chunks to chat
for chunk in chunks:
msgToChannel(self, chunk, logger)
else:
msgToChannel(self, response, logger)
else:
logger.debug("Invalid 'head to head' command format or player names not found.")
# Optionally send an error message to the channel or log it.
return
# ignore certain users
logger.debug("checking user: " + sender + " against ignore list")
if sender.lower() in [user.lower() for user in config.IGNORE]:
logger.debug("ignoring user: " + sender)
return
else:
logger.debug("allowed user: " + sender)
if config.PERSPECTIVE_DISABLED:
logger.debug("google perspective config is disabled")
toxicity_probability = 0
else:
toxicity_probability = tokensArray.get_toxicity_probability(
msg, logger)
# do not send toxic messages to openAI
if toxicity_probability < config.TOXICITY_THRESHOLD:
# any user greets via config keywords will be responded to
if any(greeting in msg.lower() for greeting in config.GREETINGS_LIST_FROM_OTHERS):
response = f"Hi {sender}!"
response = f'{response} {get_random_emote()}'
msgToChannel(self, response, logger)
# disable the return - sometimes it matches words so we want mathison to reply anyway
# DO NOT return
if 'bye' in msg.lower():
response = f"byers {sender}!"
msgToChannel(self, response, logger)
return
if 'gg' in msg.lower():
response = f"HSWP"
msgToChannel(self, response, logger)
return
if 'bracket' in msg.lower() or '!b' in msg.lower() or 'FSL' in msg.upper() or 'fsl' in msg.lower():
msg = f"Restate this including the full URL: This is the tournament info {config.BRACKET}"
processMessageForOpenAI(self, msg, self.conversation_mode, logger, contextHistory)
return
# will only respond to a certain percentage of messages per config
diceRoll = random.randint(0, 100) / 100
logger.debug("rolled: " + str(diceRoll) +
" settings: " + str(config.RESPONSE_PROBABILITY))
if diceRoll >= config.RESPONSE_PROBABILITY:
logger.debug("will not respond")
return
processMessageForOpenAI(self, msg, self.conversation_mode, logger, contextHistory)
else:
response = random.randint(1, 3)
switcher = {
1: f"{sender}, please refrain from sending toxic messages.",
2: f"Woah {sender}! Strong language",
3: f"Calm down {sender}. What's with the attitude?"
}
msgToChannel(self, switcher.get(response), logger)
def send_prompt_to_openai(msg):
"""
Send a given message as a prompt to OpenAI and return the response.
:param msg: The message to send to OpenAI as a prompt.
:return: The response from OpenAI.
"""
completion = openai.ChatCompletion.create(
model=config.ENGINE,
messages=[
{"role": "user", "content": msg}
]
)
return completion
# This function will process the message for Open API
# This will connect to OpenAI and send the inquiry/message for AI to response
# Then calls the msgToChannel to send the response back to channel
# This also logs the what message was sent to OpenAI and the response
def processMessageForOpenAI(self, msg, conversation_mode, logger, contextHistory):
# let's give these requests some breathing room
time.sleep(config.MONITOR_GAME_SLEEP_SECONDS)
# remove open sesame
msg = msg.replace('open sesame', '')
logger.debug(
"----------------------------------------NEW MESSAGE FOR OPENAI-----------------------------------------")
# logger.debug(msg)
logger.debug(
'msg omitted in log, to see it, look in: "sent to OpenAI"')
# remove open sesame
msg = msg.replace('open sesame', '')
# remove quotes
msg = msg.replace('"', '')
msg = msg.replace("'", '')
# add line break to ensure separation
msg = msg + "\n"
# TODO: redo this logic
# if bool(config.STOP_WORDS_FLAG):
# msg, removedWords = tokensArray.apply_stop_words_filter(msg)
# logger.debug("removed stop words: %s" , removedWords)
# check tokensize
total_tokens = tokensArray.num_tokens_from_string(
msg, config.TOKENIZER_ENCODING)
msg_length = len(msg)
logger.debug(f"string length: {msg_length}, {total_tokens} tokens")
# This approach calculates the token_ratio as the desired token limit divided by the actual total tokens.
# Then, it trims the message length based on this ratio, ensuring that the message fits within the desired token limit.
# Additionally, the code adjusts the desired token limit by subtracting the buffer size before calculating the token ratio.
# This ensures that the trimming process takes the buffer into account and helps prevent the message from
# exceeding the desired token limit by an additional (BUFFER) of 200 tokens.
# check tokensize
total_tokens = tokensArray.num_tokens_from_string(
msg, config.TOKENIZER_ENCODING)
msg_length = len(msg)
logger.debug(f"string length: {msg_length}, {total_tokens} tokens")
if int(total_tokens) > config.CONVERSATION_MAX_TOKENS:
divided_by = math.ceil(len(msg) // config.CONVERSATION_MAX_TOKENS)
logger.debug(
f"msg is too long so we are truncating it 1/{divided_by} of its length")
msg = msg[0:msg_length // divided_by]
msg = msg + "\n" # add line break to ensure separation
total_tokens = tokensArray.num_tokens_from_string(
msg, config.TOKENIZER_ENCODING)
msg_length = len(msg)
logger.debug(
f"new string length: {msg_length}, {total_tokens} tokens")
# add User msg to conversation context if not replay nor last time played analysis
if conversation_mode not in ["replay_analysis", "last_time_played"]:
# add User msg to conversation context
tokensArray.add_new_msg(
contextHistory, 'User: ' + msg + "\n", logger)
logger.debug("adding msg to context history")
else:
contextHistory.clear()
if conversation_mode == "last_time_played":
# no mood / perspective
pass
else:
# add complete array as msg to OpenAI
msg = msg + \
tokensArray.get_printed_array("reversed", contextHistory)
# Choose a random mood and perspective from the selected options
mood = random.choice(self.selected_moods)
if conversation_mode == "replay_analysis":
# say cutoff is 4, then select indices 0-3
perspective_indices = config.BOT_PERSPECTIVES[:config.PERSPECTIVE_INDEX_CUTOFF]
else:
# Select indices 4-onwards
perspective_indices = config.BOT_PERSPECTIVES[config.PERSPECTIVE_INDEX_CUTOFF:]
selected_perspectives = [
config.PERSPECTIVE_OPTIONS[i] for i in perspective_indices]
perspective = random.choice(selected_perspectives)
if (conversation_mode == "normal"):
# if contextHistory has > 15 tuples, clear it
if len(contextHistory) > 15:
logger.debug(f"contextHistory has more than 15 tuples, clearing it")
contextHistory.clear()
else:
pass
# Add custom SC2 viewer perspective
msg = (f"As a {mood} acquaintance of {config.STREAMER_NICKNAME}, {perspective}, "
+ msg)
else:
if (conversation_mode == "in_game"):
msg = (f"As a {mood} observer of matches in StarCraft 2, {perspective}, comment on this statement: "
+ msg)
else:
msg = (f"As a {mood} observer of matches in StarCraft 2, {perspective}, "
+ msg)
logger.debug("CONVERSATION MODE: " + conversation_mode)
logger.debug("sent to OpenAI: %s", msg)
#msgToChannel(self, "chanchan", logger)
completion = send_prompt_to_openai(msg)
try:
if completion.choices[0].message is not None:
logger.debug(
"completion.choices[0].message.content: " + completion.choices[0].message.content)
response = completion.choices[0].message.content
# add emote
if random.choice([True, False]):
response = f'{response} {get_random_emote()}'
logger.debug('raw response from OpenAI:')
logger.debug(response)
# Clean up response
# Remove carriage returns, newlines, and tabs
response = re.sub('[\r\n\t]', ' ', response)
# Remove non-ASCII characters
response = re.sub('[^\x00-\x7F]+', '', response)
response = re.sub(' +', ' ', response) # Remove extra spaces
response = response.strip() # Remove leading and trailing whitespace
# dont make it too obvious its a bot
response = response.replace("As an AI language model, ", "")
response = response.replace("User: , ", "")
response = response.replace("Observer: , ", "")
response = response.replace("Player: , ", "")
logger.debug("cleaned up message from OpenAI:")
# replace with ? all non ascii characters that throw an error in logger
response = tokensArray.replace_non_ascii(response, replacement='?')
logger.debug(response)
if len(response) >= 400:
logger.debug(
f"Chunking response since it's {len(response)} characters long")
# Split the response into chunks of 400 characters without splitting words
chunks = []
temp_chunk = ''
for word in response.split():
if len(temp_chunk + ' ' + word) <= 400:
temp_chunk += ' ' + word if temp_chunk != '' else word
else:
chunks.append(temp_chunk)
temp_chunk = word
if temp_chunk:
chunks.append(temp_chunk)
# Send response chunks to chat
for chunk in chunks:
# Remove all occurrences of "AI: "
chunk = re.sub(r'\bAI: ', '', chunk)
msgToChannel(self, chunk, logger)
# Add AI response to conversation context
tokensArray.add_new_msg(
contextHistory, 'AI: ' + chunk + "\n", logger)
# Log relevant details
logger.debug(f'Sending openAI response chunk: {chunk}')
logger.debug(
f'Conversation in context so far: {tokensArray.get_printed_array("reversed", contextHistory)}')
else:
response = re.sub(r'\bAI: ', '', response)
# if response is less than 150 characters
if len(response) <= 150:
# really short messages get to be spoken
msgToChannel(self, response, logger, text2speech=True)
else:
msgToChannel(self, response, logger)
# Add AI response to conversation context
tokensArray.add_new_msg(
contextHistory, 'AI: ' + response + "\n", logger)
# Log relevant details
logger.debug(f'AI msg to chat: {response}')
logger.debug(
f'Conversation in context so far: {tokensArray.get_printed_array("reversed", contextHistory)}')
else:
response = 'oops, I have no response to that'
msgToChannel(self, response, logger)
logger.debug('Failed to send response: %s', response)
except SystemExit as e:
logger.error('Failed to send response: %s', e) | [] |
2024-01-10 | karljayg/twitch-gpt-chat-bot | utils~wiki_utils.py | from langchain.agents import load_tools, initialize_agent
from langchain.llms import OpenAI
from settings import config
OpenAI.api_key = config.OPENAI_API_KEY
llm = OpenAI(temperature=0.6, openai_api_key=OpenAI.api_key)
tool_names = ['wikipedia']
tools = load_tools(tool_names)
agent = initialize_agent(
tools, llm, agent='zero-shot-react-description', verbose=True)
def wikipedia_question(question, self):
print(f'Question: {question}')
return agent.run(question)
| [] |
2024-01-10 | karljayg/twitch-gpt-chat-bot | api~twitch_bot.py | import irc.bot
import openai
import json
import random
import time
import urllib3
import threading
import signal
import sys
import logging
import math
import spawningtool.parser
import tiktoken
import pytz
from datetime import datetime
from collections import defaultdict
from settings import config
import utils.tokensArray as tokensArray
import utils.wiki_utils as wiki_utils
from models.mathison_db import Database
from models.log_once_within_interval_filter import LogOnceWithinIntervalFilter
from utils.emote_utils import get_random_emote
from utils.file_utils import find_latest_file
from utils.sound_player_utils import SoundPlayer
from .sc2_game_utils import check_SC2_game_status
from .game_event_utils import game_started_handler
from .game_event_utils import game_replay_handler
from .game_event_utils import game_ended_handler
from .chat_utils import message_on_welcome, process_pubmsg
from .sc2_game_utils import handle_SC2_game_results
# The contextHistory array is a list of tuples, where each tuple contains two elements: the message string and its
# corresponding token size. This allows us to keep track of both the message content and its size in the array. When
# a new message is added to the contextHistory array, its token size is determined using the nltk.word_tokenize()
# function. If the total number of tokens in the array exceeds the maxContextTokens threshold, the function starts
# deleting items from the end of the array until the total number of tokens is below the threshold. If the last item
# in the array has a token size less than or equal to the maxContextTokens threshold, the item is removed completely.
# However, if the last item has a token size greater than the threshold, the function removes tokens from the end of
# the message string until its token size is less than or equal to the threshold, and keeps the shortened message
# string in the array. If the total number of tokens in the array is still above the threshold after deleting the
# last item, the function repeats the process with the second-to-last item in the array, and continues deleting items
# until the total number of tokens is below the threshold. By using this logic, we can ensure that the contextHistory
# array always contains a maximum number of tokens specified by maxContextTokens, while keeping the most recent
# messages in the array.
global contextHistory
contextHistory = []
# Initialize the logger at the beginning of the script
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addFilter(LogOnceWithinIntervalFilter())
# Set logging level for urllib3 to WARNING
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Player names of streamer to check results for
player_names = config.SC2_PLAYER_ACCOUNTS
class TwitchBot(irc.bot.SingleServerIRCBot):
def __init__(self):
self.first_run = True
self.last_replay_file = None
self.conversation_mode = "normal"
self.total_seconds = 0
self.encoding = tiktoken.get_encoding(config.TOKENIZER_ENCODING)
self.encoding = tiktoken.encoding_for_model(config.ENGINE)
# handle KeyboardInterrupt in a more graceful way by setting a flag when Ctrl-C is pressed and checking that
# flag in threads that need to be terminated
self.shutdown_flag = False
signal.signal(signal.SIGINT, self.signal_handler)
# threads to be terminated as soon as the main program finishes when set as daemon threads
monitor_thread = threading.Thread(target=self.monitor_game)
monitor_thread.daemon = True
monitor_thread.start()
# Generate the current datetime timestamp in the format YYYYMMDD-HHMMSS
timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
# Append the timestamp to the log file name
log_file_name = config.LOG_FILE.replace(".log", f"_{timestamp}.log")
# Set up the logging configuration
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s:%(levelname)s:%(name)s: %(message)s')
file_handler = logging.FileHandler(log_file_name)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# Set bot configuration
self.token = config.TOKEN
self.channel = config.CHANNEL
self.username = config.USERNAME
self.server = config.HOST
self.port = config.PORT
self.ignore = config.IGNORE
openai.api_key = config.OPENAI_API_KEY
self.streamer_nickname = config.STREAMER_NICKNAME
self.selected_moods = [config.MOOD_OPTIONS[i]
for i in config.BOT_MOODS]
self.selected_perspectives = [
config.PERSPECTIVE_OPTIONS[i] for i in config.BOT_PERSPECTIVES]
# Initialize the IRC bot
irc.bot.SingleServerIRCBot.__init__(self, [(self.server, self.port, 'oauth:' + self.token)], self.username,
self.username)
# # SC2 sounds
self.sound_player = SoundPlayer()
# Initialize the database
self.db = Database()
def play_SC2_sound(self, game_event):
if config.PLAYER_INTROS_ENABLED:
if config.IGNORE_PREVIOUS_GAME_RESULTS_ON_FIRST_RUN and self.first_run:
logger.debug(
"Per config, ignoring previous game on the first run, so no sound will be played")
return
self.sound_player.play_sound(game_event, logger)
else:
logger.debug("SC2 player intros and other sounds are disabled")
# incorrect IDE warning here, keep parameters at 3
def signal_handler(self, signal, frame):
self.shutdown_flag = True
logger.debug(
"================================================SHUTTING DOWN BOT========================================")
self.die("Shutdown requested.")
sys.exit(0)
def monitor_game(self):
previous_game = None
heartbeat_counter = 0
heartbeat_interval = config.HEARTBEAT_MYSQL # Number of iterations before sending a heartbeat for MySQL
while not self.shutdown_flag:
try:
current_game = check_SC2_game_status(logger)
if (current_game.get_status() == "MATCH_STARTED" or current_game.get_status() == "REPLAY_STARTED"):
self.conversation_mode = "in_game"
else:
self.conversation = "normal"
if current_game:
if config.IGNORE_GAME_STATUS_WHILE_WATCHING_REPLAYS and current_game.isReplay:
pass
else:
# wait so abandoned games doesnt result in false data of 0 seconds
time.sleep(2)
# self.handle_SC2_game_results(
# previous_game, current_game)
handle_SC2_game_results(self, previous_game,
current_game, contextHistory, logger)
previous_game = current_game
time.sleep(config.MONITOR_GAME_SLEEP_SECONDS)
# Increment the heartbeat counter
heartbeat_counter += 1
# Check if it's time to send a heartbeat
if heartbeat_counter >= heartbeat_interval:
try:
self.db.keep_connection_alive()
heartbeat_counter = 0 # Reset the counter after sending the heartbeat
# heartbeat indicator
print("+", end="", flush=True)
except Exception as e:
self.logger.error(f"Error during database heartbeat call: {e}")
else:
# heartbeat indicator
print(".", end="", flush=True)
except Exception as e:
pass
# This is a callback method that is invoked when bot successfully connects to an IRC Server
def on_welcome(self, connection, event):
# Join the channel and say a greeting
connection.join(self.channel)
message_on_welcome(self, logger)
# This function is a listerner whenever there is a publish message on twitch chat room
def on_pubmsg(self, connection, event):
#process the message sent by the viewers in the twitch chat room
process_pubmsg(self, event, logger, contextHistory) | [] |
2024-01-10 | ParsaAslaniYC/Katana-IDE-Classic | IDE~helper~KatanaGPT.py | import sys
from PyQt6 import QtWidgets
import openai
# قرار دادن API key در این متغیر
openai.api_key = "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"
# تابعی برای ارسال پیام به ChatGPT و دریافت پاسخ
def send_message(message):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=message,
temperature=0.5,
max_tokens=1024,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response.choices[0].text.strip()
class ChatWindow(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
self.input_label = QtWidgets.QLabel("Enter message:")
self.input_textbox = QtWidgets.QLineEdit()
self.send_button = QtWidgets.QPushButton("Send")
self.output_label = QtWidgets.QLabel("AI:")
self.output_textbox = QtWidgets.QTextEdit()
self.output_textbox.setReadOnly(True)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.input_label)
layout.addWidget(self.input_textbox)
layout.addWidget(self.send_button)
layout.addWidget(self.output_label)
layout.addWidget(self.output_textbox)
self.setLayout(layout)
self.send_button.clicked.connect(self.send_message)
def send_message(self):
message = self.input_textbox.text()
response = send_message(message)
self.output_textbox.setText(response)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = ChatWindow()
window.show()
sys.exit(app.exec())
| [] |
2024-01-10 | dungru/chatGPT-discord-bot | src~responses.py | import openai
import json
from asgiref.sync import sync_to_async
def get_config() -> dict:
import os
# get config.json path
config_dir = os.path.abspath(__file__ + "/../../")
config_name = 'config.json'
config_path = os.path.join(config_dir, config_name)
with open(config_path, 'r') as f:
config = json.load(f)
return config
config = get_config()
openai.api_key = config['openAI_key']
model_name = config['fine_tuned_model']
if not model_name:
model_name = "text-davinci-003"
async def handle_response(message) -> str:
response = await sync_to_async(openai.Completion.create)(
model=model_name,
prompt=message,
temperature=0.7,
max_tokens=2048,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.0,
)
responseMessage = response.choices[0].text
return responseMessage | [] |
2024-01-10 | cwijayasundara/langchain-investment-advisor-dashboard | sec_file_summeriser.py | import os
from dotenv import load_dotenv, find_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.callbacks import get_openai_callback
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
doc_url = 'https://abc.xyz/assets/a7/5b/9e5ae0364b12b4c883f3cf748226/goog-exhibit-99-1-q1-2023-19.pdf'
llm = ChatOpenAI(temperature=0.0)
# run the summarizer chain
def summerise_large_pdf(fileUrl):
loader = PyPDFLoader(fileUrl)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50)
texts = text_splitter.split_documents(documents)
chain = load_summarize_chain(llm, chain_type="map_reduce", verbose=True)
return chain.run(texts)
with get_openai_callback() as cb:
response = summerise_large_pdf(doc_url)
print(response)
print(f"Total Tokens: {cb.total_tokens}")
print(f"Prompt Tokens: {cb.prompt_tokens}")
print(f"Completion Tokens: {cb.completion_tokens}")
print(f"Total Cost (USD): ${cb.total_cost}")
| [] |
2024-01-10 | cwijayasundara/langchain-investment-advisor-dashboard | pinecone_loader.py | import os
import pinecone
from dotenv import load_dotenv, find_dotenv
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY') # find next to api key in console
PINECONE_ENV = os.getenv('PINECONE_ENV') # find next to api key in console
index_name = 'semantic-search-openai'
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
loader = DirectoryLoader(
'earning_releases_2023', # my local directory
glob='**/*.pdf', # we only get pdfs
show_progress=True
)
docs = loader.load()
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=0
)
docs_split = text_splitter.split_documents(docs)
# we use the openAI embedding model
embeddings = OpenAIEmbeddings()
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
doc_db = Pinecone.from_documents(
docs_split,
embeddings,
index_name=index_name
)
query = "What was the revenue for Apple Inc. in 2023?"
search_docs = doc_db.similarity_search(query, top_k=3)
print(search_docs) | [] |
2024-01-10 | cwijayasundara/langchain-investment-advisor-dashboard | investment_advisor_agent.py | import os
import pinecone
import yfinance as yf
import investment_advisor_util
import pandas as pd
import streamlit as st
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from PIL import Image
from dotenv import load_dotenv, find_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Pinecone
from yahooquery import Ticker
from datetime import datetime, timedelta
from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from investment_advisor_util import stocks
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY') # find next to api key in console
PINECONE_ENV = os.getenv('PINECONE_ENV') # find next to api key in console
index_name = 'semantic-search-openai'
EMBEDDING_MODEL_NAME = 'text-embedding-ada-002'
llm = ChatOpenAI(temperature=0.0)
# embedding model
embed = OpenAIEmbeddings(
model=EMBEDDING_MODEL_NAME,
openai_api_key=OPENAI_API_KEY
)
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
# connect to index assuming its already created
index = pinecone.Index(index_name)
print('Pinecone index status is', index.describe_index_stats())
text_field = "text"
vectorstore = Pinecone(
index, embed.embed_query, text_field
)
def get_recommendation(question, stock_cik, expression):
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type='stuff',
retriever=vectorstore.as_retriever()
)
enhanced_question = f"{stock_cik} {question} {expression} "
print(enhanced_question)
result = qa.run(enhanced_question)
return result.translate(str.maketrans("", "", "_*"))
def summerise_large_pdf_document(fileUrl):
url = fileUrl
loader = PyPDFLoader(url)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50)
texts = text_splitter.split_documents(documents)
chain = load_summarize_chain(llm, chain_type="map_reduce", verbose=True)
return chain.run(texts)
# page construction
st.set_page_config(page_title="Relationship Manager Investment Dashboard ABC Plc", layout="wide",
initial_sidebar_state="collapsed", page_icon="agent.png")
col1, col2 = st.columns((1, 3))
icon = Image.open("agent.png")
col1.image(icon, width=100)
st.title("Relationship Manager Investment Dashboard ABC Plc")
selected_stock = col1.selectbox("Select a stock", options=list(stocks.keys()))
# Get stock data from yfinance
ticker = yf.Ticker(stocks[selected_stock]["symbol"])
# Calculate the date range for the last 365 days
end_date = datetime.now()
start_date = end_date - timedelta(days=365)
# Get the closing prices for the selected stock in the last 365 days
data = ticker.history(start=start_date, end=end_date)
closing_prices = data["Close"]
# Plot the line chart in the first column
col1.line_chart(closing_prices, use_container_width=True)
# Get the company long description
long_description = ticker.info["longBusinessSummary"]
# Display the long description in a text box in the second column
col2.title("Company Overview")
col2.write(long_description)
# Use yahooquery to get earnings and revenue
ticker_yq = Ticker(stocks[selected_stock]["symbol"])
earnings = ticker_yq.earnings
financials_data = earnings[stocks[selected_stock]["symbol"]]['financialsChart']['yearly']
df_financials = pd.DataFrame(financials_data)
df_financials['revenue'] = df_financials['revenue']
df_financials['earnings'] = df_financials['earnings']
df_financials = df_financials.rename(columns={'earnings': 'yearly earnings', 'revenue': 'yearly revenue'})
numeric_cols = ['yearly earnings', 'yearly revenue']
df_financials[numeric_cols] = df_financials[numeric_cols].applymap(investment_advisor_util.format_large_number)
df_financials['date'] = df_financials['date'].astype(str)
df_financials.set_index('date', inplace=True)
# Display earnings and revenue in the first column
col1.write(df_financials)
summary_detail = ticker_yq.summary_detail[stocks[selected_stock]["symbol"]]
obj = yf.Ticker(stocks[selected_stock]["symbol"])
pe_ratio = '{0:.2f}'.format(summary_detail["trailingPE"])
price_to_sales = summary_detail["fiftyTwoWeekLow"]
target_price = summary_detail["fiftyTwoWeekHigh"]
market_cap = summary_detail["marketCap"]
ebitda = ticker.info["ebitda"]
tar = ticker.info["targetHighPrice"]
rec = ticker.info["recommendationKey"].upper()
# Format large numbers
market_cap = investment_advisor_util.format_large_number(market_cap)
ebitda = investment_advisor_util.format_large_number(ebitda)
# Create a dictionary for additional stock data
additional_data = {
"P/E Ratio": pe_ratio,
"52 Week Low": price_to_sales,
"52 Week High": target_price,
"Market Capitalisation": market_cap,
"EBITDA": ebitda,
"Price Target": tar,
"Recommendation": rec
}
# Display additional stock data in the first column
for key, value in additional_data.items():
col1.write(f"{key}: {value}")
col2.title("Opportunities for investors")
selected_stock_name = stocks[selected_stock]["name"]
selected_stock_url = stocks[selected_stock]["url"]
col2.subheader("Summary of the Last Quarter Financial Performance")
col2.write(summerise_large_pdf_document(selected_stock_url))
col2.subheader("Other Financial considerations")
col2.write(get_recommendation(selected_stock_name, "What are the key products and services of", "?"))
col2.write(get_recommendation(selected_stock_name,
"What are the new products and growth opportunities for", "?"))
col2.write(get_recommendation(
selected_stock_name, "What are the key strengths of", "?"))
col2.write(
get_recommendation(selected_stock_name, "Who are the key competitors of", "?"))
col2.write(get_recommendation(selected_stock_name, "What are the principal threats to", "?"))
| [] |
2024-01-10 | aadibharane/Langchain_use_cases | Querying_Tabular_Data~sql_agents.py | from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import OpenAI
from langchain.agents import AgentExecutor
import os
os.environ["OPENAI_API_KEY"] ="OPENAI_API_KEY"
def sql_agent():
db = SQLDatabase.from_uri("sqlite:///C:/Program Files/SQLiteStudio/mydb.db")
llm = OpenAI(temperature=0)
toolkit = SQLDatabaseToolkit(db=db,llm=llm)
agent_executor = create_sql_agent(
llm=OpenAI(temperature=0),
toolkit=toolkit,
verbose=True
)
#Example: describing a table
agent_executor.run("Describe the mydb table")
sql_agent() | [] |
2024-01-10 | aadibharane/Langchain_use_cases | Agent_Simulation~Simulations_with_Multiple_Agents~auth_speaker_selection.py | #Multi-agent authoritarian speaker selection
'''
This notebook showcases how to implement a multi-agent simulation where a privileged agent decides who to speak. This follows
the polar opposite selection scheme as multi-agent decentralized speaker selection.
We show an example of this approach in the context of a fictitious simulation of a news network.
This example will showcase how we can implement agents that
1.think before speaking
2.terminate the conversation
'''
import os
os.environ["OPENAI_API_KEY"] ="your_api_key"
#serpapi_key="your_serpapi_key"
#Import LangChain related modules
from collections import OrderedDict
import functools
import random
import re
import tenacity
from typing import List, Dict, Callable
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
PromptTemplate
)
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import RegexParser
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
BaseMessage,
)
#DialogueAgent and DialogueSimulator classes:
#We will use the same DialogueAgent and DialogueSimulator classes defined in our
#other examples Multi-Player Dungeons & Dragons and Decentralized Speaker Selection.
def auth_speaker_selection():
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
# increment time
self._step += 1
def step(self) -> tuple[str, str]:
# 1. choose the next speaker
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
# 2. next speaker sends message
message = speaker.send()
# 3. everyone receives message
for receiver in self.agents:
receiver.receive(speaker.name, message)
# 4. increment time
self._step += 1
return speaker.name, message
#DirectorDialogueAgent class:
'''
The DirectorDialogueAgent is a privileged agent that chooses which of the other agents to speak next.
This agent is responsible for
1.steering the conversation by choosing which agent speaks when
2.terminating the conversation.
'''
##In order to implement such an agent, we need to solve several problems.
'''
First, to steer the conversation, the DirectorDialogueAgent needs to (1) reflect on what has been said, (2) choose the next agent,
and (3) prompt the next agent to speak, all in a single message. While it may be possible to prompt an LLM to perform all three
steps in the same call, this requires writing custom code to parse the outputted message to extract which next agent is chosen to speak.
This is less reliable the LLM can express how it chooses the next agent in different ways.
'''
'''
What we can do instead is to explicitly break steps (1-3) into three separate LLM calls. First we will ask the DirectorDialogueAgent
to reflect on the conversation so far and generate a response. Then we prompt the DirectorDialogueAgent to output the index of the
next agent, which is easily parseable. Lastly, we pass the name of the selected next agent back to DirectorDialogueAgent to ask
it prompt the next agent to speak.
Second, simply prompting the DirectorDialogueAgent to decide when to terminate the conversation often results in the
DirectorDialogueAgent terminating the conversation immediately. To fix this problem, we randomly sample a Bernoulli
variable to decide whether the conversation should terminate. Depending on the value of this variable, we will inject a
custom prompt to tell the DirectorDialogueAgent to either continue the conversation or terminate the conversation.
'''
class IntegerOutputParser(RegexParser):
def get_format_instructions(self) -> str:
return 'Your response should be an integer delimited by angled brackets, like this: <int>.'
class DirectorDialogueAgent(DialogueAgent):
def __init__(
self,
name,
system_message: SystemMessage,
model: ChatOpenAI,
speakers: List[DialogueAgent],
stopping_probability: float,
) -> None:
super().__init__(name, system_message, model)
self.speakers = speakers
self.next_speaker = ''
self.stop = False
self.stopping_probability = stopping_probability
self.termination_clause = 'Finish the conversation by stating a concluding message and thanking everyone.'
self.continuation_clause = 'Do not end the conversation. Keep the conversation going by adding your own ideas.'
# 1. have a prompt for generating a response to the previous speaker
self.response_prompt_template = PromptTemplate(
input_variables=["message_history", "termination_clause"],
template=f"""{{message_history}}
Follow up with an insightful comment.
{{termination_clause}}
{self.prefix}
""")
# 2. have a prompt for deciding who to speak next
self.choice_parser = IntegerOutputParser(
regex=r'<(\d+)>',
output_keys=['choice'],
default_output_key='choice')
self.choose_next_speaker_prompt_template = PromptTemplate(
input_variables=["message_history", "speaker_names"],
template=f"""{{message_history}}
Given the above conversation, select the next speaker by choosing index next to their name:
{{speaker_names}}
{self.choice_parser.get_format_instructions()}
Do nothing else.
""")
# 3. have a prompt for prompting the next speaker to speak
self.prompt_next_speaker_prompt_template = PromptTemplate(
input_variables=["message_history", "next_speaker"],
template=f"""{{message_history}}
The next speaker is {{next_speaker}}.
Prompt the next speaker to speak with an insightful question.
{self.prefix}
""")
def _generate_response(self):
# if self.stop = True, then we will inject the prompt with a termination clause
sample = random.uniform(0,1)
self.stop = sample < self.stopping_probability
print(f'\tStop? {self.stop}\n')
response_prompt = self.response_prompt_template.format(
message_history='\n'.join(self.message_history),
termination_clause=self.termination_clause if self.stop else ''
)
self.response = self.model(
[
self.system_message,
HumanMessage(content=response_prompt),
]
).content
return self.response
@tenacity.retry(stop=tenacity.stop_after_attempt(2),
wait=tenacity.wait_none(), # No waiting time between retries
retry=tenacity.retry_if_exception_type(ValueError),
before_sleep=lambda retry_state: print(f"ValueError occurred: {retry_state.outcome.exception()}, retrying..."),
retry_error_callback=lambda retry_state: 0) # Default value when all retries are exhausted
def _choose_next_speaker(self) -> str:
speaker_names = '\n'.join([f'{idx}: {name}' for idx, name in enumerate(self.speakers)])
choice_prompt = self.choose_next_speaker_prompt_template.format(
message_history='\n'.join(self.message_history + [self.prefix] + [self.response]),
speaker_names=speaker_names
)
choice_string = self.model(
[
self.system_message,
HumanMessage(content=choice_prompt),
]
).content
choice = int(self.choice_parser.parse(choice_string)['choice'])
return choice
def select_next_speaker(self):
return self.chosen_speaker_id
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
# 1. generate and save response to the previous speaker
self.response = self._generate_response()
if self.stop:
message = self.response
else:
# 2. decide who to speak next
self.chosen_speaker_id = self._choose_next_speaker()
self.next_speaker = self.speakers[self.chosen_speaker_id]
print(f'\tNext speaker: {self.next_speaker}\n')
# 3. prompt the next speaker to speak
next_prompt = self.prompt_next_speaker_prompt_template.format(
message_history="\n".join(self.message_history + [self.prefix] + [self.response]),
next_speaker=self.next_speaker
)
message = self.model(
[
self.system_message,
HumanMessage(content=next_prompt),
]
).content
message = ' '.join([self.response, message])
return message
#Define participants and topic
topic = "The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze"
director_name = "Jon Stewart"
agent_summaries = OrderedDict({
"Jon Stewart": ("Host of the Daily Show", "New York"),
"Samantha Bee": ("Hollywood Correspondent", "Los Angeles"),
"Aasif Mandvi": ("CIA Correspondent", "Washington D.C."),
"Ronny Chieng": ("Average American Correspondent", "Cleveland, Ohio"),
})
word_limit = 50
#Generate system messages
agent_summary_string = '\n- '.join([''] + [f'{name}: {role}, located in {location}' for name, (role, location) in agent_summaries.items()])
conversation_description = f"""This is a Daily Show episode discussing the following topic: {topic}.
The episode features {agent_summary_string}."""
agent_descriptor_system_message = SystemMessage(
content="You can add detail to the description of each person.")
def generate_agent_description(agent_name, agent_role, agent_location):
agent_specifier_prompt = [
agent_descriptor_system_message,
HumanMessage(content=
f"""{conversation_description}
Please reply with a creative description of {agent_name}, who is a {agent_role} in {agent_location}, that emphasizes their particular role and location.
Speak directly to {agent_name} in {word_limit} words or less.
Do not add anything else."""
)
]
agent_description = ChatOpenAI(temperature=1.0)(agent_specifier_prompt).content
return agent_description
def generate_agent_header(agent_name, agent_role, agent_location, agent_description):
return f"""{conversation_description}
Your name is {agent_name}, your role is {agent_role}, and you are located in {agent_location}.
Your description is as follows: {agent_description}
You are discussing the topic: {topic}.
Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.
"""
def generate_agent_system_message(agent_name, agent_header):
return SystemMessage(content=(
f"""{agent_header}
You will speak in the style of {agent_name}, and exaggerate your personality.
Do not say the same things over and over again.
Speak in the first person from the perspective of {agent_name}
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Speak only from the perspective of {agent_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
))
agent_descriptions = [generate_agent_description(name, role, location) for name, (role, location) in agent_summaries.items()]
agent_headers = [generate_agent_header(name, role, location, description) for (name, (role, location)), description in zip(agent_summaries.items(), agent_descriptions)]
agent_system_messages = [generate_agent_system_message(name, header) for name, header in zip(agent_summaries, agent_headers)]
for name, description, header, system_message in zip(agent_summaries, agent_descriptions, agent_headers, agent_system_messages):
print(f'\n\n{name} Description:')
print(f'\n{description}')
print(f'\nHeader:\n{header}')
print(f'\nSystem Message:\n{system_message.content}')
#Use an LLM to create an elaborate on debate topic
topic_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(content=
f"""{conversation_description}
Please elaborate on the topic.
Frame the topic as a single question to be answered.
Be creative and imaginative.
Please reply with the specified topic in {word_limit} words or less.
Do not add anything else."""
)
]
specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content
print(f"Original topic:\n{topic}\n")
print(f"Detailed topic:\n{specified_topic}\n")
#Define the speaker selection function
'''
Lastly we will define a speaker selection function select_next_speaker that takes each agent’s bid and
selects the agent with the highest bid (with ties broken randomly).
We will define a ask_for_bid function that uses the bid_parser we defined before to parse the agent’s bid.
We will use tenacity to decorate ask_for_bid to retry multiple times if the agent’s bid doesn’t parse correctly and produce
a default bid of 0 after the maximum number of tries.
'''
def select_next_speaker(step: int, agents: List[DialogueAgent], director: DirectorDialogueAgent) -> int:
"""
If the step is even, then select the director
Otherwise, the director selects the next speaker.
"""
# the director speaks on odd steps
if step % 2 == 1:
idx = 0
else:
# here the director chooses the next speaker
idx = director.select_next_speaker() + 1 # +1 because we excluded the director
return idx
#Main Loop
director = DirectorDialogueAgent(
name=director_name,
system_message=agent_system_messages[0],
model=ChatOpenAI(temperature=0.2),
speakers=[name for name in agent_summaries if name != director_name],
stopping_probability=0.2
)
agents = [director]
for name, system_message in zip(list(agent_summaries.keys())[1:], agent_system_messages[1:]):
agents.append(DialogueAgent(
name=name,
system_message=system_message,
model=ChatOpenAI(temperature=0.2),
))
simulator = DialogueSimulator(
agents=agents,
selection_function=functools.partial(select_next_speaker, director=director)
)
simulator.reset()
simulator.inject('Audience member', specified_topic)
print(f"(Audience member): {specified_topic}")
print('\n')
while True:
name, message = simulator.step()
print(f"({name}): {message}")
print('\n')
if director.stop:
break
auth_speaker_selection() | [
"\n",
"PLACEHOLDER\n You will speak in the style of PLACEHOLDER, and exaggerate your personality.\n Do not say the same things over and over again.\n Speak in the first person from the perspective of PLACEHOLDER\n For describing your own body movements, wrap your description in '*'.\n Do not change roles!\n Do not speak from the perspective of anyone else.\n Speak only from the perspective of PLACEHOLDER.\n Stop speaking the moment you finish speaking from your perspective.\n Never forget to keep your response to PLACEHOLDER words!\n Do not add anything else.\n ",
"You can add detail to the description of each person.",
"You can make a task more specific.",
"PLACEHOLDER\n Please reply with a creative description of PLACEHOLDER, who is a PLACEHOLDER in PLACEHOLDER, that emphasizes their particular role and location.\n Speak directly to PLACEHOLDER in PLACEHOLDER words or less.\n Do not add anything else.",
"PLACEHOLDER\n \n Please elaborate on the topic. \n Frame the topic as a single question to be answered.\n Be creative and imaginative.\n Please reply with the specified topic in PLACEHOLDER words or less. \n Do not add anything else."
] |
2024-01-10 | aadibharane/Langchain_use_cases | Chatbots~voice_assistant.py | # #Voice Assistant
# '''
# This chain creates a clone of ChatGPT with a few modifications to make it a voice assistant. It uses the pyttsx3 and speech_recognition
# libraries to convert text to speech and speech to text respectively. The prompt template is also changed to make it more suitable for
# voice assistant use.
# '''
# from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
# from langchain.memory import ConversationBufferWindowMemory
# import os
# os.environ["OPENAI_API_KEY"] ="OPENAI_API_KEY"
# #serpapi_key="serpapi_key"
# template = """Assistant is a large language model trained by OpenAI.
# Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
# Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
# Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
# Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
# {history}
# Human: {human_input}
# Assistant:"""
# prompt = PromptTemplate(
# input_variables=["history", "human_input"],
# template=template
# )
# chatgpt_chain = LLMChain(
# llm=OpenAI(temperature=0),
# prompt=prompt,
# verbose=True,
# memory=ConversationBufferWindowMemory(k=2),
# )
# import speech_recognition as sr
# import pyttsx3
# engine = pyttsx3.init()
# def listen():
# r = sr.Recognizer()
# with sr.Microphone() as source:
# print('Calibrating...')
# r.adjust_for_ambient_noise(source, duration=5)
# # optional parameters to adjust microphone sensitivity
# # r.energy_threshold = 200
# # r.pause_threshold=0.5
# print('Okay, go!')
# while(1):
# text = ''
# print('listening now...')
# try:
# audio = r.listen(source, timeout=5, phrase_time_limit=30)
# print('Recognizing...')
# # whisper model options are found here: https://github.com/openai/whisper#available-models-and-languages
# # other speech recognition models are also available.
# text = r.recognize_whisper(audio, model='medium.en', show_dict=True, )['text']
# except Exception as e:
# unrecognized_speech_text = f'Sorry, I didn\'t catch that. Exception was: {e}s'
# text = unrecognized_speech_text
# print(text)
# response_text = chatgpt_chain.predict(human_input=text)
# print(response_text)
# engine.say(response_text)
# engine.runAndWait()
# listen()
# #print(res)
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
import os
os.environ["OPENAI_API_KEY"] ="OPENAI_API_KEY"
#serpapi_key="serpapi_key"
template = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
{history}
Human: {human_input}
Assistant:"""
prompt = PromptTemplate(input_variables=["history", "human_input"], template=template)
chatgpt_chain = LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(k=2),
)
import speech_recognition as sr
import pyttsx3
engine = pyttsx3.init()
def listen():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Calibrating...")
r.adjust_for_ambient_noise(source, duration=5)
# optional parameters to adjust microphone sensitivity
# r.energy_threshold = 200
# r.pause_threshold=0.5
print("Okay, go!")
while 1:
text = ""
print("listening now...")
try:
audio = r.listen(source, timeout=5, phrase_time_limit=30)
print("Recognizing...")
# whisper model options are found here: https://github.com/openai/whisper#available-models-and-languages
# other speech recognition models are also available.
text = r.recognize_whisper(
audio,
model="medium.en",
show_dict=True,
)["text"]
except Exception as e:
unrecognized_speech_text = (
f"Sorry, I didn't catch that. Exception was: {e}s"
)
text = unrecognized_speech_text
print(text)
response_text = chatgpt_chain.predict(human_input=text)
print(response_text)
engine.say(response_text)
engine.runAndWait()
listen() | [
"human_input",
"Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n\nAssistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.\n\n{history}\nHuman: {human_input}\nAssistant:"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.