date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | PTG-Kitware/angel_system | ros~angel_system_nodes~angel_system_nodes~audio~intent~gpt_intent_detector.py | from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
import openai
import os
import rclpy
from angel_system_nodes.audio.intent.base_intent_detector import (
BaseIntentDetector,
INTENT_LABELS,
)
from angel_utils import make_default_main
openai.organization = os.getenv("OPENAI_ORG_ID")
openai.api_key = os.getenv("OPENAI_API_KEY")
# The following are few-shot examples used when prompting GPT.
FEW_SHOT_EXAMPLES = [
{"utterance": "Go back to the previous step!", "label": "prev_step."},
{"utterance": "Next step, please.", "label": "next_step"},
{"utterance": "How should I wrap this tourniquet?", "label": "inquiry"},
{"utterance": "The sky is blue", "label": "other"},
]
class GptIntentDetector(BaseIntentDetector):
def __init__(self):
super().__init__()
self.log = self.get_logger()
# This node additionally includes fields for interacting with OpenAI
# via LangChain.
if not os.getenv("OPENAI_API_KEY"):
self.log.info("OPENAI_API_KEY environment variable is unset!")
else:
self.openai_api_key = os.getenv("OPENAI_API_KEY")
if not os.getenv("OPENAI_ORG_ID"):
self.log.info("OPENAI_ORG_ID environment variable is unset!")
else:
self.openai_org_id = os.getenv("OPENAI_ORG_ID")
if not bool(self.openai_api_key and self.openai_org_id):
raise ValueError("Please configure OpenAI API Keys.")
self.chain = self._configure_langchain()
def _configure_langchain(self):
def _labels_list_parenthetical_str(labels):
concat_labels = ", ".join(labels)
return f"({concat_labels})"
def _labels_list_str(labels):
return ", ".join(labels[:-1]) + f" or {labels[-1]}"
all_intents_parenthetical = _labels_list_parenthetical_str(INTENT_LABELS)
all_intents = _labels_list_str(INTENT_LABELS)
# Define the few shot template.
template = (
f"Utterance: {{utterance}}\nIntent {all_intents_parenthetical}: {{label}}"
)
example_prompt = PromptTemplate(
input_variables=["utterance", "label"], template=template
)
prompt_instructions = f"Classify each utterance as {all_intents}.\n"
inference_sample = (
f"Utterance: {{utterance}}\nIntent {all_intents_parenthetical}:"
)
few_shot_prompt = FewShotPromptTemplate(
examples=FEW_SHOT_EXAMPLES,
example_prompt=example_prompt,
prefix=prompt_instructions,
suffix=inference_sample,
input_variables=["utterance"],
example_separator="\n",
)
# Please refer to https://github.com/hwchase17/langchain/blob/master/langchain/llms/openai.py
openai_llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
openai_api_key=self.openai_api_key,
temperature=0.0,
# Only 2 tokens needed for classification (tokens are delimited by use of '_', i.e.
# 'next_step' counts as 2 tokens).
max_tokens=2,
)
return LLMChain(llm=openai_llm, prompt=few_shot_prompt)
def detect_intents(self, msg):
"""
Detects the user intent via langchain execution of GPT.
"""
return self.chain.run(utterance=msg), 0.5
main = make_default_main(GptIntentDetector)
if __name__ == "__main__":
main()
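# Illustration only: a minimal sketch (not part of the collected file) showing
# the text that the few-shot prompt assembled in _configure_langchain() renders
# to for a sample utterance. It reuses the module's own FEW_SHOT_EXAMPLES and
# INTENT_LABELS; the sample utterance is an arbitrary assumption.
def _render_example_intent_prompt(utterance: str = "Go to the next step") -> str:
    labels = list(INTENT_LABELS)
    parenthetical = "(" + ", ".join(labels) + ")"
    example_prompt = PromptTemplate(
        input_variables=["utterance", "label"],
        template=f"Utterance: {{utterance}}\nIntent {parenthetical}: {{label}}",
    )
    few_shot = FewShotPromptTemplate(
        examples=FEW_SHOT_EXAMPLES,
        example_prompt=example_prompt,
        prefix=f"Classify each utterance as {', '.join(labels[:-1])} or {labels[-1]}.\n",
        suffix=f"Utterance: {{utterance}}\nIntent {parenthetical}:",
        input_variables=["utterance"],
        example_separator="\n",
    )
    return few_shot.format(utterance=utterance)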
| [
"Utterance: {utterance}\nIntent PLACEHOLDER: {label}",
"utterance",
"\n",
"Classify each utterance as PLACEHOLDER.\n"
] |
2024-01-10 | PTG-Kitware/angel_system | ros~angel_system_nodes~angel_system_nodes~audio~emotion~gpt_emotion_detector.py | from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
import openai
import os
from angel_system_nodes.audio.emotion.base_emotion_detector import (
BaseEmotionDetector,
LABEL_MAPPINGS,
)
from angel_utils import make_default_main
openai.organization = os.getenv("OPENAI_ORG_ID")
openai.api_key = os.getenv("OPENAI_API_KEY")
# The following are few-shot examples used when prompting GPT.
FEW_SHOT_EXAMPLES = [
{
"utterance": "Go back to the previous step you dumb machine!",
"label": "negative.",
},
{"utterance": "Next step, please.", "label": "neutral"},
{"utterance": "We're doing great and I'm learning a lot!", "label": "positive"},
]
class GptEmotionDetector(BaseEmotionDetector):
def __init__(self):
super().__init__()
self.log = self.get_logger()
# This node additionally includes fields for interacting with OpenAI
# via LangChain.
if not os.getenv("OPENAI_API_KEY"):
self.log.info("OPENAI_API_KEY environment variable is unset!")
else:
self.openai_api_key = os.getenv("OPENAI_API_KEY")
if not os.getenv("OPENAI_ORG_ID"):
self.log.info("OPENAI_ORG_ID environment variable is unset!")
else:
self.openai_org_id = os.getenv("OPENAI_ORG_ID")
if not bool(self.openai_api_key and self.openai_org_id):
raise ValueError("Please configure OpenAI API Keys.")
self.chain = self._configure_langchain()
def _configure_langchain(self):
def _labels_list_parenthetical_str(labels):
concat_labels = ", ".join(labels)
return f"({concat_labels})"
def _labels_list_str(labels):
return ", ".join(labels[:-1]) + f" or {labels[-1]}"
all_labels_parenthetical = _labels_list_parenthetical_str(
list(LABEL_MAPPINGS.values())
)
all_labels = _labels_list_str(list(LABEL_MAPPINGS.values()))
# Define the few shot template.
template = (
f"Utterance: {{utterance}}\nEmotion {all_labels_parenthetical}: {{label}}"
)
example_prompt = PromptTemplate(
input_variables=["utterance", "label"], template=template
)
prompt_instructions = f"Classify each utterance as {all_labels}.\n"
inference_sample = (
f"Utterance: {{utterance}}\nIntent {all_labels_parenthetical}:"
)
few_shot_prompt = FewShotPromptTemplate(
examples=FEW_SHOT_EXAMPLES,
example_prompt=example_prompt,
prefix=prompt_instructions,
suffix=inference_sample,
input_variables=["utterance"],
example_separator="\n",
)
openai_llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
openai_api_key=self.openai_api_key,
temperature=0.0,
max_tokens=1,
)
return LLMChain(llm=openai_llm, prompt=few_shot_prompt)
def get_inference(self, msg):
"""
Detects the user emotion via LangChain execution of GPT.
"""
return (self.chain.run(utterance=msg.utterance_text), 0.5)
main = make_default_main(GptEmotionDetector)
if __name__ == "__main__":
main()
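# Illustration only: a quick sketch (not part of the collected file) relating to
# the max_tokens=1 setting above -- each emotion label is expected to be a single
# token under the gpt-3.5-turbo tokenizer. `tiktoken` is an assumed optional
# dependency used only for this check.
def _emotion_label_token_lengths():
    import tiktoken  # assumed to be installed; not required by the node itself
    enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
    return {label: len(enc.encode(label)) for label in LABEL_MAPPINGS.values()}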
| [
"Utterance: {utterance}\nEmotion PLACEHOLDER: {label}",
"utterance",
"\n",
"Classify each utterance as PLACEHOLDER.\n"
] |
2024-01-10 | PTG-Kitware/angel_system | ros~angel_system_nodes~angel_system_nodes~audio~question_answerer.py | import json
import openai
import os
import queue
from rclpy.node import Node
import requests
from termcolor import colored
import threading
from angel_msgs.msg import InterpretedAudioUserEmotion, SystemTextResponse
from angel_utils import declare_and_get_parameters
from angel_utils import make_default_main
openai.organization = os.getenv("OPENAI_ORG_ID")
openai.api_key = os.getenv("OPENAI_API_KEY")
IN_EMOTION_TOPIC = "user_emotion_topic"
OUT_QA_TOPIC = "system_text_response_topic"
FEW_SHOT_PROMPT = "few_shot_prompt_file"
class QuestionAnswerer(Node):
def __init__(self):
super().__init__(self.__class__.__name__)
self.log = self.get_logger()
param_values = declare_and_get_parameters(
self,
[
(IN_EMOTION_TOPIC,),
(OUT_QA_TOPIC,),
(FEW_SHOT_PROMPT,),
],
)
self._in_emotion_topic = param_values[IN_EMOTION_TOPIC]
self._out_qa_topic = param_values[OUT_QA_TOPIC]
self.prompt_file = param_values[FEW_SHOT_PROMPT]
self.question_queue = queue.Queue()
self.handler_thread = threading.Thread(target=self.process_question_queue)
self.handler_thread.start()
with open(self.prompt_file, "r") as file:
self.prompt = file.read()
self.log.info(f"Initialized few-shot prompt to:\n\n {self.prompt}\n\n")
self.is_openai_ready = True
if not os.getenv("OPENAI_API_KEY"):
self.log.info("OPENAI_API_KEY environment variable is unset!")
self.is_openai_ready = False
else:
self.openai_api_key = os.getenv("OPENAI_API_KEY")
if not os.getenv("OPENAI_ORG_ID"):
self.log.info("OPENAI_ORG_ID environment variable is unset!")
self.is_openai_ready = False
else:
self.openai_org_id = os.getenv("OPENAI_ORG_ID")
# Handle subscription/publication topics.
self.subscription = self.create_subscription(
InterpretedAudioUserEmotion,
self._in_emotion_topic,
self.question_answer_callback,
1,
)
self._qa_publisher = self.create_publisher(
SystemTextResponse, self._out_qa_topic, 1
)
def get_response(self, user_utterance: str, user_emotion: str):
"""
Generate a response to the utterance, enriched with the addition of
the user's detected emotion. Inference calls can be added and revised
here.
"""
return_msg = ""
try:
if self.is_openai_ready:
return_msg = colored(
self.prompt_gpt(user_utterance) + "\n", "light_green"
)
except RuntimeError as err:
self.log.info(err)
colored_apology = colored(
"I'm sorry. I don't know how to answer your statement.", "light_red"
)
colored_emotion = colored(user_emotion, "light_red")
return_msg = (
f"{colored_apology} I understand that you feel {colored_emotion}."
)
return return_msg
def question_answer_callback(self, msg):
"""
This is the main ROS node listener callback loop that will process
all messages received via subscribed topics.
"""
self.log.debug(f"Received message:\n\n{msg.utterance_text}")
if not self._apply_filter(msg):
return
self.question_queue.put(msg)
def process_question_queue(self):
"""
Constant loop to process received questions.
"""
while True:
msg = self.question_queue.get()
emotion = msg.user_emotion
response = self.get_response(msg.utterance_text, emotion)
self.publish_generated_response(msg.utterance_text, response)
def publish_generated_response(self, utterance: str, response: str):
msg = SystemTextResponse()
msg.header.frame_id = "GPT Question Answering"
msg.header.stamp = self.get_clock().now().to_msg()
msg.utterance_text = utterance
msg.response = response
colored_utterance = colored(utterance, "light_blue")
colored_response = colored(response, "light_green")
self.log.info(
f'Responding to utterance:\n>>> "{colored_utterance}"\n>>> with:\n'
+ f'>>> "{colored_response}"'
)
self._qa_publisher.publish(msg)
def prompt_gpt(self, question, model: str = "gpt-3.5-turbo"):
prompt = self.prompt.format(question)
self.log.debug(f"Prompting OpenAI with\n {prompt}\n")
payload = {
"model": model,
"messages": [{"role": "user", "content": prompt}],
"temperature": 0.7,
"max_tokens": 64,
}
req = requests.post(
"https://api.openai.com/v1/chat/completions",
json=payload,
headers={"Authorization": "Bearer {}".format(self.openai_api_key)},
)
return (
json.loads(req.text)["choices"][0]["message"]["content"]
.split("A:")[-1]
.lstrip()
)
def _apply_filter(self, msg):
"""
Abstracts away any filtering applied to received messages. Returns None if
the message should be filtered out; otherwise, returns the incoming msg.
"""
return msg
main = make_default_main(QuestionAnswerer)
if __name__ == "__main__":
main()
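# Illustration only: a sketch (not part of the collected file) of a few-shot
# prompt file compatible with prompt_gpt() above. The file must contain one
# positional "{}" slot for the question and end with "A:", since the completion
# is split on "A:". The text below is an assumption, not the project's real
# prompt file.
_ASSUMED_PROMPT_FILE_CONTENTS = (
    "Answer the user's question about the current task.\n"
    "Q: How tight should the tourniquet be?\n"
    "A: Tight enough to stop the bleeding.\n"
    "Q: {}\n"
    "A:"
)
# usage sketch: _ASSUMED_PROMPT_FILE_CONTENTS.format("What is the next step?")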
| [
"few_shot_prompt_file"
] |
2024-01-10 | kaixindelele/gpt_academic | crazy_functions~crazy_utils.py | from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder
import threading
import os
import logging
def input_clipping(inputs, history, max_token_limit):
import numpy as np
from request_llm.bridge_all import model_info
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
mode = 'input-and-history'
# 当 输入部分的token占比 小于 全文的一半时,只裁剪历史
input_token_num = get_token_num(inputs)
if input_token_num < max_token_limit//2:
mode = 'only-history'
max_token_limit = max_token_limit - input_token_num
everything = [inputs] if mode == 'input-and-history' else ['']
everything.extend(history)
n_token = get_token_num('\n'.join(everything))
everything_token = [get_token_num(e) for e in everything]
delta = max(everything_token) // 16 # 截断时的颗粒度
while n_token > max_token_limit:
where = np.argmax(everything_token)
encoded = enc.encode(everything[where], disallowed_special=())
clipped_encoded = encoded[:len(encoded)-delta]
everything[where] = enc.decode(clipped_encoded)[:-1] # -1 to remove the may-be illegal char
everything_token[where] = get_token_num(everything[where])
n_token = get_token_num('\n'.join(everything))
if mode == 'input-and-history':
inputs = everything[0]
else:
pass
history = everything[1:]
return inputs, history
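# Illustration only: a self-contained sketch (not part of the collected file) of
# the same greedy clipping idea as input_clipping(), using a whitespace
# "tokenizer" so it runs without the request_llm tokenizer. It repeatedly trims
# a slice off the longest entry until the joined text fits under the limit.
def _clip_demo(inputs, history, max_token_limit):
    count = lambda s: len(s.split())
    everything = [inputs] + list(history)
    delta = max(1, max(count(e) for e in everything) // 16)  # truncation granularity
    while count("\n".join(everything)) > max_token_limit:
        longest = max(range(len(everything)), key=lambda i: count(everything[i]))
        words = everything[longest].split()
        if not words:  # nothing left to trim; stop rather than loop forever
            break
        everything[longest] = " ".join(words[: max(0, len(words) - delta)])
    return everything[0], everything[1:]
# usage sketch: _clip_demo("question " * 5, ["answer " * 40], max_token_limit=20)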
def request_gpt_model_in_new_thread_with_ui_alive(
inputs, inputs_show_user, llm_kwargs,
chatbot, history, sys_prompt, refresh_interval=0.2,
handle_token_exceed=True,
retry_times_at_unknown_error=2,
):
"""
Request the GPT model while keeping the user interface responsive.
Args:
inputs (string): the query sent to GPT
inputs_show_user (string): the input shown in the report; lets the summary hide a verbose real input and stay readable
top_p (float): top-p value for sampling from the model distribution (GPT parameter)
temperature (float): temperature for sampling from the model distribution (GPT parameter)
chatbot: chatbot inputs and outputs (handle of the UI dialogue window, used to visualize the data stream)
history (list): list of chat history
sys_prompt (string): system prompt given to GPT as context (e.g. "you are a translator, ...")
refresh_interval (float, optional): UI refresh interval (default 0.2; keep it below 1, never above 3; purely cosmetic)
handle_token_exceed: whether to handle token overflow automatically; if enabled, the text is truncated aggressively on overflow (default on)
retry_times_at_unknown_error: number of retries on failure
Returns:
future: the result returned by GPT
"""
import time
from concurrent.futures import ThreadPoolExecutor
from request_llm.bridge_all import predict_no_ui_long_connection
# 用户反馈
chatbot.append([inputs_show_user, ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
executor = ThreadPoolExecutor(max_workers=16)
mutable = ["", time.time(), ""]
# 看门狗耐心
watch_dog_patience = 5
# 请求任务
def _req_gpt(inputs, history, sys_prompt):
retry_op = retry_times_at_unknown_error
exceeded_cnt = 0
while True:
# watchdog error
if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience:
raise RuntimeError("检测到程序终止。")
try:
# 【第一种情况】:顺利完成
result = predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs,
history=history, sys_prompt=sys_prompt, observe_window=mutable)
return result
except ConnectionAbortedError as token_exceeded_error:
# 【第二种情况】:Token溢出
if handle_token_exceed:
exceeded_cnt += 1
# 【选择处理】 尝试计算比例,尽可能多地保留文本
from toolbox import get_reduce_token_percent
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
MAX_TOKEN = 4096
EXCEED_ALLO = 512 + 512 * exceeded_cnt
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
continue # 返回重试
else:
# 【选择放弃】
tb_str = '```\n' + trimmed_format_exc() + '```'
mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
return mutable[0] # 放弃
except:
# 【第三种情况】:其他错误:重试几次
tb_str = '```\n' + trimmed_format_exc() + '```'
print(tb_str)
mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
if retry_op > 0:
retry_op -= 1
mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}:\n\n"
if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
time.sleep(30)
time.sleep(5)
continue # 返回重试
else:
time.sleep(5)
return mutable[0] # 放弃
# 提交任务
future = executor.submit(_req_gpt, inputs, history, sys_prompt)
while True:
# yield一次以刷新前端页面
time.sleep(refresh_interval)
# “喂狗”(看门狗)
mutable[1] = time.time()
if future.done():
break
chatbot[-1] = [chatbot[-1][0], mutable[0]]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
final_result = future.result()
chatbot[-1] = [chatbot[-1][0], final_result]
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
return final_result
def can_multi_process(llm):
if llm.startswith('gpt-'): return True
if llm.startswith('api2d-'): return True
if llm.startswith('azure-'): return True
return False
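# Illustration only (not part of the collected file): can_multi_process() gates
# which backends may fan out across threads below -- OpenAI-style endpoints may,
# while local models such as chatglm are forced down to a single worker.
def _can_multi_process_examples():
    # model names here are assumptions chosen to show each branch
    return {name: can_multi_process(name)
            for name in ("gpt-3.5-turbo", "api2d-gpt-4", "azure-gpt-35-turbo", "chatglm")}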
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
inputs_array, inputs_show_user_array, llm_kwargs,
chatbot, history_array, sys_prompt_array,
refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
handle_token_exceed=True, show_user_at_complete=False,
retry_times_at_unknown_error=2,
):
"""
Request GPT model using multiple threads with UI and high efficiency
Multi-threaded version of the GPT request helper. Features:
Streams remote data back to the UI in real time
Uses a thread pool whose size can be tuned to avoid OpenAI rate-limit errors
Handles mid-run interruption
Forwards the traceback and any partially received data to the output when the network fails
Args (arguments ending in _array are lists with one entry per sub-task; the list is unpacked and each entry runs in its own worker thread):
inputs_array (list): list of inputs, one per sub-task
inputs_show_user_array (list): inputs shown in the report for each sub-task; lets the summary hide verbose real inputs and stay readable
llm_kwargs: llm_kwargs parameters
chatbot: chatbot (handle of the UI dialogue window, used to visualize the data stream)
history_array (list): nested list of chat history; the outer list is per sub-task, the inner list is that sub-task's dialogue history
sys_prompt_array (list): list of system prompts given to GPT as context (e.g. "you are a translator, ...")
refresh_interval (float, optional): UI refresh interval (default 0.2; keep it below 1, never above 3; purely cosmetic)
max_workers (int, optional): maximum number of threads (default: see config.py); caps the request rate to OpenAI when there are many sub-tasks
scroller_max_len (int, optional): how many trailing characters of the stream to display (default 30; purely cosmetic)
handle_token_exceed (bool, optional): whether to handle token overflow automatically; if enabled, the text is truncated aggressively on overflow (default on)
show_user_at_complete (bool, optional): show the complete input/output pairs in the chat window when finished
retry_times_at_unknown_error: number of retries when a sub-task fails
Returns:
list: collected outputs of all sub-tasks; if a sub-task failed, its response carries the traceback to ease debugging
"""
import time, random
from concurrent.futures import ThreadPoolExecutor
from request_llm.bridge_all import predict_no_ui_long_connection
assert len(inputs_array) == len(history_array)
assert len(inputs_array) == len(sys_prompt_array)
if max_workers == -1: # 读取配置文件
try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
except: max_workers = 15
if max_workers <= 0: max_workers = 3
# 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
if not can_multi_process(llm_kwargs['llm_model']):
max_workers = 1
executor = ThreadPoolExecutor(max_workers=max_workers)
n_frag = len(inputs_array)
# 用户反馈
chatbot.append(["请开始多线程操作。", ""])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
# 跨线程传递
mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
# 看门狗耐心
watch_dog_patience = 5
# 子线程任务
def _req_gpt(index, inputs, history, sys_prompt):
gpt_say = ""
retry_op = retry_times_at_unknown_error
exceeded_cnt = 0
mutable[index][2] = "执行中"
while True:
# watchdog error
if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience:
raise RuntimeError("检测到程序终止。")
try:
# 【第一种情况】:顺利完成
# time.sleep(10); raise RuntimeError("测试")
gpt_say = predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs, history=history,
sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
)
mutable[index][2] = "已成功"
return gpt_say
except ConnectionAbortedError as token_exceeded_error:
# 【第二种情况】:Token溢出,
if handle_token_exceed:
exceeded_cnt += 1
# 【选择处理】 尝试计算比例,尽可能多地保留文本
from toolbox import get_reduce_token_percent
p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
MAX_TOKEN = 4096
EXCEED_ALLO = 512 + 512 * exceeded_cnt
inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
mutable[index][2] = f"截断重试"
continue # 返回重试
else:
# 【选择放弃】
tb_str = '```\n' + trimmed_format_exc() + '```'
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
mutable[index][2] = "输入过长已放弃"
return gpt_say # 放弃
except:
# 【第三种情况】:其他错误
tb_str = '```\n' + trimmed_format_exc() + '```'
print(tb_str)
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
if retry_op > 0:
retry_op -= 1
wait = random.randint(5, 20)
if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
wait = wait * 3
fail_info = "OpenAI绑定信用卡可解除频率限制 "
else:
fail_info = ""
# 也许等待十几秒后,情况会好转
for i in range(wait):
mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
# 开始重试
mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
continue # 返回重试
else:
mutable[index][2] = "已失败"
wait = 5
time.sleep(5)
return gpt_say # 放弃
# 异步任务开始
futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
cnt = 0
while True:
# yield一次以刷新前端页面
time.sleep(refresh_interval)
cnt += 1
worker_done = [h.done() for h in futures]
# 更好的UI视觉效果
observe_win = []
# 每个线程都要“喂狗”(看门狗)
for thread_index, _ in enumerate(worker_done):
mutable[thread_index][1] = time.time()
# 在前端打印些好玩的东西
for thread_index, _ in enumerate(worker_done):
print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
replace('\n', '').replace('`', '.').replace(
' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
observe_win.append(print_something_really_funny)
# 在前端打印些好玩的东西
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
if not done else f'`{mutable[thread_index][2]}`\n\n'
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
# 在前端打印些好玩的东西
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
if all(worker_done):
executor.shutdown()
break
# 异步任务结束
gpt_response_collection = []
for inputs_show_user, f in zip(inputs_show_user_array, futures):
gpt_res = f.result()
gpt_response_collection.extend([inputs_show_user, gpt_res])
# 是否在结束时,在界面上显示结果
if show_user_at_complete:
for inputs_show_user, f in zip(inputs_show_user_array, futures):
gpt_res = f.result()
chatbot.append([inputs_show_user, gpt_res])
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
time.sleep(0.5)
return gpt_response_collection
def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
def cut(txt_tocut, must_break_at_empty_line): # 递归
if get_token_fn(txt_tocut) <= limit:
return [txt_tocut]
else:
lines = txt_tocut.split('\n')
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
estimated_line_cut = int(estimated_line_cut)
for cnt in reversed(range(estimated_line_cut)):
if must_break_at_empty_line:
if lines[cnt] != "":
continue
print(cnt)
prev = "\n".join(lines[:cnt])
post = "\n".join(lines[cnt:])
if get_token_fn(prev) < limit:
break
if cnt == 0:
raise RuntimeError("存在一行极长的文本!")
# print(len(post))
# 列表递归接龙
result = [prev]
result.extend(cut(post, must_break_at_empty_line))
return result
try:
return cut(txt, must_break_at_empty_line=True)
except RuntimeError:
return cut(txt, must_break_at_empty_line=False)
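# Illustration only: a usage sketch (not part of the collected file) for
# breakdown_txt_to_satisfy_token_limit(). Any callable can serve as get_token_fn;
# here a plain character count stands in for a real tokenizer.
def _breakdown_demo():
    sample = "para one line a\npara one line b\n\npara two line a\npara two line b\n"
    pieces = breakdown_txt_to_satisfy_token_limit(sample, get_token_fn=len, limit=40)
    return [len(p) for p in pieces]  # each piece stays under the 40-character limit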
def force_breakdown(txt, limit, get_token_fn):
"""
Fall back to brute-force character-level cutting when the text cannot be split on punctuation or blank lines.
"""
for i in reversed(range(len(txt))):
if get_token_fn(txt[:i]) < limit:
return txt[:i], txt[i:]
return "Tiktoken未知错误", "Tiktoken未知错误"
def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
# 递归
def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
if get_token_fn(txt_tocut) <= limit:
return [txt_tocut]
else:
lines = txt_tocut.split('\n')
estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
estimated_line_cut = int(estimated_line_cut)
cnt = 0
for cnt in reversed(range(estimated_line_cut)):
if must_break_at_empty_line:
if lines[cnt] != "":
continue
prev = "\n".join(lines[:cnt])
post = "\n".join(lines[cnt:])
if get_token_fn(prev) < limit:
break
if cnt == 0:
if break_anyway:
prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
else:
raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
# print(len(post))
# 列表递归接龙
result = [prev]
result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
return result
try:
# 第1次尝试,将双空行(\n\n)作为切分点
return cut(txt, must_break_at_empty_line=True)
except RuntimeError:
try:
# 第2次尝试,将单空行(\n)作为切分点
return cut(txt, must_break_at_empty_line=False)
except RuntimeError:
try:
# 第3次尝试,将英文句号(.)作为切分点
res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False) # 这个中文的句号是故意的,作为一个标识而存在
return [r.replace('。\n', '.') for r in res]
except RuntimeError as e:
try:
# 第4次尝试,将中文句号(。)作为切分点
res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
return [r.replace('。。\n', '。') for r in res]
except RuntimeError as e:
# 第5次尝试,没办法了,随便切一下敷衍吧
return cut(txt, must_break_at_empty_line=False, break_anyway=True)
def read_and_clean_pdf_text(fp):
"""
Split and clean the text of a PDF. Uses many tricks; the logic is messy, but the results are surprisingly good.
Input:
- `fp`: path of the PDF file to read and clean
Output:
- `meta_txt`: the cleaned text content as a string
- `page_one_meta`: list of cleaned text blocks from the first page
What it does:
Reads the PDF and cleans its text content. Cleaning rules include:
- extract the text of every block element and merge it into one string
- drop short blocks (fewer than 100 characters) and replace them with a newline
- remove redundant blank lines
- merge paragraph blocks that start with a lowercase letter, joining them with a space
- remove duplicated newlines
- replace each newline with two newlines so paragraphs are separated by a blank line
"""
import fitz, copy
import re
import numpy as np
from colorful import print亮黄, print亮绿
fc = 0 # Index 0 文本
fs = 1 # Index 1 字体
fb = 2 # Index 2 框框
REMOVE_FOOT_NOTE = True # 是否丢弃掉 不是正文的内容 (比正文字体小,如参考文献、脚注、图注等)
REMOVE_FOOT_FFSIZE_PERCENT = 0.95 # 小于正文的?时,判定为不是正文(有些文章的正文部分字体大小不是100%统一的,有肉眼不可见的小变化)
def primary_ffsize(l):
"""
提取文本块主字体
"""
fsize_statiscs = {}
for wtf in l['spans']:
if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
fsize_statiscs[wtf['size']] += len(wtf['text'])
return max(fsize_statiscs, key=fsize_statiscs.get)
def ffsize_same(a,b):
"""
提取字体大小是否近似相等
"""
return abs((a-b)/max(a,b)) < 0.02
with fitz.open(fp) as doc:
meta_txt = []
meta_font = []
meta_line = []
meta_span = []
############################## <第 1 步,搜集初始信息> ##################################
for index, page in enumerate(doc):
# file_content += page.get_text()
text_areas = page.get_text("dict") # 获取页面上的文本信息
for t in text_areas['blocks']:
if 'lines' in t:
pf = 998
for l in t['lines']:
txt_line = "".join([wtf['text'] for wtf in l['spans']])
if len(txt_line) == 0: continue
pf = primary_ffsize(l)
meta_line.append([txt_line, pf, l['bbox'], l])
for wtf in l['spans']: # for l in t['lines']:
meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
# meta_line.append(["NEW_BLOCK", pf])
# 块元提取 for each word segment with in line for each line cross-line words for each block
meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
'- ', '') for t in text_areas['blocks'] if 'lines' in t])
meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
if index == 0:
page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
'- ', '') for t in text_areas['blocks'] if 'lines' in t]
############################## <第 2 步,获取正文主字体> ##################################
try:
fsize_statiscs = {}
for span in meta_span:
if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
fsize_statiscs[span[1]] += span[2]
main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
if REMOVE_FOOT_NOTE:
give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT
except:
raise RuntimeError(f'抱歉, 我们暂时无法解析此PDF文档: {fp}。')
############################## <第 3 步,切分和重新整合> ##################################
mega_sec = []
sec = []
for index, line in enumerate(meta_line):
if index == 0:
sec.append(line[fc])
continue
if REMOVE_FOOT_NOTE:
if meta_line[index][fs] <= give_up_fize_threshold:
continue
if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]):
# 尝试识别段落
if meta_line[index][fc].endswith('.') and\
(meta_line[index-1][fc] != 'NEW_BLOCK') and \
(meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
sec[-1] += line[fc]
sec[-1] += "\n\n"
else:
sec[-1] += " "
sec[-1] += line[fc]
else:
if (index+1 < len(meta_line)) and \
meta_line[index][fs] > main_fsize:
# 单行 + 字体大
mega_sec.append(copy.deepcopy(sec))
sec = []
sec.append("# " + line[fc])
else:
# 尝试识别section
if meta_line[index-1][fs] > meta_line[index][fs]:
sec.append("\n" + line[fc])
else:
sec.append(line[fc])
mega_sec.append(copy.deepcopy(sec))
finals = []
for ms in mega_sec:
final = " ".join(ms)
final = final.replace('- ', ' ')
finals.append(final)
meta_txt = finals
############################## <第 4 步,乱七八糟的后处理> ##################################
def 把字符太少的块清除为回车(meta_txt):
for index, block_txt in enumerate(meta_txt):
if len(block_txt) < 100:
meta_txt[index] = '\n'
return meta_txt
meta_txt = 把字符太少的块清除为回车(meta_txt)
def 清理多余的空行(meta_txt):
for index in reversed(range(1, len(meta_txt))):
if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
meta_txt.pop(index)
return meta_txt
meta_txt = 清理多余的空行(meta_txt)
def 合并小写开头的段落块(meta_txt):
def starts_with_lowercase_word(s):
pattern = r"^[a-z]+"
match = re.match(pattern, s)
if match:
return True
else:
return False
for _ in range(100):
for index, block_txt in enumerate(meta_txt):
if starts_with_lowercase_word(block_txt):
if meta_txt[index-1] != '\n':
meta_txt[index-1] += ' '
else:
meta_txt[index-1] = ''
meta_txt[index-1] += meta_txt[index]
meta_txt[index] = '\n'
return meta_txt
meta_txt = 合并小写开头的段落块(meta_txt)
meta_txt = 清理多余的空行(meta_txt)
meta_txt = '\n'.join(meta_txt)
# 清除重复的换行
for _ in range(5):
meta_txt = meta_txt.replace('\n\n', '\n')
# 换行 -> 双换行
meta_txt = meta_txt.replace('\n', '\n\n')
############################## <第 5 步,展示分割效果> ##################################
# for f in finals:
# print亮黄(f)
# print亮绿('***************************')
return meta_txt, page_one_meta
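# Illustration only: a usage sketch (not part of the collected file) for
# read_and_clean_pdf_text(). "paper.pdf" is a hypothetical path, and PyMuPDF
# (fitz) must be installed for this to run.
def _pdf_clean_demo(fp="paper.pdf"):
    meta_txt, page_one_meta = read_and_clean_pdf_text(fp)
    return meta_txt[:300], page_one_meta[:3]  # text preview, first-page blocks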
def get_files_from_everything(txt, type): # type='.md'
"""
Collect all files of a given type (e.g. .md) under a directory; a file hosted on the web can also be fetched.
Parameters:
- txt: a path or URL pointing to a file, a folder to search, or a remote file.
- type: string, the file extension to search for (e.g. ".md").
Returns:
- success: bool, whether the function succeeded.
- file_manifest: list of absolute paths of all files with the given extension.
- project_folder: string, the folder containing the files; for a remote file this is a log/temporary folder.
"""
import glob, os
success = True
if txt.startswith('http'):
# 网络的远程文件
import requests
from toolbox import get_conf
from toolbox import get_log_folder, gen_time_str
proxies, = get_conf('proxies')
try:
r = requests.get(txt, proxies=proxies)
except:
raise ConnectionRefusedError(f"无法下载资源{txt},请检查。")
path = os.path.join(get_log_folder(plugin_name='web_download'), gen_time_str()+type)
with open(path, 'wb+') as f: f.write(r.content)
project_folder = get_log_folder(plugin_name='web_download')
file_manifest = [path]
elif txt.endswith(type):
# 直接给定文件
file_manifest = [txt]
project_folder = os.path.dirname(txt)
elif os.path.exists(txt):
# 本地路径,递归搜索
project_folder = txt
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*'+type, recursive=True)]
if len(file_manifest) == 0:
success = False
else:
project_folder = None
file_manifest = []
success = False
return success, file_manifest, project_folder
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
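# Illustration only: a minimal sketch (not part of the collected file) of what
# the Singleton decorator above guarantees -- every construction of a decorated
# class returns the one cached instance.
@Singleton
class _SingletonDemo:
    def __init__(self):
        self.calls = 0
# usage sketch: _SingletonDemo() is _SingletonDemo()  -> True (same object)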
@Singleton
class knowledge_archive_interface():
def __init__(self) -> None:
self.threadLock = threading.Lock()
self.current_id = ""
self.kai_path = None
self.qa_handle = None
self.text2vec_large_chinese = None
def get_chinese_text2vec(self):
if self.text2vec_large_chinese is None:
# < -------------------预热文本向量化模组--------------- >
from toolbox import ProxyNetworkActivate
print('Checking Text2vec ...')
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
with ProxyNetworkActivate('Download_LLM'): # 临时地激活代理网络
self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
return self.text2vec_large_chinese
def feed_archive(self, file_manifest, id="default"):
self.threadLock.acquire()
# import uuid
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=file_manifest,
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
def get_current_archive_id(self):
return self.current_id
def get_loaded_file(self):
return self.qa_handle.get_loaded_file()
def answer_with_archive_by_id(self, txt, id):
self.threadLock.acquire()
if not self.current_id == id:
self.current_id = id
from zh_langchain import construct_vector_store
self.qa_handle, self.kai_path = construct_vector_store(
vs_id=self.current_id,
files=[],
sentence_size=100,
history=[],
one_conent="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
VECTOR_SEARCH_SCORE_THRESHOLD = 0
VECTOR_SEARCH_TOP_K = 4
CHUNK_SIZE = 512
resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
query = txt,
vs_path = self.kai_path,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K,
chunk_conent=True,
chunk_size=CHUNK_SIZE,
text2vec = self.get_chinese_text2vec(),
)
self.threadLock.release()
return resp, prompt
@Singleton
class nougat_interface():
def __init__(self):
self.threadLock = threading.Lock()
def nougat_with_timeout(self, command, cwd, timeout=3600):
import subprocess
logging.info(f'正在执行命令 {command}')
process = subprocess.Popen(command, shell=True, cwd=cwd)
try:
stdout, stderr = process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
print("Process timed out!")
return False
return True
def NOUGAT_parse_pdf(self, fp, chatbot, history):
from toolbox import update_ui_lastest_msg
yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
chatbot=chatbot, history=history, delay=0)
self.threadLock.acquire()
import glob, threading, os
from toolbox import get_log_folder, gen_time_str
dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
os.makedirs(dst)
yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
chatbot=chatbot, history=history, delay=0)
self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600)
res = glob.glob(os.path.join(dst,'*.mmd'))
if len(res) == 0:
self.threadLock.release()
raise RuntimeError("Nougat解析论文失败。")
self.threadLock.release()
return res[0]
def try_install_deps(deps, reload_m=[]):
import subprocess, sys, importlib
for dep in deps:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', dep])
import site
importlib.reload(site)
for m in reload_m:
importlib.reload(__import__(m))
HTML_CSS = """
.row {
display: flex;
flex-wrap: wrap;
}
.column {
flex: 1;
padding: 10px;
}
.table-header {
font-weight: bold;
border-bottom: 1px solid black;
}
.table-row {
border-bottom: 1px solid lightgray;
}
.table-cell {
padding: 5px;
}
"""
TABLE_CSS = """
<div class="row table-row">
<div class="column table-cell">REPLACE_A</div>
<div class="column table-cell">REPLACE_B</div>
</div>
"""
class construct_html():
def __init__(self) -> None:
self.css = HTML_CSS
self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
def add_row(self, a, b):
tmp = TABLE_CSS
from toolbox import markdown_convertion
tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
self.html_string += tmp
def save_file(self, file_name):
with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
f.write(self.html_string.encode('utf-8', 'ignore').decode())
return os.path.join(get_log_folder(), file_name)
def get_plugin_arg(plugin_kwargs, key, default):
# 如果参数是空的
if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key)
# 正常情况
return plugin_kwargs.get(key, default)
| [] |
2024-01-10 | TheoremOne/video-captioning | app2.py | import streamlit as st
import av
import numpy as np
import torch
from transformers import AutoImageProcessor, AutoTokenizer, VisionEncoderDecoderModel
from pytube import YouTube
import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# Check if CUDA is available and set the device accordingly
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load pretrained processor, tokenizer, and model
image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = VisionEncoderDecoderModel.from_pretrained(
"Neleac/timesformer-gpt2-video-captioning"
).to(device)
def main():
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.title("Video Captioning with TimesFormer-GPT2")
# Option to choose between uploading a video file or pasting a YouTube link
option = st.radio(
"Choose an option:", ("Upload a video file", "Paste a YouTube link")
)
if option == "Upload a video file":
video_file = st.file_uploader("Upload a video file", type=["mp4"])
if video_file is not None:
st.video(video_file)
frames, seg_len = extract_frames(video_file)
if frames:
generate_captions(frames, seg_len)
elif option == "Paste a YouTube link":
youtube_link = st.text_input("Paste a YouTube link")
if youtube_link:
st.write("Downloading video from YouTube...")
video_file_path = download_youtube_video(youtube_link)
if video_file_path:
st.video(video_file_path)
frames, seg_len = extract_frames(video_file_path)
if frames:
generate_captions(frames, seg_len)
# Add a function to download a YouTube video
def download_youtube_video(youtube_link):
try:
yt = YouTube(youtube_link)
stream = yt.streams.filter(file_extension="mp4", progressive=True).first()
video_file_path = stream.download()
return video_file_path
except Exception as e:
st.error(f"Error downloading YouTube video: {e}")
return None
# Add a function to extract frames from a video
def extract_frames(video_file):
container = av.open(video_file)
seg_len = container.streams.video[0].frames
clip_len = model.config.encoder.num_frames
indices = set(
np.linspace(0, seg_len, num=clip_len, endpoint=False).astype(np.int64)
)
frames = []
container.seek(0)
for i, frame in enumerate(container.decode(video=0)):
if i in indices:
frames.append(frame.to_ndarray(format="rgb24"))
return frames, seg_len
# Add a function to generate captions
def generate_captions(frames, seg_len):
st.write("Generating captions...")
gen_kwargs = {
"min_length": 10,
"max_length": 20,
"num_beams": 8,
}
pixel_values = image_processor(frames, return_tensors="pt").pixel_values.to(device)
tokens = model.generate(pixel_values, **gen_kwargs)
caption = tokenizer.batch_decode(tokens, skip_special_tokens=True)[0]
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are given a caption line from a video summarizer model. Expand it into a few more lines. Don't get too creative to avoid writing something that is not in the video"},
{"role": "user", "content": f"Caption: {caption}"},
],
)
st.write("Generated Caption:")
if not completion:
st.write(caption)
else:
st.write(completion.choices[0].message["content"])
if __name__ == "__main__":
main()
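# Illustration only: a standalone look (not part of the collected file) at the
# frame-sampling pattern used in extract_frames() -- clip_len evenly spaced
# indices across the video. The 300-frame video and 8-frame clip are assumptions.
def _sample_indices(seg_len=300, clip_len=8):
    return np.linspace(0, seg_len, num=clip_len, endpoint=False).astype(np.int64)
# e.g. _sample_indices() -> array([  0,  37,  75, 112, 150, 187, 225, 262])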
| [
"Caption: PLACEHOLDER",
"You are given a caption line from a video summarizer model. Expand it into a few more lines. Don't get too creative to avoid writing something that is not in the video"
] |
2024-01-10 | gem5bootcamp/gem5-bootcamp-env | materials~using-gem5~02-stdlib~complete~x86-full-system-complete.py | from gem5.utils.requires import requires
from gem5.components.boards.x86_board import X86Board
from gem5.components.memory.single_channel import SingleChannelDDR3_1600
from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import (
MESITwoLevelCacheHierarchy,
)
from gem5.components.processors.simple_switchable_processor import (
SimpleSwitchableProcessor,
)
from gem5.coherence_protocol import CoherenceProtocol
from gem5.isas import ISA
from gem5.components.processors.cpu_types import CPUTypes
from gem5.resources.resource import Resource
from gem5.simulate.simulator import Simulator
from gem5.simulate.exit_event import ExitEvent
# This runs a check to ensure the gem5 binary is compiled to X86 and supports
# the MESI Two Level coherence protocol.
requires(
isa_required=ISA.X86, coherence_protocol_required=CoherenceProtocol.MESI_TWO_LEVEL
)
# Here we setup a MESI Two Level Cache Hierarchy.
cache_hierarchy = MESITwoLevelCacheHierarchy(
l1d_size="32KiB",
l1d_assoc=8,
l1i_size="32KiB",
l1i_assoc=8,
l2_size="256KiB",
l2_assoc=16,
num_l2_banks=1,
)
# Setup the system memory.
# Note: DDR3_1600 defaults to a size of 8GiB. However, a current limitation of
# the X86 board is that it can only accept memory systems up to 3GiB.
# As such, we must fix the size.
memory = SingleChannelDDR3_1600("2GiB")
# Here we setup the processor. This is a special switchable processor in which
# a starting core type and a switch core type must be specified. Once a
# configuration is instantiated a user may call `processor.switch()` to switch
# from the starting core types to the switch core types. In this simulation
# we start with TIMING cores to simulate the OS boot, then switch to the O3
# cores for the command we wish to run after boot.
processor = SimpleSwitchableProcessor(
starting_core_type=CPUTypes.TIMING, switch_core_type=CPUTypes.O3, num_cores=2
)
# Here we setup the board. The X86Board allows for Full-System X86 simulations.
board = X86Board(
clk_freq="3GHz", processor=processor, memory=memory, cache_hierarchy=cache_hierarchy
)
# This is the command to run after the system has booted. The first `m5 exit`
# will stop the simulation so we can switch the CPU cores from TIMING to O3
# and continue the simulation to run the echo command, sleep for a second,
# then, again, call `m5 exit` to terminate the simulation. After simulation
# has ended you may inspect `m5out/system.pc.com_1.device` to see the echo
# output.
command = (
"m5 exit;" + "echo 'This is running on O3 CPU cores.';" + "sleep 1;" + "m5 exit;"
)
# Here we set the Full System workload.
# The `set_kernel_disk_workload` function for the X86Board takes a kernel, a
# disk image, and, optionally, the contents of the "readfile". In the case of
# the "x86-ubuntu-18.04-img" disk image, the readfile contents are executed as
# a script after the system boots.
board.set_kernel_disk_workload(
kernel=Resource("x86-linux-kernel-5.4.49"),
disk_image=Resource("x86-ubuntu-18.04-img"),
readfile_contents=command,
)
simulator = Simulator(
board=board,
on_exit_event={
# Here we want to override the default behavior for the first m5 exit
# exit event. Instead of exiting the simulator, we just want to
# switch the processor. The 2nd 'm5 exit' after will revert to using
# default behavior where the simulator run will exit.
ExitEvent.EXIT: (func() for func in [processor.switch])
},
)
simulator.run()
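# Illustration only: a sketch (not part of the collected file) of the same
# exit-event pattern written as an explicit generator. Each `m5 exit` in the
# guest advances the generator one step; yielding False keeps the simulation
# running, yielding True ends it.
def _exit_event_handler(processor):
    processor.switch()  # first `m5 exit`: swap the TIMING cores for the O3 cores
    yield False         # keep simulating the post-switch command
    yield True          # second `m5 exit`: stop the simulation
# usage sketch: on_exit_event={ExitEvent.EXIT: _exit_event_handler(processor)}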
| [] |
2024-01-10 | gem5bootcamp/gem5-bootcamp-env | materials~using-gem5~08-fullsystem~x86-npb-benchmarks.py | # Copyright (c) 2021 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Script to run the NAS Parallel Benchmarks (NPB) with gem5. The script expects
the benchmark program to run as input, in the format <benchmark_prog>.<class>.x.
The system is fixed at 2 CPU cores, a MESI Two Level cache hierarchy and
3 GB of DDR4 memory, and uses the x86 board.
This script counts the total number of instructions executed in the ROI and
reports the wallclock and simulated time taken.
Usage:
------
```
scons build/X86/gem5.opt
./build/X86/gem5.opt \
configs/example/gem5_library/x86-npb-benchmarks.py \
--benchmark <benchmark_name> \
--size <benchmark_class>
```
"""
import argparse
import time
import m5
from m5.objects import Root
from gem5.utils.requires import requires
from gem5.components.boards.x86_board import X86Board
from gem5.components.memory import DualChannelDDR4_2400
from gem5.components.processors.simple_switchable_processor import(
SimpleSwitchableProcessor,
)
from gem5.components.processors.cpu_types import CPUTypes
from gem5.isas import ISA
from gem5.coherence_protocol import CoherenceProtocol
from gem5.resources.resource import Resource, CustomResource, CustomDiskImageResource
from m5.stats.gem5stats import get_simstat
from m5.util import warn
requires(
isa_required = ISA.X86,
coherence_protocol_required=CoherenceProtocol.MESI_TWO_LEVEL,
kvm_required=True,
)
# Following are the list of benchmark programs for npb.
benchmark_choices = ["bt", "cg", "ep", "ft", "is", "lu", "mg", "sp"]
# We are restricting classes of NPB to A, B and C as the other classes (D and
# F) require main memory size of more than 3 GB. The X86Board is currently
# limited to 3 GB of memory. This limitation is explained later in line 136.
# The resource disk has binaries for class D. However, only `ep` benchmark
# works with class D in the current configuration. More information on the
# memory footprint for NPB is available at https://arxiv.org/abs/2010.13216
size_choices = ["A", "B", "C"]
parser = argparse.ArgumentParser(
description="An example configuration script to run the npb benchmarks."
)
# The only positional argument accepted is the benchmark name in this script.
parser.add_argument(
"--benchmark",
type = str,
required=True,
help = "Input the benchmark program to execute.",
choices = benchmark_choices,
)
parser.add_argument(
"--size",
type = str,
required=True,
help = "Input the class of the program to simulate.",
choices = size_choices,
)
parser.add_argument(
"--ticks",
type = int,
help = "Optionally put the maximum number of ticks to execute during the "\
"ROI. It accepts an integer value."
)
args = parser.parse_args()
# The simulation may fail in the case of `mg` with class C as it uses 3.3 GB
# of memory (more information is available at https://arxiv.org/abs/2010.13216).
# We warn the user here.
if args.benchmark == "mg" and args.size == "C":
warn("mg.C uses 3.3 GB of memory. Currently we are simulating 3 GB\
of main memory in the system.")
# The simulation will fail in the case of `ft` with class C. We warn the user
# here.
elif args.benchmark == "ft" and args.size == "C":
warn("There is not enough memory for ft.C. Currently we are\
simulating 3 GB of main memory in the system.")
# Checking for the maximum number of instructions, if provided by the user.
# Setting up all the fixed system parameters here
# Caches: MESI Two Level Cache Hierarchy
from gem5.components.cachehierarchies.ruby.\
mesi_two_level_cache_hierarchy import(
MESITwoLevelCacheHierarchy,
)
cache_hierarchy = MESITwoLevelCacheHierarchy(
l1d_size = "32kB",
l1d_assoc = 8,
l1i_size="32kB",
l1i_assoc=8,
l2_size="256kB",
l2_assoc=16,
num_l2_banks=2,
)
# Memory: Dual Channel DDR4 2400 DRAM device.
# The X86 board only supports 3 GB of main memory.
memory = DualChannelDDR4_2400(size = "3GB")
# Here we setup the processor. This is a special switchable processor in which
# a starting core type and a switch core type must be specified. Once a
# configuration is instantiated a user may call `processor.switch()` to switch
# from the starting core types to the switch core types. In this simulation
# we start with KVM cores to simulate the OS boot, then switch to the Timing
# cores for the command we wish to run after boot.
processor = SimpleSwitchableProcessor(
starting_core_type=CPUTypes.KVM,
switch_core_type=CPUTypes.TIMING,
isa=ISA.X86,
num_cores=2,
)
# Here we setup the board. The X86Board allows for Full-System X86 simulations
board = X86Board(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
# Here we set the FS workload, i.e., the NPB benchmark program.
# After the simulation has ended you may inspect
# `m5out/system.pc.com_1.device` to see the stdout, if any.
# After the system boots, we execute the benchmark program and wait till the
# ROI `workbegin` annotation is reached (m5_work_begin()). We start collecting
# the number of committed instructions till ROI ends (marked by `workend`).
# We then finish executing the rest of the benchmark.
# Also, we sleep the system for some time so that the output is printed
# properly.
command="/home/gem5/NPB3.3-OMP/bin/{}.{}.x;".format(args.benchmark,args.size)\
+ "sleep 5;" \
+ "m5 exit;"
board.set_kernel_disk_workload(
# The x86 linux kernel will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
# The NPB benchmarks were tested with kernel version 4.19.83
# kernel=Resource(
# "x86-linux-kernel-4.19.83",
# ),
kernel=CustomResource(
"PATH_TO_YOUR_KERNEL",
),
# The x86-npb image will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
# disk_image=Resource(
# "x86-npb",
# ),
disk_image=CustomResource(
"PATH_TO_YOUR_DISK_IMG",
),
# disk_image=CustomDiskImageResource(
# "PATH_TO_YOUR_DISK_IMG",
# ),
readfile_contents=command,
)
# We need this for long running processes.
m5.disableAllListeners()
root = Root(full_system = True, system = board)
# sim_quantum must be set when KVM cores are used.
root.sim_quantum = int(1e9)
m5.instantiate()
# We maintain the wall clock time.
globalStart = time.time()
print("Running the simulation")
print("Using KVM cpu")
# We start the simulation.
exit_event = m5.simulate()
# The first exit_event ends with a `workbegin` cause. This means that the
# system started successfully and the execution on the program started.
if exit_event.getCause() == "workbegin":
print("Done booting Linux")
print("Resetting stats at the start of ROI!")
m5.stats.reset()
start_tick = m5.curTick()
# We have completed up to this step using KVM cpu. Now we switch to timing
# cpu for detailed simulation.
processor.switch()
else:
# `workbegin` call was never encountered.
print("Unexpected termination of simulation before ROI was reached!")
print(
"Exiting @ tick {} because {}.".format(
m5.curTick(),
exit_event.getCause()
)
)
exit(-1)
# The next exit_event is to simulate the ROI. It should be exited with a cause
# marked by `workend`.
# Next, we need to check if the user passed a value for --ticks. If yes,
# then we limit our execution to this number of ticks during the ROI.
# Otherwise, we simulate until the ROI ends.
if args.ticks:
exit_event = m5.simulate(args.ticks)
else:
exit_event = m5.simulate()
# Reached the end of ROI.
# We dump the stats here.
# We expect the ROI to end with `workend` or `simulate() limit reached`.
# Otherwise the simulation ended unexpectedly.
if exit_event.getCause() == "workend":
print("Dump stats at the end of the ROI!")
m5.stats.dump()
end_tick = m5.curTick()
elif exit_event.getCause() == "simulate() limit reached" and \
args.ticks is not None:
print("Dump stats at the end of {} ticks in the ROI".format(args.ticks))
m5.stats.dump()
end_tick = m5.curTick()
else:
print("Unexpected termination of simulation while ROI was being executed!")
print(
"Exiting @ tick {} because {}.".format(
m5.curTick(),
exit_event.getCause()
)
)
exit(-1)
# Note that the benchmark has not finished executing at this point, but the
# ROI has. We collect the essential statistics here before wrapping up the
# simulation.
# We get simInsts using get_simstat and output it in the final
# print statement.
gem5stats = get_simstat(root)
# We get the number of committed instructions from the timing
# cores. We then sum and print them at the end.
roi_insts = float(\
gem5stats.to_json()\
["system"]["processor"]["cores2"]["core"]["exec_context.thread_0"]\
["numInsts"]["value"]
) + float(\
gem5stats.to_json()\
["system"]["processor"]["cores3"]["core"]["exec_context.thread_0"]\
["numInsts"]["value"]\
)
# Simulation is over at this point. We acknowledge that all the simulation
# events were successful.
print("All simulation events were successful.")
# We print the final simulation statistics.
print("Done with the simulation")
print()
print("Performance statistics:")
print("Simulated time in ROI: %.2fs" % ((end_tick-start_tick)/1e12))
print("Instructions executed in ROI: %d" % ((roi_insts)))
print("Ran a total of", m5.curTick()/1e12, "simulated seconds")
print("Total wallclock time: %.2fs, %.2f min" % \
(time.time()-globalStart, (time.time()-globalStart)/60))
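# Illustration only: a sketch (not part of the collected file) of a slightly more
# defensive way to total the ROI instruction counts, iterating over whatever
# switched-in cores exist instead of hard-coding "cores2"/"cores3". The key
# layout is assumed to match the get_simstat() dump used above.
def _total_roi_insts(stats_json):
    total = 0.0
    for core_group in stats_json.get("system", {}).get("processor", {}).values():
        if not isinstance(core_group, dict):
            continue
        core = core_group.get("core")
        if not isinstance(core, dict):
            continue
        thread = core.get("exec_context.thread_0", {})
        value = thread.get("numInsts", {}).get("value")
        if value is not None:
            total += float(value)
    return total
# usage sketch: _total_roi_insts(get_simstat(root).to_json())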
| [] |
2024-01-10 | gem5bootcamp/gem5-bootcamp-env | materials~developing-gem5-models~10-ruby-network~mi_example_cache_network.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .caches.mi_example.l1_cache import L1Cache
from .caches.mi_example.dma_controller import DMAController
from .caches.mi_example.directory import Directory
from .topologies.garnet_mesh import GarnetMesh
from .topologies.garnet_pt2pt import GarnetPt2Pt
from .topologies.simple_pt2pt import SimplePt2Pt
from .abstract_ruby_cache_hierarchy import AbstractRubyCacheHierarchy
from ..abstract_cache_hierarchy import AbstractCacheHierarchy
from ...boards.abstract_board import AbstractBoard
from ....coherence_protocol import CoherenceProtocol
from ....isas import ISA
from ....utils.override import overrides
from ....utils.requires import requires
from m5.objects import (
RubySystem,
RubySequencer,
DMASequencer,
RubyPortProxy,
)
class MIExampleCacheNetwork(AbstractRubyCacheHierarchy):
"""
The MI_Example cache hierarchy creates a Ruby cache for each core and connects
them with a selectable network topology.
"""
def __init__(
self,
size: str,
assoc: str,
network: str,
):
"""
:param size: The size of each cache in the hierarchy.
:param assoc: The associativity of each cache.
:param network: The network topology to use: "SimplePt2Pt", "GarnetPt2Pt" or "GarnetMesh".
"""
super().__init__()
self._size = size
self._assoc = assoc
self._network = network
@overrides(AbstractCacheHierarchy)
def incorporate_cache(self, board: AbstractBoard) -> None:
requires(coherence_protocol_required=CoherenceProtocol.MI_EXAMPLE)
self.ruby_system = RubySystem()
# Ruby's global network.
if self._network == 'SimplePt2Pt':
self.ruby_system.network = SimplePt2Pt(self.ruby_system)
elif self._network == 'GarnetPt2Pt':
self.ruby_system.network = GarnetPt2Pt(self.ruby_system)
elif self._network == 'GarnetMesh':
self.ruby_system.network = GarnetMesh(self.ruby_system)
else:
raise ValueError(f"network {self._network} is not implemented.")
# MI Example uses 5 virtual networks.
self.ruby_system.number_of_virtual_networks = 5
self.ruby_system.network.number_of_virtual_networks = 5
# There is a single global list of all of the controllers to make it
# easier to connect everything to the global network. This can be
# customized depending on the topology/network requirements.
# Create one controller for each L1 cache (and the cache mem obj.)
        # Create a single directory controller (really the memory controller).
self._controllers = []
for i, core in enumerate(board.get_processor().get_cores()):
cache = L1Cache(
size=self._size,
assoc=self._assoc,
network=self.ruby_system.network,
core=core,
cache_line_size=board.get_cache_line_size(),
target_isa=board.get_processor().get_isa(),
clk_domain=board.get_clock_domain(),
)
cache.sequencer = RubySequencer(
version=i,
dcache=cache.cacheMemory,
clk_domain=cache.clk_domain,
)
if board.has_io_bus():
cache.sequencer.connectIOPorts(board.get_io_bus())
cache.ruby_system = self.ruby_system
core.connect_icache(cache.sequencer.in_ports)
core.connect_dcache(cache.sequencer.in_ports)
core.connect_walker_ports(
cache.sequencer.in_ports, cache.sequencer.in_ports
)
# Connect the interrupt ports
if board.get_processor().get_isa() == ISA.X86:
int_req_port = cache.sequencer.interrupt_out_port
int_resp_port = cache.sequencer.in_ports
core.connect_interrupt(int_req_port, int_resp_port)
else:
core.connect_interrupt()
cache.ruby_system = self.ruby_system
self._controllers.append(cache)
# Create the directory controllers
self._directory_controllers = []
for range, port in board.get_memory().get_mem_ports():
dir = Directory(
self.ruby_system.network,
board.get_cache_line_size(),
range,
port,
)
dir.ruby_system = self.ruby_system
self._directory_controllers.append(dir)
# Create the DMA Controllers, if required.
self._dma_controllers = []
if board.has_dma_ports():
dma_ports = board.get_dma_ports()
for i, port in enumerate(dma_ports):
ctrl = DMAController(
self.ruby_system.network, board.get_cache_line_size()
)
ctrl.dma_sequencer = DMASequencer(version=i, in_ports=port)
ctrl.ruby_system = self.ruby_system
ctrl.dma_sequencer.ruby_system = self.ruby_system
self._dma_controllers.append(ctrl)
self.ruby_system.num_of_sequencers = len(self._controllers) + len(
self._dma_controllers
)
# Connect the controllers.
self.ruby_system.controllers = self._controllers
self.ruby_system.directory_controllers = self._directory_controllers
if len(self._dma_controllers) != 0:
self.ruby_system.dma_controllers = self._dma_controllers
if (self._network == 'GarnetMesh'):
self.ruby_system.network.connectControllers(
self._controllers
+ self._directory_controllers
+ self._dma_controllers,
len(self._controllers)
)
else:
self.ruby_system.network.connectControllers(
self._controllers
+ self._directory_controllers
+ self._dma_controllers
)
if (self._network == 'SimplePt2Pt'):
self.ruby_system.network.setup_buffers()
# Set up a proxy port for the system_port. Used for load binaries and
# other functional-only things.
self.ruby_system.sys_port_proxy = RubyPortProxy()
board.connect_system_port(self.ruby_system.sys_port_proxy.in_ports)
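
# A minimal construction sketch (illustrative only; the size, associativity and
# network values below are placeholder assumptions, not taken from the bootcamp
# material):
#
#   cache_hierarchy = MIExampleCacheNetwork(
#       size="16kB", assoc="8", network="GarnetMesh"
#   )
#
# The hierarchy is then passed to a board, which typically invokes
# incorporate_cache() while the simulated system is being built.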
| [] |
2024-01-10 | gem5bootcamp/gem5-bootcamp-env | materials~using-gem5~02-stdlib~x86-full-system.py | from gem5.utils.requires import requires
from gem5.components.boards.x86_board import X86Board
from gem5.components.memory.single_channel import SingleChannelDDR3_1600
from gem5.components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import (
MESITwoLevelCacheHierarchy,
)
from gem5.components.processors.simple_switchable_processor import (
SimpleSwitchableProcessor,
)
from gem5.coherence_protocol import CoherenceProtocol
from gem5.isas import ISA
from gem5.components.processors.cpu_types import CPUTypes
from gem5.resources.resource import Resource
from gem5.simulate.simulator import Simulator
from gem5.simulate.exit_event import ExitEvent
| [] |
2024-01-10 | rezeroworld/QnA_chatgpt | streamlit_app.py | import streamlit as st
import openai
from prompt_engineering import answer_query_with_context
import toml
import pandas as pd
with open('config.toml', 'r') as f:
config = toml.load(f)
df = pd.read_csv('qa_embeddings.csv', index_col=0, header=0)
q2embed = dict(zip(df.index, df.loc[:, df.columns != "answers"].to_numpy()))
q2a = dict(zip(df.index, df.loc[:,df.columns == "answers"].to_numpy()))
# App title
st.set_page_config(page_title="❔💬 Dental Aligner Q&A with ChatGPT")
# OpenAI Credentials
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
st.title('❔💬 DentalGPT by Mahdi')
st.markdown('This chat bot is powered by ChatGPT language model. It uses \
external knowledge to answer questions about dental aligners. \
                In this case, prior to the Q&A, a 300-page knowledge book was \
transformed into embeddings, and used to calculate a similarity \
                metric against the presented query. Finally, the most relevant information \
is inserted into the prompt. The prompt is displayed with the bot answer \
to better understand where the answer comes from.')
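# Sketch of the retrieval step described above (this is not the actual
# prompt_engineering.answer_query_with_context implementation, whose internals
# are not shown in this file): the query is embedded, scored against the
# precomputed question embeddings in q2embed (e.g. by cosine similarity), and
# the best-matching answers from q2a are inserted into the ChatGPT prompt.
# Roughly:
#
#   sims = {q: q_vec @ v / (norm(q_vec) * norm(v)) for q, v in q2embed.items()}
#   top_questions = sorted(sims, key=sims.get, reverse=True)[:3]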
# Store LLM generated responses
if "messages" not in st.session_state.keys():
st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
# Display chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
# Function for generating LLM response
def generate_response(prompt_input, openai_api_key):
openai.api_key = openai_api_key
return answer_query_with_context(prompt_input, embeds=q2embed, answers=q2a, config=config, show_prompt=False)
# User-provided prompt
if prompt := st.chat_input(disabled=not openai_api_key):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.write(prompt)
# Generate a new response if last message is not from assistant
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = generate_response(prompt, openai_api_key)
st.write(response)
message = {"role": "assistant", "content": response}
st.session_state.messages.append(message) | [
"How may I help you?"
] |
2024-01-10 | microsoft/ToolTalk | src~tooltalk~generation~scenario_generator.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
This script randomly samples API suites from API Bank and prompts GPT4 to generate queries that make use of at least
one API from each sampled suite.
"""
import os
import re
import json
import logging
import argparse
from itertools import combinations
from typing import Optional, List
import openai
from tqdm import tqdm
from tooltalk.apis import ALL_SUITES
from tooltalk.utils.file_utils import chunkify
from tooltalk.utils.openai_utils import openai_completion
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
API_DOC_KEY = "{{API_DOCS}}"
REQUIRED_API_KEY = "{{REQUIRED_API}}"
def extract_scenarios(responses):
extracted_scenarios = list()
scenario_regex = re.compile(r"\s*- Scenario \d: (?P<scenario>.*)\s*")
for response in responses:
full_response = response
raw_scenarios = scenario_regex.split(full_response)
# TODO extract APIs used and scenarios from format
scenarios = list()
for scenario in raw_scenarios:
scenario = scenario.strip()
if scenario == "":
continue
scenarios.append(scenario)
extracted_scenarios.append(scenarios)
return extracted_scenarios
def get_arg_parser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--prompt", type=str, help="Path to prompt file")
parser.add_argument("--model", type=str, default="gpt-4", help="OpenAI model to use")
parser.add_argument("--api_counts", type=int, nargs="+", default=[3], help="Number of suites to use")
parser.add_argument("--api_key", type=str, help="Optional API key for endpoint")
parser.add_argument("--max_tokens", type=int, nargs="+", default=[25000], help="Maximum number of tokens to generate")
parser.add_argument("--temperature", type=float, default=0, help="Temperature for sampling")
parser.add_argument("--beams", type=int, default=1, help="Number of beams to use for generation")
parser.add_argument("--batch_size", type=int, default=10, help="Batch size for generation")
parser.add_argument("--output_dir", type=str, help="Path to output directory")
parser.add_argument("--reset", action="store_true", help="Reset output directory if it exists")
return parser
# TODO use every API, then sample remainder in actual dataset generation run
# TODO special consideration for Account API?
def main(flags: Optional[List[str]] = None) -> None:
parser = get_arg_parser()
args = parser.parse_args(flags)
# load template
with open(args.prompt, 'r', encoding='utf-8') as reader:
prompt_template = reader.read()
if API_DOC_KEY not in prompt_template:
raise ValueError(f"Prompt template must contain key {API_DOC_KEY}")
os.makedirs(args.output_dir, exist_ok=True)
# TODO make async
for k in tqdm(args.api_counts):
if k > len(ALL_SUITES):
logger.warning(f"Skipping {k} API suites because there are only {len(ALL_SUITES)}")
continue
output_dicts = list()
for combination in combinations(ALL_SUITES, k):
formatted_apis = [
api.to_docstring()
for suite in combination
for api in suite.apis
]
api_doc_prompt = prompt_template.replace(API_DOC_KEY, '\n\n'.join(formatted_apis))
for suite in combination:
for api in suite.apis:
prompt = api_doc_prompt.replace(REQUIRED_API_KEY, api.__name__)
output_dicts.append({
"prompt": prompt,
"apis": {
"suites": [suite.name for suite in combination],
"required_api": api.__name__
}
})
for batch in tqdm(chunkify(output_dicts, args.batch_size)):
prompts = [output_dict["prompt"] for output_dict in batch]
for max_tokens in args.max_tokens:
try:
response_texts = openai_completion(
model=args.model,
prompt=prompts,
max_tokens=max_tokens,
temperature=args.temperature,
stop="```"
)
except ValueError as error:
logger.error(f"Failed output using {max_tokens}: {error}")
continue
else:
scenarios = extract_scenarios(response_texts)
logger.info(f"Number of scenarios generated {list(map(len, scenarios))}")
for output_dict, response, scenario in zip(batch, response_texts, scenarios):
output_dict["response"] = response
output_dict["scenarios"] = scenario
break
else:
logger.warning("Reached max token limit skipping example")
for output_dict in batch:
if "response" not in output_dict:
continue
# Write to file
output_name = "-".join(output_dict["apis"]["suites"]) + "-" + output_dict["apis"]["required_api"] + ".json"
output_path = os.path.join(args.output_dir, output_name)
with open(output_path, 'w', encoding='utf-8') as writer:
json.dump(output_dict, writer, indent=4)
if __name__ == '__main__':
main()
| [
"\n\n"
] |
2024-01-10 | microsoft/ToolTalk | src~tooltalk~evaluation~evaluate_openai.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Evaluate Tool LLM on API-Talk dataset.
"""
import os
import json
import logging
import argparse
from enum import Enum
from typing import List
from collections import Counter
import openai
from tqdm import tqdm
from tooltalk.apis import APIS_BY_NAME, ALL_APIS
from tooltalk.evaluation.tool_executor import ToolExecutor, BaseAPIPredictor
from tooltalk.utils.file_utils import get_names_and_paths
from tooltalk.utils.openai_utils import openai_chat_completion
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class OpenAIPredictor(BaseAPIPredictor):
system_prompt = "You are a helpful assistant. Here is some user data:" \
"\nlocation: {location}" \
"\ntimestamp: {timestamp}" \
"\nusername (if logged in): {username}"
def __init__(self, model, apis_used, disable_docs=False):
self.model = model
self.api_docs = [api.to_openai_doc(disable_docs) for api in apis_used]
def predict(self, metadata: dict, conversation_history: dict) -> dict:
system_prompt = self.system_prompt.format(
location=metadata["location"],
timestamp=metadata["timestamp"],
username=metadata.get("username")
)
openai_history = [{
"role": "system",
"content": system_prompt
}]
for turn in conversation_history:
if turn["role"] == "user" or turn["role"] == "assistant":
openai_history.append({
"role": turn["role"],
"content": turn["text"]
})
elif turn["role"] == "api":
openai_history.append({
"role": "assistant",
"content": None,
"function_call": {
"name": turn["request"]["api_name"],
"arguments": json.dumps(turn["request"]["parameters"])
}
})
response_content = {
"response": turn["response"],
"exception": turn["exception"]
}
openai_history.append({
"role": "function",
"name": turn["request"]["api_name"],
"content": json.dumps(response_content)
})
openai_response = openai_chat_completion(
model=self.model,
messages=openai_history,
functions=self.api_docs,
)
logger.debug(f"OpenAI full response: {openai_response}")
openai_message = openai_response["choices"][0]["message"]
metadata = {
"openai_request": {
"model": self.model,
"messages": openai_history,
"functions": self.api_docs,
},
"openai_response": openai_response
}
if "function_call" in openai_message:
function_call = openai_message["function_call"]
api_name = function_call["name"]
try:
parameters = json.loads(function_call["arguments"])
except json.decoder.JSONDecodeError:
# check termination reason
logger.info(f"Failed to decode arguments for {api_name}: {function_call['arguments']}")
parameters = None
return {
"role": "api",
"request": {
"api_name": api_name,
"parameters": parameters
},
# store metadata about call
"metadata": metadata,
}
else:
return {
"role": "assistant",
"text": openai_message["content"],
# store metadata about call
"metadata": metadata,
}
class EvalModes(str, Enum):
PREDICT = "predict"
EVALUATE = "evaluate"
VALIDATE = "validate"
def get_arg_parser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--dataset", type=str, help="Path to dataset for models to evaluate")
parser.add_argument("--database", type=str, help="Path to database used in evaluation")
parser.add_argument("--api_key", type=str, default="openai.key", help="Path to OpenAI API key")
parser.add_argument("--api_mode", type=str, choices=["exact", "suite", "all"], default="all",
help="API mode to use for evaluation, determines which api docs to include")
parser.add_argument("--model", type=str, default="gpt-4", help="Model to use for generation")
parser.add_argument("--output_dir", type=str, help="Path to output model predictions")
parser.add_argument("--reset", action="store_true", help="reset evaluation writing over any cached results")
parser.add_argument("--disable_documentation", action="store_true",
help="disabled documentation sent to GPT-4 replacing with empty strings")
parser.add_argument("--modes", choices=list(EvalModes), type=str, nargs='+', default=list(EvalModes),
help="Evaluation modes")
return parser
def main(flags: List[str] = None):
parser = get_arg_parser()
args = parser.parse_args(flags)
# get api key
openai_key = os.environ.get("OPENAI_KEY", None)
if openai_key is None:
with open(args.api_key, "r") as f:
openai_key = f.read().strip()
openai.api_key = openai_key
total_metrics = Counter()
os.makedirs(args.output_dir, exist_ok=True)
tool_executor = ToolExecutor(init_database_dir=args.database)
for file_name, file_path in tqdm(get_names_and_paths(args.dataset)):
output_file_path = os.path.join(args.output_dir, file_name)
if os.path.exists(output_file_path) and not args.reset:
logger.info(f"Skipping {file_name} because it already exists")
with open(output_file_path, 'r', encoding='utf-8') as reader:
conversation_with_metrics = json.load(reader)
total_metrics += conversation_with_metrics["metrics"]
total_metrics["num_conversations"] += 1
continue
logger.info(f"Running {file_name}")
with open(file_path, 'r', encoding='utf-8') as reader:
conversation = json.load(reader)
if EvalModes.PREDICT in args.modes:
logger.info("Running prediction...")
if args.api_mode == "exact":
apis_used = [APIS_BY_NAME[api_name] for api_name in conversation["apis_used"]]
elif args.api_mode == "suite":
apis_used = [api for suite in conversation["suites_used"] for api in suite.apis]
elif args.api_mode == "all":
apis_used = ALL_APIS
else:
raise ValueError(f"Invalid api mode: {args.api_mode}")
predictor_func = OpenAIPredictor(
model=args.model,
apis_used=apis_used,
disable_docs=args.disable_documentation
)
conversation = tool_executor.run_conversation(conversation, predictor_func)
if EvalModes.EVALUATE in args.modes:
logger.info("Running evaluation...")
conversation = tool_executor.evaluate_predictions(conversation)
logger.info(f"Conversation {file_name} pass: {conversation['metrics']['success']}")
total_metrics += conversation["metrics"]
total_metrics["num_conversations"] += 1
if EvalModes.VALIDATE in args.modes:
logger.info("Validating evaluation...")
for turn in conversation["conversation"]:
if "predictions" not in turn:
continue
for prediction in turn["predictions"]:
if prediction["role"] == "api":
assert "match" in prediction
assert "bad_action" in prediction
with open(output_file_path, 'w', encoding='utf-8') as writer:
json.dump(conversation, writer, indent=4)
logger.info("Finished processing conversations")
if EvalModes.EVALUATE in args.modes:
metrics = {
"num_conversations": total_metrics["num_conversations"],
"precision": total_metrics["matches"] / total_metrics["predictions"],
"recall": total_metrics["matches"] / total_metrics["ground_truths"],
"action_precision": total_metrics["valid_actions"] / total_metrics["actions"],
"bad_action_rate": total_metrics["bad_actions"] / total_metrics["actions"],
"success_rate": total_metrics["success"] / total_metrics["num_conversations"]
}
logger.info(f"Metrics: {json.dumps(metrics, indent=4)}")
if __name__ == "__main__":
main()
| [
"username",
"location",
"You are a helpful assistant. Here is some user data:\nlocation: {location}\ntimestamp: {timestamp}\nusername (if logged in): {username}",
"None"
] |
2024-01-10 | linpz/chat-gpt-1 | 11.py | # -*- coding: utf-8 -*-
"""Untitled11.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/17cs-SFLUH6DfgArLl4hzemQIttLIYht6
"""
!pip install langchain
!pip install sentence_transformers chromadb tiktoken openai
!pip install pypdf
!pip install Pillow
import csv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
# Set the OpenAI API key
import os
os.environ["OPENAI_API_KEY"] ="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" # 請更換為您的 API 金鑰
class Document:
def __init__(self, content, metadata=None):
self.page_content = content
self.metadata = metadata or {}
class CSVLoader:
def __init__(self, file_path):
self.file_path = file_path
def load(self):
with open(self.file_path, 'r', encoding='utf-8') as file:
reader = csv.reader(file)
return [Document(", ".join(row)) for row in reader]
# Specify the file path
file_path = "/content/test - 工作表1.csv" # 請更換為您的 CSV 文件路徑
# Create the loader and load the text
loader = CSVLoader(file_path)
texts = loader.load()
# Embeddings and vector store
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(texts, embeddings)
# Create the conversational retrieval chain
qa = ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0.2), vectorstore.as_retriever())
chat_history = []
while True:
query = input('\nQuestion: ')
if not query:
break
result = qa({"question": query + '(用繁體中文回答)', "chat_history": chat_history})
print('A:', result['answer'])
chat_history.append((query, result['answer'])) | [] |
2024-01-10 | winnerdev2012/OpenChat | dj_backend_server~api~utils~make_chain.py | from langchain.vectorstores.base import VectorStore
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from api.utils.get_openai_llm import get_llm
from langchain import PromptTemplate, LLMChain
from langchain.chains import RetrievalQAWithSourcesChain, ConversationalRetrievalChain
from api.utils.get_prompts import get_qa_prompt_by_mode
load_dotenv()
def get_qa_chain(vector_store: VectorStore, mode, initial_prompt: str) -> RetrievalQA:
llm = get_llm()
template = get_qa_prompt_by_mode(mode, initial_prompt=initial_prompt)
prompt = PromptTemplate.from_template(template)
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vector_store.as_retriever(),
chain_type_kwargs={"prompt": prompt},
return_source_documents=True
)
return qa_chain
def getRetrievalQAWithSourcesChain(vector_store: VectorStore, mode, initial_prompt: str):
llm = get_llm()
chain = RetrievalQAWithSourcesChain.from_chain_type(llm, chain_type="stuff", retriever=vector_store.as_retriever())
return chain
def getConversationRetrievalChain(vector_store: VectorStore, mode, initial_prompt: str):
llm = get_llm()
template = get_qa_prompt_by_mode(mode, initial_prompt=initial_prompt)
prompt = PromptTemplate.from_template(template)
chain = ConversationalRetrievalChain.from_llm(
llm,
chain_type="stuff",
retriever=vector_store.as_retriever(),
verbose=True,
combine_docs_chain_kwargs={"prompt": prompt}
)
return chain | [] |
2024-01-10 | winnerdev2012/OpenChat | dj_backend_server~api~utils~get_embeddings.py | from langchain.embeddings.openai import OpenAIEmbeddings
from api.enums import EmbeddingProvider
import os
from dotenv import load_dotenv
from langchain.embeddings.base import Embeddings
load_dotenv()
# https://github.com/easonlai/azure_openai_langchain_sample/blob/main/chat_with_pdf.ipynb
import os
def get_embedding_provider():
"""Gets the chosen embedding provider from environment variables."""
return os.environ.get("EMBEDDING_PROVIDER")
def get_azure_embedding():
"""Gets embeddings using the Azure embedding provider."""
deployment = os.environ.get("AZURE_OPENAI_EMBEDDING_MODEL_NAME")
openai_api_key = os.environ.get("AZURE_OPENAI_API_KEY")
client = os.environ.get("AZURE_OPENAI_API_TYPE")
openai_api_base = os.environ['AZURE_OPENAI_API_BASE']
openai_api_version = os.environ['AZURE_OPENAI_API_VERSION']
return OpenAIEmbeddings(
openai_api_key=openai_api_key,
deployment=deployment,
client=client,
chunk_size=8,
openai_api_base=openai_api_base,
openai_api_version=openai_api_version
)
def get_openai_embedding():
"""Gets embeddings using the OpenAI embedding provider."""
openai_api_key = os.environ.get("OPENAI_API_KEY")
return OpenAIEmbeddings(openai_api_key=openai_api_key, chunk_size=1)
def choose_embedding_provider():
"""Chooses and returns the appropriate embedding provider instance."""
embedding_provider = get_embedding_provider()
if embedding_provider == EmbeddingProvider.azure.value:
return get_azure_embedding()
elif embedding_provider == EmbeddingProvider.OPENAI.value:
return get_openai_embedding()
else:
available_providers = ", ".join([service.value for service in EmbeddingProvider])
raise ValueError(
f"Embedding service '{embedding_provider}' is not currently available. "
f"Available services: {available_providers}"
)
# Main function to get embeddings
def get_embeddings() -> Embeddings:
"""Gets embeddings using the chosen embedding provider."""
return choose_embedding_provider() | [] |
2024-01-10 | winnerdev2012/OpenChat | dj_backend_server~api~data_sources~pdf_handler.py | # views.py
import json
from django.views.decorators.csrf import csrf_exempt
from langchain.text_splitter import RecursiveCharacterTextSplitter
from api.utils import get_embeddings
from langchain.document_loaders.directory import DirectoryLoader
from api.utils import init_vector_store
from langchain.document_loaders import PyPDFium2Loader
import os
from web.utils.delete_foler import delete_folder
from api.interfaces import StoreOptions
@csrf_exempt
def pdf_handler(shared_folder: str, namespace: str):
try:
directory_path = os.path.join("website_data_sources", shared_folder)
directory_loader = DirectoryLoader(path=directory_path, glob="**/*.pdf", loader_cls=PyPDFium2Loader, use_multithreading=True)
raw_docs = directory_loader.load_and_split()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200,length_function=len)
docs = text_splitter.split_documents(raw_docs)
embeddings = get_embeddings()
init_vector_store(docs, embeddings, StoreOptions(namespace))
delete_folder(folder_path=directory_path)
print('All is done, folder deleted')
except Exception as e:
import traceback
print(e)
traceback.print_exc()
| [] |
2024-01-10 | winnerdev2012/OpenChat | dj_backend_server~api~utils~init_vector_store.py | from langchain.docstore.document import Document
from langchain.vectorstores.qdrant import Qdrant
from api.enums import StoreType
from langchain.embeddings.openai import OpenAIEmbeddings
from api.interfaces import StoreOptions
from api.configs import PINECONE_TEXT_KEY, VECTOR_STORE_INDEX_NAME
import pinecone
from langchain.vectorstores.pinecone import Pinecone
from dotenv import load_dotenv
import os
import threading
init_lock = threading.Lock()
# Load environment variables from .env file
load_dotenv()
initialized = False
def initialize_pinecone():
global initialized
# Only initialize Pinecone if the store type is Pinecone and the initialization lock is not acquired
with init_lock:
if not initialized:
# Initialize Pinecone
pinecone.init(
api_key=os.getenv("PINECONE_API_KEY"), # find at app.pinecone.io
environment=os.getenv("PINECONE_ENV"), # next to api key in console
)
initialized = True
def init_vector_store(docs: list[Document], embeddings: OpenAIEmbeddings, options: StoreOptions) -> None:
store_type = StoreType[os.environ['STORE']]
if store_type == StoreType.PINECONE:
initialize_pinecone()
# Use the Pinecone vector store
# docs, embeddings, VECTOR_STORE_INDEX_NAME, options.namespace, PINECONE_TEXT_KEY
Pinecone.from_documents(documents=docs, embedding=embeddings, index_name=VECTOR_STORE_INDEX_NAME, namespace=options.namespace)
elif store_type == StoreType.QDRANT:
print("called qdrant.from_documents")
Qdrant.from_documents(docs, embeddings, collection_name=options.namespace, url=os.environ['QDRANT_URL'])
else:
valid_stores = ", ".join(StoreType._member_names())
raise ValueError(f"Invalid STORE environment variable value: {os.environ['STORE']}. Valid values are: {valid_stores}") | [] |
2024-01-10 | winnerdev2012/OpenChat | dj_backend_server~api~data_sources~website_handler.py | import os
from django.http import JsonResponse
from langchain.document_loaders.directory import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader
from api.utils import init_vector_store
from api.utils.get_embeddings import get_embeddings
from api.interfaces import StoreOptions
# from import delete_folder
from web.models.website_data_sources import WebsiteDataSource
from web.enums.website_data_source_status_enum import WebsiteDataSourceStatusType
def website_handler(shared_folder, namespace):
website_data_source = WebsiteDataSource.objects.get(id=shared_folder)
try:
directory_path = os.path.join("website_data_sources", shared_folder)
directory_loader = DirectoryLoader(directory_path, glob="**/*.txt", loader_cls=TextLoader, use_multithreading=True)
raw_docs = directory_loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200, length_function=len)
docs = text_splitter.split_documents(raw_docs)
print("docs -->", docs);
embeddings = get_embeddings()
init_vector_store(docs, embeddings, StoreOptions(namespace=namespace))
website_data_source.crawling_status = WebsiteDataSourceStatusType.COMPLETED.value
website_data_source.save()
# delete_folder(folder_path=directory_path)
print('All is done, folder deleted...')
except Exception as e:
website_data_source.crawling_status = WebsiteDataSourceStatusType.FAILED.value
website_data_source.save()
import traceback
print(e)
traceback.print_exc()
| [] |
2024-01-10 | winnerdev2012/OpenChat | dj_backend_server~api~views~views_chat.py | from django.http import JsonResponse
from django.views.decorators.http import require_POST
from langchain import QAWithSourcesChain
from api.utils import get_vector_store
from api.utils.make_chain import getConversationRetrievalChain, getRetrievalQAWithSourcesChain
import json
from django.views.decorators.csrf import csrf_exempt
from api.interfaces import StoreOptions
from web.models.chat_histories import ChatHistory
from django.shortcuts import get_object_or_404
from web.models.chatbot import Chatbot
from uuid import uuid4
import logging
import traceback
from web.services.chat_history_service import get_chat_history_for_retrieval_chain
import os
from dotenv import load_dotenv
load_dotenv()
logger = logging.getLogger(__name__)
@csrf_exempt
@require_POST
def chat(request):
try:
body = json.loads(request.body.decode('utf-8'))
question = body.get('question')
namespace = body.get('namespace')
mode = body.get('mode')
initial_prompt = body.get('initial_prompt')
token = body.get('token')
session_id = body.get('session_id')
bot = get_object_or_404(Chatbot, token=token)
if not question:
return JsonResponse({'error': 'No question in the request'}, status=400)
sanitized_question = question.strip().replace('\n', ' ')
vector_store = get_vector_store(StoreOptions(namespace=namespace))
response_text = get_completion_response(vector_store=vector_store, initial_prompt=initial_prompt,mode=mode, sanitized_question=sanitized_question, session_id=session_id)
ChatHistory.objects.bulk_create([
ChatHistory(
id=uuid4(),
chatbot_id=bot.id,
from_user=True,
message=sanitized_question,
session_id=session_id
),
ChatHistory(
id=uuid4(),
chatbot_id=bot.id,
from_user=False,
message=response_text,
session_id=session_id
)
])
return JsonResponse({'text': response_text})
except json.JSONDecodeError:
return JsonResponse({'error': 'Invalid JSON in request body'}, status=400)
except Chatbot.DoesNotExist:
return JsonResponse({'error': 'Chatbot not found'}, status=404)
except Exception as e:
logger.error(str(e))
logger.error(traceback.format_exc())
return JsonResponse({'error': 'An error occurred'}, status=500)
def get_completion_response(vector_store, mode, initial_prompt, sanitized_question, session_id):
chain_type = os.getenv("CHAIN_TYPE", "conversation_retrieval")
chain: QAWithSourcesChain
if chain_type == 'retrieval_qa':
chain = getRetrievalQAWithSourcesChain(vector_store, mode, initial_prompt)
response = chain({"question": sanitized_question}, return_only_outputs=True)
response_text = response['answer']
elif chain_type == 'conversation_retrieval':
chain = getConversationRetrievalChain(vector_store, mode, initial_prompt)
chat_history = get_chat_history_for_retrieval_chain(session_id, limit=40)
response = chain({"question": sanitized_question, "chat_history": chat_history}, return_only_outputs=True)
response_text = response['answer']
return response_text
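# Example request body handled by the chat endpoint above (illustrative values;
# mode, token and session_id are placeholders):
#
#   {
#       "question": "What are your opening hours?",
#       "namespace": "my-site",
#       "mode": "<prompt mode>",
#       "initial_prompt": "<optional system prompt>",
#       "token": "<chatbot token>",
#       "session_id": "<uuid>"
#   }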
| [
"initial_prompt"
] |
2024-01-10 | winnerdev2012/OpenChat | dj_backend_server~api~utils~get_openai_llm.py | from langchain.llms import AzureOpenAI, OpenAI
import os
from dotenv import load_dotenv
load_dotenv()
# Azure OpenAI Language Model client
def get_azure_openai_llm():
"""Returns AzureOpenAI instance configured from environment variables"""
openai_api_type = os.environ['OPENAI_API_TYPE']
openai_api_key = os.environ['AZURE_OPENAI_API_KEY']
openai_deployment_name = os.environ['AZURE_OPENAI_DEPLOYMENT_NAME']
openai_model_name = os.environ['AZURE_OPENAI_COMPLETION_MODEL']
openai_api_version = os.environ['AZURE_OPENAI_API_VERSION']
openai_api_base=os.environ['AZURE_OPENAI_API_BASE']
return AzureOpenAI(
openai_api_base=openai_api_base,
openai_api_key=openai_api_key,
deployment_name=openai_deployment_name,
model_name=openai_model_name,
openai_api_type=openai_api_type,
openai_api_version=openai_api_version,
temperature=0,
batch_size=8
)
# OpenAI Language Model client
def get_openai_llm():
"""Returns OpenAI instance configured from environment variables"""
openai_api_key = os.environ['OPENAI_API_KEY']
return OpenAI(
temperature=0,
openai_api_key=openai_api_key
)
# recommend not caching initially, and optimizing only if you observe a clear performance benefit from caching the clients.
# The simplest thing that works is often best to start.
def get_llm():
"""Returns LLM client instance based on OPENAI_API_TYPE"""
clients = {
'azure': get_azure_openai_llm,
'openai': get_openai_llm
}
api_type = os.environ.get('OPENAI_API_TYPE')
if api_type not in clients:
raise ValueError(f"Invalid OPENAI_API_TYPE: {api_type}")
return clients[api_type]() | [] |
2024-01-10 | kkatodus/kokkai_analysis | data_prepping~collect_local_gov_json_for_member_list.py | #%%
import os
import openai
import json
from scrape.general_scraper import GeneralScraper
from selenium.webdriver.common.by import By
from secrets.api_keys import OPENAI_API_KEY
from file_handling.file_read_writer import read_json, write_json, create_dir
from params.paths import ROOT_DIR
RESOURCE_DIR = os.path.join(ROOT_DIR, 'resource')
DATA_DIR = os.path.join(ROOT_DIR, 'data')
LOCAL_DATA_DIR = os.path.join(DATA_DIR, 'data_local_gov')
os.makedirs(LOCAL_DATA_DIR, exist_ok=True)
openai.organization = "org-KwrqfnvZUjabTOAFL3QUAhk2"
openai.api_key = OPENAI_API_KEY
scraping_resource_path = os.path.join(RESOURCE_DIR, 'local_gov_repr_scrape.json')
scraping_resource = read_json(scraping_resource_path)
gs = GeneralScraper(firefox=True)
#%%
def all_reprs_on_one_page():
urls = []
xpath = ''
while True:
        url = input('Get all the urls and press 0 when you are done. You can also press x to change the xpath. If you leave the input blank, the city will be skipped.')
if url == '0':
break
if url == 'x':
xpath = input('Enter the xpath for the representative list.')
continue
urls.append(url)
return urls, xpath
def all_reprs_on_multiple_pages():
urls_to_main_pages = []
xpath_to_individual_pages = ''
xpath_to_repr_info = ''
while True:
url = input('Get all the urls and press 0 when you are done. \nYou can also type "xind" to change the xpath to the links to individual pages. \n You can also type "xpage" to enter the xpath to repr info. If you leave the list empty the city will be skipped')
if url == '0':
break
if url == 'xind':
xpath_to_individual_pages = input('Enter the xpath for the links to individual pages.')
continue
if url == 'xpage':
xpath_to_repr_info = input('Enter the xpath for the representative info.')
continue
urls_to_main_pages.append(url)
return urls_to_main_pages, xpath_to_individual_pages, xpath_to_repr_info
def get_text_from_local_repr_page(city_name):
city_resource = scraping_resource[city_name]
urls = city_resource['urls']
all_texts = []
for url in urls:
gs.get_url(url)
reprs_component = gs.get_site_components_by(By.XPATH, city_resource['reprs_xpath'])
if len(reprs_component) > 0:
all_texts.append(reprs_component[0].text)
return all_texts
#%%
## code to create the scraping resource file
for idx, city_name in enumerate(scraping_resource.keys()):
do_it_again = False
while True:
print(city_name, f'{idx}/{len(scraping_resource.keys())}')
if 'urls' in scraping_resource[city_name].keys() and not do_it_again:
break
gs.get_url('https://www.google.com/search?q=' + city_name + '議会議員')
if input('They have individual pages for reprs, type "ind", otherwise press enter') == 'ind':
urls_to_main_pages, xpath_to_individual_pages, xpath_to_repr_info = all_reprs_on_multiple_pages()
scraping_resource[city_name]['multiple_pages'] = True
scraping_resource[city_name]['urls'] = urls_to_main_pages
scraping_resource[city_name]['ind_reprs_xpath'] = xpath_to_individual_pages
scraping_resource[city_name]['ind_reprs_info_xpath'] = xpath_to_repr_info
else:
urls, xpath = all_reprs_on_one_page()
scraping_resource[city_name]['urls'] = urls
scraping_resource[city_name]['reprs_xpath'] = xpath
approved = input(f'The following is the final result\n {city_name}\n{scraping_resource[city_name]}\nDo you want to continue?(y/n)')
if approved == 'y' or approved == '':
break
else:
print('Please enter the information again.')
do_it_again = True
continue
write_json(scraping_resource, scraping_resource_path)
| [] |
2024-01-10 | justchillinghere/document-chat-bot | PDF_handlerlangchain.py | import os
from typing import List
from uuid import uuid4
import warnings
import dotenv
import chromadb
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.text_splitter import TokenTextSplitter
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document
from langchain.prompts import ChatPromptTemplate
from error_handler import logger
warnings.filterwarnings("ignore")
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # TO SURPRESS Tensorflow warnings
warnings.filterwarnings("default")
dotenv.load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
vector_db_path = os.getenv("VECTOR_DB_PATH")
class ChatWithPDF:
def __init__(self, user_tg_id: int,
api_key: str,
message_limit=1200):
self.message_limit = message_limit
self.llm = OpenAI(temperature=0.2, openai_api_key=api_key, max_tokens=1000)
self.embeddings = OpenAIEmbeddings(openai_api_key=api_key)
self.user_id = user_tg_id
def load_file(self, file_path: str, file_name: str):
self.chunks = PyPDFLoader(file_path).load()
# Change filename in metadata from temporary to actual one
for i in range(len(self.chunks)):
self.chunks[i].metadata['source'] = file_name
logger.info(f"Read {file_name} file from {file_path}")
def split_docs(self, chunk_size:int=1000, chunk_overlap:int=100):
token_splitter = TokenTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
self.chunks = token_splitter.split_documents(self.chunks)
logger.info(f"Split {len(self.chunks)} chunks")
def create_db_collection(self):
vec_database = Chroma.from_documents(
self.chunks,
self.embeddings,
persist_directory=vector_db_path,
collection_name=f'{self.user_id}_collection',
)
vec_database.persist()
vec_database = None
logger.info(f"The file has been recorded to vec db")
def get_qa_chain(self):
vec_database = Chroma(
embedding_function=self.embeddings,
persist_directory=vector_db_path,
collection_name=f'{self.user_id}_collection',
)
self.retriever = vec_database.as_retriever(
search_type="mmr", search_kwargs={"k": 4}
)
template = """Use this information in order to answer the question.
Context: {context}
Question: {question}
Answer in the language used in question.
Your answer must also be complete and consistent.
"""
QA_PROMPT = PromptTemplate.from_template(template)
self.qa_chain = RetrievalQA.from_chain_type(
self.llm,
retriever=self.retriever,
chain_type_kwargs={"prompt": QA_PROMPT},
verbose=False,
)
logger.info("QA chain created")
def ask_question(self, question_text):
#translator_promt = ChatPromptTemplate.from_strings("What language is the following question? : \n {question}")
reply = self.qa_chain({"query": question_text})
return reply
class Dialog:
def __init__(self, user_id):
# initialize chat
self.chat = ChatWithPDF(user_id, api_key=OPENAI_API_KEY)
def load_document_to_vec_db(self, file_name, file_path):
self.chat.load_file(file_path=file_path, file_name=file_name)
self.chat.split_docs()
self.chat.create_db_collection()
def ask(self, query):
self.chat.get_qa_chain()
reply = self.chat.ask_question(query)
logger.info(f"Raw reply: {reply}")
logger.info("Question answered")
return reply["result"]
| [
"Use this information in order to answer the question. \n\t\t\t\tContext: {context}\n\t\t\t\tQuestion: {question}\n\n\t\t\t\tAnswer in the language used in question.\n\t\t\t\tYour answer must also be complete and consistent.\n\t\t\t "
] |
2024-01-10 | justchillinghere/document-chat-bot | old~add_file_command.py | from telegram.ext import CommandHandler, ContextTypes
from telegram import File, Update, Document
import typing
from langchain.document_loaders import PyPDFLoader
from error_handler import logger
from PDF_handlerlangchain import Dialog
async def add_file(update: Update, context: ContextTypes.DEFAULT_TYPE) -> File | None:
logger.info("Checking file format...")
if (not hasattr(update.message.document, "mime_type")
or update.message.document.mime_type != "application/pdf"):
await update.message.reply_text("Please load PDF")
return
logger.info("Started loading file")
file = await context.bot.get_file(update.message.document.file_id)
await context.bot.send_message(
chat_id=update.effective_chat.id, text="Please wait for the file to be uploaded"
)
Dialog(user_id=update.message.from_user.id).load_document_to_vec_db(
file_name=update.message.document.file_name,
file_path=file.file_path
)
await context.bot.send_message(
chat_id=update.effective_chat.id, text="File has been uploaded successfully!"
) | [] |
2024-01-10 | justchillinghere/document-chat-bot | old~echo_command.py | from telegram import Update
from telegram.ext import ContextTypes
from langchain.document_loaders import OnlinePDFLoader, PyPDFLoader
async def echo(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
# if (not hasattr(update.message.document, "mime_type")
# or update.message.document.mime_type != "application/pdf"):
# await update.message.reply_text("Please load PDF")
# return
# file = await context.bot.get_file(update.message.document.file_id)
# loader = PyPDFLoader(file.file_path)
# pages = loader.load_and_split()
# print(pages[0])
# print(await context.bot.get_file(update.message.document.file_id))
# print(update.message.document)
await update.message.reply_text("No commands to handle the input!")
| [] |
2024-01-10 | EswarDivi/DocuConverse | Talkwithpdf.py | # Import Required Libraries
__import__("pysqlite3")
import sys
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
import os
import streamlit as st
from streamlit_chat import message
from langchain.document_loaders import OnlinePDFLoader
from langchain.text_splitter import CharacterTextSplitter,RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.embeddings import CohereEmbeddings
from langchain.prompts import PromptTemplate
from langchain.llms import Cohere
from datetime import datetime
# Setting Up API Tokens
# Create .streamlit Folder in Root Directory
# Create a File secrets.toml
# TOML format
# cohere_apikey="Enter you Key"
# Setting Up Streamlit Page
st.set_page_config(page_title="Chat With PDF", page_icon=":smile:")
# Creating Temp Folder
if not os.path.exists("./tempfolder"):
os.makedirs("./tempfolder")
# tabs
tab1, tab2 = st.tabs(["📈 Chat Here", "🗃 Relevant Chunks"])
tab1.markdown(
"""
<h1 style='text-align: center;'>Chat With PDF</h1>
<h4 style='text-align: center;'>Powered by Cohere</h4>
<p style='text-align: center;'>For uninterrupted usage, visit the <a href='https://huggingface.co/spaces/eswardivi/ChatwithPdf' target='_blank'>HuggingFace Space</a></p>
""",
unsafe_allow_html=True,
)
# Saving uploaded file to tempfolder
def save_uploadedfile(uploadedfile):
with open(
os.path.join("tempfolder", uploadedfile.name),
"wb",
) as f:
f.write(uploadedfile.getbuffer())
return st.sidebar.success("Saved File")
# Creating Sidebar for Utilites
with st.sidebar:
st.title("Upload PDF")
st.write("For any Queries, please feel free to contact")
st.write("Email: [[email protected]](mailto:[email protected])")
st.write("GitHub: [github.com/EswarDivi](https://github.com/EswarDivi)")
uploaded_file = st.file_uploader("Choose a file", type=["pdf"])
temp_r = st.slider("Temperature", 0.1, 0.9, 0.45, 0.1)
chunksize = st.slider("Chunk Size for Splitting Document ", 256, 1024, 400, 10)
clear_button = st.button("Clear Conversation", key="clear")
# Initializing Text Splitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunksize, chunk_overlap=10, separators=[" ", ",", "\n"])
# Initializing Cohere Embedding
embeddings = CohereEmbeddings(model="large", cohere_api_key=st.secrets["cohere_apikey"])
def PDF_loader(document):
loader = OnlinePDFLoader(document)
documents = loader.load()
prompt_template = """
System Prompt:
    You are an AI chatbot that helps users chat with PDF documents. How may I help you today?
{context}
{question}
"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
texts = text_splitter.split_documents(documents)
global db
db = Chroma.from_documents(texts, embeddings)
retriever = db.as_retriever()
global qa
qa = RetrievalQA.from_chain_type(
llm=Cohere(
model="command-xlarge-nightly",
temperature=temp_r,
cohere_api_key=st.secrets["cohere_apikey"],
),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
chain_type_kwargs=chain_type_kwargs,
)
return "Ready"
if uploaded_file is not None:
save_uploadedfile(uploaded_file)
file_size = os.path.getsize(f"tempfolder/{uploaded_file.name}") / (
1024 * 1024
) # Size in MB
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{current_time}] Uploaded PDF: {file_size} MB")
PDF_loader("tempfolder/" + uploaded_file.name)
tab1.markdown(
"<h3 style='text-align: center;'>Now You Are Chatting With "
+ uploaded_file.name
+ "</h3>",
unsafe_allow_html=True,
)
# Session State
if "chat_history" not in st.session_state:
st.session_state["chat_history"] = []
if "generated" not in st.session_state:
st.session_state["generated"] = []
if "past" not in st.session_state:
st.session_state["past"] = []
# Generating Response
def generate_response(query):
result = qa({"query": query, "chat_history": st.session_state["chat_history"]})
tab2.markdown(
"<h3 style='text-align: center;'>Relevant Documents Metadata</h3>",
unsafe_allow_html=True,
)
tab2.write(result["source_documents"])
result["result"] = result["result"]
return result["result"]
# Creating Containers
response_container = tab1.container()
container = tab1.container()
with container:
with st.form(key="my_form", clear_on_submit=True):
user_input = st.text_input("You:", key="input")
submit_button = st.form_submit_button(label="Send")
if user_input and submit_button:
if uploaded_file is not None:
output = generate_response(user_input)
print(output)
st.session_state["past"].append(user_input)
st.session_state["generated"].append(output)
st.session_state["chat_history"] = [(user_input, output)]
else:
st.session_state["past"].append(user_input)
st.session_state["generated"].append(
"Please go ahead and upload the PDF in the sidebar, it would be great to have it there."
)
if st.session_state["generated"]:
with response_container:
for i in range(len(st.session_state["generated"])):
message(
st.session_state["past"][i],
is_user=True,
key=str(i) + "_user",
avatar_style="adventurer",
seed=123,
)
message(st.session_state["generated"][i], key=str(i))
# Enabling Clear button
if clear_button:
st.session_state["generated"] = []
st.session_state["past"] = []
st.session_state["chat_history"] = []
| [
"question",
"context",
" \n System Prompt:\n Your are an AI chatbot that helps users chat with PDF documents. How may I help you today?\n\n {context}\n\n {question}\n "
] |
2024-01-10 | MobileLLM/AutoDroid | query_lmql.py | import lmql
import os
import openai
# openai.api_key = 'sk-dMHkagT7vyUQmldu49cDH3bOkdaU8Ue4dUXjnT93I70KNxMu'
# openai.base_url = 'https://api.openai-proxy.org/v1'
os.environ['OPENAI_API_KEY'] = 'sk-dMHkagT7vyUQmldu49cDH3bOkdaU8Ue4dUXjnT93I70KNxMu'
model=lmql.model("openai/gpt-3.5-turbo-instruct") # OpenAI API model
# model=lmql.model("llama.cpp:<YOUR_WEIGHTS>.gguf") # llama.cpp model
@lmql.query(model=model,decoder='argmax')
def prompt_llm_with_history(task,history,ui_desc,ids):
'''lmql
"""You are a smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.
Task:{task}
Previous UI actions: {history}
Current UI State:{ui_desc}
Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\n4. Can the task be proceeded with the current UI state? <Y/N>. Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>
- id=[ID] - action=[ACTION] - input text=[INPUT_TEXT]. """ where ACTION in ["tap", "input", "N/A"] and ID in {ids} and len(TOKENS(INPUT_TEXT))<6
return ID,ACTION,INPUT_TEXT
'''
| [] |
2024-01-10 | karan842/RAG-with-Qdrant-and-Mixtral | RAG_pipeline.py | from langchain.vectorstores import Qdrant
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
import qdrant_client
from dotenv import load_dotenv
import os
load_dotenv()
qdrant_uri = os.getenv('QDRANT_URI')
qdrant_api_key = os.getenv('QDRANT_API_KEY')
'''
Building RAG pipeline using QdrantDB and LangChain
'''
# Create a Qdrant Client
client = qdrant_client.QdrantClient(
qdrant_uri,
api_key=qdrant_api_key
)
# Create a collection
vectors_config = qdrant_client.http.models.VectorParams(
size=384,
distance=qdrant_client.http.models.Distance.COSINE
)
client.recreate_collection(
collection_name="my-collection",
vectors_config=vectors_config
)
# Define Embeddings using HF
embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L6-v2"
)
# Load document from data directory in .pdf format
def load_documents():
loader = DirectoryLoader('data/', glob="*.pdf", loader_cls=PyPDFLoader)
documents = loader.load()
return documents
# Split texts
def get_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_documents(text)
return chunks
documents = load_documents()
text_chunks = get_chunks(documents)
qdrant = Qdrant.from_documents(
text_chunks,
embeddings,
    url=qdrant_uri,
    api_key=qdrant_api_key,
prefer_grpc=True,
collection_name='my-collection',
)
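# A minimal retrieval sketch against the populated collection (illustrative;
# the query text is a placeholder):
#
#   results = qdrant.similarity_search("What topics does the document cover?", k=3)
#   for doc in results:
#       print(doc.page_content)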
| [] |
2024-01-10 | ceteri/reinforcement_learning | DPPO~dppo_cont_gae_dist_gpu.py | # -*- coding: utf-8 -*-
"""DPPO_cont_GAE_dist_GPU.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1_GZ3wt0ydAf9Fx7YsFuuvOQduCN_NlDC
"""
"""
Distributed Proximal Policy Optimization (Distributed PPO or DPPO) continuous
version implementation with distributed Tensorflow and Python’s multiprocessing
package. This implementation uses normalized running rewards with GAE. The code
is tested with Gym’s continuous action space environment, Pendulum-v0 on Colab.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
#!pip install -q tf-nightly
import tensorflow as tf
tf.reset_default_graph()
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
from multiprocessing import Process
# The following class is adapted from OpenAI's baseline:
# https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
# This class is used for the normalization of rewards in this program before GAE computation.
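# Combining the statistics of two batches follows the parallel algorithm
# referenced above (mirrored by update_from_moments below):
#   delta    = batch_mean - mean
#   new_mean = mean + delta * batch_count / (count + batch_count)
#   M2       = var * count + batch_var * batch_count
#              + delta**2 * count * batch_count / (count + batch_count)
#   new_var  = M2 / (count + batch_count)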
class RunningStats(object):
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.std = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
new_mean = self.mean + delta * batch_count / (self.count + batch_count)
m_a = self.var * self.count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
self.mean = new_mean
self.var = new_var
self.std = np.maximum(np.sqrt(self.var), 1e-6)
self.count = batch_count + self.count
class PPO(object):
def __init__(self, scope, sess, env, global_PPO=None):
self.sess = sess
self.env = env
#OPT_A = tf.train.AdamOptimizer(A_LR, beta1=0.99, beta2=0.999, name='OPT_A')
#OPT_C = tf.train.AdamOptimizer(C_LR, beta1=0.99, beta2=0.999, name='OPT_C')
OPT_A = tf.train.AdamOptimizer(A_LR, name='OPT_A')
OPT_C = tf.train.AdamOptimizer(C_LR, name='OPT_C')
with tf.variable_scope(scope): # scope is either global or wid
self.state = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
with tf.variable_scope('critic'):
h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=True)
self.val = tf.layers.dense(h1, 1, name='val', trainable=True)
self.critic_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
self.discounted_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.discounted_r - self.val
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = OPT_C.minimize(self.closs)
with tf.variable_scope('cgrads'):
self.critic_grad_op = tf.gradients(self.closs, self.critic_params)
# actor
self.pi, self.pi_params = self._build_anet(scope, 'pi', self.env, trainable=True)
self.oldpi, self.oldpi_params = self._build_anet(scope, 'oldpi', self.env, trainable=True) # originally trainable=False
with tf.variable_scope('sample_action'):
self.sample_op = tf.squeeze(self.pi.sample(1), axis=0) # choosing action
with tf.variable_scope('update_oldpi'):
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(self.pi_params, self.oldpi_params)]
self.act = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.adv = tf.placeholder(tf.float32, [None, 1], 'advantage')
with tf.variable_scope('loss'):
with tf.variable_scope('surrogate'):
ratio = self.pi.prob(self.act) / self.oldpi.prob(self.act)
surr = ratio * self.adv
self.aloss = -tf.reduce_mean(tf.minimum(surr, tf.clip_by_value(ratio, 1.-epsilon, 1.+epsilon)*self.adv))
with tf.variable_scope('atrain'):
self.atrain_op = OPT_A.minimize(self.aloss)
with tf.variable_scope('agrads'):
self.pi_grad_op = tf.gradients(self.aloss, self.pi_params)
if scope != net_scope: # not global
with tf.name_scope('params'): # push/pull from local/worker perspective
with tf.name_scope('push_to_global'):
self.push_actor_pi_params = OPT_A.apply_gradients(zip(self.pi_grad_op, global_PPO.pi_params))
self.push_critic_params = OPT_C.apply_gradients(zip(self.critic_grad_op, global_PPO.critic_params))
with tf.name_scope('pull_fr_global'):
self.pull_actor_pi_params = [local_params.assign(global_params) for local_params, global_params in zip(self.pi_params, global_PPO.pi_params)]
self.pull_critic_params = [local_params.assign(global_params) for local_params, global_params in zip(self.critic_params, global_PPO.critic_params)]
def update(self, s, a, r, adv):
self.sess.run(self.update_oldpi_op)
for _ in range(A_EPOCH): # train actor
self.sess.run(self.atrain_op, {self.state: s, self.act: a, self.adv: adv})
# update actor
self.sess.run([self.push_actor_pi_params,
self.pull_actor_pi_params],
{self.state: s, self.act: a, self.adv: adv})
for _ in range(C_EPOCH): # train critic
# update critic
self.sess.run(self.ctrain_op, {self.state: s, self.discounted_r: r})
self.sess.run([self.push_critic_params,
self.pull_critic_params],
{self.state: s, self.discounted_r: r})
def _build_anet(self, scope, name, env, trainable):
with tf.variable_scope(name):
h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=trainable)
mu = self.env.action_space.high * tf.layers.dense(h1, A_DIM, tf.nn.tanh, name='mu', trainable=trainable)
sigma = tf.layers.dense(h1, A_DIM, tf.nn.softplus, name='sigma', trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/' + name)
return norm_dist, params
def choose_action(self, s):
s = s[None, :]
a = self.sess.run(self.sample_op, {self.state: s})[0]
return np.clip(a, self.env.action_space.low, self.env.action_space.high)
def get_val(self, s):
if s.ndim < 2: s = s[None, :]
return self.sess.run(self.val, {self.state: s})[0, 0]
# This function is adapted from OpenAI's Baseline
# GAE computation
    # returns the TD(lambda) return & advantage
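    # The recursion implemented below (delta_t is the TD residual):
    #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t)
    #   A_t     = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}
    # The TD(lambda) return used as the critic target is then A_t + V(s_t).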
def add_vtarg_and_adv(self, R, done, V, v_s_, gamma, lam):
# Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
# last element is only used for last vtarg, but we already zeroed it if last new = 1
done = np.append(done, 0)
V_plus = np.append(V, v_s_)
T = len(R)
adv = gaelam = np.empty(T, 'float32')
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-done[t+1]
delta = R[t] + gamma * V_plus[t+1] * nonterminal - V_plus[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
#print("adv=", adv.shape)
#print("V=", V.shape)
#print("V_plus=", V_plus.shape)
tdlamret = np.vstack(adv) + V
#print("tdlamret=", tdlamret.shape)
return tdlamret, adv # tdlamret is critic_target or Qs
class Worker(object):
def __init__(self, wid, GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess):
self.wid = wid
self.env = gym.make(GAME).unwrapped
self.g_ppo = GLOBAL_PPO
self.ppo = PPO(wid, sess, self.env, GLOBAL_PPO)
self.running_stats_r = RunningStats()
self.sess = sess
self.GLOBAL_EP = GLOBAL_EP
self.GLOBAL_RUNNING_R = GLOBAL_RUNNING_R
def work(self):
T = 0
t = 0
SESS = self.sess
GLOBAL_EP = self.GLOBAL_EP
GLOBAL_RUNNING_R = self.GLOBAL_RUNNING_R
while SESS.run(GLOBAL_EP) < EP_MAX:
s = self.env.reset()
buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
ep_r = 0
for t in range(EP_LEN):
a = self.ppo.choose_action(s)
s_, r, done, _ = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
buffer_done.append(done)
v = self.ppo.get_val(s)
buffer_V.append(v)
s = s_
ep_r += r
# update ppo
if (t+1) % BATCH == 0 or t == EP_LEN-1:
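# Once a minibatch is full (or the episode ends): normalise and clip the collected
# rewards with the running statistics, bootstrap from V(s_) to compute GAE
# advantages and TD(lambda) targets, then run a PPO update that also pushes
# gradients to and pulls weights from the global networks.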
self.running_stats_r.update(np.array(buffer_r))
buffer_r = np.clip( (np.array(buffer_r) - self.running_stats_r.mean) / self.running_stats_r.std, -stats_CLIP, stats_CLIP )
v_s_ = self.ppo.get_val(s_)
tdlamret, adv = self.ppo.add_vtarg_and_adv(np.vstack(buffer_r), np.vstack(buffer_done), np.vstack(buffer_V), v_s_, GAMMA, lamda)
bs, ba, br, b_adv = np.vstack(buffer_s), np.vstack(buffer_a), tdlamret, np.vstack(adv)
buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
self.ppo.update(bs, ba, br, b_adv)
SESS.run(GLOBAL_EP.assign_add(1.0))
qe = GLOBAL_RUNNING_R.enqueue(ep_r)
SESS.run(qe)
GAME = 'Pendulum-v0'
env = gym.make(GAME).unwrapped
net_scope = 'global'
EP_MAX = 500 #500 # max number of episodes
EP_LEN = 200 # episode length
GAMMA = 0.9
lamda = 0.95 #0.95
hidden = 50 #100
A_LR = 0.0001 # actor's learning rate
C_LR = 0.0002 # critic's learning rate
BATCH = 32 # minibatch size
A_EPOCH = 10 # number of epoch
C_EPOCH = 10 # number of epoch
S_DIM, A_DIM = 3, 1 # state, action dimension
stats_CLIP = 10 # upper bound of RunningStats
epsilon=0.2
cluster = tf.train.ClusterSpec({
"worker": ["localhost:3331",
"localhost:3332",
"localhost:3333",
"localhost:3334"
],
"ps": ["localhost:3330"]
})
def parameter_server():
#tf.reset_default_graph()
server = tf.train.Server(cluster,
job_name="ps",
task_index=0)
sess = tf.Session(target=server.target)
with tf.device("/job:ps/task:0"):
GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
# a queue of ep_r
GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
sess.run(tf.global_variables_initializer())
print("Parameter server: variables initialized")
while True:
time.sleep(1.0)
if sess.run(GLOBAL_RUNNING_R.size()) >= EP_MAX: # every episode reward has been enqueued, so training is finished
time.sleep(10.0)
GLOBAL_RUNNING_R_list = []
ep_r_prev = 0.0
for i in range(sess.run(GLOBAL_RUNNING_R.size())):
ep_r = sess.run(GLOBAL_RUNNING_R.dequeue())
if i==0:
GLOBAL_RUNNING_R_list.append(ep_r) # for display
else:
GLOBAL_RUNNING_R_list.append(GLOBAL_RUNNING_R_list[-1]*0.9 + ep_r*0.1) # for display
break
# display
plt.plot(np.arange(len(GLOBAL_RUNNING_R_list)), GLOBAL_RUNNING_R_list)
plt.xlabel('episode')
plt.ylabel('reward')
plt.show()
#print("Parameter server: blocking...")
#server.join() # currently blocks forever
print("Parameter server: ended...")
def worker(worker_n):
#tf.reset_default_graph()
server = tf.train.Server(cluster,
job_name="worker",
task_index=worker_n)
sess = tf.Session(target=server.target)
with tf.device("/job:ps/task:0"):
GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
# a queue of ep_r
GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
"""
with tf.device(tf.train.replica_device_setter(
worker_device='/job:worker/task:' + str(worker_n),
cluster=cluster)):
"""
print("Worker %d: waiting for cluster connection..." % worker_n)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % worker_n)
#while sess.run(tf.report_uninitialized_variables()):
while (sess.run(tf.report_uninitialized_variables())).any(): # ********** .any() .all() **********
print("Worker %d: waiting for variable initialization..." % worker_n)
time.sleep(1.0)
print("Worker %d: variables initialized" % worker_n)
w = Worker(str(worker_n), GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess)
print("Worker %d: created" % worker_n)
sess.run(tf.global_variables_initializer()) # got to initialize after Worker creation
w.work()
print("Worker %d: w.work()" % worker_n)
#print("Worker %d: blocking..." % worker_n)
server.join() # currently blocks forever
print("Worker %d: ended..." % worker_n)
start_time = time.time()
ps_proc = Process(target=parameter_server, daemon=True)
w1_proc = Process(target=worker, args=(0, ), daemon=True)
w2_proc = Process(target=worker, args=(1, ), daemon=True)
w3_proc = Process(target=worker, args=(2, ), daemon=True)
w4_proc = Process(target=worker, args=(3, ), daemon=True)
ps_proc.start()
w1_proc.start()
w2_proc.start()
w3_proc.start()
w4_proc.start()
# if we don't join, the parent will terminate before the children
# and the children will terminate as well, because they are daemon processes
ps_proc.join()
#w1_proc.join()
#w2_proc.join()
#w3_proc.join()
#w4_proc.join()
for proc in [w1_proc,
w2_proc,
w3_proc,
w4_proc,
ps_proc]:
proc.terminate() # only way to kill the server is to kill its process
print('All done.')
print("--- %s seconds ---" % (time.time() - start_time))
| [] |
2024-01-10 | photonn/danswer | backend~danswer~chat~chat_prompts.py | from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from danswer.chunking.models import InferenceChunk
from danswer.configs.constants import CODE_BLOCK_PAT
from danswer.db.models import ChatMessage
from danswer.llm.utils import translate_danswer_msg_to_langchain
DANSWER_TOOL_NAME = "Current Search"
DANSWER_TOOL_DESCRIPTION = (
"A search tool that can find information on any topic "
"including up to date and proprietary knowledge."
)
DANSWER_SYSTEM_MSG = (
"Given a conversation (between Human and Assistant) and a final message from Human, "
"rewrite the last message to be a standalone question which captures required/relevant context "
"from previous messages. This question must be useful for a semantic search engine. "
"It is used for a natural language search."
)
TOOL_TEMPLATE = """
TOOLS
------
You can use tools to look up information that may be helpful in answering the user's \
original question. The available tools are:
{tool_overviews}
RESPONSE FORMAT INSTRUCTIONS
----------------------------
When responding to me, please output a response in one of two formats:
**Option 1:**
Use this if you want to use a tool. Markdown code snippet formatted in the following schema:
```json
{{
"action": string, \\ The action to take. Must be one of {tool_names}
"action_input": string \\ The input to the action
}}
```
**Option #2:**
Use this if you want to respond directly to the user. Markdown code snippet formatted in the following schema:
```json
{{
"action": "Final Answer",
"action_input": string \\ You should put what you want to return to use here
}}
```
"""
TOOL_LESS_PROMPT = """
Respond with a markdown code snippet in the following schema:
```json
{{
"action": "Final Answer",
"action_input": string \\ You should put what you want to return to use here
}}
```
"""
USER_INPUT = """
USER'S INPUT
--------------------
Here is the user's input \
(remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):
{user_input}
"""
TOOL_FOLLOWUP = """
TOOL RESPONSE:
---------------------
{tool_output}
USER'S INPUT
--------------------
Okay, so what is the response to my last comment? If using information obtained from the tools you must \
mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES!
If the tool response is not useful, ignore it completely.
{optional_reminder}{hint}
IMPORTANT! You MUST respond with a markdown code snippet of a json blob with a single action, and NOTHING else.
"""
def form_user_prompt_text(
query: str,
tool_text: str | None,
hint_text: str | None,
user_input_prompt: str = USER_INPUT,
tool_less_prompt: str = TOOL_LESS_PROMPT,
) -> str:
user_prompt = tool_text or tool_less_prompt
user_prompt += user_input_prompt.format(user_input=query)
if hint_text:
if user_prompt[-1] != "\n":
user_prompt += "\n"
user_prompt += "\nHint: " + hint_text
return user_prompt.strip()
def form_tool_section_text(
tools: list[dict[str, str]], retrieval_enabled: bool, template: str = TOOL_TEMPLATE
) -> str | None:
if not tools and not retrieval_enabled:
return None
if retrieval_enabled:
tools.append(
{"name": DANSWER_TOOL_NAME, "description": DANSWER_TOOL_DESCRIPTION}
)
tools_intro = []
for tool in tools:
description_formatted = tool["description"].replace("\n", " ")
tools_intro.append(f"> {tool['name']}: {description_formatted}")
tools_intro_text = "\n".join(tools_intro)
tool_names_text = ", ".join([tool["name"] for tool in tools])
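# Example: with retrieval enabled and no extra tools, {tool_overviews} renders as
# "> Current Search: A search tool that can find information on any topic including
# up to date and proprietary knowledge." and {tool_names} becomes "Current Search".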
return template.format(
tool_overviews=tools_intro_text, tool_names=tool_names_text
).strip()
def format_danswer_chunks_for_chat(chunks: list[InferenceChunk]) -> str:
return "\n".join(
f"DOCUMENT {ind}:{CODE_BLOCK_PAT.format(chunk.content)}"
for ind, chunk in enumerate(chunks, start=1)
)
def form_tool_followup_text(
tool_output: str,
query: str,
hint_text: str | None,
tool_followup_prompt: str = TOOL_FOLLOWUP,
ignore_hint: bool = False,
) -> str:
# If multi-line query, it likely confuses the model more than helps
if "\n" not in query:
optional_reminder = f"\nAs a reminder, my query was: {query}\n"
else:
optional_reminder = ""
if not ignore_hint and hint_text:
hint_text_spaced = f"\nHint: {hint_text}\n"
else:
hint_text_spaced = ""
return tool_followup_prompt.format(
tool_output=tool_output,
optional_reminder=optional_reminder,
hint=hint_text_spaced,
).strip()
def build_combined_query(
query_message: ChatMessage,
history: list[ChatMessage],
) -> list[BaseMessage]:
user_query = query_message.message
combined_query_msgs: list[BaseMessage] = []
if not user_query:
raise ValueError("Can't rephrase/search an empty query")
combined_query_msgs.append(SystemMessage(content=DANSWER_SYSTEM_MSG))
combined_query_msgs.extend(
[translate_danswer_msg_to_langchain(msg) for msg in history]
)
combined_query_msgs.append(
HumanMessage(
content=(
"Help me rewrite this final message into a standalone query that takes into consideration the "
f"past messages of the conversation if relevant. This query is used with a semantic search engine to "
f"retrieve documents. You must ONLY return the rewritten query and nothing else."
f"\n\nQuery:\n{query_message.message}"
)
)
)
return combined_query_msgs
| [
"\n",
"Help me rewrite this final message into a standalone query that takes into consideration the ",
"past messages of the conversation if relevant. This query is used with a semantic search engine to ",
"\nHint: PLACEHOLDER",
"retrieve documents. You must ONLY return the rewritten query and nothing else.",
"\nRespond with a markdown code snippet in the following schema:\n\n```json\n{{\n \"action\": \"Final Answer\",\n \"action_input\": string \\ You should put what you want to return to use here\n}}\n```\n",
"\nTOOLS\n------\nYou can use tools to look up information that may be helpful in answering the user's original question. The available tools are:\n\n{tool_overviews}\n\nRESPONSE FORMAT INSTRUCTIONS\n----------------------------\nWhen responding to me, please output a response in one of two formats:\n\n**Option 1:**\nUse this if you want to use a tool. Markdown code snippet formatted in the following schema:\n\n```json\n{{\n \"action\": string, \\ The action to take. Must be one of {tool_names}\n \"action_input\": string \\ The input to the action\n}}\n```\n\n**Option #2:**\nUse this if you want to respond directly to the user. Markdown code snippet formatted in the following schema:\n\n```json\n{{\n \"action\": \"Final Answer\",\n \"action_input\": string \\ You should put what you want to return to use here\n}}\n```\n"
] |
2024-01-10 | photonn/danswer | backend~danswer~chat~chat_llm.py | from collections.abc import Callable
from collections.abc import Iterator
from uuid import UUID
from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from danswer.chat.chat_prompts import build_combined_query
from danswer.chat.chat_prompts import DANSWER_TOOL_NAME
from danswer.chat.chat_prompts import form_tool_followup_text
from danswer.chat.chat_prompts import form_user_prompt_text
from danswer.chat.chat_prompts import format_danswer_chunks_for_chat
from danswer.chat.tools import call_tool
from danswer.configs.app_configs import NUM_DOCUMENT_TOKENS_FED_TO_CHAT
from danswer.configs.constants import IGNORE_FOR_QA
from danswer.configs.model_configs import GEN_AI_MAX_INPUT_TOKENS
from danswer.datastores.document_index import get_default_document_index
from danswer.db.models import ChatMessage
from danswer.db.models import Persona
from danswer.direct_qa.interfaces import DanswerAnswerPiece
from danswer.direct_qa.interfaces import DanswerChatModelOut
from danswer.direct_qa.qa_utils import get_usable_chunks
from danswer.llm.build import get_default_llm
from danswer.llm.llm import LLM
from danswer.llm.utils import get_default_llm_tokenizer
from danswer.llm.utils import translate_danswer_msg_to_langchain
from danswer.search.semantic_search import retrieve_ranked_documents
from danswer.utils.logger import setup_logger
from danswer.utils.text_processing import extract_embedded_json
from danswer.utils.text_processing import has_unescaped_quote
logger = setup_logger()
LLM_CHAT_FAILURE_MSG = "The large-language-model failed to generate a valid response."
def _parse_embedded_json_streamed_response(
tokens: Iterator[str],
) -> Iterator[DanswerAnswerPiece | DanswerChatModelOut]:
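# Incrementally parse the streamed JSON blob: once the accumulated output contains
# '"action": "Final Answer"' and the opening of '"action_input": "', the answer text
# is yielded piece by piece (buffered in `hold`) until the first unescaped closing
# quote. The full accumulated output is then parsed into a DanswerChatModelOut that
# records which action (tool call or final answer) the model chose.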
final_answer = False
just_start_stream = False
model_output = ""
hold = ""
finding_end = 0
for token in tokens:
model_output += token
hold += token
if (
final_answer is False
and '"action":"finalanswer",' in model_output.lower().replace(" ", "")
):
final_answer = True
if final_answer and '"actioninput":"' in model_output.lower().replace(
" ", ""
).replace("_", ""):
if not just_start_stream:
just_start_stream = True
hold = ""
if has_unescaped_quote(hold):
finding_end += 1
hold = hold[: hold.find('"')]
if finding_end <= 1:
if finding_end == 1:
finding_end += 1
yield DanswerAnswerPiece(answer_piece=hold)
hold = ""
model_final = extract_embedded_json(model_output)
if "action" not in model_final or "action_input" not in model_final:
raise ValueError("Model did not provide all required action values")
yield DanswerChatModelOut(
model_raw=model_output,
action=model_final["action"],
action_input=model_final["action_input"],
)
return
def _find_last_index(
lst: list[int], max_prompt_tokens: int = GEN_AI_MAX_INPUT_TOKENS
) -> int:
"""From the back, find the index of the last element to include
before the list exceeds the maximum"""
running_sum = 0
last_ind = 0
for i in range(len(lst) - 1, -1, -1):
running_sum += lst[i]
if running_sum > max_prompt_tokens:
last_ind = i + 1
break
if last_ind >= len(lst):
raise ValueError("Last message alone is too large!")
return last_ind
def danswer_chat_retrieval(
query_message: ChatMessage,
history: list[ChatMessage],
llm: LLM,
user_id: UUID | None,
) -> str:
if history:
query_combination_msgs = build_combined_query(query_message, history)
reworded_query = llm.invoke(query_combination_msgs)
else:
reworded_query = query_message.message
# Good Debug/Breakpoint
ranked_chunks, unranked_chunks = retrieve_ranked_documents(
reworded_query,
user_id=user_id,
filters=None,
datastore=get_default_document_index(),
)
if not ranked_chunks:
return "No results found"
if unranked_chunks:
ranked_chunks.extend(unranked_chunks)
filtered_ranked_chunks = [
chunk for chunk in ranked_chunks if not chunk.metadata.get(IGNORE_FOR_QA)
]
# get all chunks that fit into the token limit
usable_chunks = get_usable_chunks(
chunks=filtered_ranked_chunks,
token_limit=NUM_DOCUMENT_TOKENS_FED_TO_CHAT,
)
return format_danswer_chunks_for_chat(usable_chunks)
def _drop_messages_history_overflow(
system_msg: BaseMessage | None,
system_token_count: int,
history_msgs: list[BaseMessage],
history_token_counts: list[int],
final_msg: BaseMessage,
final_msg_token_count: int,
) -> list[BaseMessage]:
"""As message history grows, messages need to be dropped starting from the furthest in the past.
The System message should be kept if at all possible and the latest user input which is inserted in the
prompt template must be included"""
if len(history_msgs) != len(history_token_counts):
# This should never happen
raise ValueError("Need exactly 1 token count per message for tracking overflow")
prompt: list[BaseMessage] = []
# Start dropping from the history if necessary
all_tokens = history_token_counts + [system_token_count, final_msg_token_count]
ind_prev_msg_start = _find_last_index(all_tokens)
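# Since the system and final-message token counts are appended at the end and
# _find_last_index scans from the back, their budget is always reserved; the index
# it returns is therefore the oldest history message that still fits.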
if system_msg and ind_prev_msg_start <= len(history_msgs):
prompt.append(system_msg)
prompt.extend(history_msgs[ind_prev_msg_start:])
prompt.append(final_msg)
return prompt
def llm_contextless_chat_answer(
messages: list[ChatMessage],
tokenizer: Callable | None = None,
system_text: str | None = None,
) -> Iterator[str]:
try:
prompt_msgs = [translate_danswer_msg_to_langchain(msg) for msg in messages]
if system_text:
tokenizer = tokenizer or get_default_llm_tokenizer()
system_tokens = len(tokenizer(system_text))
system_msg = SystemMessage(content=system_text)
message_tokens = [msg.token_count for msg in messages] + [system_tokens]
else:
message_tokens = [msg.token_count for msg in messages]
last_msg_ind = _find_last_index(message_tokens)
remaining_user_msgs = prompt_msgs[last_msg_ind:]
if not remaining_user_msgs:
raise ValueError("Last user message is too long!")
if system_text:
all_msgs = [system_msg] + remaining_user_msgs
else:
all_msgs = remaining_user_msgs
return get_default_llm().stream(all_msgs)
except Exception as e:
logger.error(f"LLM failed to produce valid chat message, error: {e}")
return (msg for msg in [LLM_CHAT_FAILURE_MSG]) # needs to be an Iterator
def llm_contextual_chat_answer(
messages: list[ChatMessage],
persona: Persona,
user_id: UUID | None,
tokenizer: Callable,
) -> Iterator[str]:
retrieval_enabled = persona.retrieval_enabled
system_text = persona.system_text
tool_text = persona.tools_text
hint_text = persona.hint_text
last_message = messages[-1]
previous_messages = messages[:-1]
previous_msgs_as_basemessage = [
translate_danswer_msg_to_langchain(msg) for msg in previous_messages
]
# Failure reasons include:
# - Invalid LLM output, wrong format or wrong/missing keys
# - No "Final Answer" from model after tool calling
# - LLM times out or is otherwise unavailable
# - Calling invalid tool or tool call fails
# - Last message has more tokens than model is set to accept
# - Missing user input
try:
if not last_message.message:
raise ValueError("User chat message is empty.")
# Build the prompt using the last user message
user_text = form_user_prompt_text(
query=last_message.message,
tool_text=tool_text,
hint_text=hint_text,
)
last_user_msg = HumanMessage(content=user_text)
# Count tokens once to reuse
previous_msg_token_counts = [msg.token_count for msg in previous_messages]
system_tokens = len(tokenizer(system_text)) if system_text else 0
last_user_msg_tokens = len(tokenizer(user_text))
prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=system_text) if system_text else None,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage,
history_token_counts=previous_msg_token_counts,
final_msg=last_user_msg,
final_msg_token_count=last_user_msg_tokens,
)
llm = get_default_llm()
# Good Debug/Breakpoint
tokens = llm.stream(prompt)
final_result: DanswerChatModelOut | None = None
final_answer_streamed = False
for result in _parse_embedded_json_streamed_response(tokens):
if isinstance(result, DanswerAnswerPiece) and result.answer_piece:
yield result.answer_piece
final_answer_streamed = True
if isinstance(result, DanswerChatModelOut):
final_result = result
break
if final_answer_streamed:
return
if final_result is None:
raise RuntimeError("Model output finished without final output parsing.")
if (
retrieval_enabled
and final_result.action.lower() == DANSWER_TOOL_NAME.lower()
):
tool_result_str = danswer_chat_retrieval(
query_message=last_message,
history=previous_messages,
llm=llm,
user_id=user_id,
)
else:
tool_result_str = call_tool(final_result, user_id=user_id)
# The AI's tool calling message
tool_call_msg_text = final_result.model_raw
tool_call_msg_token_count = len(tokenizer(tool_call_msg_text))
# Create the new message to use the results of the tool call
tool_followup_text = form_tool_followup_text(
tool_output=tool_result_str,
query=last_message.message,
hint_text=hint_text,
)
tool_followup_msg = HumanMessage(content=tool_followup_text)
tool_followup_tokens = len(tokenizer(tool_followup_text))
# Drop previous messages, the drop order goes: previous messages in the history,
# the last user prompt and generated intermediate messages from this recent prompt,
# the system message, then finally the tool message that was the last thing generated
follow_up_prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=system_text) if system_text else None,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage
+ [last_user_msg, AIMessage(content=tool_call_msg_text)],
history_token_counts=previous_msg_token_counts
+ [last_user_msg_tokens, tool_call_msg_token_count],
final_msg=tool_followup_msg,
final_msg_token_count=tool_followup_tokens,
)
# Good Debug/Breakpoint
tokens = llm.stream(follow_up_prompt)
for result in _parse_embedded_json_streamed_response(tokens):
if isinstance(result, DanswerAnswerPiece) and result.answer_piece:
yield result.answer_piece
final_answer_streamed = True
if final_answer_streamed is False:
raise RuntimeError("LLM did not to produce a Final Answer after tool call")
except Exception as e:
logger.error(f"LLM failed to produce valid chat message, error: {e}")
yield LLM_CHAT_FAILURE_MSG
def llm_chat_answer(
messages: list[ChatMessage],
persona: Persona | None,
user_id: UUID | None,
tokenizer: Callable,
) -> Iterator[str]:
# Common error cases to keep in mind:
# - User asks question about something long ago, due to context limit, the message is dropped
# - Tool use gives wrong/irrelevant results, model gets confused by the noise
# - Model is too weak of an LLM, fails to follow instructions
# - Bad persona design leads to confusing instructions to the model
# - Bad configurations, too small token limit, mismatched tokenizer to LLM, etc.
if persona is None:
return llm_contextless_chat_answer(messages)
elif persona.retrieval_enabled is False and persona.tools_text is None:
return llm_contextless_chat_answer(
messages, tokenizer, system_text=persona.system_text
)
return llm_contextual_chat_answer(
messages=messages, persona=persona, user_id=user_id, tokenizer=tokenizer
)
| [] |
2024-01-10 | radoshi/llm-code | llm_code~llm_code.py | import sys
from pathlib import Path
from typing import Optional, Tuple
import click
import openai
import pyperclip
from pydantic import BaseSettings
from rich.console import Console
from rich.syntax import Syntax
from llm_code import __version__, db
from llm_code.templates import Message, TemplateLibrary
class Settings(BaseSettings):
openai_api_key: str = ""
model: str = "gpt-3.5-turbo"
temperature: float = 0.8
max_tokens: int = 1000
config_dir: Path = Path("~/.llm_code").expanduser()
class Config:
env_file = Path("~/.llm_code").expanduser() / "env"
env_file_encoding = "utf-8"
def load_templates(path: Path) -> Optional[TemplateLibrary]:
path = path / "prompts"
if path.exists():
return TemplateLibrary.from_file_or_directory(path)
else:
return None
def init_db(config_dir: Path):
config_dir.mkdir(parents=True, exist_ok=True)
db_path = config_dir / "db.sqlite"
_ = db.Database.get(db_path)
def get_cached_response(settings: Settings, messages: list[dict]) -> Optional[Message]:
record = db.get_last_inserted_row()
if not record:
return None
if (
record.model != settings.model
or record.temperature != settings.temperature
or record.max_tokens != settings.max_tokens
or record.system_message != messages[0]["content"]
or record.user_message != messages[1]["content"]
):
return None
return Message(
role="assistant",
content=record.assistant_message,
)
def get_code(inputs) -> str:
files = [f for input in inputs for f in Path.cwd().glob(input)]
file_name = [f.name for f in files]
file_texts = [f.read_text() for f in files]
file_blobs = [
f"FILENAME: {name}\n```{text}\n```"
for (name, text) in zip(file_name, file_texts)
]
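# Each blob looks like "FILENAME: app.py\n```<file contents>\n```" (file names here are
# illustrative); blobs for multiple files are joined with "\n---\n" below.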
return "\n---\n".join(file_blobs)
def get_max_tokens(message: str) -> int:
return len(message.split(" "))
@click.command()
@click.option(
"-i",
"--inputs",
default=None,
multiple=True,
help="Glob of input files. Use repeatedly for multiple files.",
)
@click.option("-cb", "--clipboard", is_flag=True, help="Copy code to clipboard.")
@click.option("-nc", "--no-cache", is_flag=True, help="Don't use cache.")
@click.option("-4", "--gpt-4", is_flag=True, help="Use GPT-4.")
@click.option("--version", is_flag=True, help="Show version.")
@click.argument("instructions", nargs=-1)
def main(
inputs: Optional[Tuple[str, ...]],
instructions: Tuple[str, ...],
version: bool,
no_cache: bool,
gpt_4: bool,
clipboard: bool,
):
"""Coding assistant using OpenAI's chat models.
Requires OPENAI_API_KEY as an environment variable. Alternately, you can set it in
~/.llm_code/env.
"""
console = Console()
if version:
console.print(f"[bold green]llm_code[/] version {__version__}")
sys.exit(0)
settings = Settings()
if not settings.openai_api_key:
raise click.UsageError("OPENAI_API_KEY must be set.")
if gpt_4:
settings.model = "gpt-4"
init_db(settings.config_dir)
if not instructions:
raise click.UsageError("Please provide some instructions.")
library = load_templates(settings.config_dir) or load_templates(
Path(__file__).parent.parent
)
if not library:
raise click.UsageError("No templates found.")
if inputs:
code = get_code(inputs)
message = library["coding/input"].message(
code=code, instructions=" ".join(instructions)
)
else:
message = library["coding/simple"].message(instructions=" ".join(instructions))
messages = [library["coding/system"].message(), message]
cached_response = get_cached_response(settings, messages)
if no_cache or not cached_response:
with console.status("[bold green]Asking OpenAI..."):
response = openai.ChatCompletion.create(
api_key=settings.openai_api_key,
model=settings.model,
temperature=settings.temperature,
max_tokens=settings.max_tokens,
messages=messages,
)
message = Message.from_message(response.choices[0]["message"]) # type: ignore
db.write(
model=settings.model,
temperature=settings.temperature,
max_tokens=settings.max_tokens,
system_message=messages[0]["content"],
user_message=messages[1]["content"],
assistant_message=message.content,
input_tokens=response.usage["prompt_tokens"], # type: ignore
output_tokens=response.usage["completion_tokens"], # type: ignore
)
else:
message = cached_response
code_block = message.code()
if code_block:
console.print(Syntax(code_block.code, code_block.lang, word_wrap=True))
if clipboard:
pyperclip.copy(code_block.code)
else:
console.print(f"No code found in message: \n\n{message.content}")
sys.exit(1)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | alexjercan/scufris | scufris~tools~weather.py | """This module contains the WeatherTool class."""
from typing import Optional
import requests
from langchain.tools import BaseTool
class WeatherTool(BaseTool):
"""This tool allows you to get the weather using the wttr.in service."""
name = "Weather"
description = (
"This tool allows you to get the weather using the wttr.in service;"
"you can get the weather in your current location by passing no "
"arguments or in a specific location by passing the location as an "
"argument;"
)
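# Example (assuming wttr.in is reachable): running the tool with query="London"
# returns a single-line report along the lines of "London: ⛅️ +8°C ↗11km/h";
# the exact text and emoji come from wttr.in's "format=4" layout.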
def _run(self, *args, query: Optional[str] = None, **kwargs) -> str:
if query is None:
query = ""
try:
response = requests.get(f"https://wttr.in/{query}?format=4", timeout=5)
except requests.exceptions.Timeout:
return "Sorry, the weather service is not responding right now."
except requests.exceptions.ConnectionError:
return "Sorry, could not connect to the weather service."
except requests.exceptions.RequestException:
return "Sorry, something went wrong with the weather service."
return response.text
async def _arun(self, *args, **kwargs) -> str:
raise NotImplementedError("Weather does not support async")
| [] |
2024-01-10 | kompy99/Quiz-Generator | quizcreator.py | # %pip install "evadb[document, notebook]"
# %pip install openai
# %pip install --upgrade tiktoken
# %pip install transformers
import argparse
import os
import evadb
import openai
import numpy as np
import random
import time
import tiktoken
import json
from transformers import BartTokenizer, BartForConditionalGeneration
from timeit import default_timer as timer
#Enter your OpenAI API key here
openai.api_key = "<OPENAI-API-KEY>"
# Enter the path to your PDF here
pdf_path = '<PDF-PATH>'
path = os.path.dirname(evadb.__file__)
def loadPDF(filepath, pdf_table_name, embeddings_table_name):
cursor = evadb.connect(path).cursor()
drop_pdf_table = f""" DROP TABLE IF EXISTS {pdf_table_name};"""
load_pdf_data = f"""LOAD PDF '{filepath}' INTO {pdf_table_name};"""
create_embedding_function = f"""CREATE FUNCTION IF NOT EXISTS get_embedding IMPL '{path}/functions/sentence_feature_extractor.py'; """
drop_embeddings_table = f""" DROP TABLE IF EXISTS {embeddings_table_name};"""
get_pdf_embeddings = f"""CREATE TABLE IF NOT EXISTS {embeddings_table_name} AS SELECT get_embedding(data), data FROM {pdf_table_name};"""
drop_embeddings_index = f""" DROP INDEX IF EXISTS embedding_index;"""
build_faiss_index = f""" CREATE INDEX embedding_index ON {embeddings_table_name}(features) USING FAISS;"""
cursor.query(drop_pdf_table).execute()
cursor.query(load_pdf_data).execute()
cursor.query(create_embedding_function).execute()
cursor.query(drop_embeddings_table).execute()
cursor.query(get_pdf_embeddings).execute()
cursor.query(drop_embeddings_index).execute()
cursor.query(build_faiss_index).execute()
def getPageCount(pdf_table_name: str) -> int:
cursor = evadb.connect(path).cursor()
get_page_count = f"""SELECT MAX(page) FROM {pdf_table_name} """
page_counts_df = cursor.query(get_page_count).df()
page_count = np.max(page_counts_df.loc[:, 'MAX.page'])
return page_count
def getParagraphCount(pdf_table_name: str, page_number: int) -> int:
cursor = evadb.connect(path).cursor()
get_para_count = f"""SELECT page, MAX(paragraph) FROM {pdf_table_name} where page = {page_number}"""
para_counts_df = cursor.query(get_para_count).df()
para_count = np.max(para_counts_df.loc[:, 'MAX.paragraph'])
return para_count
def generatePageSummary(pdf_table_name: str, page_number: int) -> str:
cursor = evadb.connect(path).cursor()
tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
results = cursor.query(f"""SELECT page, paragraph, data from {pdf_table_name} where page = {page_number}""").df()
dataKey = f'''{pdf_table_name}.data'''
context = "\n".join(results[dataKey])
tokenized_context = tokenizer.encode(context,truncation=True, return_tensors="pt")
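# truncation=True clips the concatenated page text to BART's maximum input length
# (1024 tokens for facebook/bart-large-cnn), so very long pages are summarised from
# their beginning only.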
outputs = model.generate(tokenized_context, max_length=150, min_length=100, num_beams=4, length_penalty=2.0, early_stopping=True)
generated_summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
return generated_summary
pdf_embeddings_table = "pdf_embeddings"
pdf_data_table = "pdf_table"
start_load_pdf = timer()
loadPDF(pdf_path, pdf_data_table, pdf_embeddings_table)
end_load_pdf = timer()
pdf_load_time = end_load_pdf - start_load_pdf
random.seed(time.time())
page_set = set()
num_pages = getPageCount(pdf_data_table)
for _ in range(5):
random_page_number = random.randint(1, num_pages)
page_set.add(random_page_number)
summaries = []
summary_generation_start_time = timer()
for page in page_set:
generated_summary = generatePageSummary(pdf_table_name=pdf_data_table, page_number = page)
print("\n Summary for page - " + str(page) + "\n---------------------\n")
print("\n", generated_summary, "\n")
summaries.append(generated_summary)
summary_generation_end_time = timer()
summary_generation_time = summary_generation_end_time - summary_generation_start_time
summarized_context = "\n".join(summaries)
gpt_api_response_start_time = timer()
gptResponse = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": """You are a question generator. User will give content. You need to create multiple choice questions. Questions and answers have to be from the content only. Generate conceptual questions. Do not use any extra knowledge you may have on the subject.
Every option must have a number associated with it. The answer must be the correct option number only. Generate 5 questions. Ensure that your output is in the following JSON format only. Sample question has been provided below:
questions : [
{
question: What is 1+1?,
options : [(1) 3, (2) 4, (3) 5, (4) 2],
answer: 4
}
]
"""},
{"role": "user", "content": f"""{summarized_context}"""},
]
)
gpt_api_response_end_time = timer()
gpt_api_response_time = gpt_api_response_end_time - gpt_api_response_start_time
encoding = tiktoken.encoding_for_model('gpt-3.5-turbo')
num_tokens = len(encoding.encode(summarized_context))
print("\nMETRICS\n-------")
print("\nToken Usage\n-----------\nSummary tokens - " + str(num_tokens) + "\t Summary + Prompt tokens - " + str(gptResponse.usage.prompt_tokens) + "\t\tCompletion tokens - " + str(gptResponse.usage.completion_tokens))
print("\nTotal Tokens - " + str(gptResponse.usage.total_tokens))
print("\n\nPerformance\n-----------\nPdf loading - " + str(pdf_load_time) + " seconds \tSummary generation - " + str(summary_generation_time) + " seconds \tGPT API response - " + str(gpt_api_response_time) + " seconds")
quiz_data = json.loads(gptResponse.choices[0].message.content)
score = 0
num_questions = len(quiz_data['questions'])
print("\nYour practice quiz is ready!")
print("\nPRACTICE QUIZ\n--------------\n\n")
print(f"Instructions\n-------------\n\nThere will be {num_questions} questions in total. \nFor each question, enter only your choice (1,2,3 or 4). \nYou will see your score at the end.\n\nGood luck!!")
question_num = 0
print("\n\nQuiz\n------\n\n")
for question in quiz_data['questions']:
question_num+=1
print("Q" + str(question_num) + ") " + question['question'])
for option in question['options']:
print(option)
user_answer = int(input("Your answer: "))
if user_answer == question['answer']:
print("Correct!\n")
score+=1
else:
print(f"Sorry, the correct answer is: {question['answer']}\n")
print(f"\n\nYour score: {score}/{num_questions}")
| [
"PLACEHOLDER",
"You are a question generator. User will give content. You need to create multiple choice questions. Questions and answers have to be from the content only. Generate conceptual questions. Do not use any extra knowledge you may have on the subject.\n Every option must have a number assoicated with it. The answer must be the correct option number only. Generate 5 questions. Ensure that your output is in the following JSON format only. Sample question has been provided below:\n\n questions : [\n {\n question: What is 1+1?,\n options : [(1) 3, (2) 4, (3) 5, (4) 2],\n answer: 4\n }\n ]\n "
] |
2024-01-10 | code-greg-42/AutoGrocer3 | utils~authenticate_all.py | import os
import boto3
import openai
from google.cloud import vision
from utils.print_messages import cook_tim_sys_prompt, cook_tim_intro
# NECESSARY ENV VARIABLES:
# 1. AWS_ACCESS_KEY_ID
# 2. AWS_SECRET_ACCESS_KEY
# 3. GOOGLE_APPLICATION_CREDENTIALS
# 4. OPENAI_API_KEY
# OPTIONAL ENV VARIABLES:
# 1. TWILIO_ACCOUNT_SID
# 2. TWILIO_AUTH_TOKEN
def auth_all(user_name, use_twilio=False):
# aws auth
aws_key = os.environ.get("AWS_ACCESS_KEY_ID")
aws_secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
if not aws_key or not aws_secret_key:
raise ValueError("AWS environment variables not set up correctly.")
session = boto3.Session(
aws_access_key_id=aws_key,
aws_secret_access_key=aws_secret_key,
)
aws_s3_client = session.client('s3')
aws_bucket_name = f"autogrocer-user-{user_name.strip().lower().replace(' ', '-')}"
# Check if the bucket exists
try:
aws_s3_client.head_bucket(Bucket=aws_bucket_name)
except:
# If the bucket does not exist, create it
aws_s3_client.create_bucket(Bucket=aws_bucket_name)
try:
aws_response = aws_s3_client.get_object(Bucket=aws_bucket_name, Key='chat_history.txt')
chat_history = aws_response['Body'].read().decode('utf-8')
except:
# if chat history doesn't exist yet, init it
chat_history = [{
"role": "system",
"content": cook_tim_sys_prompt
},
{
"role": "assistant",
"content": cook_tim_intro
}]
# print("AWS S3 initialized.")
# google cloud auth
credentials_path = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
if not credentials_path:
raise ValueError("Google Cloud environment variable not set up correctly.")
google_cloud_client = vision.ImageAnnotatorClient.from_service_account_json(credentials_path)
# print("Google Cloud client initialized.")
# openai auth
openai_api_key = os.environ.get("OPENAI_API_KEY")
if not openai_api_key:
raise ValueError("OpenAI environment variable not set up correctly.")
openai.api_key = os.environ.get("OPENAI_API_KEY")
# print("OpenAI client initialized.")
twilio_client = None
if use_twilio:
from twilio.rest import Client
# twilio auth
account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
if not account_sid or not auth_token:
raise ValueError("Twilio environment variables not set up correctly.")
twilio_client = Client(account_sid, auth_token)
# print("Twilio client initialized.")
return {
"chat_history": chat_history,
"aws_s3_client": aws_s3_client,
"aws_bucket_name": aws_bucket_name,
"google_cloud_client": google_cloud_client,
"twilio_client": twilio_client
} | [] |
2024-01-10 | ThomasMyrseth/webscrape | gptReview.py | import openai
openai.api_key = "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"
conversation_history = [{"role": "system", "content": "You are a financial data analacyst. Summarise the data ypu are given and point out important changes compared to previous reports"}]
class gptReview:
def __init__(self):
pass
def askGpt(self, prompt):
message = {"role": "user", "content": prompt}
conversation_history.append(message)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation_history
)
generatedText = response.choices[0].message["content"]
print("askGpt ran succesfully")
return generatedText | [
"You are a financial data analacyst. Summarise the data ypu are given and point out important changes compared to previous reports"
] |
2024-01-10 | liubo0902/Grounded-Segment-Anything | automatic_label_tag2text_demo.py | import argparse
import os
import copy
import numpy as np
import json
import torch
import torchvision
from PIL import Image, ImageDraw, ImageFont
import nltk
# Grounding DINO
import GroundingDINO.groundingdino.datasets.transforms as T
from GroundingDINO.groundingdino.models import build_model
from GroundingDINO.groundingdino.util import box_ops
from GroundingDINO.groundingdino.util.slconfig import SLConfig
from GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
# segment anything
from segment_anything import build_sam, SamPredictor
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Tag2Text
import sys
sys.path.append('Tag2Text')
from Tag2Text.models import tag2text
from Tag2Text import inference
import torchvision.transforms as TS
# ChatGPT or nltk is required when using captions
# import openai
# import nltk
def load_image(image_path):
# load image
image_pil = Image.open(image_path).convert("RGB") # load image
transform = T.Compose(
[
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
image, _ = transform(image_pil, None) # 3, h, w
return image_pil, image
def generate_caption(raw_image, device):
# unconditional image captioning
if device == "cuda":
inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
else:
inputs = processor(raw_image, return_tensors="pt")
out = blip_model.generate(**inputs)
caption = processor.decode(out[0], skip_special_tokens=True)
return caption
def generate_tags(caption, split=',', max_tokens=100, model="gpt-3.5-turbo"):
lemma = nltk.wordnet.WordNetLemmatizer()
if openai_key:
prompt = [
{
'role': 'system',
'content': 'Extract the unique nouns in the caption. Remove all the adjectives. ' + \
f'List the nouns in singular form. Split them by "{split} ". ' + \
f'Caption: {caption}.'
}
]
response = openai.ChatCompletion.create(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
reply = response['choices'][0]['message']['content']
# sometimes return with "noun: xxx, xxx, xxx"
tags = reply.split(':')[-1].strip()
else:
nltk.download(['punkt', 'averaged_perceptron_tagger', 'wordnet'])
tags_list = [word for (word, pos) in nltk.pos_tag(nltk.word_tokenize(caption)) if pos[0] == 'N']
tags_lemma = [lemma.lemmatize(w) for w in tags_list]
tags = ', '.join(map(str, tags_lemma))
return tags
def check_caption(caption, pred_phrases, max_tokens=100, model="gpt-3.5-turbo"):
object_list = [obj.split('(')[0] for obj in pred_phrases]
object_num = []
for obj in set(object_list):
object_num.append(f'{object_list.count(obj)} {obj}')
object_num = ', '.join(object_num)
print(f"Correct object number: {object_num}")
if openai_key:
prompt = [
{
'role': 'system',
'content': 'Revise the number in the caption if it is wrong. ' + \
f'Caption: {caption}. ' + \
f'True object number: {object_num}. ' + \
'Only give the revised caption: '
}
]
response = openai.ChatCompletion.create(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
reply = response['choices'][0]['message']['content']
# sometimes return with "Caption: xxx, xxx, xxx"
caption = reply.split(':')[-1].strip()
return caption
def load_model(model_config_path, model_checkpoint_path, device):
args = SLConfig.fromfile(model_config_path)
args.device = device
model = build_model(args)
checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
print(load_res)
_ = model.eval()
return model
def get_grounding_output(model, image, caption, box_threshold, text_threshold,device="cpu"):
caption = caption.lower()
caption = caption.strip()
if not caption.endswith("."):
caption = caption + "."
model = model.to(device)
image = image.to(device)
with torch.no_grad():
outputs = model(image[None], captions=[caption])
logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
logits.shape[0]
# filter output
logits_filt = logits.clone()
boxes_filt = boxes.clone()
filt_mask = logits_filt.max(dim=1)[0] > box_threshold
logits_filt = logits_filt[filt_mask] # num_filt, 256
boxes_filt = boxes_filt[filt_mask] # num_filt, 4
logits_filt.shape[0]
# get phrase
tokenlizer = model.tokenizer
tokenized = tokenlizer(caption)
# build pred
pred_phrases = []
scores = []
for logit, box in zip(logits_filt, boxes_filt):
pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
scores.append(logit.max().item())
return boxes_filt, torch.Tensor(scores), pred_phrases
def show_mask(mask, ax, random_color=False):
if random_color:
color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
else:
color = np.array([30/255, 144/255, 255/255, 0.6])
h, w = mask.shape[-2:]
mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
ax.imshow(mask_image)
def show_box(box, ax, label):
x0, y0 = box[0], box[1]
w, h = box[2] - box[0], box[3] - box[1]
ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
ax.text(x0, y0, label)
def save_mask_data(output_dir, caption, mask_list, box_list, label_list):
value = 0 # 0 for background
mask_img = torch.zeros(mask_list.shape[-2:])
for idx, mask in enumerate(mask_list):
mask_img[mask.cpu().numpy()[0] == True] = value + idx + 1
plt.figure(figsize=(10, 10))
plt.imshow(mask_img.numpy())
plt.axis('off')
plt.savefig(os.path.join(output_dir, 'mask.jpg'), bbox_inches="tight", dpi=300, pad_inches=0.0)
json_data = {
'caption': caption,
'mask':[{
'value': value,
'label': 'background'
}]
}
for label, box in zip(label_list, box_list):
value += 1
name, logit = label.split('(')
logit = logit[:-1] # the last is ')'
json_data['mask'].append({
'value': value,
'label': name,
'logit': float(logit),
'box': box.numpy().tolist(),
})
with open(os.path.join(output_dir, 'label.json'), 'w') as f:
json.dump(json_data, f)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Grounded-Segment-Anything Demo", add_help=True)
parser.add_argument("--config", type=str, required=True, help="path to config file")
parser.add_argument(
"--tag2text_checkpoint", type=str, required=True, help="path to checkpoint file"
)
parser.add_argument(
"--grounded_checkpoint", type=str, required=True, help="path to checkpoint file"
)
parser.add_argument(
"--sam_checkpoint", type=str, required=True, help="path to checkpoint file"
)
parser.add_argument("--input_image", type=str, required=True, help="path to image file")
parser.add_argument("--split", default=",", type=str, help="split for text prompt")
parser.add_argument("--openai_key", type=str, help="key for chatgpt")
parser.add_argument("--openai_proxy", default=None, type=str, help="proxy for chatgpt")
parser.add_argument(
"--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
)
parser.add_argument("--box_threshold", type=float, default=0.25, help="box threshold")
parser.add_argument("--text_threshold", type=float, default=0.2, help="text threshold")
parser.add_argument("--iou_threshold", type=float, default=0.5, help="iou threshold")
parser.add_argument("--device", type=str, default="cpu", help="running on cpu only!, default=False")
args = parser.parse_args()
# cfg
config_file = args.config # change the path of the model config file
tag2text_checkpoint = args.tag2text_checkpoint # change the path of the model
grounded_checkpoint = args.grounded_checkpoint # change the path of the model
sam_checkpoint = args.sam_checkpoint
image_path = args.input_image
split = args.split
openai_key = args.openai_key
openai_proxy = args.openai_proxy
output_dir = args.output_dir
box_threshold = args.box_threshold
text_threshold = args.text_threshold
iou_threshold = args.iou_threshold
device = args.device
# ChatGPT or nltk is required when using captions
# openai.api_key = openai_key
# if openai_proxy:
# openai.proxy = {"http": openai_proxy, "https": openai_proxy}
# make dir
os.makedirs(output_dir, exist_ok=True)
# load image
image_pil, image = load_image(image_path)
# load model
model = load_model(config_file, grounded_checkpoint, device=device)
# visualize raw image
image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
# initialize Tag2Text
normalize = TS.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform = TS.Compose([
TS.Resize((384, 384)),
TS.ToTensor(), normalize
])
# filter out attributes and action categories which are difficult to grounding
delete_tag_index = []
for i in range(3012, 3429):
delete_tag_index.append(i)
specified_tags='None'
# load model
tag2text_model = tag2text.tag2text_caption(pretrained=tag2text_checkpoint,
image_size=384,
vit='swin_b',
delete_tag_index=delete_tag_index)
# threshold for tagging
# we reduce the threshold to obtain more tags
tag2text_model.threshold = 0.64
tag2text_model.eval()
tag2text_model = tag2text_model.to(device)
raw_image = image_pil.resize(
(384, 384))
raw_image = transform(raw_image).unsqueeze(0).to(device)
res = inference.inference(raw_image , tag2text_model, specified_tags)
# Currently ", " is better for detecting single tags
# while ". " is a little worse in some case
text_prompt=res[0].replace(' |', ',')
caption=res[2]
print(f"Caption: {caption}")
print(f"Tags: {text_prompt}")
# run grounding dino model
boxes_filt, scores, pred_phrases = get_grounding_output(
model, image, text_prompt, box_threshold, text_threshold, device=device
)
# initialize SAM
predictor = SamPredictor(build_sam(checkpoint=sam_checkpoint).to(device))
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
predictor.set_image(image)
size = image_pil.size
H, W = size[1], size[0]
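# Grounding DINO returns boxes as normalized (cx, cy, w, h); rescale them to pixel
# coordinates and convert to corner format (x0, y0, x1, y1) for NMS and SAM.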
for i in range(boxes_filt.size(0)):
boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
boxes_filt[i][2:] += boxes_filt[i][:2]
boxes_filt = boxes_filt.cpu()
# use NMS to handle overlapped boxes
print(f"Before NMS: {boxes_filt.shape[0]} boxes")
nms_idx = torchvision.ops.nms(boxes_filt, scores, iou_threshold).numpy().tolist()
boxes_filt = boxes_filt[nms_idx]
pred_phrases = [pred_phrases[idx] for idx in nms_idx]
print(f"After NMS: {boxes_filt.shape[0]} boxes")
caption = check_caption(caption, pred_phrases)
print(f"Revise caption with number: {caption}")
transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
masks, _, _ = predictor.predict_torch(
point_coords = None,
point_labels = None,
boxes = transformed_boxes.to(device),
multimask_output = False,
)
# draw output image
plt.figure(figsize=(10, 10))
plt.imshow(image)
for mask in masks:
show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
for box, label in zip(boxes_filt, pred_phrases):
show_box(box.numpy(), plt.gca(), label)
plt.title('Tag2Text-Captioning: ' + caption + '\n' + 'Tag2Text-Tagging: ' + text_prompt + '\n')
plt.axis('off')
plt.savefig(
os.path.join(output_dir, "automatic_label_output.jpg"),
bbox_inches="tight", dpi=300, pad_inches=0.0
)
save_mask_data(output_dir, caption, masks, boxes_filt, pred_phrases)
| [
"Revise the number in the caption if it is wrong. Caption: PLACEHOLDER. True object number: PLACEHOLDER. Only give the revised caption: ",
" |",
"Extract the unique nouns in the caption. Remove all the adjectives. List the nouns in singular form. Split them by \"PLACEHOLDER \". Caption: PLACEHOLDER."
] |
2024-01-10 | hitchon1/Twitter_Sentiment | Twitter_Musk.py | import tweepy
from datetime import datetime
from datetime import timedelta
import openai
import pandas as pd
import matplotlib.pyplot as plt
# Replace with your own API key and API secret key
api_key = "your key"
api_secret_key = "your secret key"
openai.api_key = "your openAi key"
# Authenticate to Twitter
auth = tweepy.OAuth1UserHandler(api_key, api_secret_key)
# Create API object
api = tweepy.API(auth)
# Create a function to handle pagination and rate limits
def get_tweets(keyword, until, count):
tweets = []
for tweet in tweepy.Cursor(api.search_tweets, q=keyword,until=until).items(count):
tweets.append(tweet)
return tweets
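# tweepy.Cursor pages through the search results; `until` restricts matches to tweets
# created before that date. The standard search endpoint only covers roughly the last
# 7 days of tweets, which is why the loop below goes back at most a week.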
# Get tweets that contain the keyword "Elon Musk" in the last 7 days
keyword = 'Elon Musk'
count = 1000
tweets = []
i=0
while i < 7:
until = (datetime.now() - timedelta(days=i)).strftime("%Y-%m-%d")
tweets_store = get_tweets(keyword,until, count)
tweets.append(tweets_store)
i=i+1
chatbot = openai.Completion()
df = pd.DataFrame(columns=['Date', 'Sentiment'])
for sub_list in tweets:
for tweet in sub_list:
print(tweet.text)
print(tweet.created_at.strftime('%Y-%m-%d'))
response = chatbot.create(engine="text-babbage-001", prompt="return the most likely sentiment opinion of elon musk of the person who posted this tweet as only the word \"positive\", \"negative\", or \"neutral\" : " + tweet.text, max_tokens=20, temperature=0)
print(response.choices[0].text.strip())
df = df.append({'Date': tweet.created_at.strftime('%Y-%m-%d'), 'Sentiment':response.choices[0].text.strip()}
,ignore_index=True)
df = df.loc[df['Sentiment'].isin(['positive','negative','neutral'])]
df['Sentiment'] = df['Sentiment'].replace({'positive':1,'negative':-1,'neutral':0})
df = df.groupby('Date').sum()
df.plot(kind='bar')
plt.show()
| [] |
2024-01-10 | CarperAI/squeakily | squeakily~helpers.py | # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/03_helpers.ipynb.
# %% auto 0
__all__ = ['english_flagged_words', 'flagged_words', 'stopword_ratios', 'stopwords', 'KENLM_MODEL_REPO', 'get_words',
'FastTextLanguageDetector', 'SentencePiece', 'KenlmModel', 'LLMLabelerParser', 'LLMLabeler']
# %% ../nbs/03_helpers.ipynb 2
import os
import re
import squeakily
import unicodedata
import urllib.request
from huggingface_hub import cached_download, hf_hub_url
from pydantic import BaseModel, Field
from requests.exceptions import HTTPError
from typing import Dict, List
# %% ../nbs/03_helpers.ipynb 6
def get_words(
text: str, # the text to extract words from
) -> list:
"""custom regex to extract all the words in a string"""
return re.findall(r"\w+", text.lower())
# %% ../nbs/03_helpers.ipynb 7
# Built from native speakers, with inspiration from
# https://github.com/zacanger/profane-words
# and
# https://github.com/thisandagain/washyourmouthoutwithsoap/blob/develop/data/build.json
# and
# https://github.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words
english_flagged_words = [
"anal",
# "bareback", # not sure about this one
"bbw",
"bdsm",
"blowjob",
"blowjobs",
"brazzers",
"bukkake",
"camgirl",
"camwhore",
"cocksucking",
# "cougar", # not sure about this one
"creampie",
"cuckold",
"cum",
"cumming",
"cums",
"cumshot",
"cumshots",
"cumslut",
"cunnilingus",
"deepthroat",
"deepthroating",
"dildo",
"dildos",
"dogging",
"doggystyle",
# "dominatrix", # not sure about this one
"erotic",
"fellatio",
"femdom",
"fingering",
"fisting",
"footjob",
"gangbang",
"handjob",
"hentai",
"horney",
"horniest",
"horny",
"jism",
"jizz",
"lolli",
"lolling",
"masterbating",
"masturbate",
"masturbating",
"masturbation",
"milf",
"orgies",
"orgy",
"pegging",
"porn",
"pornhub",
"porno",
"pornos",
"pornstar",
"pornstars",
"redtube",
"rimming",
"slutty",
# "squirting", # not sure about this one
"strapon",
"threesome",
"vibrator",
"xhamster",
"xnxx",
"xvideos",
"xxx",
"youporn",
]
flagged_words = {
"ar": english_flagged_words
+ [
"إباحي",
"احتلام",
"است",
"استمناء",
"اغتصاب",
"أورغازم",
"إيروتيك",
"أير",
"بز",
"بزاز",
"بظر",
"بورن",
"بيضان",
"مص",
"ثدي",
"جماع",
"جنس",
"حلمة",
"خلاعة",
"خنثي",
"خول",
"دعارة",
"زب",
"سحاق",
"سحاقية",
"سكس",
"سيكس",
"شاذ",
"شبق",
"شرج",
"شرموطة",
"شهواني",
"شهوة",
"طيز",
"عادة السرية",
"عاهرة",
"عرص",
"فاسقة",
"فرج",
"قحبة",
"قضيب",
"كس",
"لحس",
"لعق",
"لواط",
"لوطي",
"مؤخرة",
"متناك",
"متناكة",
"مومس",
"مثير",
"مص",
"مضاجعة",
"مفلقسة",
"مني",
"مهتاج",
"نشوة",
"نكاح",
"نيك",
],
"bn": english_flagged_words
+ [
"আঙ্গুলি করা",
"আচোদা",
"খানকি",
"খানকি মাগি",
"গান্ড মারানো",
"গুদ মারানি",
"চুচুক",
"চোদ",
"চোদনা",
"চোদা",
"চোদা বোন",
"চোদাচুদি",
"জারজ",
"নাঙ্গ",
"নেংটা",
"পর্ণহাব",
"পর্ন",
"পর্নস্টার",
"পর্নোগ্রাফি",
"পোঁদ",
"পোঁদ মারানি",
"পোদ মারানি",
"বাঁড়া",
"বানচোদ",
"বেশ্যা",
"বেশ্যার ছেলে",
"বোকাচোদা",
"ভগ",
"মা চোদা",
"মাগী",
"মাদারচোদ",
"মুখে নিবি",
"মোরগ",
"রেন্ডি",
"শিশ্ন",
"স্তন",
"স্তনবৃন্ত",
"হস্তমৈথুন",
],
"ca": english_flagged_words
+ [
"cagarro",
"cagarros",
"cipote",
"cipotes",
"collons",
"colló",
"consolador",
"consoladors",
"cony",
"conys",
"corre's",
"corre't",
"corregut",
"cunillingus",
"córrer-se",
"escorreguda",
"escorregudes",
"escorregut",
"escrot",
"escrots",
"escórre's",
"escórre't",
"escórrer-se",
"mamada",
"mamadera",
"mamaderes",
"mamades",
"masturba",
"masturbacions",
"masturbació",
"masturbant",
"masturbar",
"masturbar-se",
"masturbat",
"masturbats",
"masturbes",
"orgasme",
"orgasmes",
"ou",
"ous",
"palla",
"palles",
"pornografia",
"semen",
"semens",
"verga",
"vergues",
"xxx",
],
"en": english_flagged_words,
"es": english_flagged_words
+ [
"chupar el coño",
"chupar la concha",
"chupar la polla",
"chupar la verga",
"comer el coño",
"comer la concha",
"comer la polla",
"comer la verga",
"coprofagía",
"correrse",
"cunillingus",
"fagging",
"felación",
"felching",
"follada",
"follador de culo",
"folladores",
"fudge packer",
"hacer una paja",
"hacerse una paja",
"hore",
"kock",
"macizorra",
"madre folladora",
"mamada",
"perro follador",
"pisser",
"pornografía",
"sado",
"sadomasoquismo",
"sadomasoquista",
"sexo anal",
"skank",
"smegma",
"x clasificado",
],
"eu": english_flagged_words + [],
"fr": english_flagged_words
+ [
"baiseurs",
"baiseur",
"baiseuse",
"baiseuses",
"branlette",
"branlettes",
"branleuse",
"branleuses",
"cunillingus",
"cunilingus",
"enculée",
"enculées",
"enculation",
"enculations",
"enculement",
"enculements",
"fellation",
"fellations",
"porno",
"pornos",
"pornographie",
"pornographique",
"pornographiques",
"salope",
"salopes",
"suceuse",
"suceuses",
"xxx",
],
"hi": english_flagged_words
+ [
"अंडकोश की थैली",
"एक्स रेटेड",
"ओगाज़्म",
"कामोद्दीपक चित्र",
"कालीन का चूरा",
"कून",
"कॉक",
"गेंद का थैला",
"चाकलेट का रंग",
"चूची",
"चूतड़",
"झटका बंद",
"ठगना पैकर",
"डिल्डो",
"नितंब",
"पिछाड़ी",
"पीड़न कामुक",
"पॉर्न",
"फटना",
"फूहड़",
"बट",
"बहुत मदहोश",
"बेल अंत",
"भगवान-शापित",
"भगशेफ",
"माँ कमीने",
"मुखमैथुन",
"मुर्गा चूसने वाला",
"रक्तरंजित",
"लेबिया",
"वहशी",
"वहशीता",
"वैंग",
"शिश्नमल",
"संभोग सुख",
"सह शॉट",
"सींग का बना हुआ",
"होर",
"घपा घप",
"चुदाई",
"चुदक्कड़",
],
"id": english_flagged_words
+ [
"bokep",
"coli",
"colmek",
"grepe",
"horni",
"janda",
"jembut",
"jilat memek",
"jilmek",
"kontol",
"masturbasi",
"memek",
"ngentot",
"ngewe",
"peju",
"pepek",
"pornografi",
"sange",
"sepong",
"tusbol",
],
"pt": english_flagged_words
+ [
"balalao",
"bate uma",
"beijo grego",
"boceta",
"boquete",
"buceta",
"caralho",
"chochota",
"coito",
"cona",
"consolo",
"corno",
"cu",
"dar a bunda",
"dar o rabo",
"dildo",
"dildos",
"esporrar",
"estrovenga",
"felação",
"filho da puta",
"filhos da puta",
"gozada",
"jeba",
"perereca",
"pica",
"piru",
"porno",
"pornografia",
"pornô",
"porra",
"prostituta",
"pube",
"punheta",
"punheteiro",
"putaria",
"queca",
"sexo",
"siririca",
"tesão",
"trepada",
"verga",
"vibrador",
"xana",
"xochota",
"xoxota",
],
"ur": english_flagged_words
+ [
"انگلی کرنا",
"ایکس ریٹیڈ",
"بلو جاب",
"بٹ",
"جھٹکا بند",
"دلڈو",
"رنڈی",
"سلٹ",
"سکلیرا",
"سیڈسٹ",
"سیکس بم",
"شہوانی",
"شہوت انگیز",
"فحش نگاری",
"لن",
"لنڈ",
"لنڈ چوسنے والا",
"لوڑہ",
"ماں کمینے",
"مشت زنی",
"ممے",
"مٹھ",
"مٹھی",
"ویشیا",
"پورن",
"پھدی",
"پیگنگ",
"چدائ",
"چدک",
"چوت",
"چودنا",
"چوچی",
"کسبی",
"کسنگ",
"گانڈ",
"گدا",
"ہینڈ جاب",
],
"vi": english_flagged_words
+ [
"cặc",
"lồn",
"địt",
"buồi",
"đụ",
"nứng",
"phò",
"bú lol",
"bú lon",
"bú cac",
"bú cak",
"bú cax",
"bú buoi",
"bú mút",
"húp lol",
"húp lon",
"húp sò",
"nung lol",
"nung lon",
"nug lon",
"nung cac",
"nung buoi",
"thèm dit",
"thèm lon",
"thèm lol",
"thèm buoi",
"thèm dit",
"thèm chich",
"thèm chịch",
"thèm cax",
"thèm cak",
"móc lol",
"móc lon",
"buscu",
"lol non",
"bướm non",
"vú to",
"vú bự",
"lon to",
"phim sex",
"phim xex",
"phim xxx",
],
"zh": english_flagged_words
+ [
"三级片",
"乳交",
"卖B",
"卖比",
"卖淫",
"口交",
"姣西",
"打炮",
"打飞机",
"抽插",
"招妓",
"插你",
"插死你",
"淫水",
"肉壁",
"肉棍子",
"肉缝",
"肏",
"肛交",
],
}
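

# A minimal sketch (added for illustration; the helper name and the example
# cutoff are assumptions, not values defined by this module) of how
# `flagged_words` and `get_words` can be combined into a document-level filter:
# compute the fraction of tokens that appear in the per-language flagged list
# and drop documents whose ratio exceeds some threshold.
def flagged_word_ratio(text: str, lang: str = "en") -> float:
    """Fraction of word tokens in `text` found in the flagged list for `lang`."""
    words = get_words(text)
    if not words:
        return 0.0
    flagged = set(flagged_words.get(lang, english_flagged_words))
    return sum(w in flagged for w in words) / len(words)


# e.g. keep a document only if flagged_word_ratio(doc, lang) < 0.1  (placeholder cutoff)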
# %% ../nbs/03_helpers.ipynb 8
# Per-language stopword-ratio thresholds adapted from BigScience:
# https://github.com/bigscience-workshop/data-preparation/blob/main/preprocessing/training/01a_catalogue_cleaning_and_filtering/clean_helpers/stopwords.py
stopword_ratios = {
"ar": 0.07,
"bn": 0.002,
"ca": 0.25,
"en": 0.3,
"es": 0.4,
"eu": 0.05,
"fr": 0.27,
"hi": 0.01,
"id": 0.15,
"pt": 0.2,
"ur": 0.01,
"vi": 0.08,
"zh": 0.1691,
}
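

# A minimal sketch (added for illustration; the helper name and its use are
# assumptions, not part of the original module) of how `stopword_ratios` is
# typically applied: a document whose share of stopword tokens falls below the
# per-language threshold is likely boilerplate or non-linguistic content. Note
# that simple `\w+` tokenization is a simplification and would not segment CJK
# text properly.
def has_enough_stopwords(text: str, lang: str, lang_stopwords: List[str]) -> bool:
    """True if the stopword ratio of `text` meets the threshold for `lang`."""
    words = get_words(text)
    if not words:
        return False
    stopword_set = set(lang_stopwords)
    ratio = sum(w in stopword_set for w in words) / len(words)
    return ratio >= stopword_ratios.get(lang, 0.0)


# e.g. has_enough_stopwords(doc, "en", stopwords["en"]), once `stopwords` below is defined.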
stopwords = {
"ar": [
"آنذاك",
"أبداً",
"أثناء",
"أسفل",
"أعلى",
"أغلب",
"أكثر",
"ألا",
"ألم",
"أم",
"أمام",
"أمس",
"أن",
"أنا",
"أنت",
"أنتم",
"أنتما",
"أنتن",
"أو",
"أولئك",
"أي",
"أيان",
"أياً",
"أية",
"أيضاً",
"أين",
"أينما",
"إبان",
"إثر",
"إثر ذلك",
"إذا",
"إزاء",
"إلا",
"إلا أن",
"إلى",
"إما",
"إن",
"إنما",
"إياك",
"إياكم",
"إياكما",
"إياكن",
"إيانا",
"إياه",
"إياها",
"إياهم",
"إياهما",
"إياهن",
"إياي",
"الآن",
"البتة",
"التي",
"الذي",
"الذين",
"اللائي",
"اللات",
"اللاتي",
"اللتان",
"اللتين",
"اللذان",
"اللذين",
"اللهم",
"اللوات",
"اللواتي",
"الليلة",
"اليوم",
"اي",
"بألا",
"بأن",
"بئس",
"بئست",
"باتجاه",
"بالأخص",
"بالأمس",
"بالتالي",
"بالذات",
"بالرغم من",
"بالضبط",
"بالطبع",
"بالفعل",
"بالقرب",
"بالكامل",
"بالنسبة ل",
"بتاتاً",
"بجانب",
"بحسب",
"بحوالي",
"بحيث",
"بذلك",
"برغم",
"برمته",
"بشتى",
"بصرف النظر عن",
"بضع",
"بضعة",
"بعد",
"بعدما",
"بعض",
"بغض الطرف عن",
"بغض النظر عن",
"بغية",
"بـ",
"بقرب",
"بل",
"بلا",
"بلى",
"بم",
"بما",
"بما أن",
"بمفرده",
"بمقتضى",
"بمنأى عن",
"بموجب",
"بين",
"بينما",
"تاماً",
"تباعاً",
"تبعاً",
"تجاه",
"تحت",
"تحديداً",
"تحسباً",
"تقريباً",
"تلك",
"تلو",
"تماماً",
"تمشياً",
"ثم",
"ثمة",
"جانب",
"جاهداً",
"جداً",
"جدياً",
"جراء",
"جل",
"جميع",
"جميعاً",
"جنوب",
"جنوبي",
"حتماً",
"حتمياً",
"حتى",
"حسب",
"حسبما",
"حوالي",
"حول",
"حيال",
"حيث",
"حيث أن",
"حيثما",
"حين",
"حينئذ",
"حيناً",
"حينذاك",
"حينما",
"خارج",
"ختاماً",
"خلال",
"خلف",
"دائماً",
"داخل",
"دوماً",
"دون",
"دونما",
"ذاك",
"ذلك",
"رغم",
"رغم أن",
"ريثما",
"زهاء",
"ساعة",
"سنة",
"سوف",
"سوى",
"سوياً",
"شتى",
"شرق",
"شريطة",
"شكراً",
"شمال",
"صبيحة",
"صوب",
"ضد",
"طالما",
"طبقاً",
"طواعية",
"طوعاً",
"طيلة",
"عادة",
"عام",
"عامة",
"عبر",
"عدا",
"عدة",
"عسى",
"عشية",
"عقب",
"علاوة على",
"علاوة على ذلك",
"على",
"على الرغم من",
"على حد قول",
"على غرار",
"على هذا",
"عما",
"عمن",
"عموماً",
"عن",
"عند",
"عندئذ",
"عندما",
"عنوة",
"عوضا عن",
"غالب",
"غالباً",
"غداة",
"غداً",
"غرب",
"غير",
"غير أن",
"ـك",
"ـكم",
"ـكما",
"ـكن",
"ـنا",
"ـه",
"ـها",
"ـهم",
"ـهما",
"ـهن",
"ـي",
"فجأة",
"فجر",
"فحسب",
"فصاعداً",
"فضلاً",
"فـ",
"فور",
"فوراً",
"فوق",
"في",
"في تلك الأثناء",
"في غضون ذلك",
"في هذه الأثناء",
"فيما",
"فيما يلي",
"قبالة",
"قبل",
"قبيل",
"قد",
"قدماً",
"قرابة",
"قرب",
"قسراً",
"قطعياً",
"قليلاً",
"كأن",
"كالمعتاد",
"كثيراً",
"كذا",
"كذلك",
"كـ",
"كل",
"كلا",
"كلتا",
"كلما",
"كم",
"كما",
"كما أن",
"كي",
"كيف",
"لأن",
"لئلا",
"لا",
"لا بأس أن",
"لا بد",
"لا سيما",
"لا لبس أن",
"لا مانع",
"لابد",
"لاحقاً",
"لاسيما",
"لحظة",
"لحوالي",
"لدى",
"لذا",
"لذلك",
"لعل",
"لـ",
"لقد",
"لكن",
"لكي",
"للتو",
"لم",
"لما",
"لماذا",
"لن",
"لو",
"لولا",
"ليت",
"ليلة",
"مؤخراً",
"مؤقتاً",
"ما",
"ماذا",
"مباشرة",
"متى",
"مثل",
"مثلاً",
"مثلما",
"مجاناً",
"مجدداً",
"مجرد",
"محض",
"مراراً",
"مساء",
"مطلقاً",
"مع",
"مع أن",
"مع ذلك",
"معاً",
"معظم",
"مما",
"مما زاد الطين بلة",
"مما يزيد الطين بلة",
"ممن",
"من",
"من الجدير بالذكر أن",
"من المؤسف",
"من المؤكد",
"من المؤمل",
"من المرجح",
"من المفترض",
"من الممكن",
"من ثم",
"من جهة أخرى",
"من غير المرجح",
"من غير الممكن",
"من ناحية أخرى",
"منذ",
"مهما",
"نادراً",
"ناهيك عن",
"نحن",
"نحو",
"نسبياً",
"نعم",
"نعمت",
"نفس",
"نهار",
"نهاراً",
"هؤلاء",
"هاتان",
"هاتين",
"هدراً",
"هذا",
"هذان",
"هذه",
"هذين",
"هكذا",
"هكذا دواليك",
"هل",
"هم",
"هما",
"هن",
"هنا",
"هناك",
"هنالك",
"هو",
"هي",
"و",
"وراء",
"وسط",
"وفق",
"وفقاً",
"وقت",
"وقتما",
"يا",
"يذكر أن",
"يوم",
"يوماً",
"يومياً",
],
"bn": [
"অনেক",
"অনেক ",
"অন্য ",
"অন্যথায়",
"আমরা ",
"আমার ",
"আমি",
"আর জন্য ",
"আর, ও, এবং ",
"আরও সাথে , আরো সঙ্গে ",
"উদাহরণ স্বরূপ",
"উপর",
"এ ",
"এ, এটা, এইটা ",
"এখানে , এইখানে ",
"ও ,ওটা ,ওইটা",
"ওখানে, সেখানে ",
"ওদের মধ্যে ",
"কখন ",
"কখনও কখনও",
"কম, অল্প ",
"কারণ ",
"কি",
"কিছু ",
"কিন্তু ",
"কে ",
"কেউ",
"কেমন ",
"কোথায়",
"কোনটা ",
"ডান",
"তাই, সুতরাং",
"তার, তাদের, ওর, ওদের ",
"তারপর",
"তারা ",
"তুমি, আপনি ",
"তোমরা , আপনারা ",
"তোমার, তোর ",
"দিকে",
"না ",
"নিচে",
"পরিবর্তে , বরং ",
"পর্যন্ত",
"বাইরে",
"বাম",
"ভিতর",
"ভিতরে",
"মত",
"যতক্ষণ না",
"যথেষ্ট",
"যদি ",
"যাহার",
"যাহোক",
"সব, সবাই ",
"সবাই",
"সর্বাধিক",
"সামান্য",
"সে রকমই",
"সে, ও",
],
"ca": [
"-ho",
"-la",
"-lo",
"-ne",
"-se",
"a",
"abans",
"això",
"al",
"algun",
"alguna",
"algunes",
"alguns",
"algú",
"allò",
"als",
"altra",
"altre",
"altres",
"amb",
"aqueix",
"aqueixa",
"aqueixes",
"aqueixos",
"aquell",
"aquella",
"aquelles",
"aquells",
"aquest",
"aquesta",
"aquestes",
"aquestos",
"aquests",
"bastant",
"bastants",
"bé",
"cada",
"cadascun",
"cadascuna",
"cadascú",
"cap",
"cert",
"certa",
"certes",
"certs",
"com",
"con",
"contra",
"d",
"d'",
"da",
"damunt",
"darrere",
"davant",
"de",
"del",
"dels",
"des",
"dient",
"diferent",
"diferents",
"dins",
"dintre",
"dir",
"divers",
"diverses",
"diversos",
"durant",
"eixa",
"eixe",
"eixes",
"eixos",
"el",
"ell",
"ella",
"elles",
"ells",
"els",
"em",
"emperò",
"en",
"endavant",
"enfront",
"ens",
"entre",
"envers",
"era",
"eren",
"es",
"estan",
"estant",
"estar",
"estaran",
"estarem",
"estaria",
"estarien",
"estarà",
"estat",
"estava",
"estaven",
"este",
"estem",
"estes",
"esteu",
"estic",
"estiguem",
"estiguessin",
"estigui",
"estiguin",
"estigués",
"estos",
"està",
"et",
"ets",
"excepte",
"extra",
"fa",
"faci",
"facin",
"facis",
"faig",
"fan",
"faran",
"farem",
"fareu",
"faria",
"farien",
"faries",
"faràs",
"faràs",
"faré",
"faríem",
"faríeu",
"fas",
"feia",
"feien",
"feies",
"fem",
"fent",
"fer",
"fes",
"fessin",
"fessis",
"fet",
"feu",
"fins",
"foren",
"fos",
"fossin",
"fou",
"front",
"fèiem",
"fèieu",
"féssiu",
"gaire",
"gaires",
"gràcies",
"ha",
"hagi",
"hagin",
"haguem",
"haguessin",
"haguessis",
"hagut",
"hagués",
"haguéssim",
"haguéssin",
"haguéssiu",
"han",
"has",
"hauran",
"haurem",
"haureu",
"hauria",
"haurien",
"hauries",
"haurà",
"hauràs",
"hauré",
"hauríem",
"hauríeu",
"havent",
"haver",
"havia",
"havien",
"havies",
"havíem",
"havíeu",
"he",
"hem",
"heu",
"hi",
"ho",
"hom",
"hàgim",
"i",
"in",
"jo",
"l",
"l",
"l'",
"la",
"las",
"les",
"li",
"llur",
"llurs",
"lo",
"los",
"ls",
"m",
"m",
"m'",
"malgrat",
"mancant",
"massa",
"mateix",
"mateixa",
"mateixes",
"mateixos",
"me",
"mentre",
"menys",
"mes",
"meu",
"meus",
"meva",
"meves",
"mi",
"mitjançant",
"molt",
"molta",
"moltes",
"molts",
"moltíssim",
"moltíssima",
"moltíssimes",
"moltíssims",
"n",
"n'",
"ne",
"ni",
"ningun",
"ninguna",
"ningunes",
"ninguns",
"ningú",
"no",
"nombroses",
"nombrós",
"nos",
"nosaltres",
"nostra",
"nostre",
"nostres",
"ns",
"o",
"on",
"os",
"pel",
"pels",
"per",
"perqu",
"perquè",
"però",
"poc",
"poca",
"pocs",
"poques",
"prou",
"qual",
"quals",
"qualsevol",
"quan",
"quant",
"quantes",
"quants",
"que",
"quelcom",
"qui",
"quin",
"quina",
"quines",
"quins",
"què",
"rere",
"respecte",
"s",
"s",
"s'",
"sa",
"sabent",
"salvant",
"se",
"segons",
"sens",
"sense",
"sent",
"ser",
"seran",
"serem",
"seria",
"serien",
"serà",
"seré",
"seríem",
"ses",
"seu",
"seus",
"seva",
"seves",
"si",
"siguem",
"sigui",
"siguin",
"sigut",
"sinó",
"sobre",
"som",
"sota",
"su",
"suficient",
"séssim",
"sóc",
"són",
"t",
"t'",
"tal",
"tals",
"tant",
"tanta",
"tantes",
"tants",
"te",
"tenc",
"tendran",
"tendrem",
"tendreu",
"tendria",
"tendrien",
"tendries",
"tendràs",
"tendràs",
"tendré",
"tendríem",
"tendríeu",
"tenen",
"tenia",
"tenien",
"tenies teníem",
"tenim",
"tenir",
"teniu",
"tens",
"teníeu",
"teu",
"teus",
"teva",
"ti",
"tinc",
"tindran",
"tindre",
"tindrem",
"tindreu",
"tindria",
"tindrien",
"tindries",
"tindràs",
"tindràs",
"tindré",
"tindríem",
"tindríeu",
"tingut",
"tot",
"tota",
"total",
"totes",
"tothom",
"tots",
"tu",
"té",
"u",
"ultra",
"un",
"una",
"unes",
"uns",
"us",
"va",
"vagi",
"vagin",
"vaig",
"vam",
"van",
"varen",
"vau",
"vers",
"versus",
"via",
"vora",
"vos",
"vosaltres",
"vostre",
"vostè",
"vostès",
"vàrem",
"y",
"érem",
"és",
],
"en": [
"a",
"a.k.a",
"aboard",
"about",
"above",
"abt",
"accord",
"according",
"across",
"after",
"against",
"ago",
"aground",
"ahead",
"aka",
"ala",
"albeit",
"all",
"along",
"alongside",
"although",
"am",
"amid",
"amidst",
"among",
"amongst",
"amoung",
"an",
"and",
"and/or",
"another",
"any",
"any1",
"anybody",
"anyone",
"anything",
"are",
"around",
"as",
"aside",
"astride",
"at",
"atop",
"away",
"b",
"b/c",
"b/t",
"back",
"base",
"based",
"bc",
"be",
"because",
"been",
"before",
"behind",
"being",
"below",
"beneath",
"beside",
"besides",
"between",
"beyond",
"board",
"both",
"btwn",
"but",
"by",
"can",
"cause",
"circa",
"cos",
"could",
"coz",
"cus",
"depend",
"depending",
"despite",
"did",
"do",
"does",
"down",
"due",
"during",
"each",
"either",
"else",
"even",
"ever",
"every",
"everybody",
"everyone",
"everything",
"except",
"for",
"forth",
"from",
"get",
"gets",
"getting",
"give",
"given",
"got",
"had",
"half",
"has",
"hav",
"have",
"having",
"he",
"her",
"hers",
"herself",
"him",
"himself",
"his",
"how",
"however",
"i",
"i'd",
"if",
"in",
"include",
"including",
"inside",
"instead",
"into",
"is",
"it",
"it's",
"its",
"itself",
"lest",
"like",
"made",
"many",
"may",
"me",
"might",
"mine",
"minus",
"most",
"much",
"must",
"my",
"myself",
"nary",
"near",
"nearby",
"neither",
"next",
"nigh",
"no",
"nobody",
"none",
"noone",
"nor",
"not",
"nothing",
"notwithstanding",
"of",
"off",
"on",
"onboard",
"once",
"one",
"ones",
"oneself",
"only",
"onto",
"opposite",
"or",
"other",
"others",
"ought",
"our",
"ours",
"ourselves",
"out",
"outside",
"over",
"overt",
"own",
"past",
"per",
"plus",
"prior",
"quite",
"rather",
"re",
"regard",
"regarding",
"regardless",
"round",
"s/he",
"save",
"self",
"shall",
"she",
"should",
"side",
"since",
"so",
"some",
"somebody",
"someone",
"something",
"such",
"sure",
"teh",
"than",
"thanks",
"that",
"the",
"their",
"theirs",
"them",
"themselves",
"then",
"there",
"these",
"they",
"they're",
"thier",
"this",
"tho",
"those",
"thou",
"though",
"through",
"throughout",
"thru",
"thy",
"til",
"till",
"to",
"together",
"too",
"toward",
"towards",
"u",
"under",
"underneath",
"unless",
"unlike",
"until",
"unto",
"up",
"upon",
"ur",
"us",
"use",
"versus",
"via",
"vs",
"vs.",
"w/",
"w/o",
"w/out",
"was",
"we",
"were",
"what",
"whatever",
"whatnot",
"when",
"whenever",
"where",
"whereas",
"wherever",
"whether",
"which",
"while",
"whilst",
"whither",
"who",
"who's",
"whoever",
"whom",
"whomever",
"whose",
"why",
"will",
"with",
"within",
"without",
"wo",
"worth",
"would",
"wud",
"y'all",
"ya",
"yet",
"yo",
"you",
"you're",
"your",
"youre",
"yours",
"yourself",
"yourselves",
],
"es": [
"a",
"a fin de que",
"a medida que",
"a menos que",
"a modo de",
"a no ser que",
"a poco que",
"a que",
"abandono",
"acerca",
"acostumbra",
"adónde",
"ahora",
"al igual que",
"al lado de",
"algo",
"alguien",
"alguna",
"algunas",
"alguno",
"algunos",
"algún",
"alrededor",
"ambas",
"ambos",
"ante",
"aparece",
"aparecen",
"apareció",
"aparte",
"apenas",
"aquel",
"aquella",
"aquellas",
"aquello",
"aquellos",
"aquesa",
"aquesas",
"aquesos",
"aquesta",
"aquestas",
"aquesto",
"aquestos",
"aquél",
"aquélla",
"aquéllas",
"aquéllos",
"arrepentir",
"arrepentiréis",
"así",
"así como",
"así que",
"atlético",
"aun",
"aunque",
"aún",
"bajo",
"bastante",
"bastantes",
"bien",
"cada",
"casi",
"cerca",
"chance",
"cierta",
"ciertas",
"cierto",
"ciertos",
"comenzado",
"comenzó",
"comienzan",
"como",
"como quiera que",
"como si",
"con",
"con tal de",
"con tal que",
"conforme",
"conmigo",
"conque",
"considera",
"consideradas",
"consideran",
"consideró",
"consigo",
"contendrán",
"contigo",
"continuaba",
"continuar",
"continuaron",
"continuase",
"continuó",
"continúa",
"contra",
"corresponden",
"corresponder",
"cual",
"cual si",
"cuales",
"cualesquier",
"cualesquiera",
"cualquier",
"cualquiera",
"cuan",
"cuando",
"cuanta",
"cuantas",
"cuanto",
"cuanto quiera que",
"cuantos",
"cuya",
"cuyas",
"cuyo",
"cuyos",
"cuàles",
"cuál",
"cuáles",
"cuán",
"cuándo",
"cuánta",
"cuántas",
"cuánto",
"cuántos",
"cómo",
"da",
"dado que",
"dar",
"de",
"de manera que",
"de modo que",
"deba",
"debajo",
"deban",
"debas",
"debe",
"debemos",
"deben",
"deber",
"deberá",
"deberán",
"debería",
"deberíamos",
"deberían",
"debes",
"debido",
"debiera",
"debieron",
"debimos",
"debió",
"debo",
"debía",
"debíamos",
"debían",
"declaraba",
"declarada",
"declarado",
"declarase",
"declaro",
"declaró",
"dejaban",
"dejado",
"dejan",
"dejará",
"del",
"delante",
"demasiada",
"demasiadas",
"demasiado",
"demasiados",
"demás",
"den",
"dentro",
"dentro_de",
"des",
"desde",
"después",
"detrás",
"di",
"dicha",
"dichas",
"dicho",
"dichos",
"diferente",
"diferentes",
"distintas",
"distinto",
"distintos",
"diversas",
"diverso",
"diversos",
"don",
"donde",
"dos",
"durante",
"dónde",
"echar",
"el",
"el que",
"ella",
"ellas",
"ello",
"ellos",
"en",
"en cambio",
"en caso de",
"en la medida en que",
"en tanto que",
"encima",
"enfrente",
"entonces",
"entre",
"era",
"eramos",
"eran",
"eras",
"eres",
"ergo",
"es",
"esa",
"esas",
"escasa",
"escasas",
"escaso",
"escasos",
"escrito",
"ese",
"eso",
"eso que",
"esos",
"esotra",
"esotro",
"esta",
"estaba",
"estabais",
"estabamos",
"estaban",
"estabas",
"estado",
"estamos",
"estan",
"estando",
"estar",
"estaremos",
"estará",
"estarán",
"estaré",
"estaría",
"estaríamos",
"estarían",
"estarías",
"estas",
"este",
"estemos",
"esto",
"estos",
"estotra",
"estotro",
"estoy",
"estuve",
"estuviera",
"estuvieran",
"estuvieron",
"estuviese",
"estuviesen",
"estuvimos",
"estuvo",
"está",
"estábamos",
"estáis",
"están",
"estás",
"esté",
"estén",
"ex",
"excepto",
"frente",
"fue",
"fuera",
"fueran",
"fuere",
"fueron",
"fuese",
"fuesen",
"fui",
"fuimos",
"gracias",
"gracias_a",
"habeis",
"haber",
"haberle",
"haberse",
"habido",
"habiendo",
"habiéndo",
"habremos",
"habrá",
"habrán",
"habrás",
"habré",
"habría",
"habríamos",
"habrían",
"habéis",
"había",
"habíamos",
"habían",
"habías",
"hace",
"hacer",
"hacia",
"hacía",
"halla",
"han",
"has",
"hasta",
"hasta que",
"hay",
"haya",
"hayamos",
"hayan",
"hayas",
"he",
"hecho",
"hemos",
"hola",
"hubiera",
"hubieran",
"hubieron",
"hubiese",
"hubiesen",
"hubiéramos",
"hubo",
"iba",
"iban",
"ido",
"incluso",
"ir",
"irá",
"irán",
"iré",
"iría",
"junto a",
"la",
"las",
"le",
"lejos",
"les",
"lo",
"los",
"luego",
"mal que",
"mas",
"me",
"mediante",
"menos",
"mes",
"mi",
"mientras",
"mientras que",
"mis",
"misma",
"mismas",
"mismo",
"mismos",
"mismísimo",
"morir",
"moriría",
"mostrado",
"mostraron",
"mucha",
"muchas",
"muchisimas",
"muchisimio",
"muchisimo",
"mucho",
"muchos",
"muchísima",
"muchísimas",
"muchísimo",
"muchísimos",
"más",
"más bien",
"mí",
"mía",
"mías",
"mío",
"míos",
"nada",
"nadie",
"negar",
"ni",
"ni que",
"ningun",
"ninguna",
"ningunas",
"ninguno",
"ningunos",
"ningún",
"no",
"no obstante",
"noche",
"nombrado",
"nombró",
"nos",
"nosotros",
"nuestra",
"nuestras",
"nuestro",
"nuestros",
"o",
"os",
"otra",
"otras",
"otro",
"otros",
"pa",
"para",
"para que",
"parezca",
"partir",
"pasar",
"pero",
"po",
"poca",
"pocas",
"poco",
"pocos",
"podamos",
"podeis",
"podemos",
"poder",
"podes",
"podido",
"podras",
"podre",
"podremos",
"podriaís",
"podrá",
"podrán",
"podrás",
"podré",
"podréis",
"podría",
"podríamos",
"podrían",
"podéis",
"podía",
"podíamos",
"podían",
"poner",
"poquito",
"por",
"por el contrario",
"por ende",
"por eso",
"por lo que",
"por mucho que",
"por más que",
"por no hablar de",
"por si",
"porque",
"pos",
"post",
"pre",
"pro",
"propia",
"propias",
"propio",
"propios",
"pude",
"pudiendo",
"pudiera",
"pudieran",
"pudieras",
"pudieron",
"pudiese",
"pudiesen",
"pudimos",
"pudo",
"pueda",
"puedan",
"puedas",
"puede",
"pueden",
"puedes",
"puedo",
"pues",
"puesto",
"puesto que",
"que",
"queda",
"quedaba",
"quedan",
"quedó",
"queremos",
"querer",
"queriendo",
"quien",
"quienes",
"quienesquiera",
"quienquier",
"quienquiera",
"quiera",
"quiere",
"quisiera",
"quién",
"quiénes",
"qué",
"re",
"resulta",
"resultado",
"resultaría",
"resulte",
"sabe",
"saber",
"sabiendo",
"salen",
"salir",
"salió",
"salvo",
"se",
"sea",
"seamos",
"sean",
"seas",
"seguir",
"seguirá",
"seguía",
"según",
"semejante",
"semejantes",
"semi",
"sendas",
"sendo",
"sendos",
"ser",
"será",
"serán",
"serás",
"seré",
"seréis",
"sería",
"serían",
"serías",
"si",
"si bien",
"si y solo si",
"sido",
"siempre que",
"siendo",
"siente",
"siento",
"siga",
"sigamos",
"sigue",
"sin",
"sino",
"siquiera",
"sobre",
"sobrer",
"sobrir",
"soler",
"solían",
"somos",
"son",
"soy",
"sub",
"suele",
"suelen",
"suelo",
"super",
"supo",
"sur",
"sus",
"suya",
"suyas",
"suyo",
"suyos",
"sé",
"sí",
"tal",
"tales",
"tanta",
"tantas",
"tanto",
"tantos",
"tantísima",
"tantísimas",
"tantísimos",
"te",
"tendremos",
"tendrian",
"tendrá",
"tendrán",
"tendría",
"tendrían",
"tenemos",
"tener",
"tenga",
"tengan",
"tengo",
"tenia",
"tenido",
"teniendo",
"tenéis",
"tenía",
"teníamos",
"tenían",
"terminas",
"ti",
"tiene",
"tienen",
"tienes",
"toda",
"todas",
"todavía",
"todes",
"todo",
"todos",
"trabajado",
"trans",
"tras",
"tu",
"tus",
"tuve",
"tuviera",
"tuvieron",
"tuviese",
"tuvo",
"tuya",
"tuyas",
"tuyo",
"tuyos",
"tú",
"u",
"un",
"una",
"unas",
"une",
"unir",
"uno",
"unos",
"usted",
"ustedes",
"va",
"vamos",
"van",
"varias",
"varios",
"varía",
"vas",
"vaya",
"vayan",
"venir",
"venía",
"ver",
"vice",
"vieron",
"vino",
"vis a vis",
"visto que",
"volver",
"volverá",
"volveríamos",
"volvió",
"vos",
"vosotras",
"vosotros",
"voy",
"vuelva",
"vuelvan",
"vuelve",
"vuelven",
"vuestra",
"vuestras",
"vuestro",
"vuestros",
"vía",
"y",
"ya",
"ya que",
"yo",
"ámbos",
"él",
"éramos",
"ésa",
"ésas",
"ése",
"ésos",
"ésta",
"éstas",
"éste",
"ésto",
"éstos",
"íbamos",
"ó",
"ú",
"última",
"últimas",
"último",
"últimos",
"\ufeffdesde",
"\ufeffel",
"\ufeffen",
"\ufeffla",
"\ufefflas",
],
"eu": [
"*edin",
"*edun",
"*ezan",
"aitzitik",
"ala",
"alabaina",
"aldiz",
"alegia",
"alta",
"anitz",
"anitzek",
"anitzeko",
"anitzez",
"antzera",
"arabera",
"ari",
"ari_izan",
"ariko",
"arren",
"asko",
"askoan",
"askok",
"askoko",
"askorekin",
"askoren",
"askorengan",
"askorentzat",
"askori",
"askorik",
"askotako",
"askotan",
"askotariko",
"askotatik",
"askotaz",
"askotxo",
"askoz",
"at",
"aunitz",
"aurka",
"aurkako",
"aurretik",
"azpian",
"azpitik",
"ba",
"bada",
"badago",
"badezake",
"badidazu",
"badiezu",
"badio",
"badiogu",
"badiote",
"badiougu",
"badiozu",
"badira",
"badirela",
"baditu",
"baditugu",
"badituzte",
"badituzu",
"badu",
"badugu",
"badugun",
"badut",
"badute",
"baduzu",
"bagara",
"bagatzaizkio",
"bagenu",
"baginen",
"bai",
"baietz",
"baikaituzte",
"bailegoen",
"bailituen",
"bailitzake",
"bailitzateke",
"baina",
"bainan",
"bainintzen",
"bainizkion",
"baino",
"baita",
"baitabil",
"baitaiteke",
"baitan",
"baitaude",
"baitiete",
"baitigu",
"baitio",
"baitiote",
"baitira",
"baititu",
"baititugu",
"baitituzte",
"baitituzu",
"baititzaket",
"baitizkio",
"baitu",
"baitugu",
"baitute",
"baituzu",
"baitzaio",
"baitzaizkio",
"baitzara",
"baitzegoen",
"baitzen",
"baitzeuden",
"baitzien",
"baitzion",
"baitzioten",
"baitziren",
"baitzitekeen",
"baitzituen",
"baitzitzaion",
"baitzuen",
"baitzuten",
"baizik",
"baizituen",
"baldin",
"balego",
"balira",
"baliteke",
"balitu",
"balituzkete",
"balitz",
"balitzait",
"balu",
"balute",
"banintz",
"banitu",
"banu",
"barik",
"barru",
"bat",
"batera",
"batera\x97",
"batere",
"batzu",
"batzuei",
"batzuek",
"batzuekin",
"batzuen",
"batzuengatik",
"batzuentzat",
"batzuetako",
"batzuetakoak",
"batzuetan",
"batzuetara",
"batzuetatik",
"batzuez",
"batzuk",
"batzutako",
"batzutan",
"bazaigu",
"bazaizu",
"bazara",
"bazen",
"bazina",
"baziren",
"bazituen",
"bazituzten",
"bazuen",
"bazuten",
"bederen",
"behintzat",
"bera",
"beragatik",
"beraiei",
"beraiek",
"beraiekin",
"beraien",
"beraietaz",
"berak",
"berarekin",
"beraren",
"berarengan",
"berarengana",
"berarengandik",
"berarengatik",
"berarentzat",
"berari",
"berauek",
"berauen",
"berauetan",
"beraz",
"berbera",
"berberagatik",
"berberak",
"berberarekin",
"berberaren",
"berberera",
"bere",
"berea",
"bereak",
"berean",
"berek",
"bereko",
"berekoa",
"berekoak",
"beren",
"beretan",
"beretik",
"beretzat",
"berriz",
"bertze",
"bertzeekin",
"bertzela",
"bestalde",
"bestaldean",
"beste",
"bestea",
"besteak",
"bestean",
"bestearekiko",
"bestearekin",
"bestearen",
"bestearengandik",
"besteari",
"besteaz",
"besteei",
"besteen",
"besteengandik",
"besteetan",
"besteko",
"bestekoa",
"bestela",
"bestera",
"besterantz",
"besterik",
"bestetan",
"bestetik",
"bezala",
"bezalako",
"bezalakoa",
"bezalakoen",
"bidez",
"bitartean",
"bitarteko",
"bitarterako",
"bitartez",
"da",
"dabil",
"dabiltza",
"dadila",
"dadin",
"dago",
"dagoela",
"dagoelako",
"dagoen",
"dagoena",
"dagoenaren",
"dagoenean",
"dagoenez",
"daiteekenaren",
"daiteke",
"daitekeela",
"daitekeen",
"daitekeena",
"daitekeenaren",
"daitekeenez",
"daiteken",
"daitezela",
"daitezen",
"daitezke",
"daitezkeelako",
"daitezkeelarik",
"daitezkeen",
"daitezkeenak",
"daitezkela",
"dakizuke",
"danok",
"daude",
"daudela",
"daudelako",
"dauden",
"daudenak",
"daudenek",
"daudenen",
"daudenik",
"dautzuet",
"dela",
"delako",
"delarik",
"den",
"dena",
"denak",
"denaren",
"denarentzat",
"denari",
"denean",
"denek",
"denen",
"denera",
"denerako",
"denetan",
"denetarik",
"denetik",
"denez",
"denik",
"denok",
"denon",
"denona",
"denontzat",
"deus",
"dexente",
"dezadan",
"dezagun",
"dezake",
"dezakedala",
"dezakedan",
"dezakedanean",
"dezakeela",
"dezakeen",
"dezakeena",
"dezakegu",
"dezakegula",
"dezakegun",
"dezakela",
"dezakelako",
"dezaket",
"dezakete",
"dezaketela",
"dezaketen",
"dezakezu",
"dezakezuen",
"dezakezuenez",
"dezakezunez",
"dezala",
"dezan",
"dezaten",
"dezente",
"dezenterekin",
"dezentetan",
"diat",
"didala",
"didana",
"didate",
"didazue",
"die",
"diegu",
"diegun",
"diela",
"dien",
"dienak",
"diet",
"diete",
"dietela",
"dietelako",
"dietenean",
"diezaiekete",
"diezaiokeena",
"diezaiokete",
"diezaiola",
"diezaioten",
"diezaizkioke",
"diezazkioke",
"diezazkiokeen",
"digu",
"digun",
"digute",
"digutela",
"diguten",
"digutenean",
"diguzu",
"dik",
"din",
"dinat",
"dio",
"diogu",
"diogulako",
"diogun",
"diola",
"dion",
"diona",
"dionean",
"dionez",
"diot",
"diote",
"diotela",
"dioten",
"diotena",
"diotenak",
"diotenek",
"diozu",
"dira",
"direla",
"direlako",
"direlakoan",
"direlakotz",
"diren",
"direnak",
"direnean",
"direnek",
"direnen",
"direnetan",
"direnez",
"direnik",
"dit",
"ditake",
"ditazke",
"ditin",
"ditu",
"ditudala",
"ditudalako",
"ditudan",
"ditudanean",
"dituela",
"dituelako",
"dituelarik",
"dituen",
"dituena",
"dituenak",
"dituenean",
"ditugu",
"ditugula",
"ditugun",
"ditugunez",
"ditun",
"ditut",
"dituzte",
"dituztela",
"dituztelako",
"dituzten",
"dituztenak",
"dituztenean",
"dituztenek",
"dituztenekin",
"dituztenen",
"dituzu",
"dituzue",
"dituzuen",
"dituzula",
"dituzun",
"dituzunik",
"ditzagun",
"ditzake",
"ditzakeen",
"ditzakegu",
"ditzakegula",
"ditzakete",
"ditzaketela",
"ditzaketelako",
"ditzaketen",
"ditzakezu",
"ditzan",
"dizkidazu",
"dizkie",
"dizkien",
"dizkiet",
"dizkiete",
"dizkigu",
"dizkigula",
"dizkigunak",
"dizkigute",
"dizkio",
"dizkiola",
"dizkion",
"dizkiot",
"dizkiotela",
"dizkit",
"dizkizuet",
"dizkizugu",
"dizu",
"dizuet",
"dizugu",
"dizut",
"dizute",
"du",
"duan",
"dudala",
"dudalarik",
"dudan",
"dudanak",
"dudanarekin",
"dudanean",
"dudanik",
"duela",
"duelako",
"duelakoan",
"duen",
"duena",
"duenak",
"duenaren",
"duenarentzat",
"duenari",
"duenean",
"duenentz",
"duenez",
"duenik",
"dugu",
"dugula",
"dugulako",
"dugun",
"duguna",
"dugunari",
"dugunean",
"dugunez",
"dugunik",
"duk",
"dun",
"dunala",
"dut",
"dute",
"dutela",
"dutelako",
"dutelakoan",
"duten",
"dutena",
"dutenagatik",
"dutenak",
"dutenaren",
"dutenean",
"dutenek",
"duteneko",
"dutenen",
"dutenena",
"dutenenetatik",
"dutenentz",
"dutenetakoa",
"dutenetik",
"dutenez",
"duzu",
"duzue",
"duzuela",
"duzuen",
"duzuenean",
"duzuenez",
"duzula",
"duzun",
"duzunarekin",
"ea",
"edo",
"edonor",
"edota",
"edozein",
"edozeinek",
"edozer",
"edozertarako",
"elgarrekin",
"elgarri",
"elkar",
"elkarrekiko",
"elkarrekin",
"elkarren",
"elkarri",
"ene",
"era",
"ere",
"esker",
"eta",
"eurak",
"eurei",
"eurek",
"eurekin",
"euren",
"eurentzat",
"ez",
"ezan",
"ezazu",
"ezazue",
"ezean",
"ezein",
"ezen",
"ezer",
"ezerekin",
"ezerk",
"ezertarako",
"ezertaz",
"ezertxo",
"ezetz",
"ezik",
"ezta",
"gabe",
"gabeko",
"gainera",
"gainerakoan",
"gainerat",
"gainera\x97",
"gainetik",
"gaitezen",
"gaitezke",
"gaitezkeela",
"gaitu",
"gaituela",
"gaituzte",
"gaituztenak",
"gara",
"garela",
"garelako",
"garen",
"garenez",
"garenok",
"gaude",
"gaudenak",
"gehiago",
"gehiagoan",
"gehiagok",
"gehiagoko",
"gehiagorekin",
"gehiegi",
"gehiegirik",
"gehiegitxo",
"gehien",
"gehiena",
"gehienak",
"gehienek",
"gehienekin",
"gehienentzako",
"gehienentzat",
"gehienetako",
"gehienetan",
"gehienok",
"gehientsu",
"gehientsuen",
"gehitxo",
"gehixeago",
"genbiltzan",
"genezake",
"genien",
"genion",
"genituela",
"genituelako",
"genituen",
"genituzke",
"genituzkeelako",
"genizkion",
"genizuen",
"genizun",
"genuela",
"genuelako",
"genuen",
"genuenean",
"genuenetik",
"genuenez",
"genuke",
"genukeen",
"geratu",
"geratzen",
"geroztik",
"geu",
"geure",
"geuregan",
"geuri",
"ginela",
"ginen",
"ginenean",
"ginenekoa",
"gintezkeela",
"gintuen",
"gintuenagatik",
"gintunan",
"gintuzten",
"gintzaizkion",
"gu",
"guk",
"gure",
"gurean",
"gurekin",
"guretzat",
"guri",
"gutako",
"gutaz",
"guti",
"gutiz",
"gutiz-gehien",
"gutiz-gehienek",
"gutxi",
"gutxiago",
"gutxiagorako",
"gutxiagorekin",
"gutxian",
"gutxien",
"gutxienez",
"gutxik",
"gutxiko",
"gutxira",
"gutxiren",
"gutxitan",
"guzi",
"guziak",
"guziarekin",
"guziekin",
"guzientzat",
"guzti",
"guztia",
"guztiagatik",
"guztiak",
"guztian",
"guztiarekin",
"guztiaren",
"guztiari",
"guztiaz",
"guztiei",
"guztiek",
"guztien",
"guztiengan",
"guztientzako",
"guztientzat",
"guztietako",
"guztietan",
"guztietara",
"guztietatik",
"guztiez",
"guztioi",
"guztiok",
"guztion",
"guztionak",
"guztionen",
"guztiontzat",
"guztira",
"guztitako",
"haatik",
"haiek",
"haiekin",
"haien",
"haiengan",
"haiengandik",
"haietako",
"haietan",
"haietatik",
"hainbat",
"hainbatek",
"hainbaten",
"hainbatez",
"hainbertze",
"hainbeste",
"hainbesterako",
"haiteke",
"haiz",
"halaber",
"halere",
"harekin",
"haren",
"harena",
"harentzat",
"hargatik",
"hari",
"hark",
"hartako",
"hartan",
"hartara",
"hartarako",
"hartatik",
"hau",
"haudala",
"hauei",
"hauek",
"hauekin",
"hauen",
"hauetako",
"hauetan",
"hauetara",
"hauetarako",
"hauetarik",
"hauetatik",
"hauexek",
"hauez",
"hauxe",
"heu",
"heure",
"hhriek",
"hi",
"hik",
"hinduan",
"hintzen",
"hire",
"hiri",
"honegatik",
"honek",
"honekin",
"honen",
"honengatik",
"honentzat",
"honetako",
"honetan",
"honetara",
"honetarako",
"honetatik",
"honetaz",
"honez",
"honi",
"hori",
"horiei",
"horiek",
"horiekin",
"horien",
"horientzat",
"horietako",
"horietakoren",
"horietan",
"horietarako",
"horietariko",
"horietatik",
"horiez",
"horixe",
"horregatik",
"horrek",
"horrekin",
"horren",
"horrenbeste",
"horrenbestez",
"horrengatik",
"horretako",
"horretan",
"horretantxe",
"horretara",
"horretarako",
"horretatik",
"horretaz",
"horrexegatik",
"horrexekin",
"horrexetan",
"horrez",
"horrezaz",
"horri",
"hortaz",
"huan",
"huntan",
"hura",
"huraxe",
"iezaidazu",
"iezaiezu",
"iezaion",
"iezaiozu",
"inor",
"inoren",
"inorentzako",
"inori",
"inork",
"inortaz",
"irian",
"itzazu",
"izaki",
"kontra",
"lezake",
"lezakeen",
"lezakete",
"lezan",
"liekeela",
"liezaiokeen",
"lioke",
"liokeela",
"liokeen",
"lirateke",
"liratekeela",
"liteke",
"litekeela",
"litekeen",
"litekeena",
"litezke",
"lituzkeela",
"lituzkeen",
"lituzkete",
"litzaidake",
"litzaiguke",
"litzateke",
"litzatekeela",
"litzatekeelako",
"litzatekela",
"lizateke",
"luke",
"lukeela",
"lukeelako",
"lukeen",
"lukeena",
"lukete",
"luketen",
"nabil",
"nago",
"nahiko",
"nahikoa",
"nahikorik",
"nahiz",
"naiteke",
"naiz",
"naizela",
"naizen",
"naizenean",
"naizenetan",
"naizenetik",
"naizenez",
"naizenik",
"nau",
"nauen",
"nauenarentzat",
"nauenean",
"nauk",
"naun",
"naute",
"nautela",
"nauzu",
"nauzun",
"nazan",
"nazaten",
"nazazu",
"nazazun",
"nenbilen",
"nengoela",
"nengoen",
"nere",
"neu",
"neuk",
"neure",
"nezake",
"ni",
"nian",
"nien",
"nigan",
"nik",
"ninduen",
"ninduten",
"nintekeela",
"nintzaion",
"nintzateke",
"nintzatekeela",
"nintzela",
"nintzelako",
"nintzen",
"nintzenean",
"nion",
"nire",
"nirea",
"niregan",
"niregana",
"niregatik",
"nirekin",
"niretzako",
"niretzat",
"niri",
"nitaz",
"nituela",
"nituen",
"nituzke",
"nizuke",
"nor",
"norbait",
"norbaitek",
"norbaitekin",
"norbaiten",
"norbaitengana",
"norbaitentzat",
"norbaiti",
"norbera",
"norberak",
"norberaren",
"norbere",
"noren",
"nori",
"nork",
"nornahi",
"nornahik",
"nortzuk",
"nortzuren",
"nuela",
"nuen",
"nuena",
"nuenean",
"nuenetik",
"nuke",
"nukeela",
"omen",
"ondoan",
"ondoko",
"ondora",
"ondoren",
"ondorengo",
"ondotik",
"ordea",
"ordez",
"orduan",
"oro_har",
"orobat",
"orohar",
"orok",
"ororen",
"orori",
"ostean",
"ostera",
"osterantzean",
"pean",
"piskat",
"pixka_bat",
"pixkat",
"pranko",
"ugari",
"ugarik",
"ugarirekin",
"ugariren",
"ugaritan",
"zagok",
"zaidan",
"zaidanaren",
"zaie",
"zaiela",
"zaien",
"zaienez",
"zaigu",
"zaigun",
"zaiguna",
"zaigunean",
"zaik",
"zaio",
"zaiola",
"zaiolako",
"zaion",
"zaiona",
"zait",
"zaitez",
"zaitezen",
"zaitu",
"zaitut",
"zaituzte",
"zaitzakegu",
"zaizkidan",
"zaizkie",
"zaizkiela",
"zaizkien",
"zaizkigu",
"zaizkio",
"zaizkiola",
"zaizkion",
"zaizkit",
"zaizkizu",
"zaizkizue",
"zaizkizun",
"zaizu",
"zaizue",
"zara",
"zarela",
"zarete",
"zatekeela",
"zatekeen",
"zatzait",
"zaude",
"ze",
"zebilen",
"zedin",
"zegoan",
"zegoela",
"zegoelako",
"zegoen",
"zegoenez",
"zegok",
"zehar",
"zein",
"zeina",
"zeinek",
"zeinen",
"zeintzu",
"zeintzuetan",
"zeintzuk",
"zela",
"zelako",
"zelarik",
"zen",
"zena",
"zenak",
"zenarekin",
"zenari",
"zenbait",
"zenbaitek",
"zenbaiten",
"zenbaitetan",
"zenbaiti",
"zenbaitzuk",
"zenbat",
"zenbateraino",
"zenean",
"zenekoa",
"zenetik",
"zenez",
"zeniguten",
"zenigutenez",
"zenik",
"zenituen",
"zenitzakeen",
"zenuela",
"zenuen",
"zenuke",
"zenukete",
"zenutela",
"zenuten",
"zeozer",
"zer",
"zer_edo_zer",
"zerbait",
"zerbaitek",
"zerbaitengatik",
"zerbaitetarako",
"zeren",
"zerendako",
"zeri",
"zerk",
"zertan",
"zertara",
"zertarako",
"zertaz",
"zertxobait",
"zeu",
"zeudela",
"zeudelako",
"zeuden",
"zeudenak",
"zeuk",
"zeure",
"zezakeen",
"zezaken",
"zezaketen",
"zezala",
"zezan",
"zezaten",
"zidan",
"zidatelako",
"zidaten",
"zidatena",
"zidatenak",
"zidatenean",
"ziela",
"zien",
"zienez",
"zietela",
"zietelako",
"zieten",
"ziezaion",
"zigun",
"zigunez",
"ziguten",
"zinan",
"zinen",
"zintudan",
"zintuztela",
"zintuztenean",
"ziola",
"ziolako",
"ziolarik",
"zion",
"ziona",
"zionean",
"zionez",
"zioten",
"ziotenak",
"zirela",
"zirelako",
"zirelakoan",
"zirelarik",
"ziren",
"zirenak",
"zirenean",
"zirenetik",
"zirenez",
"zirenik",
"ziren\x97",
"zirezte",
"zitekeela",
"zitekeen",
"zitekeena",
"zitekeenik",
"zitezen",
"zitezkeela",
"zitezkeelakoan",
"zitezkeen",
"zituela",
"zituelako",
"zituelarik",
"zituen",
"zituenean",
"zituenei",
"zituztela",
"zituztelarik",
"zituzten",
"zituztenak",
"zituztenetik",
"zitzaidakeen",
"zitzaidala",
"zitzaidan",
"zitzaien",
"zitzaigun",
"zitzaiola",
"zitzaion",
"zitzaionagatik",
"zitzaionean",
"zitzaizkidan",
"zitzaizkien",
"zitzaizkienean",
"zitzaizkigun",
"zitzaizkion",
"zitzaizkon",
"zitzaizun",
"zitzakeen",
"zitzaketenak",
"zizioten",
"zizkidaten",
"zizkien",
"zizkienik",
"zizkieten",
"zizkigun",
"zizkiola",
"zizkion",
"zizkiona",
"zizkioten",
"zizkiotenekin",
"zizuen",
"zizun",
"zoin",
"zonbat",
"zu",
"zuei",
"zuek",
"zuela",
"zuelako",
"zuelarik",
"zuen",
"zuena",
"zuenak",
"zuenarentzat",
"zuenean",
"zuenetik",
"zuenez",
"zuenik",
"zuentzako",
"zuetako",
"zuetaz",
"zugandik",
"zuk",
"zukeen",
"zuketen",
"zure",
"zureak",
"zurekin",
"zuretzat",
"zutela",
"zutelako",
"zutelarik",
"zuten",
"zutena",
"zutenean",
"zuteneko",
"zutenetik",
"zutenez",
],
"fr": [
"a",
"afin",
"ai",
"aie",
"aient",
"ainsi",
"ait",
"alias",
"aller",
"allons",
"apres",
"après",
"as",
"au",
"au-delà",
"aucun",
"aucune",
"aucunes",
"aucuns",
"aujourd'",
"auprès",
"auquel",
"aura",
"aurai",
"auraient",
"aurais",
"aurait",
"aurions",
"aurons",
"auront",
"autant",
"autour",
"autre",
"autres",
"autrui",
"auxquelles",
"auxquels",
"avaient",
"avais",
"avait",
"avant",
"avec",
"avez",
"aviez",
"avions",
"avoir",
"avons",
"ayant",
"ayez",
"ayons",
"beaucoup",
"c'est-à-dire",
"c-à-d.",
"ca",
"car",
"ce",
"ceci",
"cela",
"celle",
"celle-ci",
"celles",
"celles-ci",
"celui",
"celui-ci",
"celui-là",
"cent",
"certain",
"certaine",
"certaines",
"certains",
"ces",
"cet",
"cette",
"ceux",
"ceux-ci",
"ceux-là",
"cf.",
"chacun",
"chacune",
"chaque",
"chez",
"ci",
"cinq",
"combien",
"comme",
"comment",
"concernant",
"contre",
"cà",
"d'après",
"d'autres",
"dans",
"de",
"dehors",
"depuis",
"derrière",
"des",
"deux",
"devait",
"devant",
"devez",
"devions",
"devoir",
"devons",
"devra",
"devraient",
"devrait",
"devrions",
"devrons",
"devront",
"doit",
"doivent",
"donc",
"dont",
"du",
"durant",
"dès",
"début",
"dû",
"elle",
"elle-même",
"elles",
"elles-mêmes",
"en",
"entre",
"entres",
"envers",
"environ",
"es",
"est",
"et",
"etaient",
"etant",
"etre",
"eut",
"eux",
"eux-mêmes",
"excepté",
"eût",
"faire",
"fais",
"faisaient",
"faisait",
"faisant",
"fait",
"faite",
"faites",
"fasse",
"fassent",
"fera",
"ferait",
"feront",
"firent",
"fit",
"font",
"furent",
"fussent",
"fut",
"fût",
"für",
"grâce",
"hormis",
"hors",
"i",
"il",
"ils",
"iront",
"je",
"jusque",
"l'on",
"la",
"ladite",
"laquelle",
"le",
"le/lui",
"ledit",
"lequel",
"les",
"lesdites",
"lesquelles",
"lesquels",
"leur",
"leurs",
"lors",
"lorsque",
"lui",
"lui-aussi",
"lui-même",
"là",
"ma",
"maint",
"maintes",
"mais",
"malgré",
"me",
"mes",
"mien",
"moi",
"moi-même",
"moins",
"mon",
"ne",
"ni",
"nonobstant",
"nos",
"notre",
"nous",
"nous-mêmes",
"nul",
"nôtre",
"nôtres",
"on",
"ont",
"onze",
"ou",
"outre",
"où",
"par",
"parce",
"parmi",
"pas",
"pendant",
"personne",
"peu",
"peut",
"peuvent",
"peux",
"plupart",
"plus",
"plusieurs",
"pour",
"pourquoi",
"pourra",
"pourraient",
"pourrait",
"pourrez",
"pourrons",
"pourront",
"pouvait",
"pouvez",
"pouvoir",
"pouvons",
"presque",
"près",
"pu",
"puis",
"puisque",
"puisse",
"puissent",
"puissions",
"qu",
"quand",
"quant",
"quarante",
"quatre",
"que",
"quel",
"quelconque",
"quelle",
"quelles",
"quelqu'un",
"quelque",
"quelques",
"quelques-unes",
"quelques-uns",
"quelqu’un",
"quels",
"qui",
"quiconque",
"quid",
"quoi",
"quoique",
"rien",
"sa",
"sans",
"sauf",
"se",
"selon",
"sera",
"serai",
"seraient",
"serais",
"serait",
"seras",
"serez",
"seriez",
"serions",
"serons",
"seront",
"ses",
"si",
"sien",
"sienne",
"siennes",
"siens",
"sinon",
"six",
"soi",
"soi-même",
"soient",
"sois",
"soit",
"sommes",
"son",
"sont",
"sous",
"soyez",
"soyons",
"suis",
"sur",
"t-il",
"ta",
"tandis",
"tant",
"tantôt",
"te",
"tel",
"telle",
"telles",
"tes",
"tien",
"toi",
"ton",
"tous",
"tout",
"toute",
"toutes",
"trois",
"tte",
"tu",
"un",
"une",
"unes",
"uns",
"unt",
"va",
"vais",
"van",
"vers",
"versus",
"via",
"voici",
"voilà",
"voir",
"voire",
"vont",
"vos",
"votre",
"vous",
"vous-même",
"vs",
"vu",
"y",
"à",
"á",
"ça",
"étaient",
"étais",
"était",
"étant",
"étiez",
"étions",
"été",
"êtes",
"être",
],
"hi": [
"अंदर",
"अकेला",
"अतिरिक्त",
"अथवा, या",
"अधिकांश",
"अन्यथा",
"अब, अभि, इसी वक्त",
"अभी तक",
"आधा",
"आप, तुम, तुजे",
"आपका, तुम्हारा, तेरा",
"इधर, यहाँ",
"इन्हें, इन",
"इस तरफ",
"इस से",
"इसका, इसकी",
"इसके द्वारा",
"इसके साथ",
"इसलिए",
"इसलिए, तो",
"उदाहरण के लिए",
"उन को, इन को, उन्हें, इन्हें",
"उनका, उनके, उनकी, इनका",
"उनके",
"उनमें से",
"उन्हें",
"उस तरफ, उसी और",
"उसकी, उसके",
"उसके जैसा",
"उसको, उसके, इसको, इसके, इसकी",
"ऊपर",
"ऐसा",
"और",
"कब, जब",
"कभी - कभी",
"कभी कभी",
"कम",
"कम, थोड़ा",
"कहीं",
"का, की, के",
"काफ़ी",
"किंतु, पर, लेकिन, मगर",
"कितने",
"किस तरफ",
"किसके, जिसके, जिनके, किसका",
"किसको, किसे, जिसे, जिन्हे",
"किसी को",
"की ओर, की तरफ़",
"कुछ, थोड़े",
"के अंदर",
"के अलावा",
"के ऊपर",
"के लिये",
"के सामने",
"कैसे, कैसा",
"कोई",
"कोई न कोई",
"कोई नहीं",
"कोई, कोई व्यक्ति",
"कौन",
"कौन सा, जो",
"कौन, जो",
"क्या",
"क्यों",
"क्योंकि, चूंकि",
"जब तक",
"जब तक, तक तक",
"जहाँ, कहां, किधर",
"जिसका",
"जैसा",
"जैसे",
"जैसे की, जैसा, वैसा",
"जैसे, इस तरह",
"ज्यादा, अधिक",
"ढेर सारा",
"ढेर सारा, बहुत सारा",
"तक",
"तक, जब तक",
"तब, फिर",
"ताकि",
"तुम्हारा",
"तुम्हारा, तुम्हारे",
"तुम्हे, तुझे, तुमको",
"तेरा, तेरी",
"थोड़ा",
"दाहिने, दाहिना",
"दुसरा, एक और",
"दूर",
"दोनों",
"द्वारा",
"नहीं, मत ",
"नीचे",
"पास में, पास",
"पास, नजदीक, करीब",
"पीछे",
"पूरा",
"प्रति, से, तक",
"प्रत्येक",
"फिर, तो, तब, उस वक़्त",
"फिर, दुबारा",
"बजाय",
"बहुत, अनेक",
"बहुत, ज्यादा, काफी",
"बाएं, वाम",
"बाद में",
"बाद में, पीछे",
"बाहर",
"भी",
"मुझे",
"में, भीतर, अंदर",
"में, मैंने",
"मेरा, अपना",
"मेरा, मेरी",
"मेरी, मेरा, मेरे",
"यदि",
"यदि, अगर",
"यदि, या",
"यह, ये, इसे",
"लेकिन",
"वह",
"वह, जो",
"वहां",
"वही",
"वे, वह, वो, उन्होंने",
"वैसे, उसके जैसा",
"शायद",
"सब लोग",
"सब, सभी, सारे",
"सबसे ज्यादा, अधिकांश",
"साथ",
"से",
"हम",
"हमारा, हमारे, हमारी",
"हर जगह",
"हालाँकि",
],
"id": [
"Anda",
"ada",
"adakah",
"adalah",
"adanya",
"adapaun",
"adapun",
"agar",
"akan",
"akau",
"akhirnya",
"akibat",
"akibatnya",
"aku",
"alias",
"anda",
"aneka",
"antar",
"antara",
"antaranya",
"apa",
"apabila",
"apakah",
"apalagi",
"apapun",
"asal",
"atas",
"atau",
"ataukah",
"ataupun",
"bagai",
"bagaimana",
"bagaimanakah",
"bagaimanapun",
"bagi",
"bagi-nya",
"bahkan",
"bahwa",
"bahwasanya",
"baik",
"bakal",
"balik",
"banyak",
"banyaknya",
"baru",
"bawah",
"beberapa",
"begini",
"beginilah",
"begitu",
"belakang",
"beliau",
"belum",
"beragam",
"berapa",
"berapakah",
"berbagai",
"berberapa",
"berdasar",
"berdasarkan",
"berdiri",
"berdirinya",
"berikut",
"berkat",
"bersama",
"bersamanya",
"berupa",
"beserta",
"betapa",
"bila",
"bilamana",
"bisa",
"boleh",
"buah",
"buat",
"bukan",
"bukankah",
"bukanlah",
"bukannya",
"buruh",
"cara",
"dalam",
"dalamnya",
"dan",
"dapat",
"dari",
"darimana",
"daripada",
"dekat",
"demi",
"demikian",
"dengan",
"dengannya",
"depan",
"dg",
"di",
"dia",
"diantara",
"diantaranya",
"diatas",
"dibalik",
"dibandingkan",
"dibawah",
"dibawahnya",
"dibeberapa",
"dibelakang",
"diberbagai",
"didalam",
"didalamnya",
"diluar",
"dimana",
"diri",
"dirinya",
"disaat",
"disamping",
"disebelah",
"disekeliling",
"diseluruh",
"disini",
"ditepi",
"dng",
"dr",
"engkau",
"gambar",
"gimana",
"hadap",
"hai",
"hanya",
"harus",
"hei",
"ia",
"ialah",
"ini",
"inikah",
"inilah",
"inipun",
"isi",
"isinya",
"itu",
"itua",
"itulah",
"itupun",
"iye",
"jadi",
"jangan",
"jauh",
"jelang",
"jenis",
"jika",
"juga",
"kah",
"kalau",
"kalian",
"kalo",
"kami",
"kamilah",
"kamu",
"kan",
"kapan",
"kapankah",
"karena",
"karenanya",
"kau",
"ke",
"kebanyakan",
"kecuali",
"kedalam",
"kedepan",
"kedua",
"keduanya",
"keliling",
"keluar",
"kemudian",
"kena",
"kenapa",
"kendati",
"kepada",
"kepadaku",
"kepadamu",
"kepadanya",
"kepusatnya",
"kerana",
"keseluruhan",
"keseluruhannya",
"kesemuanya",
"ketika",
"ketimbang",
"khususnya",
"kira",
"kita",
"kok",
"koq",
"kpd",
"ku",
"la",
"lagi",
"lah",
"lain",
"lainnya",
"lalu",
"lama",
"lantaran",
"lantas",
"layak",
"layaknya",
"lengah",
"lewat",
"loh",
"luar",
"macam",
"maka",
"makanya",
"maksud",
"maksudnya",
"malahan",
"mampu",
"mana",
"manakah",
"manakala",
"manapun",
"masa",
"masing",
"masing-masing",
"maupun",
"mayoritas",
"melainkan",
"melalui",
"melawan",
"melewati",
"menajak",
"menbeli",
"mengajak",
"mengapa",
"mengenai",
"mengenainya",
"menjadi",
"menjelang",
"menuju",
"menurut",
"menurutmu",
"mereka",
"merekapun",
"merupakan",
"meski",
"meskipn",
"meskipun",
"misalkan",
"misalnya",
"msl",
"mulai",
"mungkin",
"namun",
"nya",
"oleh",
"olehnya",
"orang",
"pada",
"padahal",
"padanya",
"para",
"pasca",
"pd",
"per",
"perihal",
"perlu",
"pula",
"pun",
"saat",
"saatnya",
"sama",
"sambil",
"sampai",
"sampai-sampai",
"samping",
"sana",
"sang",
"satu",
"satu-satunya",
"satunya",
"saya",
"seakan",
"seandainya",
"seantero",
"sebab",
"sebagai",
"sebagaimana",
"sebagian",
"sebaliknya",
"sebangsa",
"sebanyak",
"sebelah",
"sebelum",
"sebelumnya",
"seberang",
"seberat",
"sebesar",
"sebuah",
"secara",
"sedang",
"sedangkan",
"sedangkkan",
"sedari",
"sedikit",
"sedikitnya",
"seekor",
"segala",
"segenap",
"seharusnya",
"sehingga",
"sehubungan",
"seiring",
"sejak",
"sejauh",
"sejenis",
"sejumlah",
"sekali",
"sekaligus",
"sekalipun",
"sekitar",
"sekitarnya",
"selain",
"selaku",
"selama",
"selesai",
"seluas",
"seluruh",
"semacam",
"semasa",
"semenjak",
"sementara",
"sempat",
"semua",
"semuanya",
"sendiri",
"senilai",
"seorang",
"sepanjang",
"sepasang",
"sepeninggal",
"seperti",
"sepertinya",
"sepeti",
"sepucuk",
"seputar",
"serangkaian",
"seraya",
"serta",
"sesampai",
"sesampainya",
"seseorang",
"sesuai",
"sesuatu",
"sesudah",
"setebal",
"setelah",
"setelahnya",
"setengah",
"setiap",
"setinggi",
"seusai",
"sewaktu",
"si",
"siapa",
"siapakah",
"siapapun",
"silakan",
"sini",
"sinilah",
"situ",
"soal",
"suatu",
"sudah",
"supaya",
"tak",
"tan",
"tangguh",
"tanpa",
"tapi",
"tatkala",
"telah",
"tempat",
"tengah",
"tengahnya",
"tentang",
"tepat",
"tepatnya",
"teratas",
"terhadap",
"terhadapnya",
"termasuk",
"ternyata",
"tersebut",
"tertentu",
"terutama",
"tesebut",
"tetap",
"tetapi",
"tiada",
"tiap",
"tidak",
"tidakkah",
"tidaklah",
"tidaknya",
"tsb",
"tt",
"ttg",
"tuh",
"tujuh",
"untuk",
"untukmu",
"untuknya",
"untung",
"usah",
"usai",
"via",
"waktu",
"walau",
"walaupun",
"ya",
"yaitu",
"yakni",
"yang",
"yg",
],
"pt": [
"a",
"a cabo de",
"a caminho de",
"a despeito de",
"a favor de",
"a fim de",
"a menos que",
"a não ser",
"a não ser que",
"a partir de",
"a propósito",
"a respeito de",
"a título de",
"abaixo de",
"acima",
"acima de",
"afinal",
"afora",
"agora",
"agora que",
"ai",
"ainda",
"ainda mais",
"algo",
"algum",
"alguma",
"algumas",
"alguns",
"alguém",
"além",
"além de",
"ambas",
"ambos",
"andar",
"andou",
"ante",
"antes",
"anti",
"antre",
"ao",
"ao cabo de",
"ao invés de",
"ao lado",
"ao longo de",
"ao passo que",
"ao redor de",
"aos cuidados de",
"apenas",
"apesar de",
"apesar de que",
"após",
"aquela",
"aquelas",
"aquele",
"aqueles",
"aquilo",
"as",
"assim",
"assim como",
"assim que",
"atras",
"através",
"através de",
"atráis",
"atrás",
"atrás de",
"até",
"até que",
"auto",
"avante",
"aí",
"bastante",
"bem",
"bem como",
"cada",
"cara a cara",
"caso",
"cerca",
"cima",
"com",
"comigo",
"como",
"como se",
"conforme",
"connosco",
"conosco",
"conquanto",
"consigo",
"consoante",
"contanto",
"contanto que",
"contigo",
"contra",
"contudo",
"convosco",
"cuja",
"cujas",
"cujo",
"cujos",
"d'",
"d.",
"da",
"dada",
"dado",
"dado que",
"dali",
"daquela",
"daquelas",
"daquele",
"daqui",
"daqui a",
"daí",
"de",
"de modo que",
"dela",
"delas",
"dele",
"deles",
"demais",
"dentre",
"dentro",
"dentro de",
"depois",
"depois de",
"desde",
"desde que",
"dessa",
"dessas",
"desse",
"desses",
"desta",
"destas",
"deste",
"destes",
"detrás de",
"deva",
"devam",
"deve",
"devem",
"devemos",
"devendo",
"dever",
"deveria",
"deveriam",
"deverá",
"deverão",
"deviam",
"devido",
"devido a",
"devo",
"diante de",
"disso",
"diversas",
"diversos",
"do que",
"donde",
"doutros",
"dum",
"duma",
"durante",
"e",
"e/ou",
"eba",
"eis",
"ela",
"elas",
"ele",
"eles",
"eles/elas",
"em",
"em cima de",
"em frente a",
"em meio a",
"em nome de",
"em prol de",
"em relação a",
"em torno de",
"em vez de",
"em virtude de",
"em vista de",
"em volta de",
"embaixo de",
"embora",
"enquanto",
"entre",
"entretanto",
"então",
"era",
"eram",
"ergo",
"essa",
"essas",
"esse",
"esses",
"esta",
"estado",
"estamos",
"estando",
"estar",
"estarem",
"estaria",
"estariam",
"estarmos",
"estará",
"estarão",
"estas",
"estava",
"estavam",
"este",
"esteja",
"estejam",
"estes",
"esteve",
"estivemos",
"estiver",
"estiveram",
"estiverem",
"estivesse",
"estivessem",
"estou",
"está",
"estávamos",
"estão",
"eu",
"excepto",
"exceto",
"fica",
"ficado",
"ficamos",
"ficando",
"ficar",
"ficaram",
"ficaria",
"ficou",
"fiquei",
"foi",
"fomos",
"for",
"fora",
"fora de",
"foram",
"forem",
"fosse",
"fossem",
"frente a",
"fui",
"fôr",
"gente",
"graças",
"graças a",
"havendo",
"haver",
"haverem",
"havia",
"haviam",
"houver",
"houvesse",
"há",
"i.e.",
"ia",
"iam",
"ido",
"igual a",
"inté",
"invés de",
"ir",
"ireii",
"irem",
"iremos",
"iria",
"iriam",
"irá",
"irão",
"isso",
"isto",
"junto a",
"junto com",
"já",
"já que",
"la",
"las",
"lhe",
"lhes",
"lo",
"logo",
"logo que",
"los",
"lá",
"mais",
"mais de",
"mais do que",
"mais que",
"mal",
"malgrado",
"mas",
"me",
"mediante",
"menos",
"mesma",
"mesmas",
"mesmo",
"mesmo que",
"mesmo se",
"mesmos",
"meu",
"meus",
"mim",
"minha",
"minhas",
"muita",
"muitas",
"muito",
"muito menos",
"muitos",
"muitíssimo",
"n'",
"na",
"na frente de",
"na sequência de",
"nada",
"naquela",
"naquele",
"naqueles",
"naquilo",
"nas",
"nele",
"neles",
"nem",
"nenhum",
"nenhuma",
"nenhumas",
"nenhuns",
"nessa",
"nessas",
"nesse",
"nesses",
"nesta",
"nestas",
"neste",
"nestes",
"ninguém",
"no",
"no que",
"nos",
"nosco",
"nossa",
"nossas",
"nosso",
"nossos",
"num",
"numa",
"nós",
"o",
"o(s)",
"onde",
"onde quer que",
"ora",
"os",
"ou",
"outra",
"outras",
"outrem",
"outro",
"outros",
"outrém",
"oxalá",
"p'ra",
"p/",
"pa",
"para",
"para com",
"para que",
"parece",
"parecer",
"pelo",
"per",
"perante",
"perantes",
"permanece",
"permanecer",
"perto de",
"pode",
"podem",
"podemos",
"podendo",
"poder",
"poderei",
"poderem",
"poderemos",
"poderia",
"poderiam",
"poderá",
"poderão",
"poderíamos",
"podia",
"podiam",
"podíamos",
"pois",
"por",
"por causa de",
"por causa que",
"por conta de",
"por entre",
"por isso",
"por isto",
"por meio de",
"por trás",
"por trás de",
"por volta de",
"porquanto",
"porque",
"portanto",
"porém",
"possa",
"possam",
"possamos",
"posso",
"pouca",
"poucas",
"pouco",
"poucos",
"pouquíssimos",
"pra",
"precisam",
"precisar",
"precisaram",
"precisarão",
"precisou",
"prestes a",
"pretender",
"pretendiam",
"pro",
"pré",
"pré-",
"pró",
"pude",
"pudemos",
"puderam",
"puderem",
"pudesse",
"pudessem",
"pós",
"pôde",
"pôr",
"público",
"q.b.",
"quais",
"quaisquer",
"qual",
"qualquer",
"quando",
"quanta",
"quantas",
"quanto",
"quanto a",
"quanto baste",
"quanto mais",
"quantos",
"que",
"quem",
"quer",
"quão",
"quê",
"rente a",
"rente de",
"rumo a",
"se",
"se bem que",
"se e somente se",
"se-",
"segundo",
"seja",
"sejam",
"sem",
"sem falar de",
"sempre que",
"sendo",
"sendo que",
"senão",
"ser",
"serei",
"serem",
"seremos",
"seria",
"seriam",
"sermos",
"será",
"serão",
"seu",
"seus",
"si",
"sido",
"sob",
"sobre",
"somos",
"sou",
"sse",
"sua",
"suas",
"sub",
"são",
"sê",
"só que",
"sôbre",
"ta",
"tais",
"tal",
"tampouco",
"tanta",
"tantas",
"tanto",
"tantos",
"te",
"tem",
"temos",
"tende",
"tendo",
"tenha",
"tenham",
"tenhamos",
"tenho",
"tentado",
"tentar",
"tentaram",
"ter",
"terei",
"terem",
"teremos",
"teria",
"teriam",
"termos",
"terá",
"terão",
"teríamos",
"teu",
"teus",
"teve",
"ti",
"tido",
"tinha",
"tinham",
"tive",
"tivemos",
"tiver",
"tiveram",
"tiverem",
"tivesse",
"tivessem",
"to",
"toda",
"todas",
"todavia",
"todo",
"todos",
"trás",
"tu",
"tua",
"tuas",
"tudo",
"tá",
"tão",
"tão logo",
"té",
"têm",
"tínhamos",
"ultra",
"um",
"uma",
"uma vez que",
"umas",
"uns",
"vai",
"vais",
"vamos",
"varias",
"varios",
"versus",
"via",
"visto",
"visto que",
"voce",
"você",
"vocês",
"vos",
"vossa",
"vossas",
"vosso",
"vossos",
"vou",
"vs",
"vá",
"várias",
"vários",
"vão",
"vérsus",
"vós",
"à",
"à beira de",
"à custa de",
"à expensa de",
"à luz de",
"à medida que",
"àquela",
"àqueles",
"às",
"às custas de",
"às expensas de",
"é",
"íamos",
"\u200b\u200bem",
],
"ur": [
"اسلئے",
"اسکے جیسا",
"ان کے بیچ ,ان لوگوں کے بیچ",
"اندر",
"انکا",
"اور ,و",
"اوپر",
"اگر ,گرچہ ,اگرچہ",
"باہر",
"بایاں ,بائیں",
"بجائے ,بدلے ,بدلے میں",
"بہت ,بہت سارے ,بہت کچھ",
"بہت زیادہ",
"تب تک",
"تم لوگ ,آپ ,آپ لوگ",
"تمہارا ,تیرا ,آپکا",
"تو, تم ,آپ",
"تھوڑا ,تھوڑی",
"جب تک",
"جسکا",
"جیسے",
"حالاںکہ",
"دایاں ,دائیں ,صحیح",
"دوسرا",
"زیادہ تر",
"ساتھ ,کے ساتھ",
"سب ,سبھی ,سب کچھ ,سارے ,سارا",
"سب لوگ",
"طرف ,اسکی طرف",
"لیکن",
"مثلأ ,مثال کے طور پے",
"میرا",
"میں",
"میں ,کے اندر ,اندر",
"نہی تو",
"نہیں ,ناں ,نا",
"نیچے",
"وہ ,وہ لوگ",
"وہ ,وہ والا, کہ",
"وہ ,یے",
"وہاں",
"پھر",
"پہ ,پر ,میں",
"کافی",
"کب",
"کبھی کبھی",
"کم",
"کوئی",
"کون",
"کونسا",
"کچھ",
"کہاں",
"کیا",
"کیسے",
"کیوںکہ ,چوںکہ ,کیوںکی",
"کےلئے",
"ہم ,ھم",
"یہ ,یہ والا",
"یہاں",
],
"vi": [
"ai",
"ai ai",
"ai nấy",
"anh",
"anh em",
"anh trai",
"anh ấy",
"ba",
"bao",
"bao giờ",
"bay",
"bà",
"bà con",
"bà ấy",
"bác",
"bây",
"bé",
"bên",
"bạn",
"bạn gái",
"bạn trai",
"bả",
"bản thân",
"bất chấp",
"bất cứ",
"bất kì",
"bất luận",
"bất nhược",
"bất quá",
"bấy",
"bấy nhiêu",
"bần tăng",
"bầy quân",
"bầy tui",
"bậu",
"bằng",
"bệ hạ",
"bị cáo",
"bố",
"bố nó",
"bồ",
"bộ",
"bởi",
"bởi vì",
"cc",
"cha",
"chao",
"chi",
"chiếu theo",
"cho",
"cho dù",
"cho đến",
"choa",
"chàng",
"chán",
"cháu",
"chí",
"chính",
"chú",
"chú mày",
"chúng",
"chúng mày",
"chúng mình",
"chúng nó",
"chúng ta",
"chúng tao",
"chúng tôi",
"chút",
"chăng",
"chưa",
"chưng",
"chả",
"chắc",
"chẳng cứ",
"chỉ",
"chị",
"chị gái",
"chị ấy",
"chớ",
"chứ",
"con",
"con này",
"cuối cùng",
"các",
"các hạ",
"cái",
"cái gì",
"cái này",
"cán bộ",
"còn",
"có",
"có vẻ",
"cóc",
"cô",
"cô nương",
"cô ta",
"cô ấy",
"côi",
"công tử",
"cùng",
"cơ",
"cơ mà",
"cưng",
"cạnh",
"cả",
"cả nhà",
"cầm bằng",
"cậu",
"cổ",
"cộng",
"cụ",
"của",
"cứ",
"do",
"do vậy",
"do đó",
"duy",
"dù",
"dù sao",
"dù vậy",
"dưng",
"dưới",
"dường như",
"dạ",
"dầu",
"dẫu",
"dẫu vậy",
"dậy",
"dọc",
"dợ",
"em",
"ghe",
"già",
"giá như",
"giả dụ",
"giả sử",
"giữa",
"gì",
"ha",
"hay",
"hay là",
"hen",
"hoàng thượng",
"hoặc",
"huynh",
"huống",
"huống chi",
"huống gì",
"huống hồ",
"há",
"hôn",
"hơn",
"hơn nữa",
"hả",
"hầu hết",
"hắn",
"hết",
"hết cả",
"hề",
"hễ",
"họ",
"hổi",
"hỡi",
"hử",
"khanh",
"khi",
"khi nào",
"không",
"không ai",
"không những",
"khứa",
"kia",
"kém",
"kìa",
"kẻo",
"kể từ",
"l",
"là",
"lão",
"lên",
"lại nữa",
"lần",
"lẫn",
"lắm",
"mi",
"min",
"miễn",
"moa",
"muôn",
"muội",
"mà",
"mà còn",
"mày",
"mãi",
"mình",
"mô",
"mũ",
"mất",
"mấy",
"mầy",
"mẫu hậu",
"mặc dù",
"mặc dầu",
"mặt khác",
"mẹ",
"mẹ nó",
"mọi",
"mọi người",
"mọi vật",
"mỏa",
"mỗi",
"một chút",
"một nửa",
"một số",
"một vài",
"một ít",
"mụ",
"ngay",
"nghe",
"nghen",
"nghỉ",
"ngoài",
"ngoài ra",
"ngoại",
"ngoải",
"ngài",
"ngươi",
"người",
"người người",
"người ta",
"ngược lại",
"ngộ",
"nha",
"nhiều",
"nhà quân",
"nhá",
"nhân",
"nhân dịp",
"nhé",
"như",
"như vậy",
"nhưng",
"nhưng mà",
"nhược bằng",
"nhất là",
"nhằm",
"nhỉ",
"nhỏ",
"nhờ",
"nhỡ",
"những",
"ni",
"nà",
"nàng",
"nào",
"này",
"nè",
"nên",
"nó",
"nô tài",
"nô tì",
"nơi",
"nơi nơi",
"nấy",
"nầy",
"nẩu",
"nếu",
"nếu như",
"nọ",
"nội",
"nớ",
"nừng",
"nửa",
"nữa",
"phi",
"phía",
"phô bay",
"phải",
"phải hôn",
"phải không",
"phần",
"phần lớn",
"phỏng",
"phứt",
"qua",
"quanh",
"quý khách",
"quý vị",
"quả",
"quả nhân",
"ra",
"riêng",
"rùi",
"rằng",
"rồi",
"sang",
"sao",
"sau",
"sau cùng",
"song",
"song le",
"sắp",
"sẽ",
"sở dĩ",
"ta",
"tao",
"tau",
"thanh niên",
"thay",
"thay vì",
"theo",
"theo đó",
"thiếp",
"thiệt",
"thành",
"thâu",
"thêm",
"thì",
"thí dụ",
"thôi",
"thần",
"thầy",
"thẩy",
"thật",
"thằng này",
"thế",
"thế là",
"thế mà",
"thế nhưng",
"thị",
"thời",
"tiểu nhân",
"toa",
"toà",
"toàn",
"toàn bộ",
"toàn thể",
"trong",
"trong khi",
"trong đó",
"trái",
"trái lại",
"trên",
"trò",
"trước",
"trẫm",
"trời",
"trừ phi",
"tuy",
"tuy nhiên",
"tuy rằng",
"tuy vậy",
"tê",
"tóm lại",
"tôi",
"tương đương",
"tại",
"tại hạ",
"tại vì",
"tất cả",
"tầm",
"tận",
"tỉ",
"tổ",
"tớ",
"tới",
"tụi",
"tụi nó",
"tức",
"tức là",
"từ",
"tự",
"tựa",
"ui",
"và",
"vài",
"vài ba",
"vào",
"vì",
"vì thế",
"vì vậy",
"ví dụ",
"ví như",
"vô",
"vô số",
"vô vàn",
"vả chăng",
"vả lại",
"vậy",
"vậy là",
"vậy mà",
"về",
"về hướng",
"về phía",
"vị",
"với",
"xuống",
"à",
"á",
"ái khanh",
"âu là",
"í",
"ít",
"ông",
"ông ấy",
"út",
"ý",
"đa số",
"đang",
"đi",
"đâu",
"đây",
"đã",
"đê",
"đích thân",
"đó",
"đôi",
"đương",
"được",
"đại nhân",
"đấy",
"đầu tiên",
"đằng này",
"đằng ấy",
"đẳng",
"đặng",
"đến",
"để",
"đệ",
"đối với",
"đồ",
"ơi",
"ư",
"ạ",
"ả",
"ảnh",
"ấy",
"ẻm",
"ổng",
"ờ",
"ở",
"ừ",
"ừa",
"ừm",
],
"zh": [
"一",
"一争",
"一些",
"一切",
"一旦",
"一点",
"一爭",
"上",
"上前",
"上表",
"下",
"不",
"不仅",
"不会",
"不但",
"不僅",
"不光",
"不关",
"不准",
"不单",
"不可",
"不單",
"不够",
"不夠",
"不应",
"不得",
"不想",
"不愿",
"不應",
"不是",
"不會",
"不準",
"不用",
"不管",
"不經",
"不肯",
"不能",
"不要",
"不該",
"不論",
"不论",
"不该",
"不過",
"不需",
"不願",
"与",
"与其",
"且",
"且是",
"並",
"並且",
"並非",
"个",
"个人",
"中",
"临",
"为",
"为了",
"为人",
"为什么",
"主",
"乃至",
"之",
"之上",
"之下",
"之中",
"之內",
"之内",
"之初",
"之前",
"之后",
"之外",
"之後",
"之所以",
"之时",
"之時",
"之間",
"之间",
"也",
"也是",
"书",
"了",
"争辩",
"事",
"于",
"井",
"亚",
"亞",
"亦为",
"亦是",
"亦為",
"亭",
"亲",
"人",
"人人",
"人家",
"什么",
"什麼",
"今",
"仍是",
"仍算",
"从",
"他",
"他们",
"他俩",
"他倆",
"他們",
"代",
"令",
"以",
"以上",
"以下",
"以为",
"以來",
"以前",
"以北",
"以及",
"以后",
"以外",
"以往",
"以後",
"以来",
"以為",
"以爲",
"以至",
"们",
"价",
"任",
"任何",
"众",
"会",
"传",
"伪",
"似乎",
"似的",
"但",
"但是",
"位",
"低",
"住",
"体",
"何",
"何方",
"佛",
"作",
"作为",
"作為",
"你",
"你们",
"你們",
"你自己",
"你门",
"佬",
"併",
"使",
"來",
"供",
"依",
"依据",
"依據",
"依照",
"依靠",
"侠",
"侧",
"侨",
"侯",
"便是",
"係",
"保存",
"保級",
"保级",
"俠",
"信",
"修复",
"修復",
"個",
"個人",
"們",
"倘若",
"借助",
"借由",
"借着",
"值",
"假使",
"假如",
"偏",
"做",
"側",
"偽",
"傳",
"傻",
"像",
"像是",
"僑",
"價",
"儘管",
"元",
"先",
"光",
"光棍",
"党",
"內",
"內外",
"全",
"全体",
"全副",
"全套",
"全部",
"全體",
"公",
"关",
"关于",
"关心",
"兵",
"其",
"其中",
"其他",
"其余",
"其它",
"其餘",
"典",
"兼",
"内",
"内外",
"军",
"冠",
"冢",
"冲",
"冷",
"准",
"准备",
"减慢",
"几",
"凭",
"凭借",
"出手",
"刀",
"分",
"分布",
"列",
"则为",
"则是",
"初",
"別",
"別人",
"别",
"别人",
"别的",
"到",
"到处",
"制",
"券",
"剂",
"則是",
"則為",
"前",
"前任",
"前后",
"前後",
"剑",
"剧",
"副",
"劇",
"劍",
"劑",
"力",
"办",
"办学",
"功",
"加",
"劣",
"努力",
"包",
"包裹",
"化",
"区",
"医",
"區",
"半",
"单",
"卡",
"卫",
"即",
"即使",
"即便",
"却是",
"卻",
"卻是",
"卿",
"厂",
"厅",
"历届",
"压",
"原",
"去",
"县",
"又",
"又或",
"又是",
"及",
"友",
"发展",
"发育",
"变",
"变得",
"口",
"古",
"另",
"另外",
"只是",
"只有",
"只能",
"只要",
"可",
"可以",
"可是",
"可能",
"台",
"史",
"叶",
"号",
"司",
"吃",
"各",
"各个",
"各位",
"各個",
"各天",
"各州",
"各式",
"各樣",
"各种",
"各种各样",
"各種",
"各種各樣",
"各类",
"各級",
"各级",
"各自",
"各項",
"各類",
"各项",
"同",
"同年",
"名",
"后",
"向",
"吗",
"君",
"否",
"吧",
"呀",
"员",
"呢",
"周",
"味",
"和",
"和美",
"咱们",
"品",
"哈尔滨",
"哈爾濱",
"員",
"哪",
"哪个",
"哪些",
"哪個",
"哪儿",
"哪兒",
"哪怕",
"哪裏",
"哪裡",
"哪里",
"唯有",
"商",
"啊",
"啦",
"喇",
"喜",
"喜欢",
"喜歡",
"單",
"單憑",
"嗎",
"嗬",
"嘛",
"嘴",
"器",
"回",
"因",
"因为",
"因应",
"因應",
"因此",
"因為",
"团",
"园",
"围",
"国",
"图",
"圆",
"圈",
"國",
"圍",
"園",
"圓",
"圖",
"團",
"土",
"圣",
"在",
"在內",
"在内",
"地",
"场",
"坊",
"坟",
"坡",
"型",
"埋",
"城",
"埤",
"執政",
"基",
"基于",
"基於",
"堂",
"堡",
"堤",
"報",
"場",
"塔",
"塘",
"墓",
"墙",
"增長",
"增长",
"墟",
"墳",
"壓",
"士",
"处",
"外",
"多",
"多少",
"多次",
"夜",
"够",
"夠",
"夢",
"大",
"大家",
"天",
"头",
"夹",
"夾",
"奏",
"奖",
"套",
"女",
"女士们",
"女士门",
"奸",
"她",
"她们",
"她俩",
"她倆",
"她們",
"好",
"好了",
"好像",
"如",
"如何",
"如同",
"如果",
"妃",
"妇",
"妳",
"妹",
"始",
"娘",
"婆",
"婦",
"子",
"孔",
"字",
"季",
"学",
"學",
"宁愿",
"它",
"它们",
"它們",
"安全",
"宏",
"宗",
"官",
"实属",
"审",
"客",
"室",
"宫",
"宮",
"家",
"宽",
"富",
"實屬",
"審",
"寬",
"对",
"对于",
"对方",
"对此",
"寺",
"将",
"將",
"對",
"對方",
"對於",
"對此",
"小",
"尖",
"就",
"就是",
"就算",
"尸",
"尽管",
"局",
"层",
"屋",
"屍",
"展",
"属",
"層",
"屬",
"屯",
"山",
"屿",
"岗",
"岛",
"岩",
"岭",
"岸",
"峡",
"峰",
"島",
"峽",
"崖",
"崗",
"嶺",
"嶼",
"川",
"州",
"工",
"左右",
"差",
"巷",
"币",
"市",
"布",
"师",
"希望",
"帝",
"带",
"師",
"席",
"帮",
"帶",
"帽",
"幣",
"幫",
"年",
"并",
"并且",
"并非",
"幾",
"庄",
"床",
"庐",
"库",
"应",
"应当",
"应该",
"底",
"店",
"庙",
"府",
"度",
"座",
"庫",
"庭",
"廟",
"廠",
"廬",
"廳",
"廷",
"建基於",
"开口",
"开始",
"式",
"弯",
"張",
"強",
"弹",
"强",
"彈",
"彎",
"当",
"当中",
"当届",
"录",
"形",
"形容",
"形成",
"影响",
"影響",
"彼此",
"往",
"径",
"待",
"很多",
"後",
"徑",
"徒",
"得",
"得宠",
"得寵",
"從",
"御",
"微",
"徽",
"心",
"必",
"必須",
"必须",
"志",
"快",
"态",
"怎么样",
"怎樣",
"怎麼",
"怕",
"性",
"怪",
"总",
"恆",
"恋",
"恒",
"您",
"想",
"愛",
"感",
"感到",
"感覺",
"感觉",
"愿意",
"態",
"憑",
"憑藉",
"懂",
"懂得",
"應",
"應當",
"應該",
"懒得",
"戀",
"戏",
"我",
"我们",
"我們",
"我自己",
"我门",
"或",
"或是",
"或者",
"战",
"截止",
"截至",
"戰",
"戲",
"戶",
"户",
"房",
"所",
"所以",
"所有",
"手",
"才是",
"打",
"执政",
"把",
"报",
"拖",
"持續",
"按",
"按照",
"挡",
"损失",
"据",
"排行",
"接唱",
"接触",
"接觸",
"控制",
"推进",
"推進",
"描述",
"損失",
"擋",
"據",
"支",
"教",
"敢",
"数",
"整",
"整个",
"整個",
"整场",
"整块",
"整場",
"整塊",
"整套",
"整所",
"整架",
"整片",
"整顆",
"整颗",
"數",
"文",
"斋",
"斗",
"新",
"方",
"於",
"族",
"旗",
"无论",
"既",
"既是",
"既然",
"日",
"日趋",
"日趨",
"旧",
"时",
"星",
"是",
"是否",
"是否是",
"是次",
"显",
"显得",
"時",
"晚",
"暖",
"暗",
"暨",
"曲",
"更为",
"更是",
"更為",
"更趋",
"更趨",
"書",
"替",
"會",
"會不會",
"月",
"有",
"有些",
"有关",
"有的",
"有關",
"服",
"朝",
"期",
"期間",
"期间",
"未能",
"末",
"本",
"本人",
"本地",
"本屆",
"本届",
"本班",
"本身",
"术",
"机",
"权",
"杆",
"材",
"村",
"束",
"来",
"杯",
"板",
"林",
"枪",
"架",
"某",
"某个",
"某些",
"某個",
"某种",
"某種",
"染色",
"柜",
"树",
"校",
"株",
"核",
"根据",
"根據",
"格",
"案",
"档",
"桥",
"桨",
"桿",
"梁",
"梁耀忠",
"梦",
"棍",
"棒",
"棚",
"椭",
"業",
"楼",
"榜",
"槍",
"槳",
"樂",
"樂意",
"樓",
"樹",
"橋",
"橙",
"機",
"橢",
"檔",
"櫃",
"權",
"次",
"欲",
"款",
"歌",
"正",
"正如",
"正是",
"此",
"此套",
"此次",
"此种",
"此種",
"此等",
"此类",
"此項",
"此類",
"此项",
"歷",
"歷屆",
"死",
"段",
"殿",
"母",
"毎年",
"每",
"每个",
"每位",
"每個",
"每元",
"每升",
"每卡",
"每周",
"每天",
"每幅",
"每年",
"每座",
"每当",
"每戶",
"每户",
"每所",
"每日",
"每枚",
"每次",
"每段",
"每片",
"每秒",
"每組",
"每组",
"每边",
"每週",
"每邊",
"每間",
"每间",
"每队",
"每隊",
"每集",
"每首",
"毒",
"比",
"比如說",
"比起",
"氏",
"气",
"氣",
"水",
"永保",
"江",
"池",
"沒",
"沒有",
"沒能",
"沟",
"没",
"没有",
"没能",
"河",
"治军",
"治軍",
"沼",
"沿",
"沿着",
"沿著",
"況且",
"泉",
"法",
"波",
"洋",
"洞",
"洲",
"派",
"流沙",
"浅",
"浊",
"浓",
"浦",
"海",
"涉世",
"涌",
"液",
"淡",
"深",
"深感",
"混",
"淺",
"清",
"減慢",
"渡",
"港",
"湖",
"湾",
"準",
"準備",
"溝",
"溥仪",
"溥儀",
"溪",
"满",
"满洲",
"滩",
"滿",
"滿洲",
"潮",
"澡",
"澳",
"濁",
"濃",
"灘",
"灣",
"火",
"炉",
"炎",
"炮",
"点",
"為",
"為了",
"為人",
"烃",
"烟",
"热",
"烴",
"無",
"無論",
"煙",
"熟",
"熱",
"營",
"爐",
"爭取",
"爭辯",
"爱",
"爲",
"父",
"爷",
"爺",
"牆",
"片",
"版",
"牌",
"牠",
"牠們",
"物",
"犯",
"状",
"狀",
"狂",
"狗",
"狮",
"猫",
"獅",
"獎",
"獲利",
"率",
"王",
"班",
"球",
"琴",
"甚么",
"甚至",
"甚至是",
"甚麼",
"甚麽",
"生",
"用",
"由",
"由于",
"由於",
"电",
"男",
"町",
"画",
"界",
"畔",
"畫",
"當",
"當中",
"當屆",
"病",
"症",
"癌",
"癖",
"發展",
"發育",
"的",
"的話",
"的话",
"皮",
"盃",
"监管",
"盖因",
"監管",
"目",
"直到",
"直至",
"相对",
"相對",
"相比",
"省",
"看",
"看似",
"看得",
"眼",
"眾",
"眾多",
"着",
"督",
"瞭",
"短",
"石",
"矿",
"码",
"砲",
"硅",
"碑",
"碱",
"碼",
"礁",
"礦",
"礼",
"社",
"祂",
"神",
"祠",
"禮",
"离",
"离开",
"秀",
"私交",
"秋",
"种",
"科",
"秤",
"稅",
"税",
"種",
"突感",
"窑",
"窟",
"窯",
"站",
"端",
"競選",
"符",
"笨",
"等",
"管",
"管理",
"箱",
"節",
"篇",
"籍",
"米",
"类",
"粉",
"精",
"糖",
"系",
"紀",
"紅",
"紋",
"純",
"紙",
"級",
"素",
"組",
"結",
"給",
"綉",
"經",
"經由",
"經過",
"綜",
"綫",
"綱",
"網",
"線",
"緣",
"縣",
"縱使",
"總",
"繞",
"繼",
"红",
"级",
"纪",
"纯",
"纲",
"纵使",
"纸",
"纹",
"线",
"组",
"经",
"经由",
"经过",
"结",
"绕",
"给",
"绣",
"继",
"综",
"网",
"罩",
"罪",
"署",
"羊",
"美",
"群",
"翁",
"老",
"者",
"而",
"而且",
"而已",
"而是",
"而非",
"聖",
"肉",
"肯",
"肺",
"胎",
"胚",
"胶",
"能",
"能否",
"能够",
"能夠",
"脚",
"脸",
"腔",
"腳",
"腿",
"膜",
"膠",
"臉",
"臨",
"自",
"自从",
"自家",
"自己",
"自從",
"自我",
"自身",
"至",
"至于",
"至於",
"臺",
"與",
"與其",
"舊",
"舞",
"舟",
"舰",
"舱",
"船",
"艇",
"艙",
"艦",
"色",
"节",
"花",
"若",
"若是",
"茶",
"药",
"莊",
"获利",
"菌",
"菜",
"营",
"葉",
"著",
"蓋因",
"蓝",
"藉",
"藉助",
"藉由",
"藉著",
"藍",
"藤",
"藥",
"藩",
"處",
"號",
"虽",
"虽则",
"虽然",
"蛙",
"行",
"術",
"街",
"衛",
"衣",
"表",
"表现",
"表現",
"表示",
"被",
"装",
"裏",
"裔",
"裙",
"裝",
"裡",
"裡面",
"裤",
"製",
"褲",
"要",
"要不要",
"要么",
"要是",
"要求",
"親",
"覺得",
"觀",
"观",
"觉得",
"角",
"計劃",
"記",
"詞",
"試圖",
"詩",
"話",
"該",
"該屆",
"該批",
"該族",
"該條",
"該段",
"該組",
"該集",
"該項",
"誌",
"認為",
"認識",
"語",
"誤信",
"說",
"誰",
"課",
"請",
"論",
"諸",
"諸如",
"謂",
"證",
"譜",
"變",
"變得",
"认为",
"认识",
"记",
"许多",
"许许多多",
"论",
"证",
"词",
"诗",
"话",
"该",
"该届",
"该批",
"该族",
"该条",
"该段",
"该组",
"该集",
"语",
"误信",
"说",
"请",
"诸",
"诸如",
"课",
"谁",
"谓",
"谱",
"谷",
"豆",
"象",
"貓",
"負債",
"費",
"資",
"賣",
"質",
"賽",
"负债",
"质",
"费",
"资",
"赛",
"起",
"起伏",
"起来",
"趁",
"超",
"趋",
"趋于",
"趨",
"趨於",
"距",
"距离",
"距離",
"跟",
"路",
"躁",
"身",
"車",
"軍",
"軒",
"軟",
"軸",
"較",
"輕",
"车",
"轩",
"软",
"轴",
"轻",
"较",
"辦",
"辦學",
"边",
"达到",
"过",
"过后",
"运作",
"近",
"还",
"还是",
"还有",
"这",
"这些",
"这儿",
"这养",
"这样",
"这次",
"这种",
"这里",
"远",
"连",
"连任",
"连同",
"迷",
"追溯",
"透过",
"透過",
"這",
"這些",
"這個",
"這兒",
"這樣",
"這樣子",
"這次",
"這種",
"這裏",
"這裡",
"這邊",
"這麼",
"通",
"通过",
"通過",
"逢",
"連",
"連任",
"連同",
"週",
"運作",
"過",
"過後",
"道",
"達到",
"遠",
"選舉",
"還是",
"邊",
"那",
"那个",
"那些",
"那儿",
"那兒",
"那样",
"那樣",
"那裏",
"那裡",
"那邊",
"那里",
"邦",
"邨",
"郎",
"郡",
"部",
"都",
"都是",
"鄉",
"配",
"酒",
"酸",
"醣",
"醫",
"里",
"里面",
"重",
"量",
"金",
"針",
"針對",
"銘",
"鋼",
"錄",
"錦",
"鍋",
"鍵",
"鎊",
"鎮",
"鏈",
"鏡",
"鐵",
"鑒於",
"针",
"针对",
"钢",
"铁",
"铭",
"链",
"锅",
"锦",
"键",
"镇",
"镜",
"長",
"长",
"門",
"開口",
"開始",
"間",
"閣",
"閣下",
"關",
"關心",
"關於",
"门",
"间",
"阁",
"队",
"阶",
"际",
"陆",
"降解",
"院",
"除",
"除了",
"除外",
"除非",
"陵",
"陸",
"隊",
"階",
"随",
"随同",
"隔",
"際",
"隨",
"隨同",
"难过",
"集",
"雖",
"雖則",
"雖然",
"離",
"離開",
"難過",
"電",
"需",
"需要",
"非",
"靠",
"面",
"音",
"頂",
"須",
"頭",
"頭個",
"題",
"額",
"願意",
"類",
"顯",
"顯得",
"顶",
"须",
"题",
"额",
"風",
"风",
"飯",
"餅",
"餐",
"館",
"饃",
"首先",
"點",
],
}
# %% ../nbs/03_helpers.ipynb 10
class FastTextLanguageDetector:
def __init__(self, model_path: str = "/tmp/lid.176.bin"):
import fasttext
self.model_path = model_path
self.model = fasttext.load_model(model_path)
def get_language(self, text):
lines = " ".join(text.splitlines())
        prediction = self.model.predict(lines, k=1)  # k=1 returns only the single best-matching language
lang, prob = prediction[0][0].replace("__label__", ""), prediction[1][0]
return lang, prob
@classmethod
def from_pretrained(
cls,
*,
url: str = "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin",
output_dir: str = squeakily.__path__[0],
):
path = os.path.join(output_dir, "lid.176.bin")
if not os.path.exists(path):
# download pretrained model with standard lib (From: https://stackoverflow.com/questions/22676/how-to-download-a-file-over-http)
            response = urllib.request.urlretrieve(
                url,
                path,  # save to the expected location so load_model() can find the file
            )
            if response:
                return cls(model_path=path)
else:
raise Exception("Failed to download model")
else:
return cls(model_path=path)
def __reduce__(self):
return (self.__class__, (self.model_path,))
def __eq__(self, other):
return self.model_path == other.model_path
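# Editor's note: a minimal usage sketch for FastTextLanguageDetector, added for
# illustration and not part of the original squeakily source. It assumes the
# fasttext package is installed and that the default lid.176.bin URL is reachable.
def _example_fasttext_language_detection():
    detector = FastTextLanguageDetector.from_pretrained()
    # get_language() joins the lines of the text and returns the single
    # best language label together with its probability.
    lang, prob = detector.get_language("Bonjour tout le monde")
    return lang, prob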
# %% ../nbs/03_helpers.ipynb 15
class SentencePiece:
def __init__(
self,
model: str,
):
import sentencepiece
super().__init__()
self.sp = sentencepiece.SentencePieceProcessor()
self.sp.load(str(model))
def do(self, text: dict) -> dict:
tokenized = self.sp.encode_as_pieces(text)
return " ".join(tokenized)
# %% ../nbs/03_helpers.ipynb 16
KENLM_MODEL_REPO = "edugp/kenlm"
class KenlmModel:
digit_re: re.Pattern = re.compile(r"\d")
unicode_punct: Dict[str, str] = {
",": ",",
"。": ".",
"、": ",",
"„": '"',
"”": '"',
"“": '"',
"«": '"',
"»": '"',
"1": '"',
"」": '"',
"「": '"',
"《": '"',
"》": '"',
"´": "'",
"∶": ":",
":": ":",
"?": "?",
"!": "!",
"(": "(",
")": ")",
";": ";",
"–": "-",
"—": " - ",
".": ". ",
"~": "~",
"’": "'",
"…": "...",
"━": "-",
"〈": "<",
"〉": ">",
"【": "[",
"】": "]",
"%": "%",
"►": "-",
}
unicode_punct_re = re.compile(f"[{''.join(unicode_punct.keys())}]")
non_printing_chars_re = re.compile(
f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]"
)
kenlm_model_dir = None
sentence_piece_model_dir = None
def __init__(
self,
model_dataset: str,
language: str,
lower_case: bool = False,
remove_accents: bool = False,
normalize_numbers: bool = True,
punctuation: int = 1,
):
import kenlm
self.download_kenlm_model(model_dataset, language)
try:
self.model = kenlm.Model(self.kenlm_model_dir)
self.tokenizer = SentencePiece(self.sentence_piece_model_dir)
except OSError:
os.remove(self.kenlm_model_dir)
if os.path.exists(self.sentence_piece_model_dir):
os.remove(self.sentence_piece_model_dir)
raise OSError(
"File was corrupt and should have been removed. Please, retry."
)
self.accent = remove_accents
self.case = lower_case
self.numbers = normalize_numbers
self.punct = punctuation
@classmethod
def from_pretrained(
cls,
*,
model_dataset: str,
language: str,
lower_case: bool,
remove_accents: bool,
normalize_numbers: bool,
punctuation: int,
):
return cls(
model_dataset,
language,
lower_case,
remove_accents,
normalize_numbers,
punctuation,
)
def pp(self, log_score, length):
return 10.0 ** (-log_score / length)
def get_perplexity(self, doc: str, normalize_cc_net: bool = True):
if normalize_cc_net:
doc = self.normalize(
doc,
accent=self.accent,
case=self.case,
numbers=self.numbers,
punct=self.punct,
)
# Tokenize (after normalizing): See https://github.com/facebookresearch/cc_net/blob/bda555bd1cf1ee2e0b925363e62a61cd46c8b60d/cc_net/mine.py#L352 for full pipeline
doc = self.tokenizer.do(doc)
doc_log_score, doc_length = 0, 0
for line in doc.split("\n"):
log_score = self.model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
return round(self.pp(doc_log_score, doc_length), 1)
def normalize(
self,
line: str,
accent: bool = True,
case: bool = True,
numbers: bool = True,
punct: int = 1,
) -> str:
line = line.strip()
if not line:
return line
if case:
line = line.lower()
if accent:
line = self.strip_accents(line)
if numbers:
line = self.digit_re.sub("0", line)
if punct == 1:
line = self.replace_unicode_punct(line)
elif punct == 2:
line = self.remove_unicode_punct(line)
line = self.remove_non_printing_char(line)
return line
def strip_accents(self, line: str) -> str:
"""Strips accents from a piece of text."""
nfd = unicodedata.normalize("NFD", line)
output = [c for c in nfd if unicodedata.category(c) != "Mn"]
        if len(output) == len(line):
return line
return "".join(output)
def replace_unicode_punct(self, text: str) -> str:
return "".join(self.unicode_punct.get(c, c) for c in text)
def remove_unicode_punct(self, text: str) -> str:
"""More aggressive version of replace_unicode_punct but also faster."""
return self.unicode_punct_re.sub("", text)
def remove_non_printing_char(self, text: str) -> str:
return self.non_printing_chars_re.sub("", text)
def download_kenlm_model(self, model_dataset: str, language: str):
try:
kenlm_model_url = hf_hub_url(
KENLM_MODEL_REPO, filename=f"{model_dataset}/{language}.arpa.trie.bin"
)
self.kenlm_model_dir = cached_download(kenlm_model_url)
except HTTPError:
kenlm_model_url = hf_hub_url(
KENLM_MODEL_REPO, filename=f"{model_dataset}/{language}.arpa.bin"
)
self.kenlm_model_dir = cached_download(kenlm_model_url)
sentence_piece_model_url = hf_hub_url(
KENLM_MODEL_REPO, filename=f"{model_dataset}/{language}.sp.model"
)
self.sentence_piece_model_dir = cached_download(sentence_piece_model_url)
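# Editor's note: a hedged usage sketch for KenlmModel, not part of the original
# source. It assumes the kenlm and sentencepiece packages are installed and that
# English "wikipedia" KenLM and SentencePiece files exist in the edugp/kenlm Hub
# repo, which is what download_kenlm_model() above tries to fetch.
def _example_kenlm_perplexity():
    model = KenlmModel.from_pretrained(
        model_dataset="wikipedia",
        language="en",
        lower_case=True,
        remove_accents=True,
        normalize_numbers=True,
        punctuation=1,
    )
    # Lower perplexity means the text looks more like the model's training corpus.
    return model.get_perplexity("I am very perplexed")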
# %% ../nbs/03_helpers.ipynb 20
class LLMLabelerParser(BaseModel):
labels: List = Field(
..., title="Labels", description="Labels that the LLM classifies the text as"
)
class LLMLabeler:
def __init__(
self,
instruction: str,
labels: List,
model_name: str = "gpt-3.5-turbo",
api_key: str = None,
model_type: str = "openai",
):
from langchain.output_parsers import PydanticOutputParser
from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
self.instruction = instruction
self.labels = labels
# Set up a parser + inject instructions into the prompt template.
self.parser = PydanticOutputParser(pydantic_object=LLMLabelerParser)
prompt = PromptTemplate(
template="{instruction}\n{labels}\n{format_instructions}\n",
input_variables=["instruction", "labels"],
partial_variables={
"format_instructions": self.parser.get_format_instructions()
},
)
system_message_prompt = SystemMessagePromptTemplate(prompt=prompt)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
self.chat_template = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
if model_type == "azure":
raise NotImplementedError("Azure models are not supported yet")
elif model_type == "openai":
self.model = ChatOpenAI(
openai_api_key=api_key, model_name=model_name, temperature=0
)
else:
raise ValueError(f"Model type {model_type} is not supported")
def __call__(self, text: str):
messages = self.chat_template.format_prompt(
instruction=self.instruction, labels=self.labels, text=text
).to_messages()
output = self.model(messages)
predicted_labels = self.parser.parse(output.content)
# check if all the predicted tags are in the list of tags
assert all(
[label in self.labels for label in predicted_labels.labels]
), f"Predicted labels {predicted_labels.labels} are not in the list of tags {self.labels}"
return predicted_labels.labels
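# Editor's note: an illustrative sketch only, not part of the original file. The
# instruction, labels, and api_key below are placeholders, and the call assumes the
# OpenAI chat API is reachable and that the model answers in the JSON format
# requested by the PydanticOutputParser above.
def _example_llm_labeler(api_key: str):
    labeler = LLMLabeler(
        instruction="Classify the following text into the given labels.",
        labels=["code", "prose", "math"],
        model_name="gpt-3.5-turbo",
        api_key=api_key,
    )
    return labeler("def add(a, b): return a + b")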
| [
"{instruction}\n{labels}\n{format_instructions}\n",
"instruction",
"format_instructions",
"[PLACEHOLDER, PLACEHOLDER]",
"{text}"
] |
2024-01-10 | sourcecodecheck/azure-openai-samples | fundamentals~langchain~AzureOpenAIUtil~SqlServer.py | import urllib, os
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import AzureOpenAI
from langchain.agents import AgentExecutor
class SqlServer:
def __init__(self, Server, Database, Username, Password, port=1433, odbc_ver=18, topK=10) -> None:
odbc_conn = 'Driver={ODBC Driver '+ str(odbc_ver) + ' for SQL Server};Server=tcp:' + \
Server + f',{port};Database={Database};Uid={Username};Pwd={Password};Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;'
params = urllib.parse.quote_plus(odbc_conn)
self.conn_str = 'mssql+pyodbc:///?odbc_connect={}'.format(params)
db = SQLDatabase.from_uri(self.conn_str)
self.toolkit = SQLDatabaseToolkit(db=db)
self.SQL_PREFIX = """You are an agent designed to interact with a Microsoft Azure SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results using SELECT TOP in SQL Server syntax.
You can order the results by a relevant column to return the most interesting examples in the database.
        Never query for all the columns from a specific table, only ask for the few relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
"""
        # Use the Azure OpenAI deployment configured via DEPLOYMENT_NAME, falling back
        # to the original hard-coded text-davinci-003 deployment if it is not set.
        deploy_name = os.getenv('DEPLOYMENT_NAME')
        self.agent_executor = create_sql_agent(
            llm=AzureOpenAI(temperature=0, deployment_name=deploy_name or 'text-davinci-003'),
toolkit=self.toolkit,
verbose=True,
prefix=self.SQL_PREFIX,
            top_k=topK
)
def run(self, text: str):
return self.agent_executor.run(text)
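# Editor's note: a hypothetical usage example, not part of the original sample. The
# server, database, and credential values below are placeholders, and the script
# assumes the OPENAI_* and DEPLOYMENT_NAME environment variables plus ODBC Driver 18
# are already configured.
if __name__ == "__main__":
    sql = SqlServer(
        Server="myserver.database.windows.net",
        Database="mydb",
        Username="myuser",
        Password="example-password",
    )
    print(sql.run("How many rows does the largest table contain?"))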
| [] |
2024-01-10 | Kabilan108/TermGPT | termgpt~termgpt.py | """
TermGPT
-------
Your in-terminal LLM assistant.
"""
from io import StringIO
import os
from rich.console import Console
import openai as ai
import pydoc
import click
ai.api_key = os.environ["OPENAI_API_KEY"]
console = Console(file=StringIO(), force_terminal=True)
class TermGPT:
"""
TermGPT
-------
Your in-terminal LLM assistant.
Methods
-------
cli()
The main CLI command.
ask(question: str)
Ask TermGPT a question.
"""
system_prompt = [
{
"role": "system",
"content": """
You are an assistant for a machine learning engineer.
You are tasked with helping the engineer with their work.
Your goal is to provide concise and accurate feedback to help the user solve their problem.
Your response should include suggestions for the best way to solve the problem.
These suggestions can be in the form of a list or a single answer.
You should try your best to give correct solutions.
If your response includes code snippets, make sure to format them correctly:
one-liners should be wrapped in ` tags and longer snippets should be wrapped between ``` tags.
Make your responses friendly and helpful.
Code snippets should also be wrapped in [cyan]`[/cyan] or [cyan]```[/cyan] tags.
Emphasize important words or phrases with [bold]bold[/bold] or [italic]italic[/italic] tags.
You can also use [magenta]...[/magenta] for additional emphasis.
Use [green]...[/green] for titles or headings.
You are allowed to use emojis using the format :emoji_name:
""",
}
]
@staticmethod
def _generate_content(response):
"""
Generate content from the response.
"""
lines = [x for x in response.splitlines() if x]
for line in lines:
console.print(line)
@click.group()
@staticmethod
def cli() -> None:
"""
Welcome to TermGPT! I am your in-terminal LLM assistant.
To get started, type `termgpt ask` and then your question.
"""
return
@cli.command()
@click.argument("question", type=str, required=True)
@staticmethod
def ask(question) -> None:
"""
Ask TermGPT a question.
"""
response = ai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=TermGPT.system_prompt + [{"role": "user", "content": question}],
)
response = response["choices"][0]["message"]["content"] # type: ignore
TermGPT._generate_content(response)
pydoc.pager(console.file.getvalue()) # type: ignore
console.file.close() # type: ignore
if __name__ == "__main__":
TermGPT.cli()
| [
"\nYou are an assistant for a machine learning engineer.\nYou are tasked with helping the engineer with their work.\nYour goal is to provide concise and accurate feedback to help the user solve their problem. \nYour response should include suggestions for the best way to solve the problem. \nThese suggestions can be in the form of a list or a single answer. \nYou should try your best to give correct solutions. \nIf your response includes code snippets, make sure to format them correctly: \none-liners should be wrapped in ` tags and longer snippets should be wrapped between ``` tags.\nMake your responses friendly and helpful.\nCode snippets should also be wrapped in [cyan]`[/cyan] or [cyan]```[/cyan] tags.\nEmphasize important words or phrases with [bold]bold[/bold] or [italic]italic[/italic] tags.\nYou can also use [magenta]...[/magenta] for additional emphasis.\nUse [green]...[/green] to for titles or headings.\nYou are allowed to use emojis using the format :emoji_name:\n ",
"content"
] |
2024-01-10 | estelleafl/optimum-habana | optimum~habana~diffusers~pipelines~stable_diffusion~pipeline_stable_diffusion.py | # coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import time
import warnings
from dataclasses import dataclass
from math import ceil
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import PIL
import torch
from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import BaseOutput, deprecate
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from optimum.utils import logging
from ....transformers.gaudi_configuration import GaudiConfig
from ....utils import speed_metrics
from ..pipeline_utils import GaudiDiffusionPipeline
logger = logging.get_logger(__name__)
@dataclass
class GaudiStableDiffusionPipelineOutput(BaseOutput):
images: Union[List[PIL.Image.Image], np.ndarray]
nsfw_content_detected: Optional[List[bool]]
throughput: float
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
class GaudiStableDiffusionPipeline(
GaudiDiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
):
"""
Extends the [`StableDiffusionPipeline`](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionPipeline) class:
- Generation is performed by batches
    - Two `mark_step()` calls were added to support lazy mode
- Added support for HPU graphs
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
use_habana (bool, defaults to `False`):
Whether to use Gaudi (`True`) or CPU (`False`).
use_hpu_graphs (bool, defaults to `False`):
Whether to use HPU graphs or not.
gaudi_config (Union[str, [`GaudiConfig`]], defaults to `None`):
Gaudi configuration to use. Can be a string to download it from the Hub.
Or a previously initialized config can be passed.
bf16_full_eval (bool, defaults to `False`):
Whether to use full bfloat16 evaluation instead of 32-bit.
This will be faster and save memory compared to fp32/mixed precision but can harm generated images.
"""
_optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = True,
use_habana: bool = False,
use_hpu_graphs: bool = False,
gaudi_config: Union[str, GaudiConfig] = None,
bf16_full_eval: bool = False,
):
super().__init__(
use_habana,
use_hpu_graphs,
gaudi_config,
bf16_full_eval,
)
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
)
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["clip_sample"] = False
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
version.parse(unet.config._diffusers_version).base_version
) < version.parse("0.9.0.dev0")
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = (
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
" the `unet/config.json` file"
)
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
self.to(self._device)
@property
def _execution_device(self):
r"""
Returns the device on which the pipeline's models will be executed. After calling
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
hooks.
"""
if not hasattr(self.unet, "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(module, "_hf_hook")
and hasattr(module._hf_hook, "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
num_prompts = 1
elif prompt is not None and isinstance(prompt, list):
num_prompts = len(prompt)
else:
num_prompts = prompt_embeds.shape[0]
if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
prompt_embeds = self.text_encoder(
text_input_ids.to(device),
attention_mask=attention_mask,
)
prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * num_prompts
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif num_prompts != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has {len(negative_prompt)} elements, but `prompt`:"
f" {prompt} has {num_prompts}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
            # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(num_prompts * num_images_per_prompt, seq_len, -1)
return prompt_embeds, negative_prompt_embeds
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
def decode_latents(self, latents):
warnings.warn(
"The decode_latents method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor instead",
FutureWarning,
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def prepare_latents(self, num_images, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (num_images, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != num_images:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective number"
f" of images of {num_images}. Make sure the number of images matches the length of the generators."
)
if latents is None:
# torch.randn is broken on HPU so running it on CPU
rand_device = "cpu" if device.type == "hpu" else device
if isinstance(generator, list):
shape = (1,) + shape[1:]
latents = [
torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)
for i in range(num_images)
]
latents = torch.cat(latents, dim=0).to(device)
else:
latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
@classmethod
def _split_inputs_into_batches(cls, batch_size, latents, text_embeddings, uncond_embeddings):
# Use torch.split to generate num_batches batches of size batch_size
latents_batches = list(torch.split(latents, batch_size))
text_embeddings_batches = list(torch.split(text_embeddings, batch_size))
if uncond_embeddings is not None:
uncond_embeddings_batches = list(torch.split(uncond_embeddings, batch_size))
# If the last batch has less samples than batch_size, pad it with dummy samples
num_dummy_samples = 0
if latents_batches[-1].shape[0] < batch_size:
num_dummy_samples = batch_size - latents_batches[-1].shape[0]
# Pad latents_batches
sequence_to_stack = (latents_batches[-1],) + tuple(
torch.zeros_like(latents_batches[-1][0][None, :]) for _ in range(num_dummy_samples)
)
latents_batches[-1] = torch.vstack(sequence_to_stack)
# Pad text_embeddings_batches
sequence_to_stack = (text_embeddings_batches[-1],) + tuple(
torch.zeros_like(text_embeddings_batches[-1][0][None, :]) for _ in range(num_dummy_samples)
)
text_embeddings_batches[-1] = torch.vstack(sequence_to_stack)
# Pad uncond_embeddings_batches if necessary
if uncond_embeddings is not None:
sequence_to_stack = (uncond_embeddings_batches[-1],) + tuple(
torch.zeros_like(uncond_embeddings_batches[-1][0][None, :]) for _ in range(num_dummy_samples)
)
uncond_embeddings_batches[-1] = torch.vstack(sequence_to_stack)
# Stack batches in the same tensor
latents_batches = torch.stack(latents_batches)
if uncond_embeddings is not None:
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
for i, (uncond_embeddings_batch, text_embeddings_batch) in enumerate(
zip(uncond_embeddings_batches, text_embeddings_batches[:])
):
text_embeddings_batches[i] = torch.cat([uncond_embeddings_batch, text_embeddings_batch])
text_embeddings_batches = torch.stack(text_embeddings_batches)
return latents_batches, text_embeddings_batches, num_dummy_samples
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
batch_size: int = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
instead.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated images.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated images.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
batch_size (`int`, *optional*, defaults to 1):
The number of images in a batch.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated randomly.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.GaudiStableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
guidance_rescale (`float`, *optional*, defaults to 0.7):
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
Returns:
[`~diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.GaudiStableDiffusionPipelineOutput`] or `tuple`:
[`~diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.GaudiStableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
with torch.autocast(device_type="hpu", dtype=torch.bfloat16, enabled=self.gaudi_config.use_torch_autocast):
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
num_prompts = 1
elif prompt is not None and isinstance(prompt, list):
num_prompts = len(prompt)
else:
num_prompts = prompt_embeds.shape[0]
num_batches = ceil((num_images_per_prompt * num_prompts) / batch_size)
logger.info(
f"{num_prompts} prompt(s) received, {num_images_per_prompt} generation(s) per prompt,"
f" {batch_size} sample(s) per batch, {num_batches} total batch(es)."
)
if num_batches < 3:
logger.warning("The first two iterations are slower so it is recommended to feed more batches.")
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds, negative_prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device="cpu")
timesteps = self.scheduler.timesteps.to(device)
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
num_prompts * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Split into batches (HPU-specific step)
latents_batches, text_embeddings_batches, num_dummy_samples = self._split_inputs_into_batches(
batch_size,
latents,
prompt_embeds,
negative_prompt_embeds,
)
outputs = {
"images": [],
"has_nsfw_concept": [],
}
t0 = time.time()
t1 = t0
# 8. Denoising loop
for j in self.progress_bar(range(num_batches)):
# The throughput is calculated from the 3rd iteration
# because compilation occurs in the first two iterations
if j == 2:
t1 = time.time()
latents_batch = latents_batches[0]
latents_batches = torch.roll(latents_batches, shifts=-1, dims=0)
text_embeddings_batch = text_embeddings_batches[0]
text_embeddings_batches = torch.roll(text_embeddings_batches, shifts=-1, dims=0)
for i in range(num_inference_steps):
timestep = timesteps[0]
timesteps = torch.roll(timesteps, shifts=-1, dims=0)
capture = True if self.use_hpu_graphs and i < 2 else False
# expand the latents if we are doing classifier free guidance
latent_model_input = (
torch.cat([latents_batch] * 2) if do_classifier_free_guidance else latents_batch
)
# latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)
# predict the noise residual
noise_pred = self.unet_hpu(
latent_model_input,
timestep,
text_embeddings_batch,
cross_attention_kwargs,
capture,
)
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents_batch = self.scheduler.step(
noise_pred, latents_batch, **extra_step_kwargs, return_dict=False
)[0]
if not self.use_hpu_graphs:
self.htcore.mark_step()
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, timestep, latents_batch)
if not output_type == "latent":
# 8. Post-processing
image = self.vae.decode(latents_batch / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents_batch
outputs["images"].append(image)
self.scheduler.reset_timestep_dependent_params()
if not self.use_hpu_graphs:
self.htcore.mark_step()
speed_metrics_prefix = "generation"
speed_measures = speed_metrics(
split=speed_metrics_prefix,
start_time=t0,
num_samples=num_batches * batch_size if t1 == t0 else (num_batches - 2) * batch_size,
num_steps=num_batches,
start_time_after_warmup=t1,
)
logger.info(f"Speed metrics: {speed_measures}")
# Remove dummy generations if needed
if num_dummy_samples > 0:
outputs["images"][-1] = outputs["images"][-1][:-num_dummy_samples]
# Process generated images
for i, image in enumerate(outputs["images"][:]):
if i == 0:
outputs["images"].clear()
if output_type == "latent":
has_nsfw_concept = None
else:
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
if output_type == "pil":
outputs["images"] += image
else:
outputs["images"] += [*image]
if has_nsfw_concept is not None:
outputs["has_nsfw_concept"] += has_nsfw_concept
else:
outputs["has_nsfw_concept"] = None
if not return_dict:
return (outputs["images"], outputs["has_nsfw_concept"])
return GaudiStableDiffusionPipelineOutput(
images=outputs["images"],
nsfw_content_detected=outputs["has_nsfw_concept"],
throughput=speed_measures[f"{speed_metrics_prefix}_samples_per_second"],
)
@torch.no_grad()
def unet_hpu(self, latent_model_input, timestep, encoder_hidden_states, cross_attention_kwargs, capture):
if self.use_hpu_graphs:
return self.capture_replay(latent_model_input, timestep, encoder_hidden_states, capture)
else:
return self.unet(
latent_model_input,
timestep,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
@torch.no_grad()
def capture_replay(self, latent_model_input, timestep, encoder_hidden_states, capture):
inputs = [latent_model_input, timestep, encoder_hidden_states, False]
h = self.ht.hpu.graphs.input_hash(inputs)
cached = self.cache.get(h)
if capture:
# Capture the graph and cache it
with self.ht.hpu.stream(self.hpu_stream):
graph = self.ht.hpu.HPUGraph()
graph.capture_begin()
outputs = self.unet(inputs[0], inputs[1], inputs[2], inputs[3])[0]
graph.capture_end()
graph_inputs = inputs
graph_outputs = outputs
self.cache[h] = self.ht.hpu.graphs.CachedParams(graph_inputs, graph_outputs, graph)
return outputs
# Replay the cached graph with updated inputs
self.ht.hpu.graphs.copy_to(cached.graph_inputs, inputs)
cached.graph.replay()
self.ht.core.hpu.default_stream().synchronize()
return cached.graph_outputs
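# Editor's note: a hedged end-to-end sketch added for illustration; it is not part of
# the original module. It assumes a Gaudi device with optimum-habana installed, the
# public runwayml/stable-diffusion-v1-5 checkpoint, and the Habana/stable-diffusion
# Gaudi configuration from the Hugging Face Hub.
def _example_gaudi_text_to_image():
    from optimum.habana.diffusers import GaudiDDIMScheduler
    scheduler = GaudiDDIMScheduler.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
    )
    pipeline = GaudiStableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        scheduler=scheduler,
        use_habana=True,
        use_hpu_graphs=True,
        gaudi_config="Habana/stable-diffusion",
    )
    # Prompts are processed in batches of `batch_size`; the first two batches are
    # slower because of graph compilation.
    outputs = pipeline(
        prompt="An astronaut riding a horse on Mars",
        num_images_per_prompt=4,
        batch_size=2,
    )
    return outputs.images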
| [
"1"
] |
2024-01-10 | mslehre/text-embedding | src~compute_embedding.py | import os
import tiktoken
import openai
from openai.embeddings_utils import get_embedding, cosine_similarity
from tokenizer import get_token_from_string, get_string_from_tokens
def embedding_from_string(string: str,
embedding_name: str = "text-embedding-ada-002",
max_token: int = 8191) -> list[float]:
"""This function computes the embedding for a string.
Args:
string (str): This parameter is the string of which the embedding is
computed.
embedding_name (str): The name of the embedding model. By default the
model text-embedding-ada-002 is used.
max_token (int): The maximum number of tokens for which an embedding is
computed. By default this is the maximum number of tokens of the
embedding model text-embedding-ada-002. If the default model is
changed, the default value for max_token should be adapted to the
maximum number of tokens of the new default embedding model.
Returns:
list[float]: The embedding for the string is returned as vector. In case
of text-embedding-ada-002 as embedding model the dimension is 1536.
If no embedding can be computed because the embedding model cannot
be accessed or max_token is larger than the maximum number of tokens
the embedding model supports, None is returned. If the string is
too long because it is encoded to more tokens than max_token, the
embedding for the string that is encoded to the first max_token
tokens is computed.
"""
    # Check if the number of tokens is too large for the default embedding model.
    if (max_token > 8191):
        print('Your specified maximum number of tokens is larger than the \
              maximum number of tokens', embedding_name, 'supports, \
              which is', 8191, 'tokens.')
return [None]
    # Test if OPENAI_API_KEY is set as an environment variable.
    if (os.environ.get('OPENAI_API_KEY') is None):
        print('You did not set your OpenAI API key as an environment variable. \
              Therefore the embedding cannot be computed. Please set your key as \
              an environment variable by typing: export OPENAI_API_KEY=\'your key\'\
              .')
return [None]
# Get tokens from string to test whether number of tokens is too large for
# the string. If the number of tokens for the string exceeds the maximum
# number of tokens, only the first max_token tokens are returned.
tokens = get_token_from_string(string, max_token=max_token, force_cut=True,
verbose=True)
# Get string from tokens since in case the string was too long and
# the number of tokens was cut, the string is different from the original
# string.
string = get_string_from_tokens(tokens)
return get_embedding(string, engine=embedding_name)
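# Hedged usage sketch (not part of the original module): call the helper on a
# short string; requires OPENAI_API_KEY to be exported, otherwise [None] is
# returned as documented above.
def _embedding_from_string_demo():
    vec = embedding_from_string("phase synchrony of EEG signals")
    if vec != [None]:
        print(f"embedding dimension: {len(vec)}")  # 1536 for text-embedding-ada-002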
def compute_similarity_of_texts(text1: str, text2: str) -> float:
"""This function computes two embeddings one for each text and then
computes the cosine similarity of the two embeddings. If a text is too
large because its string is encoded to more tokens than the maximum number
of tokens for which an embedding is computed, the embedding for the string
that is encoded to the first max_token tokens is computed where max_token
is the maximum number of tokens for which an embedding is computed. The
cosine similarity is returned.
Args:
text1 (str): the first string to compare
text2 (str): the second string to compare
Returns:
float: The cosine similarity of the texts is returned as float. If the
embedding for one of the texts cannot be computed, None is returned.
"""
texts = [text1, text2]
embeddings = []
# Compute embeddings for the texts.
for text in texts:
embeddings.append(embedding_from_string(text))
# Test if embeddings could be computed.
for i in range(0,2):
if (embeddings[i] == [None]):
return None
return cosine_similarity(embeddings[0], embeddings[1])
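# Hedged usage sketch (not part of the original module): compare two short
# sentences; needs a valid OPENAI_API_KEY, otherwise None is returned.
def _compute_similarity_demo():
    score = compute_similarity_of_texts("Cats are small felines.",
                                        "Dogs are loyal companions.")
    print("cosine similarity:", score)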
def main():
"""If this program is executed the embedding for
a few strings in a list is computed and both the string and the first five
values of its embedding are printed.
"""
text = ["You are a big boy",
"incomprehensibilities",
"ich", "Ich",
"er",
"ist",
".", "?", ",",
"er ist ich."]
for t in text:
print(t,"\nFirst five values of embedding for text:")
embedding = embedding_from_string(t)
print(embedding[0:5])
exit(0)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | mslehre/text-embedding | src~chunks_for_question.py | import os
import pandas as pd
import numpy as np
from compute_embedding import embedding_from_string
from openai.embeddings_utils import cosine_similarity
from settings import DATA_DIR
def get_k_IDs(question: str,
embeddings_file: str,
k: int = 5) -> list[int]:
"""Gets the IDs of the k chunks that have the highest cosine similarity
with the embedded question. The embeddings of the chunks are given in the
hdf5 file named after the string of 'embeddings_file' which should be
    located in the data directory of this repository. The function gives back a
list that contains the IDs of the k chunks sorted decreasingly by
similarity to the question.
Args:
question (str): The string of the question to compare the embeddings
of the chunks to.
        embeddings_file (str): The relative path of the hdf5 file that
            contains the embeddings, given relative to the data
            directory, since all data files should be stored there.
Example: For the file named "example_embeddings" in the
directory "data/folder" the strings should be in the
following format: "folder/example_embeddings"
(without "/" at the beginning)
Attention: The dictionary has to have a key that contains the
string "embeddings" that gives the array of the
                       embeddings and a string with "ids" that gives the ids or
                       names of the corresponding files.
k (int): Integer that indicates the number of chunks that are returned.
Returns:
list[str]: The list that contains the IDs of the k files with the
            best cosine similarity for the question ordered from most to
least similar.
"""
# Check if question is given:
if not question:
print("No question was given. Please enter a question.")
return [None]
# Embed the question:
question_embedding = embedding_from_string(string=question)
# Get the embeddings from the hpf5 file if exists and acess is given:
file_path = os.path.join(DATA_DIR, embeddings_file)
if (not os.path.isfile(file_path) or not os.access(file_path, os.R_OK)):
print("The file " + file_path + " does not exist or is not readable!")
return [None]
# Read the pd.DataFrames from the hdf5 file
hdf = pd.HDFStore(file_path, mode='r')
embeddings_df = pd.read_hdf(hdf, "embeddings")
ids_df = pd.read_hdf(hdf, "ids")
hdf.close()
# Check if k not bigger than the number of embeddings:
n,_ = embeddings_df.shape
if (k > n):
print(f'k was given as {k} but there are {n} embeddings given. k is '
+ f'set to {n}.')
k = n
# Compute IDs of the best embeddings and return the sorted list from
# biggest to smallest similarity:
inds = get_embeddings_argsort(question_embedding=question_embedding,
embeddings=embeddings_df)
inds = [ids_df[0].iloc[i] for i in inds]
return inds[0:k]
def get_embeddings_argsort(question_embedding: list[float],
embeddings: pd.DataFrame) -> list[int]:
"""Gets the argsort of the given embeddings from higehst to lowest cosine
similarity with the given question.
Args:
question_embedding (list[float]): The embedded question to which the
embeddings are compared to.
embeddings (pd.DataFrame): The pandas DataFrame containing the
embeddings of the chunks.
Returns:
        list[int]: The list that contains the indices (row numbers) of the
            argsort of the embeddings according to the cosine similarity for
            the question ordered from most to least similar.
"""
similarities = []
n,_ = embeddings.shape
for i in range(0,n):
similarities.append(cosine_similarity(question_embedding,
embeddings.iloc[i].tolist()))
# Return the indices of the k best embeddings, the best results have the
# highest cosine similarity.
inds = np.array(similarities).argsort()[::-1]
return inds
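# Hedged usage sketch (not part of the original module): rank three toy
# 4-dimensional "embeddings" against a toy question embedding; no API key is
# needed because only the cosine similarity is computed.
def _argsort_demo():
    question = [1.0, 0.0, 0.0, 0.0]
    chunks = pd.DataFrame([[1.0, 0.0, 0.0, 0.0],
                           [0.0, 1.0, 0.0, 0.0],
                           [0.9, 0.1, 0.0, 0.0]])
    order = get_embeddings_argsort(question_embedding=question,
                                   embeddings=chunks)
    print(order)  # expected order: row 0, then row 2, then row 1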
def main():
"""Main to test the function that gets the k best chunks for a question.
"""
question = "What is the publication domain of Volkmar Liebscher?"
k = 5
a = get_k_IDs(question,
embeddings_file="pub_embeddings.h5", k=k)
print(f'Question: {question} The list with the best {k} file(s) is {a}.')
exit(0)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | mslehre/text-embedding | src~ask_question.py | #set key with export OPENAI_API_KEY="..."
import os
import openai
from build_prompt import get_prompt
from settings import DATA_DIR, LM_model
def get_answer(
query: str,
text_dir: str,
id_list: list[str]) -> str:
"""From a question asked by the user, generate the answer
Args:
query (str): Question asked by the user
text_dir (str): Documents directory
id_list (list[str]): List of relevant docs
Returns:
str: Answer generated with the LLM
"""
docs,seperator_list = get_texts_from_ids(id_list=id_list,
text_dir=text_dir)
if len(seperator_list) == (len(docs) + 1):
meta_data = seperator_list.pop(0)
seperator_list[0] = meta_data + seperator_list[0]
#assemble the prompt
this_prompt = get_prompt(query, docs, seperator_list)
if (this_prompt == None):
return None
#call openai to obtain a response
if LM_model == "gpt-3.5-turbo":
response = openai.ChatCompletion.create(
model = LM_model,
messages=[ {"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": this_prompt}],
temperature = 0,
max_tokens = 500,
)
result = response['choices'][0]['message']['content']
else:
response = openai.Completion.create(
model = "text-davinci-003",
prompt = this_prompt,
temperature = 0,
max_tokens = 500,
)
result = response['choices'][0]['text']
return result
def get_texts_from_ids(id_list: list[str],
text_dir: str) -> tuple[list[str], list[str]]:
"""Returns a tuple with the texts from the text chunks with the specified
IDs and a list with the information from the according meta files. If no
    meta file exists for a chunk, the second list contains 'This is a new
    text:' for that entry. The second list can be used as a separator list for
    the prompt building.
Args:
        id_list (list[str]): List with the IDs of the selected chunks.
text_dir (str): Documents directory.
Returns:
list[str]: A list that contains the texts from the chunks with the
selected IDs.
list[str]: A list that contains the meta information from the
corresponding meta files if they exist.
"""
docs = []
seperator_list = []
# First read in the chunks given by the id_list from the given directory.
# Either the file exists as in the directory or it is located in a sub
# directory, file name without chunk id.
# add meta data that holds for all files in the directory if exists
meta_file = os.path.join(text_dir, 'meta.txt') # meta file for all texts
if os.path.isfile(meta_file) and os.access(meta_file, os.R_OK):
with open(meta_file, 'r', encoding="utf-8") as file:
meta_data = file.read()
        seperator_list.append(f'Here is some meta information that holds for '
+ f'all texts that are given to answer the '
+ f'question:\n{meta_data}\n\n')
for id in id_list:
file_path = os.path.join(text_dir, id + ".txt")
dir_path = os.path.join(text_dir, id.split('.')[0]) # sub dir
file_in_sub_dir = False
# search for a sub directory with chunks:
if(not os.path.isfile(file_path) and os.path.isdir(dir_path)):
file_path = os.path.join(dir_path, id + ".txt")
file_in_sub_dir = True # to get the location of the meta file
if not os.access(file_path, os.R_OK):
print(f'ERROR: Could not find or access the file {id}.txt '
+ f' directly or in a sub directory {dir_path}.')
exit(1)
this_chunk = open(file_path, "r", encoding="UTF-8")
docs.append(this_chunk.read())
this_chunk.close()
# Create seperator for the chunk using the meta file data if a meta
# file exist:
if file_in_sub_dir == True:
meta_file = os.path.join(dir_path, id.split('.')[0] + '.meta.txt')
else:
meta_file = os.path.join(text_dir, id.split('.')[0] + '.meta.txt')
if os.path.isfile(meta_file) and os.access(meta_file, os.R_OK):
# read in the meta data into seperators
with open(meta_file, 'r') as file:
meta_data = file.read()
            seperator_list.append(f'Here is some meta information about the '
+ f'following text:\n{meta_data}.\n The '
+ 'corresponding text:')
else:
# ignore meta file, continue without meta information
seperator_list.append('This is a new text:')
return docs, seperator_list
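# Hedged usage sketch (not part of the original module): build a throwaway
# text directory containing one chunk ('1.0.txt') and its meta file
# ('1.meta.txt'), then read both back; the id and file contents are made up.
def _get_texts_from_ids_demo():
    import tempfile
    tmpdir = tempfile.mkdtemp()
    with open(os.path.join(tmpdir, "1.0.txt"), "w", encoding="utf-8") as f:
        f.write("Example chunk text.")
    with open(os.path.join(tmpdir, "1.meta.txt"), "w", encoding="utf-8") as f:
        f.write("author: example")
    docs, seps = get_texts_from_ids(id_list=["1.0"], text_dir=tmpdir)
    print(docs, seps)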
def main():
testq = "What are common research interests of these scientists?"
testdir = os.path.join(DATA_DIR, "publications")
testlist = ['2','4']
testanswer = get_answer(query=testq, text_dir=testdir, id_list=testlist)
print(testanswer)
if __name__ == "__main__":
main()
| [
"You are a helpful assistant."
] |
2024-01-10 | mslehre/text-embedding | src~tsne_plot.py | #!/usr/bin/env python3
import os
import argparse
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
from adjustText import adjust_text
from openai.embeddings_utils import cosine_similarity
def try_to_read_file(file_path: str) -> str:
# a quick function for argparse to check if given arg is a readable file
if not os.path.isfile(file_path) or not os.access(file_path, os.R_OK):
raise argparse.ArgumentTypeError("The file " + file_path + "does not "
+ "exist or is not readable!")
return file_path
def check_thinning_args(thinning_arg: str,
dir_path: str) -> tuple[int, str]:
# function to check arguments for thinning data
# check if thinning_arg is an int
try:
thinning_arg = int(thinning_arg)
except ValueError as ve:
raise argparse.ArgumentTypeError("Argument " + thinning_arg + " is not"
+ " an integer!")
# check if dir_path is a directory
if not os.path.isdir(dir_path) or not os.access(dir_path, os.R_OK):
raise argparse.ArgumentTypeError("The directory " + dir_path + " does "
+ "not exist or is not readable!")
dir_path = os.path.join(dir_path, '') # append '/'
return thinning_arg, dir_path
def count_pubs(author_ids: pd.DataFrame,
dir_path: str) -> list[int]:
"""
Count number of publications for each provided author ID in the specified
directory.
Args:
author_ids (pandas.DataFrame): Author IDs to count the number of
publications for.
dir_path (str): Directory containing the publication lists, the file
names need to be <author_id>.txt.
Returns:
num_pubs (list): List of number of publications for each author ID.
A -1 is appended if no publication list was found for the author.
"""
num_pubs = []
for id in author_ids[0]:
file_path = dir_path + str(id) + ".txt"
n = -1 # num pubs in file, -1 because the first line is not a pub
# check if file exists and is readable, if yes read
if os.path.isfile(file_path) and os.access(file_path, os.R_OK):
with open(file_path, 'r') as f:
for line in f:
if line.strip(): n += 1 # if line is not empty, count + 1
num_pubs.append(n) # add count for author id
else:
print("WARNING: Can't find or read file ", file_path,
" for author ID ", id, "!")
num_pubs.append(n) # add -1 pubs for authors with no file
return num_pubs
def thin_out_data(author_ids: pd.DataFrame,
embeddings: pd.DataFrame,
dir_path: str,
min_pubs: int) -> tuple[pd.DataFrame, pd.DataFrame]:
"""
Remove data of authors with less than min_pubs publications from author_ids
and embeddings.
Args:
author_ids (pandas.DataFrame): Author IDs of authors to thin out.
embeddings (pandas.DataFrame): Embeddings corresponding to the author
IDs to thin out.
dir_path (str): Path to directory containing the publication lists.
min_pubs (int): Minimum number of publications. Every author with less
than min_pubs publications is being pruned from author_ids and
embeddings.
Returns:
author_ids (pandas.DataFrame): Pruned author IDs.
embeddings (pandas.DataFrame): Pruned embeddings.
"""
num_pubs = count_pubs(author_ids, dir_path)
# get indices of authors with less than min_pubs pubs
drop_indices =[i for i, num in enumerate(num_pubs) if num < min_pubs]
# drop those authors
embeddings = embeddings.drop(drop_indices)
author_ids = author_ids.drop(drop_indices)
return author_ids, embeddings
def get_author_info_and_palette(authors: pd.DataFrame,
author_ids: np.ndarray,
affiliation_map: pd.DataFrame,
affiliation: str) -> tuple[list[str], dict]:
"""
Get affiliations of autors by ID and generate color palette for the
affiliations
Args:
authors (pandas.DataFrame): Table containing at least author ids and
their faculties and institutes.
author_ids (numpay.ndarray): List of author ids for which to get the
affiliations.
affiliation_map (pandas.DataFrame): Table with all possible
faculties or institutes.
affiliation (str): Which affiliation to use - "faculty" or "institute".
Returns:
lnames (list[str]): List of authors last names.
affil (list[str]): List of affiliations for author IDs.
pal (dict): Color palette for the affiliations.
"""
# get last names of authors
lnames = authors.loc[authors['id'].isin(author_ids), 'lastname'].to_list()
# get affiliations of authors by ID
affil = authors.loc[authors['id'].isin(author_ids), affiliation].to_list()
# switch long names of institutions to short names
if affiliation == 'institute':
mapping = dict(zip(affiliation_map['institute_long'],
affiliation_map['institute_short']))
affil_ = [mapping[item] for item in affil]
affil = affil_
affil_uniq = affiliation_map['institute_short'].to_list()
else:
affil_uniq = affiliation_map['faculty'].to_list()
# generate color palette
num_col = len(affil_uniq) # number of colors
colors = sns.color_palette("hls", num_col).as_hex() # get colors
pal = dict(zip(affil_uniq, colors)) # color palette for plot
return lnames, affil, pal
def compute_tsne(X: np.ndarray,
pca_reduction: bool = False,
pca_components: int = 50,
tsne_perplexity: float = 30.0) -> np.ndarray:
"""
Compute t-SNE for embeddings
Args:
X (numpy.ndarray): Embeddings to transform
pca_reduction (bool): If True, a PCA is performed before the t-SNE to
            reduce computation resources. Default: False.
pca_components (int): Number of components to keep, when PCA is
performed, i.e. dimension of the result. Default: 50.
tsne_perplexity (float): Number of nearest neighbors that is used in
other manifold learning algorithms, must be less than the number of
samples. Default: 30.0.
Returns:
tsne_result (numpy.ndarray): t-SNE transformed data.
"""
# ada dimensions: 1536
if pca_reduction:
# build PCA, reduce to dim size pca_components
pca = PCA(n_components = pca_components)
pca_result = pca.fit_transform(X) # fit model and apply dim reduction
X = pca_result
tsne = TSNE(perplexity = tsne_perplexity) # perplexity = knn
tsne_result = tsne.fit_transform(X) # fit model and apply dim reduction
return tsne_result
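# Hedged usage sketch (not part of the original script): run the reduction on
# 20 random 64-dimensional "embeddings"; assumes a NumPy recent enough to
# provide default_rng. The perplexity must stay below the sample count, as
# main() below also enforces.
def _compute_tsne_demo():
    rng = np.random.default_rng(0)
    fake_embeddings = rng.normal(size=(20, 64))
    xy = compute_tsne(fake_embeddings, pca_reduction=True,
                      pca_components=10, tsne_perplexity=5.0)
    print(xy.shape)  # (20, 2)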
def compute_cosinesim(embeddings: np.ndarray) -> np.ndarray:
"""
Compute pairwise cosine similarity for embeddings
Args:
embeddings (numpy.ndarray): A 2-dim array with embeddings
Returns:
cosine_sims (numpy.ndarray): A symmetric matrix containing pairwise
cosine similarities of the embeddings and Nan on the diagonal.
"""
n = embeddings.shape[0] # number of embeddings
# n x n matrix for pairwise distances
cosine_sims = np.zeros((n,n), dtype = np.float64)
# indices of upper triangle matrix with offset 1
iupper = np.stack(np.triu_indices(n, 1), axis = -1)
# compute cosine similarity
for i,j in iupper:
cosine_sims[i,j] = cosine_similarity(embeddings[i], embeddings[j])
cosine_sims += cosine_sims.T # make matrix symmetric
np.fill_diagonal(cosine_sims, np.nan) # fill diagonal with NaN
return cosine_sims
def get_edges(similarities: np.ndarray,
k: int = None,
threshold: float = None) -> set[tuple[int,int]]:
"""
Given a matrix containing pairwise similarity scores, for each element get
the indices for the k highest scores and/or the indices for the elements
with a score of >= threshold.
Args:
similarities (np.ndarray): A symmetric matrix containing pairwise
similarities and Nan on the diagonal.
k (int): For each element, get the index pairs for the k elements with
the highest similarity score.
threshold (float): Get index pairs with a similarity score of >=
threshold.
Returns:
edges (set[tuple[int,int]]): A set of 0-based index tuples computed
according to k/threshold.
"""
if not k and not threshold:
print("ERROR: You need to specify at least one criterium for selecting"
+ " the edges!")
exit(1)
edges = []
# get edges to the k nearest neightbours for each node
if k:
indices = np.argsort(similarities)
# get knn -> adjacency lists
# argsort sorts ascending, need the last elements
knn = indices[:, -(k+1):-1] # nans get sorted last, exclude them
for i,k in zip(range(len(knn)), knn):
for j in k:
# sort indices so duplicates can be identified by set()
ind = sorted([i,j])
edges.append(tuple(ind))
# get edges for nodes with a similarity of >= threshold
if threshold:
indices = np.stack(np.where(similarities >= threshold), axis = -1)
for i in indices:
ind = sorted(i)
edges.append(tuple(ind))
edges = set(edges) # duplicate edges are removed
return edges
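# Hedged usage sketch (not part of the original script): pairwise cosine
# similarities for four random embeddings, then the edge set connecting each
# node to its single most similar neighbour.
def _get_edges_demo():
    rng = np.random.default_rng(0)
    emb = rng.normal(size=(4, 16))
    sims = compute_cosinesim(emb)
    print(get_edges(sims, k=1))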
def tsne_plot(X: np.ndarray,
lnames: list[str],
affiliation: list[str],
legend_title: str,
palette: dict,
edges = None):
"""
Plot t-SNE
    Args:
        X (numpy.ndarray): Result of the t-SNE transformation.
        lnames (list[str]): Last names of the authors, used to label the
            data points.
        affiliation (list[str]): Containing the institutes or
            faculties corresponding to the data points. This decides how to
            color the points in the plot.
        legend_title (str): Title for the plot legend.
        palette (dict): Color palette for the plot. Specifies which color to
            use for which institute or faculty.
        edges (set[tuple[int,int]], optional): Index pairs between which a
            connecting line is drawn; None for no edges.
Returns:
matplotlib.figure.Figure: Figure of the plot.
"""
plt.figure(figsize=(15,15))
ax = sns.scatterplot(
x = X[:, 0], y = X[:, 1],
hue = affiliation,
hue_order = list(palette.keys()),
palette = palette,
style = affiliation,
style_order = list(palette.keys()),
legend = "full",
alpha = 1,
s=200,
zorder = 5
)
plt.title("t-SNE Plot of Publication Lists", fontsize = 20) # plot title
# adjust legend position and style
ax.legend(fancybox=True, ncol = 2, fontsize = 14, title = legend_title)
sns.move_legend(ax, "upper right", bbox_to_anchor=(-0.05, 1))
# add edges to plot
if edges:
for i, j in edges:
# add line from i to j
x1 = X[i,0]
y1 = X[i,1]
x2 = X[j,0] - x1
y2 = X[j,1] - y1
plt.arrow(x1, y1, x2, y2,
color='gray', linewidth=1, length_includes_head=True,
head_width=0, alpha = 0.2, zorder = 0)
# for some reason plt.plot doesnt work well with scatter plots
#plt.plot(X[i, :], X[j, :], marker = None, linewidth = 2,
# color = "gray", alpha = 0.1)
# annotate dots with last names
text = []
# box style for labels
bbox_props = dict(boxstyle="round", fc="white", alpha=0.3)
#boxstyle="round, pad=0.15"
for i, label in enumerate(lnames):
text += [ax.text(X[i, 0], X[i, 1], label, bbox=bbox_props,
zorder = 10)] # fontsize = 'x-small'
adjust_text(text) # prevent labels from overlapping
return ax.get_figure()
def main():
parser = argparse.ArgumentParser(
description='Visualize the embeddings of publications with a t-SNE '
+ 'plot.')
parser.add_argument('embed_file', type = try_to_read_file,
help = 'hdf5 file with embeddings.')
parser.add_argument('author_file', type = try_to_read_file,
help = 'File with table containing information about '
+ 'the authors, like ID, faculty, and institute.')
parser.add_argument('affiliation_map', type = try_to_read_file,
help = 'File with table containig all possible '
+ 'faculties/institutes.')
parser.add_argument('-o', '--outfile', default = 'tsne_plot',
help = 'Stem for output file to save plot. Default is '
+ '\"tsne_plot\".')
parser.add_argument('--format', default = 'pdf',
choices = ['png', 'pdf', 'svg'],
                        help = 'Format for plot. Default is pdf.')
parser.add_argument('--tsne_perplexity', type = float, default = 30.0,
help = 'Perplexity for the t-SNE algorithm. Default is'
+ ' 30.0.')
parser.add_argument('--pca', action = 'store_true',
help = 'Perform a PCA before the t-SNE.')
parser.add_argument('--pca_components', type = int, default = 50,
help = 'Number of components to keep after performing'
+ ' the PCA. Default is 50.')
parser.add_argument('--affiliation', default = 'institute',
choices = ['institute', 'faculty'],
help = 'Decides after which fashion to color the '
+ 'plot. Default is \"institute\".')
parser.add_argument('-k', '--k_edges', type = int,
help = 'For each author, plot edges to the authors '
+ 'with the k highest cosine similarities.')
parser.add_argument('-t', '--threshold_edges', type = float,
help='For each author, plot edges to the authors '
+ 'with a cosine similarity of >= threshold.')
parser.add_argument('--thinning', nargs = 2, metavar = ('INT', 'DIR'),
help = 'Prune author data by number of publications. '
+ 'Specify a minimum number of publications and the '
+ 'directory containing the publication lists. Authors'
+ ' with less than the required number of publications'
+ ' will not be plotted.')
args = parser.parse_args()
# check thinning args
if args.thinning:
min_pubs, pub_dir = check_thinning_args(args.thinning[0],
args.thinning[1])
outfile = args.outfile + '.' + args.format
# read data
hdf = pd.HDFStore(args.embed_file, mode='r')
embeddings = pd.read_hdf(hdf, "embeddings")
author_ids = pd.read_hdf(hdf, "ids")
hdf.close()
authors = pd.read_table(args.author_file, delimiter = '\t', dtype = str)
# encoding='latin1'
affiliation_map = pd.read_table(args.affiliation_map, delimiter = '\t')
# thinning data
if args.thinning:
author_ids, embeddings = thin_out_data(author_ids, embeddings,
pub_dir, min_pubs)
# convert data to numpy arrays for further steps
author_ids = author_ids.to_numpy().flatten()
embeddings = embeddings.to_numpy(dtype = np.float64)
# perplexity for tsne needs to be smaller than the number of samples
k = args.tsne_perplexity \
if len(author_ids) > args.tsne_perplexity \
else float(len(author_ids) - 1)
# pca components needs to be <= min(n_samples, n_features)
pca_components = args.pca_components \
if min(embeddings.shape) >= args.pca_components \
else min(embeddings.shape)
# transform embeddings
tsne_result = compute_tsne(embeddings, pca_reduction = args.pca,
pca_components = pca_components,
tsne_perplexity = k)
# get last names, affiliations, and color palette
lnames, affiliation, palette = get_author_info_and_palette(
authors,
author_ids,
affiliation_map,
args.affiliation)
# get edges
edges = None
if args.k_edges or args.threshold_edges:
similarities = compute_cosinesim(embeddings)
edges = get_edges(similarities, args.k_edges, args.threshold_edges)
# plot
fig = tsne_plot(tsne_result, lnames, affiliation, args.affiliation,
palette, edges)
fig.savefig(outfile, format = args.format, bbox_inches='tight')
plt.show()
exit(0)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | nipy/pbrain | eegview~utils.py | from __future__ import division
import re
import sys
import array
import scipy
from matplotlib.cbook import iterable, Bunch
from pbrainlib.mlab import cohere_pairs, fftsurr, window_hanning as hanning
from math import floor, ceil
from numpy import mean #changed from scipy mean. don't think it makes a difference except with multivariate arrays in which case, be careful.
from scipy import zeros, ones, exp, array, pi
from scipy import array, arange, std, rand
import scipy.signal
#import pickle
def hilbert_phaser(s):
"""
FUNC: hilbert_phaser
DESCR: Return the instantaneous phase of s using the hilbert transform
"""
#f = file("hilbert_fpe.pickle", "w")
#pickle.dump(s, f)
#f.close()
#print "utils:hilbert_phaser(): sig.hilbert(", s[0:10], ")"
h = scipy.signal.hilbert(s)
#print "utils:hilbert_phaser(): scipy.absolute(", h[0:10], ")"
a = scipy.absolute(h)
#print "utils:hilbert_phaser(): scipy.angle(", h[0:10], ")"
phase = scipy.angle(h)
return phase
def synchrony(s1, s2, t, winLen, freq, overlap=.1) :
"""
FUNC: synchrony
DESCR: phase synchrony implementation
"""
#print "utils.synchrony(s1(", len(s1), ",), s2(", len(s2), "), t(", len(t),"),", winLen, freq, overlap, ")"
# Compute phase difference
#print "utils.synchrony(): doing hilbert_phaser(s1)"
p1 = hilbert_phaser(s1)
#print "utils.synchrony(): doing hilbert_phaser(s2)"
p2 = hilbert_phaser(s2)
pdiff = p1 - p2
sync = []
time = []
nWin = int(winLen * freq)
nOverlap = int(overlap * freq)
#print "utils.synchrony(): doing ", len(arange(0, len(pdiff), nWin - nOverlap)), " pdiffs"
for i in arange(0, len(pdiff), nWin - nOverlap):
sWin = pdiff[i:i + nWin]
if len(sWin) < 3 : continue
s = 1. / (1 + std(sWin))
#print "utils.synchrony(): [", i, ", ", i+nWin, "]: appending value s=", s
try : sync.append(s)
except :
print 'sWin:', sWin
sys.exit()
if t is not None :
time.append(t[i] + winLen / 2.)
return array(sync), array(time)
def sync_gamma(psi):
"""
FUNC: sync_gamma
DESCR: Compute the measure gamma
gamma^2 = <cos psi>^2 + <sin psi>^2
where
psi(t) = m phix(t) - n phiy(t)
where phi is the instantaneous phase; see also phasediff
gamma is a measure of phase synchrony and ranges from 0-1
"""
from scipy import mean, cos, sin, sqrt
return sqrt(mean(cos(psi))**2 + mean(sin(psi))**2)
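# Hedged illustration (not part of the original module): a constant phase
# difference gives gamma near 1, a uniformly random one gives gamma near 0.
# Uses ones, pi and rand already imported at the top of this module.
def _sync_gamma_demo():
    psi_locked = 0.25 * ones(1000)
    psi_random = 2.0 * pi * rand(1000)
    print 'locked gamma:', sync_gamma(psi_locked)
    print 'random gamma:', sync_gamma(psi_random)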
def phasediff(s0, s1, filt, n=1, m=1):
"""
FUNC: phasediff
DESCR: Compute the n-m phase difference using the hilbert transform of s0
and s1 after filtering with filt
return value is a class with attributes
s0filt : the filtered s0
s1filt : the filtered s1
p0 : the phase of s0
p1 : the phase of s1
psi : the phase difference
n : n
m : m
"""
s0filt = filt(s0)
s1filt = filt(s1)
p0 = hilbert_phaser(s0filt)
p1 = hilbert_phaser(s1filt)
return Bunch(
s0filt = s0filt,
s1filt = s1filt,
p0 = p0,
p1 = p1,
psi = m*p0 - n*p1,
n = n,
m = m,
)
def lowbutter(lpcf, lpsf, Fs, gpass=3, gstop=15):
"""
FUNC: lowbutter
DESCR: Return a low pass butterworth filter with
lpcf : lowpass corner freq
lpsf : lowpass stop freq
gpass : corner freq attenuation
gstop : stop freq attenuation
return value is a callable function that will filter your data
Example:
mybutt = lowbutter(12, 15, eeg.freq)
sfilt = mybutt(s1)
"""
Nyq = Fs/2.
wp = lpcf/Nyq
ws = lpsf/Nyq
ord, Wn = scipy.signal.buttord(wp, ws, gpass, gstop)
b, a = scipy.signal.butter(ord, Wn, btype='lowpass')
def func(x):
return scipy.signal.lfilter(b,a,x)
return func
def donothing_callback(*args):
pass
def read_cohstat(fh):
"""
FUNC: read_cohstat
DESCR: Read in a cohstat file and returns N, cxy, phases where cxy, pxy
are dictionaries and N is the number of channels
raises a RuntimeError on failure to parse
"""
# grok the number of channels
header1 = fh.readline()
header2 = fh.readline()
cxy = {}
phases = {}
while 1:
line = fh.readline().strip()
#print "read_cohstat(): read line" , line
vals = line.split()
#print "len(", vals, ")=",len(vals)
if len(vals)!=7:
print "read_cohstat(): quitting coherences"
break
tup = vals[0].split('-')
if len(tup) !=2:
raise RuntimeError, 'Bad file format on line %s' % line
try: i = int(tup[0])
except ValueError: continue
try: j = int(tup[1][:-1])
except ValueError: continue
cxy[(i,j)] = array(map(float, vals[1:]))
header2 = fh.readline()
header3 = fh.readline()
while 1:
line = fh.readline().strip()
vals = line.split()
if len(vals)!=7:
print "read_cohstat(): quitting phases"
break
tup = vals[0].split('-')
if len(tup) !=2:
raise RuntimeError, 'Bad file format on line %s' % line
try: i = int(tup[0])
except ValueError: continue
try: j = int(tup[1][:-1])
except ValueError: continue
phases[(i,j)] = array(map(float, vals[1:]))
print "len(cxy)=" , len(cxy), "len(phases)=", len(phases)
return cxy, phases
def all_pairs_ij(N):
"""
FUNC: all_pairs_ij
DESCR: Return a list of all uniq i,j tuples for cohstat
"""
ij = []
for i in range(N):
for j in range(i+1, N):
ij.append((i,j))
return ij
def all_pairs_eoi(eoi):
"""
FUNC: all_pairs_eoi
DESCR:Return a list of all uniq e1,e2 tuples for the eoi
"""
return [ (eoi[i], eoi[j]) for i,j in all_pairs_ij(len(eoi))]
def ij_across_eois(eoi1, eoi2, amp):
"""
FUNC: ij_across_eois
DESCR: gets ij pairs for across eois (useful for in and out of focus
analysis). takes three arguments eoi1, eoi2, and amp. as long as
you keep track of which eoi is which when you enter them in,
everything should be peachy
Note, you really would rather be using electrode_pairs_across_eois
"""
ind1=eoi1.to_data_indices(amp)
ind2=eoi2.to_data_indices(amp)
seen={}
if len(ind1)<len(ind2):
for i in ind1:
seen[i]=1
ind2uniq=[j for j in ind2 if not seen.has_key(j)]
indeoi1=ind1
indeoi2=ind2uniq
else:
for i in ind2:
seen[i]=1
ind1uniq=[j for j in ind1 if not seen.has_key(j)]
indeoi1=ind1uniq
indeoi2=ind2
ij = []
for i in indeoi1:
for j in indeoi2:
ij.append((i,j))
return ij
def electrode_pairs_across_eois(eoia, eoib, amp):
"""
FUNC: electrode_pairs_across_eois
DESCR: gets (e1,e2) pairs for across eois (useful for in and out of focus
analysis). takes three arguments eoia, eoib, and amp.
"""
# if eoia contains eoib you can get in trouble, because eoi2u
# would be empty in the code below; just ask Sinem
if eoia==eoib: return []
if len(eoia)==0: return []
if len(eoib)==0: return []
def smaller_one_first(eoi1, eoi2):
seen = {}
for key in eoi1: seen[key] = 1
# eoi2u are the electrodes in eoi2 that are not in eoi1
eoi2u = [ e for e in eoi2 if not seen.has_key(e)]
return [ (e1, e2) for e1 in eoi1 for e2 in eoi2u]
if len(eoia) < len(eoib): return smaller_one_first(eoia, eoib)
else: return smaller_one_first(eoib, eoia)
def cohere_dict_to_array(m, keys):
"""
FUNC: cohere_dict_to_array
DESCR: Convert a cohere dict 'm' (as returned by cohere_bands, or
cohere_pairs) to an array for statistical processing
"""
# get a representative band
band = m[keys[0]]
if iterable(band):
if len(band)>1:
# XXX: will not work with Numeric or numarray, but only with numpy. how to do this?!?
a = zeros( (len(keys),len(band)), band.dtype)
else:
a = zeros( (len(keys),), band.dtype)
else:
a = zeros( (len(keys),), 'd')
for count, key in enumerate(keys):
a[count] = m[key]
return a
def cohere_array_to_dict(a, keys):
"""
FUNC: cohere_array_to_dict
DESCR: Convert a cohere array (as created by cohere_dict_to_array) back
to a dict
"""
d = {}
count = 0
for rowNum in range(len(keys)):
d[keys[rowNum]] = a[rowNum]
return d
def cohere_bands(cxy, phase, freqs, keys,
bands = ( (1,4), (4,8), (8,12), (12,30), (30,55) ),
progressCallback=donothing_callback):
"""
FUNC: cohere_bands
DESCR: Summarize the output of cohere_pairs_eeg by bands. cxy and
phase are a dictionary from electrode pair keys to Numeric arrays of
coherence and phases for that pair. keys is a list of (e1,e2)
tuples.
The bands are
delta = 1-4 Hz
theta = 4-8
alpha = 8-12
beta = 12-30
gamma = 30-55
Return value is cxyAvg, phaseAvg
"""
#convert the cxy and phase structs to a matrix for averaging
df = freqs[1]-freqs[0]
ind = []
for (fmin, fmax) in bands:
inds = max([int(floor(fmin/df)), 0 ])
inde = min([int(ceil (fmax/df)), len(freqs)])
ind.append( (inds, inde))
cxyAvg = {}
phaseAvg = {}
# note I am doing this element wise as a dict rather than array
# wise as a matrix to conserve memory. dimensions of matrix are
# len(ij)*len(freqs), which for NFFT=2048 and 64 electrodes
# pairwise is 2016*2048 coherences and phases. 8 million floats.
Nbands = len(bands)
count = 0
Nkeys = len(keys)
count =0
for key in keys:
count +=1
if count%20==0:
progressCallback(count/Nkeys, 'Averaging over bands')
thisCxy = cxy[key]
thisPhase = phase[key]
ac = zeros( (Nbands,), thisCxy.dtype)
ap = zeros( (Nbands,), thisPhase.dtype)
count = 0
for inds, inde in ind:
if inds==inde:
ac[count]=thisCxy[inds]
ap[count]=thisPhase[inds]
else:
ac[count] = mean(thisCxy[inds:inde])
ap[count] = mean(thisPhase[inds:inde])
count += 1
cxyAvg[key] = ac
phaseAvg[key] = ap
return cxyAvg, phaseAvg
def power_bands(pxx, freqs,
bands = ( (1,4), (4,8), (8,12), (12,30), (30,55) ),
progressCallback=donothing_callback):
"""
FUNC: power_bands
DESCR: Summarize the output of cohere_pairs_eeg with pxx returned by
bands. pxx is a dictionary from electrodes to Numeric arrays of
power for that trode.
The bands are
delta = 1-4 Hz
theta = 4-8
alpha = 8-12
beta = 12-30
gamma = 30-55
Return value is pxxAvg
"""
#convert the cxy and phase structs to a matrix for averaging
df = freqs[1]-freqs[0]
ind = []
for (fmin, fmax) in bands:
inds = max([int(floor(fmin/df)), 0 ])
inde = min([int(ceil (fmax/df)), len(freqs)])
ind.append( (inds, inde))
pxxAvg = {}
# note I am doing this element wise as a dict rather than array
# wise as a matrix to conserve memory. dimensions of matrix are
# len(ij)*len(freqs), which for NFFT=2048 and 64 electrodes
# pairwise is 2016*2048 coherences and phases. 8 million floats.
Nbands = len(bands)
count = 0
keys = pxx.keys()
Nkeys = len(keys)
count =0
for key in keys:
count +=1
if count%20==0:
progressCallback(count/Nkeys, 'Averaging over bands')
thisPxx = pxx[key]
#adjust for zeroed out electrodes
if thisPxx[1] < .000000001:
thisPxx.fill(1)
print "UTILS: PXX WAS NAN, IS NOW: ", key, pxx[key]
avg = zeros( (Nbands,), thisPxx.dtype)
count = 0
for inds, inde in ind:
if inds==inde:
avg[count]=thisPxx[inds]
else:
avg[count] = mean(thisPxx[inds:inde])
count += 1
pxxAvg[key] = avg
return pxxAvg
def export_cohstat_xyz(XYZ):
"""
FUNC: export_cohstat_xyz
DESCR: XYZ is a 64 x 3 array of floats. Return a string that can be
written to a file cohstat can read. Note the data should be rotated
so that they are in the view plane
"""
if len(XYZ) != 64:
raise ValueError, 'Length of XYZ must be 64!'
lines = [' 64']
for row in XYZ:
lines.append(', '.join(['%d'%val for val in row]))
return '\r\n'.join(lines) + '\r\n'
def export_to_cohstat(cxyBands, phaseBands, keys):
"""
FUNC: export_to_cohstat
DESCR:
This function takes the coherence between pairs of electrodes in a
grid as determined by cohere_bands (which processes the output from
    cohere_pairs) and puts the data into a string format that can be loaded
    into CohStat for visual analysis.
export_to_cohstat returns the average coherence and phase within bands
defined by cohere_bands, here defined as follows:
delta = 1-4 Hz
theta = 4-8
alpha = 8-12
beta = 12-30
gamma = 30-55
keys are an ordered list of keys into the cxy and phase dictionaries.
Eg, if keys is from cohere_pairs_eeg, then it is a list of (e1,e2)
tuples
export_to_cohstat then creates a string (with all appropriate headers)
that can be written to a file
The output string is in the format:
Average coherence within selected bands
Delta Theta Alpha Beta gamma
i-j: coh[0] coh[1] coh[2] coh[3] coh[4]
Average phase within selected bands
Delta Theta Alpha Beta gamma
i-j: pha[0] pha[1] pha[2] pha[3] pha[4]
Authors: Scott Simon ([email protected]) and John Hunter.
"""
lines = ["""Average coherence within selected bands:\r
Delta Theta Alpha Beta lowgamma \r
"""]
count=0
#Format the coherence data for each pair
ij = all_pairs_ij(64)
if len(keys)!=len(ij):
raise RuntimeError, 'Cohstat can only handle 64 channels, talk to Leo'
count = 0
for i, j in ij:
thisCxy = cxyBands[keys[count]]
s = '%d-%d:'%(i+1,j+1)
s = s.rjust(22)
lines.append(s + ' %1.3f %1.3f %1.3f %1.3f %1.3f \r\n' %\
(thisCxy[0], thisCxy[1],thisCxy[2],thisCxy[3], thisCxy[4]))
count +=1
#Create the phase header of the file
lines.append("""
Phase of average coherency within selected bands (degrees):\r
Delta Theta Alpha Beta lowgamma \r
""")
count = 0
#Format the phase data for each pair
for i, j in ij:
thisPhase = 180.0/pi*array(phaseBands[keys[count]])
s = '%d-%d:'%(i+1,j+1)
s = s.rjust(22)
lines.append(s + '% 11.3f% 11.3f% 11.3f% 11.3f% 11.3f \r\n' %\
(thisPhase[0], thisPhase[1],thisPhase[2],thisPhase[3], thisPhase[4]))
count+=1
lines.append("""
""")
return ''.join(lines)
def convert_ebersole(filein, fileout):
"""
FUNC: convert_ebersole
DESCR:
This function converts a float ASCII eeg record into a binary eeg
record.
This function takes two file names as inputs. The first, filein,
is the name of the file being converted. The second, fileout, is
the name of the file that will be created.
The output is a file with the voltage from each individual channel
given a column, and each row representing another sampling point.
In the output file, this floating point data is represented in
binary.
Author: Scott Simon, [email protected]
"""
from array import array
count = 0
fi = open(filein, 'r')
for line in fi.xreadlines():
if line[0] == '"': continue
elif len(line.strip())==0:
count += 1
if count == 2: break
fo = open(fileout, 'wb')
for line in fi.xreadlines():
a = array('f', map(float, line.split(',')[1:]))
fo.write(a.tostring())
def eoi_for_matlab(eoi, grd, fileout):
"""
FUNC: eoi_for_matlab
DESCR: This function takes the xyz for the electrodes of interest,
assigned from the .grd file by eoi_to_xyz, and formats these values
in a way that matlab can read.
The input is an .eoi and .grd file, and an output file name. The
output is a file in the format
[...
x y z;...
x y z;...
];
Author: Scott Simon, [email protected]
"""
fo = open(fileout, 'w')
fo.write('XYZ = [...\n')
t = eoi_to_xyz(eoi, grd)
for l in t:
fo.write('\t%1.8f %1.8f %1.8f;...\n' % tuple(l))
fo.write('];')
def eeg_grand_mean(X):
"""
FUNC: eeg_grand_mean
DESCR: X is a numSamples by numChannels numeric array. Return the grand
mean of X (For each element of X, subtract the mean of the row
for which it occurs)
"""
return mean(X,1)
def filter_grand_mean(X):
"""
FUNC: filter_grand_mean
DESCR: X is a numSamples by numChannels numeric array. Return X with
the grand mean removed
"""
X = array(X, 'd')
gm = eeg_grand_mean(X)
print "utils.filter_grand_mean(): type(gm)=", gm.dtype
gm.shape = X.shape[0],
numRows, numCols = X.shape
for i in range(numCols):
X[:,i] = X[:,i] - gm
print "utils.filter_grand_mean(): return X of type ", X.dtype
return X
def remove_channel_means(X):
"""
FUNC: remove_channel_means
DESCR: remove the mean from each channel. X is a numSamples x numChannels array
"""
mu = mean(X,0)
mu.shape = 1,X.shape[1]
return X - mu
def get_exp_prediction(pars, x):
"""
FUNC: get_exp_prediction
DESCR: pars is an a, alpha, k0 tuple of parameters for the exponential function
y = a*exp(alpha*t) + k
Evaluate this function at x and return y
Eg, if x are distances, this would return the predicted coherence
as a function of distance assuming an exponential relationship See
get_best_exp_params for the function to get the best exponential
params for a distance array x and a coherence array.
"""
a, alpha, k = pars
#print a, alpha, k
return a*exp(alpha*x) + k
def get_best_exp_params(x, y, guess=(1.0, -.5, 0.0)):
"""
FUNC: get_best_exp_params
DESCR: Given a distance array x and an equal shaped array of coherences y
and an initial guess for the parameters of get_exp_prediction,
where pars is an a, alpha, k0 tuple, return the best fit
parameters as an a, alpha, k0 tuple
Eg,
best = get_best_exp_params(delta, coh, guess)
"""
print "utils.get_best_exp_params(x=",x, "y=", y, "guess=", guess,"): !!!"
def errfunc(pars):
return y - get_exp_prediction(pars, x) #return the error
J = zeros( (3,len(x)), 'd') # init the Jacobian only once
ddk = -ones((len(x),), 'd') # d/dk indep of k
def deriv_errfunc(pars):
'The Jacobian of the errfunc is -Jacobian of the func'
a, alpha, k = pars
J[0,:] = -exp(alpha*x) #d/da
J[1,:] = -x*a*exp(alpha*x) #d/alpha
J[2,:] = ddk
return J
from scipy.optimize import leastsq
ret = leastsq(errfunc, guess,
full_output=1,
Dfun=deriv_errfunc,
col_deriv=1)
if len(ret)==4:
best, info, ier, mesg = ret
elif len(ret)==5:
best, info, ier, mesg, cov_x = ret
print "WE'RE TRYING!", best
if ier != 1: print "ier == 1..."#return None #so I took this out. I don't know if this is a terrible idea. Dr. Towle has informed me that sometimes the glove just don't fit, but that we still want to draw coherences with the best value, even if the error value is 1.
return best
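# Hedged illustration (not part of the original module): recover the
# parameters of a noiseless synthetic exponential y = 2*exp(-0.8*x) + 0.1;
# the fitted (a, alpha, k) tuple should come back close to (2.0, -0.8, 0.1).
def _exp_fit_demo():
    x = arange(0.0, 5.0, 0.1)
    y = 2.0 * exp(-0.8 * x) + 0.1
    return get_best_exp_params(x, y, guess=(1.0, -0.5, 0.0))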
def cohere_pairs_eeg( eeg, newLength, NFFT, offset, eoiPairs=None, indMin=0, indMax=None,
data=None, returnPxx=False, **kwargs):
"""
FUNC: cohere_pairs_eeg
DESCR: Cxy, Phase, freqs = cohere_pairs_eeg( ...)
Compute the coherence for all pairs in the eoi. eeg is a
EEG instance.
eoiPairs is a list of electrode tuples; if none, use all. Each
tuple is a pair of electrodes, eg,
eoiPairs = [ ( ('MT',7), ('MT',8) ),
( ('MT',7), ('MT',9) ),
....
]
indMin, indmax if provided, give the sample number indices into
eeg.data to do the coherence over (default all)
if data is not None, use data rather than eeg.data to compute
coherence
The other function arguments, except for 'preferSpeedOverMemory'
(see below), are explained in the help string of 'psd'.
Return value is a tuple (Cxy, Phase, freqs).
Cxy -- a dictionary of electrode tuples -> coherence vector for that
pair.
Phase -- a dictionary of phases of the cross spectral density at
each frequency for each pair. keys are (e1,e2).
freqs -- a vector of frequencies, equal in length to either the
coherence or phase vectors for any electrode key. Eg, to make
a coherence
Bode plot:
e1 = ('MT', 7)
e2 = ('MT', 8)
subplot(211)
plot( freqs, Cxy[(e1,e2)])
subplot(212)
plot( freqs, Phase[(e1,e2)])
For a large number of pairs, cohere_pairs can be much more
efficient than just calling cohere for each pair, because it
caches most of the intensive computations. If N is the number of
pairs, this function is O(N) for most of the heavy lifting,
whereas calling cohere for each pair is O(N^2). However, because
of the caching, it is also more memory intensive, making 2
additional complex arrays with approximately the same number of
elements as X.
See mlab cohere_pairs for optional kwargs
See test/cohere_pairs_test.py in the src tree for an example
script that shows that this cohere_pairs and cohere give the same
results for a given pair.
"""
print "utils.cohere_pairs_eeg: eeg.freq: ", eeg.freq
amp = eeg.get_amp()
print "UTILS AMP: ", amp
if eoiPairs is None:
eoiPairs = all_pairs_eoi( amp.to_eoi() )
m = amp.get_electrode_to_indices_dict()
ij = [ (m[e1], m[e2]) for e1, e2 in eoiPairs]
ij.sort()
print len(ij), len(eoiPairs)
if data is None: data = eeg.data
#print "cohere_pairs_eeg: data.shape is ", data.shape
if indMax is None: indMax = data.shape[0]
X = data[indMin:indMax]
if returnPxx:
try:
Cxy, Phase, freqs, Pxx = cohere_pairs(
X, ij, newLength, NFFT, offset, Fs=eeg.freq, returnPxx=True, **kwargs)
except OverflowError, overflowerror:
print "cohere_pairs_eeg(): caught overflow error!! bailing: ", overflowerror
else:
Cxy, Phase, freqs = cohere_pairs(
X, ij, newLength, NFFT, offset, Fs=eeg.freq, **kwargs)
seen = {}
keys = Cxy.keys()
keys.sort()
assert(len(ij)==len(eoiPairs))
for keyIJ, keyEOI in zip(ij, eoiPairs):
Cxy[keyEOI] = Cxy[keyIJ]
del Cxy[keyIJ]
Phase[keyEOI] = Phase[keyIJ]
del Phase[keyIJ]
i,j = keyIJ
e1, e2 = keyEOI
seen[i] = e1
seen[j] = e2
#print Cxy
#print Phase, "&*&*&"
#print freqs
if returnPxx:
for i, ei in seen.items():
Pxx[ei] = Pxx[i]
del Pxx[i]
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
def window_hanning(x):
"""
FUNC: window_hanning
DESCR: return x times the hanning window of len(x)
"""
sigma = std(x)
win = hanning(len(x))*x #USED THE MLAB window_hanning SINCE MATPLOTLIB DOESN'T HAVE IT ANYMORE -eli
return win*(sigma/std(win))
def bandpass(lpsf, lpcf, hpcf, hpsf, Fs, gpass=3, gstop=20):
"""
FUNC: bandpass
DESCR: return a butterworth bandpass filter
"""
Nyq = Fs/2.
wp = [lpcf/Nyq, hpcf/Nyq]
ws = [lpsf/Nyq, hpsf/Nyq]
ord, Wn = scipy.signal.buttord(wp, ws, gpass, gstop)
b,a = scipy.signal.butter(ord, Wn, btype='bandpass') # pun intended
def func(x):
return scipy.signal.lfilter(b,a,x)
return func
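# Hedged usage sketch (not part of the original module): build a 1-30 Hz
# bandpass for a 200 Hz recording and apply it to two seconds of random
# noise; mirrors the example shown in the lowbutter docstring above.
def _bandpass_demo():
    Fs = 200.0
    x = rand(int(2 * Fs))
    mybp = bandpass(0.5, 1.0, 30.0, 35.0, Fs)
    return mybp(x)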
def gen_surrogate_data(eeg, tmin, tmax, eoi, filters, numSurrs) :
"""
FUNC: gen_surrogate_data
DESCR:
"""
surrData = {}
# Get data
t, data = eeg.get_data(tmin, tmax)
# Extract random pairs from the data
# randInds = (nx.mlab.rand(numSurrs, 2) * len(eoi)).astype(nx.Int)
    randInds = (rand(numSurrs, 2) * len(eoi)).astype(int)  # builtin int; 'Int' (Numeric) is not defined in this module
e2i = eeg.get_amp().get_electrode_to_indices_dict()
for i, pair in enumerate(randInds) :
print 'Computing surrogate %d of %d' % (i, numSurrs)
# Get indices into data
ie1, ie2 = pair
i1 = e2i[eoi[ie1]]
i2 = e2i[eoi[ie2]]
# Generate surrogate data
# XXX
surr1 = fftsurr(data[:,i1], window=window_hanning)
surr2 = fftsurr(data[:,i2], window=window_hanning)
# surr1 = fftsurr(data[:,i1])
# surr2 = fftsurr(data[:,i2])
# Generate filtered surrogate data
for j, tup in enumerate(filters.items()) :
band, info = tup
winLen, filter = info
fsurr1 = filter(surr1)
fsurr2 = filter(surr2)
surrData[i, j, band] = (fsurr1, fsurr2)
return surrData
| [] |
2024-01-10 | nipy/pbrain | eegview~eegview.py | # TODO: fix vsteps for different numbers of electrodes
# font sizes are different on ylabels
from __future__ import division
import sys, os, copy, traceback
import distutils.sysconfig
import pygtk
pygtk.require("2.0")
import gtk
from gtk import gdk
from scipy import arange, sin, pi, zeros, ones, reshape, \
greater_equal, transpose, array, arange, resize, \
absolute, nonzero
from scipy import fromstring, arange, log10
from scipy import minimum, maximum
from matplotlib.cbook import exception_to_str
from pbrainlib.gtkutils import str2num_or_err, simple_msg, error_msg, \
not_implemented, yes_or_no, FileManager, select_name, get_num_range, Dialog_FileSelection, Dialog_FileChooser, get_num_value
from matplotlib.widgets import Cursor, SpanSelector
from data import EEGWeb, EEGFileSystem, EOI, Amp, Grids
from file_formats import FileFormat_BNI, W18Header, FileFormat_AxonAscii, FileFormat_NeuroscanAscii, FileFormat_AlphaomegaAscii, NeuroscanEpochFile
from dialogs import Dialog_Preferences, Dialog_SelectElectrodes,\
Dialog_CohstatExport, Dialog_SaveEOI, Dialog_EEGParams, \
Dialog_Annotate, Dialog_AnnBrowser, \
Dialog_PhaseSynchrony, Dialog_PhaseSynchronyPlot, \
AutoPlayDialog, SpecProps, Dialog_EventRelatedSpec
from dialog_filterelectrodes import Dialog_FilterElectrodes
import datetime
import servers
from borgs import Shared
from events import Observer
from shared import fmanager, eegviewrc
from gladewrapper import PrefixWrapper
from utils import filter_grand_mean
from matplotlib import rcParams
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
import matplotlib.cm as cm
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from matplotlib.transforms import BboxTransform, Bbox, ScaledTranslation, blended_transform_factory #in all, removed unit_bbox, Value, Point, and
#replaced get_bbox_transform with BboxTransform, added ScaledTranslation and blended_transform_factory
from matplotlib.patches import Rectangle
from scipy.signal import buttord, butter, lfilter
from mpl_windows import ChannelWin, AcorrWin, HistogramWin, SpecWin, EventRelatedSpecWin
major, minor1, minor2, s, tmp = sys.version_info
if major<2 or (major==2 and minor1<3):
True = 1
False = 0
def load_w18(fullpath):
assert(os.path.exists(fullpath))
basename, filename = os.path.split(fullpath)
fh = file(fullpath, 'rb')
header = W18Header(fh)
params = {
'filename' : filename,
'date' : header.currtime,
'description' : '',
'channels' : 18,
'freq' : 200,
'classification' : 99,
'file_type' : W18,
'behavior_state' : 99,
}
eeg = EEGFileSystem(fullpath, params)
return eeg
def load_bmsi(bnipath):
bni = FileFormat_BNI(bnipath)
basename, ext = os.path.splitext(bnipath)
if os.path.exists(basename):
fullpath = basename
elif os.path.exists(basename + '.eeg'):
fullpath = basename + '.eeg'
else:
fullpath = fmanager.get_filename(
title='Select EEG File accompanying this BNI file')
eeg = bni.get_eeg(fullpath)
return eeg
def load_epoch(fname):
epoch = NeuroscanEpochFile(fname)
return epoch.eeg
def load_params(path):
params = {}
for line in file(path):
line = line.strip()
if not len(line): continue
if line.startswith('#'): continue
k,v = line.split(':',1)
k = k.strip()
v = v.strip()
if k in ('channels', 'pid', 'freq', 'classification', 'file_type', 'behavior_state') :
v = int(v)
params[k] = v
eegfile = params['eegfile']
if not os.path.exists(eegfile):
error_msg('Cannot find eeg file "%s"'%eegfile)
return
eeg = EEGFileSystem(eegfile, params)
return eeg
def load_axonascii(path):
axonascii = FileFormat_AxonAscii(path)
return axonascii.eeg
def load_alphaomegaascii(path):
alphaomegascii = FileFormat_AlphaomegaAscii(path)
return alphaomegascii.eeg
def load_neuroscanascii(path):
    try:
        neuroscanascii = FileFormat_NeuroscanAscii(path)
    except IOError, msg:
        print "load_neuroscanascii(): msg=", msg
        # 'parent' is undefined in this scope; report without a parent window
        # and return None instead of hitting a NameError on the line below.
        error_msg(msg, title='Error', parent=None)
        return None
    return neuroscanascii.eeg
extmap = { '.w18' : load_w18,
'.bni' : load_bmsi,
'.params' : load_params,
'.epoch' : load_epoch,
'.axonascii' : load_axonascii,
'.neuroscanascii' : load_neuroscanascii,
'.alphaomegaascii' : load_alphaomegaascii
}
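# Hedged sketch (not part of the original file): one way the extension ->
# loader map above could be used to open an arbitrary EEG file; the helper
# name is made up for illustration.
def _load_by_extension(fullpath):
    basename, ext = os.path.splitext(fullpath)
    loader = extmap.get(ext.lower())
    if loader is None:
        raise ValueError('unrecognized EEG file extension: %s' % ext)
    return loader(fullpath)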
class EEGNavBar(gtk.Toolbar, Observer):
"""
CLASS: EEGNavBar
DESCR: toolbar for MainWindow
"""
def add_toolbutton(self, icon_name, tip_text, tip_private, clicked_function, clicked_param1=None):
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
iconw = gtk.Image()
iconw.set_from_stock(icon_name, iconSize)
toolitem = gtk.ToolButton()
toolitem.set_icon_widget(iconw)
toolitem.show_all()
#updated for new tooltip api
toolitem.set_tooltip_text(tip_text)
#toolitem.set_tooltip(self.tooltips, tip_text, tip_private)
toolitem.connect("clicked", clicked_function, clicked_param1)
toolitem.connect("scroll_event", clicked_function)
self.insert(toolitem, -1)
def add_toolitem(self, widget, tip_text):
toolitem = gtk.ToolItem()
toolitem.add(widget)
toolitem.show_all()
self.insert(toolitem, -1)
def add_separator(self):
toolitem = gtk.SeparatorToolItem()
toolitem.set_draw(True)
#toolitem.set_expand(gtk.TRUE)
toolitem.show_all()
self.insert(toolitem, -1)
def __init__(self, eegplot=None, win=None):
"""
eegplot is the EEGPlot instance that the toolboar controls
win, if not None, is the gtk.Window the Figure is embedded in
"""
gtk.Toolbar.__init__(self)
Observer.__init__(self)
self.win = win
self.eegplot = eegplot
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
self.set_border_width(5)
self.set_style(gtk.TOOLBAR_ICONS)
self.tooltips = gtk.Tooltip()
self.add_toolbutton(gtk.STOCK_GOTO_FIRST, 'Move back one page', 'Private', self.panx, -10)
self.add_toolbutton(gtk.STOCK_GO_BACK, 'Move back in time', 'Private', self.panx, -1)
self.add_toolbutton(gtk.STOCK_GO_FORWARD, 'Move forward in time', 'Private', self.panx, 1)
self.add_toolbutton(gtk.STOCK_GOTO_LAST, 'Move forward one page', 'Private', self.panx, 10)
self.add_separator()
self.add_toolbutton(gtk.STOCK_ZOOM_IN, 'Shrink the time axis', 'Private', self.zoomx, 1)
self.add_toolbutton(gtk.STOCK_ZOOM_OUT, 'Expand the time axis', 'Private', self.zoomx, 0)
self.add_separator()
self.add_toolbutton(gtk.STOCK_GO_UP, 'Increase the voltage gain', 'Private', self.zoomy, 1)
self.add_toolbutton(gtk.STOCK_GO_DOWN, 'Decrease the voltage gain', 'Private', self.zoomy, 0)
self.add_toolbutton(gtk.STOCK_REDO, 'Specify time range', 'Private', self.specify_range)
#self.add_toolbutton(gtk.STOCK_REDO, 'Specify the voltage gain', 'Private', self.specify_range_time)
#the above was not important enough to keep right now -eli
self.add_toolbutton(gtk.STOCK_JUMP_TO, 'Automatically page the EEG', 'Private', self.auto_play)
self.add_toolbutton(gtk.STOCK_SAVE, 'Save the figure', 'Private', self.save_figure)
self.add_separator()
def toggled(button):
self.broadcast(Observer.GMTOGGLED, button)
def lock_trode_toggled(button) :
self.broadcast(Observer.LOCK_TRODE_TOGGLED, button)
self.buttonGM = gtk.CheckButton('GM')
self.buttonGM.show()
self.buttonGM.connect('toggled', toggled)
self.buttonGM.set_active(True)
self.buttonGM.set_active(False)
self.add_toolitem(self.buttonGM, 'Remove grand mean from data if checked')
#self.append_widget(
# self.buttonGM, 'Remove grand mean from data if checked', '')
self.buttonLockTrode = gtk.CheckButton('Lock')
self.buttonLockTrode.show()
self.buttonLockTrode.connect('toggled', lock_trode_toggled)
self.add_toolitem(self.buttonLockTrode, 'Lock Selected Electrode')
#self.append_widget(
# self.buttonLockTrode, 'Lock Selected Electrode', '')
self.add_separator()
#adding a decimate toggle here, without an icon
toolitem = gtk.ToolButton()
toolitem.show_all()
toolitem.set_label("Dec.")
toolitem.connect("clicked", self.set_decimate, None)
self.insert(toolitem, -1)
#/decimate toggle
def set_decimate(self, *args):
decfactor = self.eegplot.find_decimate_factor()
dec_input = get_num_value(labelStr='Choose a decimation factor (1 for none)', title='Enter value', parent=None,
default=decfactor) #the default value will be the optimal dec factor
if dec_input < self.eegplot.decimateFactor:
self.eegplot.decimateFactor = copy.deepcopy(dec_input)
self.eegplot.set_time_lim(updateData=True, broadcast=True)
return
else:
self.eegplot.decimateFactor = dec_input
self.eegplot.plot()
self.eegplot.draw()
def auto_play(self, *args):
tmin, tmax = self.eegplot.get_time_lim()
twidth = tmax-tmin
dlg = AutoPlayDialog(0, self.eegplot.eeg.get_tmax(), twidth)
dlg.show()
def specify_range(self, *args):
response = get_num_range()
if response is None: return
tmin, tmax = response
self.eegplot.set_time_lim(tmin, tmax, updateData=False)
self.eegplot.plot()
self.eegplot.draw()
"""
def specify_range_time(self, *args):
# mcc XXX: trying to be able to specify time as hour:min:sec
response = get_num_range(as_times=True)
if response is None: return
eegstartdate = self.eegplot.eeg.get_date()
# now subtract the eegstartdate to get proper tmin/tmax values
print "EEGNavBar: specify_range_time: ACK subtracting " , str(response), "from " , eegstartdate
timedelta_start = datetime.timedelta(hours=(eegstartdate.time()).hour,
minutes=(eegstartdate.time()).minute,
seconds=(eegstartdate.time()).second)
tmin, tmax = response
timedelta_tmin = datetime.timedelta(hours=response[0].hour, minutes=response[0].minute, seconds=response[0].second)
timedelta_tmax = datetime.timedelta(hours=response[1].hour, minutes=response[1].minute, seconds=response[1].second)
print "timedelta_start=", timedelta_start, "timedelta_tmin=", timedelta_tmin, "timedelta_tmax=", timedelta_tmax
tmin, tmax = (timedelta_tmin - timedelta_start).seconds, (timedelta_tmax - timedelta_start).seconds
print "EEGNavBar: specify_range_time: tmin=", tmin, ", tmax=", tmax
# figure out what times these times correspond to by getting eeg start time
self.eegplot.set_time_lim(tmin, tmax, updateData=True)
self.eegplot.plot() #redraw the traces -eli
self.eegplot.draw()
"""
def save_figure(self, button):
def print_ok(button):
fname = fs.get_filename()
fmanager.set_lastdir(fname)
fs.destroy()
try: self.eegplot.canvas.print_figure(fname)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
try: parent = Shared.windowMain.widget
except AttributeError: parent = None
simple_msg(msg, title='Error', parent=parent)
fs = gtk.FileSelection(title='Save the figure')
if self.win is not None:
fs.set_transient_for(self.win)
fs.set_filename(fmanager.get_lastdir() + os.sep)
fs.ok_button.connect("clicked", print_ok)
fs.cancel_button.connect("clicked", lambda b: fs.destroy())
fs.show()
def set_eegplot(self, eegplot):
self.eegplot = eegplot
def panx(self, button, arg):
if self.eegplot is None: return
try: arg.direction
except AttributeError: right = arg
else:
if arg.direction == gdk.SCROLL_UP: right=1
else: right=0
self.eegplot.pan_time(right)
self.eegplot.plot() #redraw the traces -eli
self.eegplot.draw()
return False
def zoomx(self, button, arg):
if self.eegplot is None: return
try: arg.direction
except AttributeError: direction = arg
else:
if arg.direction == gdk.SCROLL_UP: direction=1
else: direction=0
self.eegplot.change_time_gain(direction)
self.eegplot.plot() #redraw the traces -eli
self.eegplot.draw()
return False
def zoomy(self, button, arg):
if self.eegplot is None: return
try: arg.direction
except AttributeError: direction = arg
else:
if arg.direction == gdk.SCROLL_UP: direction=1
else: direction=0
self.eegplot.change_volt_gain(direction)
self.eegplot.plot() #redraw the traces -eli
self.eegplot.draw()
return False
"""
class AnnotationManager:
#""
CLASS: AnnotationManager
The highlight is the currently created rectangle that has not yet been
annotated.
The selected rectangle is a rect that has been annotated and
selected (not the same as highlighted!)
#""
def __init__(self, eegplot):
self.eegplot = eegplot
self.axes = self.eegplot.axes
self.canvas = self.axes.figure.canvas
self.ann = self.eegplot.eeg.get_ann()
# Create list of eois. Add eois from ann.
eoiAll = self.eegplot.get_eeg().get_amp().to_eoi()
eoiAll.set_description('All')
self.eois = {'All' : eoiAll}
self.eois.update(self.ann.eois)
rectprops = dict(facecolor='#bbbbff',
alpha=0.5)
self.selector = SpanSelector(self.axes, self.onselect,'horizontal',
minspan=0.01,
useblit=True,
rectprops=rectprops)
self._highlight = None
self.resize = False
self.background = None
def dlgAnnotate_ok_callback(params) :
self.dlgAnnotate.hide_widget()
# Add annotation to data structure
key = '%1.1f' % params['startTime'], '%1.1f' % params['endTime'], params['created']
params = self.dlgAnnotate.get_params()
# Set defaults / keep some old values
now = datetime.datetime.now() #fixed datetime syntax
if self.selectedkey is not None: # selected
params['created'] = self.ann[key]['created']
params['edited'] = now.ctime()
params['visible'] = self.ann[key]['visible']
params['rects'] = self.ann[key]['rects']
else : # create new
params['edited'] = params['created']
params['visible'] = 1
params['rects'] = None
# If shrink has changed, remove the rect(s);
# they will be recreated in update_annotations().
if (self.selectedkey is not None
and (self.ann[key]['shrink'] <> params['shrink']
or self.dlgAnnotate.changed)) :
for rect in self.ann[key]['rects'] :
self.eegplot.axes.patches.remove(rect)
params['rects'] = None
self.ann[key] = params
# Add new eoi, if any.
if not self.eois.get(params['eoi'].get_description()) :
self.add_eoi(params['eoi'])
# Write ann file.
self.ann.save_data()
# Create new annotation box.
self.update_annotations()
# Update ann browser info
self.dlgAnnBrowser.update_ann_info(key)
# Turn off highlight box.
self.remove_highlight()
return
def dlgAnnBrowser_ok_callback(*args) :
self.dlgAnnBrowser.hide_widget()
self.selectedkey = None
self.dlgAnnotate = Dialog_Annotate(eegplot, self, ok_callback=dlgAnnotate_ok_callback)
self.dlgAnnBrowser = Dialog_AnnBrowser(eegplot, self, dlgAnnBrowser_ok_callback)
# Update Annotations menuitems sensitivity.
menuItemAnnBrowser = Shared.widgets.get_widget('menuItemAnnBrowser')
menuItemAnnBrowser.set_sensitive(1)
menuItemAnnCreateEdit = Shared.widgets.get_widget('menuItemAnnCreateEdit')
menuItemAnnCreateEdit.set_sensitive(1)
menuItemAnnHorizCursor = Shared.widgets.get_widget('menuItemAnnHorizCursor')
menuItemAnnHorizCursor.set_sensitive(1)
menuItemAnnVertCursor = Shared.widgets.get_widget('menuItemAnnVertCursor')
menuItemAnnVertCursor.set_sensitive(1)
def onselect(self, xmin, xmax):
if self._highlight is not None:
self.remove_highlight()
self._highlight = xmin, xmax, self._new_rect(xmin, xmax, 0, 1,
facecolor='#bbbbff',
edgecolor='k',
linewidth=2,
alpha=0.5,
zorder=3
)
# Update Annotations menuitems sensitivity
label = 'Create New'
menuItemAnnCreateEdit = Shared.widgets.get_widget('menuItemAnnCreateEdit')
menuItemAnnCreateEdit.get_children()[0].set_text(label)
menuItemAnnDelete = Shared.widgets.get_widget('menuItemAnnDelete')
menuItemAnnDelete.set_sensitive(0)
self.canvas.draw()
def _new_rect(self, xmin, xmax, ymin, ymax, **props):
trans = blended_transform_factory( self.axes.transData,
self.axes.transAxes ) #xy_sep_transform deprecated
rect = Rectangle(xy=(xmin, ymin), width=xmax-xmin, height=ymax-ymin,
transform=trans, **props)
self.axes.add_patch(rect)
return rect
def remove_rects(self) :
for key, info in self.ann.items() :
if info.get('rects') :
del info['rects']
def over_annotation(self, x, y):
#""
If you are over an annotation, return its key.
If you are over multiple annotations, return the one whose
center is closest to the point.
If not over an annotation, return None.
#""
ret = []
for key, info in self.ann.items() :
for rect in info['rects'] :
if rect.get_window_extent().contains(x, y) :
#bounds = rect.get_window_extent().get_bounds()
ax1 = rect.get_axes()
bounds = ax1.dataLim.bounds # is this what i want?? -eli
middle = .5 * (2 * bounds[0] + bounds[2])
d = abs(x - middle)
ret.append((d, key))
ret.sort()
if not len(ret) : return None
return ret[0][1]
def over_edge(self, x, y) :
#""
If you are over an annotation edge, return its key.
x,y are figure coordinates (i.e., event.x, event.y)
#""
key, side = None, None
t, yt = self.axes.transData.inverted().transform((x, y)) #replaced inverse_xy_tup with inverted().transform()
for key, info in self.ann.items() :
s = info['startTime']
e = info['endTime']
if t >= s - .05 and t <= s + .05 :
side = 0
break
elif t >= e - .05 and t <= e + .05 :
side = 1
break
else :
key = None
if key is not None :
for rect in info['rects'] : #get_bounds() is deprecated, use properties intervalx and intervaly -eli
#l, b, w, h = rect.get_window_extent().get_bounds()
ax1 = rect.get_axes()
l, b, w, h = ax1.dataLim.bounds # is this what i want?? -eli
print l, b, w, h
r = l + w
t = b + h
if y <= t and y >= b :
break
else :
key, side = None, None
return key, side
def is_over_highlight(self, t) :
xmin, xmax = self.highlight_span()
return t >= xmin and t <= xmax
def remove_highlight(self):
if self._highlight is not None:
xmin, xmax, rect = self._highlight
self.axes.patches.remove(rect)
self._highlight = None
self.canvas.draw()
# Update Annotations menuitems sensitivity
label = 'Create New'
menuItemAnnCreateEdit = Shared.widgets.get_widget('menuItemAnnCreateEdit')
menuItemAnnCreateEdit.get_children()[0].set_text(label)
menuItemAnnDelete = Shared.widgets.get_widget('menuItemAnnDelete')
menuItemAnnDelete.set_sensitive(0)
def get_highlight(self):
#""
return (xmin, xmax, Rectangle instance) if a rect is highlighted
Otherwise return None
#""
return self._highlight
def highlight_span(self):
'return the min/max of current highlight or raise if not highlight'
if self._highlight is None: return None, None
xmin, xmax, rect = self._highlight
return xmin, xmax
def remove_selected(self):
#""
remove the selected annotation from the ann data struct and
the plot stuff and redraw
#""
thisann = self.eegplot.annman.ann.pop(self.selectedkey) #trying to make this work with python's list.pop()
if thisann is None:
return
for rect in thisann['rects'] :
self.eegplot.axes.patches.remove(rect)
self.selectedkey = None
self.eegplot.draw()
# Update Annotations menuitems sensitivity
label = 'Create New'
menuItemAnnCreateEdit = Shared.widgets.get_widget('menuItemAnnCreateEdit')
menuItemAnnCreateEdit.get_children()[0].set_text(label)
menuItemAnnDelete = Shared.widgets.get_widget('menuItemAnnDelete')
menuItemAnnDelete.set_sensitive(0)
def set_selected(self, newkey=None) :
'selected is a start, end key; make that annotation the selected one'
if newkey == self.selectedkey: return
menuItemAnnCreateEdit = Shared.widgets.get_widget('menuItemAnnCreateEdit')
menuItemAnnDelete = Shared.widgets.get_widget('menuItemAnnDelete')
if self.selectedkey is not None:
# unselect the old one if there is one
rects = self.ann[self.selectedkey].get('rects')
if rects is not None :
for rect in rects :
rect.set_edgecolor('k')
rect.set_linewidth(1)
if newkey is None:
# Update Annotations menuitems sensitivity
menuItemAnnDelete.set_sensitive(0)
else :
# now set the props of the new one
rects = self.ann[newkey]['rects']
for rect in rects :
rect.set_edgecolor('r')
rect.set_linewidth(3)
self.canvas.draw()
# Update Annotations menuitems sensitivity
menuItemAnnCreateEdit.get_children()[0].set_text('Edit Selected')
menuItemAnnDelete.set_sensitive(1)
self.selectedkey = newkey
def start_resize(self, side) :
self.resize = True
self.resize_side = side
self.background = self.eegplot.canvas.copy_from_bbox(self.eegplot.axes.bbox)
def end_resize(self) :
self.resize = False
self.background = None
self.eegplot.draw()
def resize_selected(self, s, e) :
rects = self.ann[self.selectedkey]['rects']
for rect in rects :
rect.set_x(s)
rect.set_width(e - s)
# Update key if it changed.
newkey = '%1.1f' % s, '%1.1f' % e
if newkey <> self.selectedkey :
self.ann[newkey] = self.ann[self.selectedkey]
self.ann[newkey]['startTime'] = s
self.ann[newkey]['endTime'] = e
del self.ann[self.selectedkey]
self.selectedkey = newkey
self.eegplot.canvas.restore_region(self.background)
for rect in rects :
self.eegplot.axes.draw_artist(rect)
self.eegplot.canvas.blit(self.eegplot.axes.bbox)
def update_annotations(self) :
#""
Create new annotation rectangles on file load or navigation
#""
tmin, tmax = self.eegplot.get_time_lim()
keys = self.ann.keys()
keys.sort()
for key in keys :
# Remove rects that are not visible.
if not self.ann[key].get('visible') :
rects = self.ann[key].get('rects')
if rects is not None :
for rect in rects :
self.eegplot.axes.patches.remove(rect)
del self.ann[key]['rects']
if self.selectedkey == key :
self.set_selected()
continue
# Start or end of annotation box is in view
s = self.ann[key]['startTime']
e = self.ann[key]['endTime']
if not ( (s > tmin and s < tmax) or
(e > tmin and e < tmax) ) : continue
# Draw/Update annotation box.
rects = self.ann[key].get('rects')
if rects is None:
rects = []
if self.ann[key]['shrink'] :
channel_numd = self.eegplot.get_eeg().get_amp().get_electrode_to_indices_dict()
eoiActive = self.eegplot.get_eoi()
group = []
for i, trode in enumerate(eoiActive) :
if trode in self.ann[key]['eoi'] :
group.append(i)
else :
if len(group) :
ymin = self.eegplot.offsets[group[-1]]
ymax = self.eegplot.offsets[group[0]]
rect = self._new_rect(s, e, ymin, ymax, zorder=3)
rects.append(rect)
group = []
if len(group) :
ymin = self.eegplot.offsets[group[-1]]
ymax = self.eegplot.offsets[group[0]]
rect = self._new_rect(s, e, ymin, ymax, zorder=3)
rects.append(rect)
else :
rect = self._new_rect(s, e, 0, 1, zorder=3)
rects = [rect]
self.ann[key]['rects'] = rects
# Set some rect properties.
for rect in self.ann[key]['rects'] :
rect.set_facecolor(self.ann[key]['color'])
rect.set_alpha(self.ann[key]['alpha'])
self.eegplot.draw()
def add_eoi(self, eoi) :
self.eois[eoi.description] = eoi
"""
class EEGPlot(Observer):
"""
CLASS: EEGPlot
DESCR: controls MainWindow's canvas
"""
timeSets = ((1.,.1), (2.,.2), (5.,.5), (10.,1.), (20.,2.),
(50.,5.), (100., 10.), (200., 20.))
voltSets = (.1, .2, .5, .75, 1., 2., 5., 7.5,
10., 20., 50., 75., 100., 200., 500., 750,
1000., 1250., 1500. , 1750., 2000., 2100., 2300., 2500., 3000., 3500., 4000., 4500., 5000., 7500.,
10000., 20000., 50000., 75000., 150000., 300000.)
colorOrder = ('b','k','g','c','m')
def __init__(self, eeg, canvas):
Observer.__init__(self)
eeg.load_data()
self.canvas = canvas
self.figure = canvas.figure
self.axes = self.figure.axes[0]
self.axes.cla()
self.eeg = eeg
self.cnumDict = self.eeg.get_amp().get_channel_num_dict()
#self.annman = AnnotationManager(self)
amp = eeg.get_amp()
eoi = amp.to_eoi()
self.colord = {}
colorInd = 0
for gname, gnum in eoi:
gname = gname.lower()
color = self.colord.get(gname.lower())
if color is None:
color = self.colorOrder[colorInd % len(self.colorOrder)]
self.colord[gname] = color
colorInd += 1
self._selected = eoi[0]
self.set_eoi(eoi)
self.timeInd = 3
self.voltInd = 27
self.maxLabels = 36
self.decimateFactor = 1 #this is set when toggled by the user
self.filterGM = Shared.windowMain.toolbar.buttonGM.get_active()
# mcc XXX: turning off cache
#self._selectedCache = None, None
# Lock the selected electrode.
self.lock_trode = False
# Create a vertical cursor.
self.cursor = Cursor(self.axes, useblit=True, linewidth=1, color='red')
if eegviewrc.horizcursor == 'True' :
self.cursor.horizOn = True
else :
self.cursor.horizOn = False
if eegviewrc.vertcursor == 'True' :
self.cursor.vertOn = True
else :
self.cursor.vertOn = False
# mcc XXX: map for whether or not to rectify/DC offset/lowpass filter a given (e.g. EMG) channel
#self.rectifyChannels = Set()
def get_color(self, trode):
gname, gnum = trode
gname = gname.lower()
return self.colord[gname]
def recieve(self, event, *args):
#if event in (Observer.SET_TIME_LIM,):
# tmin, tmax = args
# print "EEGVIEW.EEGVIEW.recieve: set_time_lim"
# self.set_time_lim(tmin, tmax, updateData=False, broadcast=False)
# self.plot()
# self.draw()
if event==Observer.SAVE_FRAME:
fname = args[0] + '.png'
width, height = self.canvas.get_width_height()
# matplotlib needs to have get_pixmap() (in backends/FigureCanvasGTKAgg)
pixmap = self.canvas._pixmap
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8,
width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
pixbuf.save(fname, 'png')
try:
Shared.windowMain.update_status_bar(
'Saved frame: %s' % fname)
except AttributeError: pass
elif event == Observer.SELECT_CHANNEL:
trode = args[0]
gname, gnum = trode
self.set_selected((gname, gnum))
elif event == Observer.GMTOGGLED:
button = args[0]
self.filterGM = button.get_active()
tmin, tmax = self.get_time_lim()
t, data, freq = self.filter(tmin, tmax)
for ind, line in zip(self.indices, self.lines):
line.set_data(t, data[:,ind])
self.draw()
elif event == Observer.LOCK_TRODE_TOGGLED :
button = args[0]
self.lock_trode = button.get_active()
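# Note on the recieve()/broadcast() pair used throughout this file: it is a plain
# observer pattern. broadcast(event, *args) presumably walks Observer.observers and
# calls recieve(event, *args) on each registered object (the Observer base class is
# defined elsewhere in pbrain, so this is an assumption based on how it is used here).
# A minimal sketch of that idea, kept as comments so nothing here executes:
#
#   class Observer(object):
#       observers = []
#       def __init__(self):
#           Observer.observers.append(self)
#       def broadcast(self, event, *args):
#           for obs in Observer.observers:
#               obs.recieve(event, *args)   # spelling kept to match this codebase
#       def recieve(self, event, *args):
#           pass                            # overridden by EEGPlot, SpecPlot, etc.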
def draw(self):
self.canvas.draw()
def get_selected(self, filtergm=False):
'return t, data[ind], trode'
print "EEGPlot.get_selected()"
tmin, tmax = self.get_time_lim()
key = (tmin, tmax, self._selected, filtergm)
#keycache, retcache = self._selectedCache
#if keycache==key: return retcache
t, data = self.eeg.get_data(tmin, tmax)
# mccXXX : why does this line exist?
data = -data
if filtergm:
data = filter_grand_mean(data)
ind = self.eoiIndDict[self._selected]
print "EEGPlot.get_selected(): data.shape is ", data.shape, " and we are about to index it like data[:,%d]" % self.indices[ind]
ret = t, data[:,self.indices[ind]], self._selected
#self._selectedCache = key, ret
return ret
def get_selected_window(self, filtergm=False, extraTime=0):
'return t, data[ind], trode'
tmin, tmax = self.get_time_lim()
print "get_selected_window: ", tmin, tmax
# XXX mcc, taking this out for neuroscanascii format which doesn't handle negative vals well
#tmin -= extraTime/2.
#tmax += extraTime/2.
#key = (tmin, tmax, self._selected, filtergm)
#keycache, retcache = self._selectedCache
#if keycache==key: return retcache
print "get_selected_window(tmin=",tmin,"tmax=",tmax,")"
t, data = self.eeg.get_data(tmin, tmax)
# mcc XXX : why does this line exist?
#data = -data
if filtergm:
print "EEGPlot.get_selected(): filtering grand mean"
data = filter_grand_mean(data)
ind = self.eoiIndDict[self._selected]
ret = t, data[:,self.indices[ind]], self._selected
#self._selectedCache = key, ret
return ret
def get_eoi(self):
# XXX mcc: we want to return a copy here, because otherwise view3 can
# remove our EOIs!!
#return list(self.eoi)
return self.eoi
def set_eoi(self, eoi):
print "eegview.set_eoi(",eoi,")"
try:
#print self.eeg.get_amp()
self.indices = eoi.to_data_indices(self.eeg.get_amp())
except KeyError:
msg = exception_to_str('Could not get amplifier indices for EOI')
try: parent = Shared.windowMain.widget
except AttributeError: parent = None
error_msg(msg, title='Error', parent=parent)
return 0
self.eoi = eoi
self.eoiIndDict = dict([ (trode, i) for i, trode in enumerate(self.eoi)])
if not self.eoiIndDict.has_key(self._selected):
self._selected = self.eoi[0]
# Remove annotation rects, so they will get redrawn on the next
# update_annotations()
#self.annman.remove_rects()
return True
def get_eeg(self):
return self.eeg
def find_decimate_factor(self, lpcf = 40):
print "EEGPlot.find_decimate_factor(): calculating decimation factor"
print "EEGPlot.find_decimate_factor(): eeg.freq is ", self.eeg.freq
Nyq = self.eeg.freq/2
self.decimateFactor = int(Nyq/lpcf) #a decimation factor has to be an integer as it turns out-eli
if self.decimateFactor == 0:
self.decimateFactor = 1 #take care of dividebyzero errors - this shouldn't happen anyway when Nyq is high enough (ie when freq is high enough ~500)
print "EEGPlot.find_decimate_factor: ", self.decimateFactor
return self.decimateFactor
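# Worked example of the arithmetic above (illustration only): at a 500 Hz sampling
# rate the Nyquist frequency is 250 Hz, so with the default lowpass corner of 40 Hz
# the factor is int(250/40) = 6, i.e. keep every 6th sample; at 200 Hz it is
# int(100/40) = 2. Below about 80 Hz the integer division hits 0 and is clamped to 1.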
def filter(self, tmin, tmax, lpcf=40, lpsf=55, hpcf=None, hpsf=None):
"""
lpcf: low pass corner freq=40 (Hz)
lpsf: low pass stop freq=55 (Hz)
hpcf: high pass corner freq=None
hpsf: high pass stop freq=None
A lowpass-decimate filter first uses a lowpass to smooth out the data, and
then keeps only every Nth sample according to the decimation factor, in
order to speed up processing. Here we use a Butterworth lowpass - this was
here before I got here, but I think there must be simpler options. -eli
"""
print "\n========\nEEGPlot.filter(%f, %f, ...)" % (tmin, tmax)
try: t, data = self.eeg.get_data(tmin, tmax)
except KeyError, msg:
msg = exception_to_str('Could not get data')
error_msg(msg)
return None
#data = -data # invert neg up #why?
if self.filterGM:
data = filter_grand_mean(data)
Nyq = self.eeg.freq/2
#as of now we do a lowpass filter regardless of whether the decimation factor is > 1. -eli
Rp, Rs = 2, 20
Wp = lpcf/Nyq
Ws = lpsf/Nyq
[n,Wn] = buttord(Wp,Ws,Rp,Rs)
print "EEGPlot.filter(): [n,Wn] = buttord(Wp= ", Wp, ",Ws=", Ws, ",Rp=", Rp, ",Rs=", Rs, ") = [", n, "," , Wn, "]"
[b,a] = butter(n,Wn)
print "EEGPlot.filter(): [b,a] = butter(n=" , n , " , Wn=", Wn, ") = [", b, ",", a, "]"
print "EEGPlot.filter(): doing transpose(lfilter(b,a,transpose(data)))"
data = transpose( lfilter(b,a,transpose(data)))
decfreq = self.eeg.freq/self.decimateFactor
self.decfreq = decfreq
#print "EEGPlot.filter(): decimateFactor = int(Nyq=%f/lpcf=%d) = " % (Nyq, lpcf), decimateFactor, "self.decfreq=(eeg.freq=%f)/(%d) = " % (self.eeg.freq, decimateFactor), self.decfreq
#are all of the above commented lines really not needed anymore? -eli
print "EEGPlot.filter(): returning decimated data t[::%d], data[::%d], %f" % (self.decimateFactor, self.decimateFactor, decfreq)
return t[::self.decimateFactor], data[::self.decimateFactor], decfreq #the "::" takes every decimateFactorth value from each array!
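# The method above boils down to "Butterworth lowpass, then keep every Nth sample".
# A self-contained sketch of the same idea (buttord/butter/lfilter are the scipy.signal
# routines already used above; the toy signal and numbers here are made up):
#
#   from numpy import arange, sin, pi
#   from scipy.signal import buttord, butter, lfilter
#
#   freq = 400.0                                  # sampling rate in Hz
#   nyq = freq / 2.0
#   n, wn = buttord(wp=40.0/nyq, ws=55.0/nyq, gpass=2, gstop=20)
#   b, a = butter(n, wn)                          # lowpass Butterworth coefficients
#   t = arange(0, 10, 1.0/freq)
#   x = sin(2*pi*5*t)                             # toy 5 Hz signal
#   y = lfilter(b, a, x)                          # filtered signal
#   dec = int(nyq / 40.0)                         # as in find_decimate_factor()
#   y_dec = y[::dec]                              # decimated; new rate is freq/dec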
def plot(self):
print "EEGPlot.plot()"
self.axes.cla()
tmin, tmax = self.get_time_lim() #it turns out hardcoding 0,10 in this function was ahem counterproductive -eli
#print "EEGPLOT.plot(): tmn, tmax ", tmin, tmax
#let's take out filtering for some tests
#t, data, freq = self.filter(tmin, tmax)
try: t, data = self.eeg.get_data(tmin, tmax)
except KeyError, msg:
msg = exception_to_str('Could not get data')
error_msg(msg)
return None
freq = self.eeg.freq
#print "EEGplot filtertest: ", data[0:10]
dt = 1/freq
self.lines = []
skip = max(1, len(self.indices)//self.maxLabels)
count = 0
amp = self.eeg.get_amp()
labels = []
locs = []
maxo = 0.975
mino = 0.025
N = len(self.indices)
offsets = 1.0-((maxo-mino)/N*arange(N) + mino)
self.offsets = offsets
vset = self.voltSets[self.voltInd]
#old transformation block
"""
boxin = Bbox(
Point(self.axes.viewLim.ll().x(), Value(-vset)),
Point(self.axes.viewLim.ur().x(), Value(vset)))
boxout = Bbox(
Point(self.axes.bbox.ll().x(), Value(-72)),
Point(self.axes.bbox.ur().x(), Value(72)))
transOffset = get_bbox_transform(
unit_bbox(),
Bbox( Point( Value(0), self.axes.bbox.ll().y()),
Point( Value(1), self.axes.bbox.ur().y())
))
"""
#new transformation block
#updated by removing the Point and Value methods and simply passing four points to
#Bbox(); this may be a bad idea... I tried passing them to Bbox.set_points(), but that
#method seems to be either not working or badly documented.
#also, viewLim is deprecated from what I can tell, so I'll try to use axes.get_xlim()
viewLimX=self.axes.get_xlim() #this returns a list of min and max x points, which is what we want to pass below
#print "************", viewLimX
boxin = Bbox(
[[viewLimX[0], -vset], #replaced self.axes.viewLim.ll().x() with viewLimX
[viewLimX[1], vset]])
#does this work? yes! there actually is a bbox living in axes, for whatever reason, and this method returns all four points as an array of the form [[x0,y0],[x1,y1]]. the bbox that we rebuild below is (hopefully!) taking the x values of the two points.
axesBboxCoords = self.axes.bbox.get_points()
boxout = Bbox(
[[axesBboxCoords[0][0], -72], #see comment above: I replaced self.axes.bbox.ll().x() with axesBboxCoords[0][0]
[axesBboxCoords[1][0], 72]])
transOffset = BboxTransform(
Bbox.unit(), # ([[0,0], [1,1]]), #replaced unit_bbox with unit()
Bbox( [[0, axesBboxCoords[0][1]],
[1, axesBboxCoords[1][1]]]
))
assert len(self.indices) == len(offsets), 'indices and offsets have different length'
pairs = zip(self.indices, offsets)
labeld = amp.get_dataind_dict()
for ind, offset in pairs:
trode = labeld[ind]
color = self.get_color(trode)
if self._selected==trode: color='r'
trans = BboxTransform(boxin, boxout) #switched to BboxTransform
#print "EEGPlot.plot(): " , data.shape, ind, len(pairs), self.eeg.channels
#set_offset is way deprecated. I'm going to use a tip from the newer transforms_tutorial on the matplotlib.sourceforge page.
#the basic idea is to use ScaledTranslation, which creates an offset that can then be added to the original trans.
#trans.set_offset((0, offset), transOffset)
#so, these two lines below which I've written seem to work at offsetting the lines to where they need to go. -eli
#note: for some reason, in nipy pbrain the original trans.set_offset was written _after_ the call to Line2D
newtrans = ScaledTranslation(0,offset,transOffset)
trans = trans + newtrans
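#aside on the two lines above: in newer matplotlib, transforms compose with '+', and
#ScaledTranslation(xt, yt, scale_trans) shifts an artist by (xt, yt) measured in the
#coordinate system of scale_trans. Schematically (the names below stand in for the
#locals used in this loop):
#
#   from matplotlib.transforms import ScaledTranslation
#   trans = data_to_display + ScaledTranslation(0, offset, transOffset)
#   line.set_transform(trans)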
thisLine = Line2D(t, data[:,ind],
color=color,
linewidth=0.75,
linestyle='-',
clip_on=True #added this kwarg
)
thisLine.set_transform(trans)
#thisLine.set_data_clipping(False) #deprecated
#should the following be commented out?
#thisLine.set_lod(on=1)
self.lines.append(thisLine)
self.axes.add_line(thisLine)
if count % skip == 0:
labels.append('%s%d' % trode)
locs.append(offset)
count += 1
#print 'locs', labels[0], locs[0], self.offsets[0]
#self.set_time_lim(tmin,tmax, updateData=False) #I fixed this and then realized it was redundant anyway -eli
self.axes.set_yticks(locs)
labels = self.axes.set_yticklabels(labels, fontsize=8)
for tick in self.axes.yaxis.get_major_ticks():
tick.label1.set_transform(self.axes.transAxes)
tick.label2.set_transform(self.axes.transAxes)
tick.tick1line.set_transform(self.axes.transAxes)
tick.tick2line.set_transform(self.axes.transAxes)
tick.gridline.set_transform(self.axes.transAxes)
print "EEGPlot.plot(): successful"
# Update annotation boxes
#self.annman.update_annotations()
self.save_excursion()
self.draw()
# XXX: mcc: what is this for ?
def restore_excursion(self):
try: self.saveExcursion
except AttributeError: return
tmin, self.timeInd, self.voltInd = self.saveExcursion
self.set_time_lim(tmin, updateData=True)
def save_excursion(self):
tmin, tmax = self.get_time_lim()
self.saveExcursion = (tmin, self.timeInd, self.voltInd)
def get_max_labels(self):
return 25
def change_time_gain(self, magnify=1):
"""Change the time scale. zoom out with magnify=0, zoom in
with magnify=1)"""
# keep the index in bounds
if magnify and self.timeInd>0:
self.timeInd -= 1
if not magnify and self.timeInd<(len(self.timeSets)-1):
self.timeInd += 1
origmin, origmax = self.get_time_lim()
wid, step = self.timeSets[self.timeInd]
xmin = origmin
xmax = origmin+wid
self.set_time_lim(xmin, xmax, updateData=False)
def change_volt_gain(self, magnify=1):
#note: I had to seriously take this function apart further down. -eli
"""Change the voltage scale. zoom out with magnify=0, zoom in
with magnify=1)"""
#print "EEGPlot.change_volt_gain: magnify=%d, self.voltInd=%d" % (magnify, self.voltInd)
# keep the index in bounds
if magnify and self.voltInd>0:
self.voltInd -= 1
if not magnify and self.voltInd<(len(self.voltSets)-1):
self.voltInd += 1
#print "new self.voltInd=%d" % self.voltInd
vset = self.voltSets[self.voltInd]
#print "vset = self.voltSets[%d]" % self.voltInd
#note: matplotlib no longer exposes the constructors of a CompositeAffine2D object.
#This used to be done with the get_bbox1 method, but of course this is deprecated and
#not replaced by ANYTHING. So, using Python's built-in (hacky) __dict__ below, I extract
#the input bbox and change its y values to the vset chosen above; because Python keeps
#references rather than copies, mutating it in place does the trick. I am not John and
#I do not wish to contribute to matplotlib at this time (actually I do but I have other
#things to do!!) but I wish someone would fix this and then tell me. -eli
for line in self.lines:
trans = line.get_transform()
boxin = trans.__dict__['_a'].__dict__['_boxin'].__dict__['_points_orig']
#print boxin
x0 = boxin[0][0]
x1 = boxin[1][0]
y0 = -vset
y1 = vset
boxin = Bbox(
[[x0,y0],
[x1,y1]])
#print boxin
#box1 = trans.get_bbox1()
#print "calling line.get_transform().get_bbox1().intervaly().set_bounds(-vset, vset)", box1
#boxin.intervaly().set_bounds(-vset, vset)
#print "end of EEGPlot.change_volt_gain()"
def pan_time(self, right=1):
"""Pan the time axis to the right or left"""
# keep the index in bounds
wid, step = self.get_twid_step()
tmin, tmax = self.get_time_lim()
#print "pan_time tmin,tmax: ", tmin, tmax
step *= right
#print "pan_time step: ", step
self.set_time_lim(tmin+step)
#self.plot() #update the plot! eli
def get_time_lim(self,):
return self.axes.get_xlim()
def get_twid_step(self):
#print "get_twid_step(): ", self.timeSets[self.timeInd]
return self.timeSets[self.timeInd] #this is already the (width, step) pair for the current zoom level
#the code below is unreachable because of the return above; it derived the
#width/step from the current ticks instead of from timeSets:
#ticks = self.axes.get_xticks()
#wid = ticks[-1] - ticks[0]
#step = ticks[1] - ticks[0]
#print "get_twid_step(): ", wid, step
#return wid, step
def set_time_lim(self, xmin=None, xmax=None,
updateData=False, broadcast=True):
#make sure xmin keeps some eeg on the screen
print "EEGPLOT.set_time_lim broadcast=", broadcast, " update data=",updateData
print "EEGPlot.set_time_lim(xmin=", xmin, "xmax=", xmax, ")"
origmin, origmax = self.get_time_lim()
#print "EEGPlot.set_time_lim(): origmin, origmax = ", origmin, origmax
if xmin is None: xmin = origmin
if xmax is None:
wid, step = self.get_twid_step()
xmax = xmin+wid
else:
wid = xmax-xmin
step = wid/10.0
print "EEGPlot.set_time_lim(): axes.set_xlim(", [xmin, xmax], ")"
self.axes.set_xlim([xmin, xmax])
ticks = arange(xmin, xmax+0.001, step)
print "EEGPlot.set_time_lim(): axes.set_xticks(", ticks, ")"
self.axes.set_xticks(ticks)
def fmt(val):
if val==int(val): return '%d' % val
else: return '%1.1f' % val
#self.axes.set_xticklabels([fmt(val) for val in ticks])
self.axes.set_xticklabels([])
if updateData:
print "EEGPlot.set_time_lim(): update data"
# let's take out filtering for some tests
try: t, data = self.eeg.get_data(xmin, xmax)
except KeyError, msg:
msg = exception_to_str('Could not get data')
error_msg(msg)
return None
freq = self.eeg.freq
#t, data, freq = self.filter(xmin, xmax)
self.axes.set_xlim((xmin, xmax))
for ind, line in zip(self.indices, self.lines):
line.set_data(t, data[:,ind])
self.plot()
#we'll let the observer take care of this
#self.axesSpec.set_xlim([xmin,xmax])
#self.axesSpec.set_xticklabels(ticks)
# notify the observers
if broadcast:
print "EEGPLOT: Broadcasting set time lim"
self.broadcast(Observer.SET_TIME_LIM, xmin, xmax)
def get_channel_at_point(self, x, y, select=True):
"Get the EEG with the voltage trace nearest to x, y (window coords)"
# avoid a pygtk queue handling error
if not hasattr(self, 'decfreq'):
return None
tmin, tmax = self.get_time_lim()
dt = 1/self.decfreq
t, yt = self.axes.transData.inverted().transform( (x,y) ) #replaced inverse_xy_tup with inverted().transform()
ind = int((t-tmin)/dt)
ys = zeros( (len(self.lines), ), 'h')
xdata = self.lines[0].get_xdata()
if ind>=len(xdata): return None
thisx = xdata[ind]
for i, line in enumerate(self.lines):
thisy = line.get_ydata()[ind]
trans = line.get_transform()
xt, yt = trans.transform((thisx, thisy)) #replaced xy_tup with transform
ys[i] = yt
ys = absolute(ys-y)
matches = nonzero(ys==min(ys))
ind = matches[0]
labeld = self.eeg.amp.get_dataind_dict()
# XXX: had to change this for some reason with latest scipy/numpy -- mcc
trode = labeld[self.indices[ind[0]]]
#trode = labeld[self.indices[ind]]
gname, gnum = trode
if select :
ok = self.set_selected((gname, gnum))
if ok: self.broadcast(Observer.SELECT_CHANNEL, trode)
return trode
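# Coordinate-handling note for the method above: ax.transData.transform((x, y)) maps
# data coordinates to display (pixel) coordinates, and ax.transData.inverted() gives
# the reverse mapping - that is the whole trick used to find the trace nearest the
# mouse. Sketch (ax stands for any matplotlib Axes):
#
#   xpix, ypix = ax.transData.transform((xdata, ydata))
#   xdata, ydata = ax.transData.inverted().transform((xpix, ypix))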
def set_selected(self, trode):
lastind = self.eoiIndDict[self._selected]
ind = self.eoiIndDict[trode]
lastcolor = self.get_color(self._selected)
self.lines[lastind].set_color(lastcolor)
self._selected = trode
self.lines[ind].set_color('r')
self.canvas.draw()
Shared.windowMain.update_status_bar('Selected %s %d' % trode)
return True
class SpecPlot(Observer):
"""
CLASS: SpecPlot
DESCR: spectrogram
"""
propdlg = SpecProps()
flim = 0, 40 # the default y-axis (frequency) limits
clim = None # the colormap limits
def __init__(self, axes, canvas, eegplot):
Observer.__init__(self)
self.axes = axes
self.canvas = canvas
self.eegplot = eegplot
self.cmap = cm.jet
# min and max power
def make_spec(self, *args):
NFFT, Noverlap = (512, 477)
selected = self.eegplot.get_selected_window(extraTime=float(NFFT)/float(self.eegplot.eeg.freq))
#selected = self.eegplot.get_selected()
print "SpecPlot.make_spec(): selected = ", selected
if selected is None:
self.axes.cla()
t = self.axes.text(
0.5, 0.5,
'Click on EEG channel for spectrogram (scroll mouse to expand)',
verticalalignment='center',
horizontalalignment='center',
)
t.set_transform(self.axes.transAxes)
xmin, xmax = self.eegplot.get_time_lim()
self.axes.set_xlim( [xmin, xmax] )
self.axes.set_xticks( self.eegplot.axes.get_xticks() )
return
flim = SpecPlot.flim
clim = SpecPlot.clim
torig, data, trode = selected
gname, gnum = trode
label = '%s %d' % (gname, gnum)
Fs = self.eegplot.eeg.freq
self.axes.cla()
xmin, xmax = self.eegplot.get_time_lim()
xextent = xmin, xmax
print "make spec: xmin, xmax: ", xmin, xmax
#try:
#print "SpecPlot.make_spec(): calling specgram(data=", data.shape, "NFFT=%d, Fs=%d, noverlap=%d, xextent=" % (NFFT, Fs, Noverlap), xextent, ")"
Pxx, freqs, t, im = self.axes.specgram(
data, NFFT=NFFT, Fs=Fs, noverlap=Noverlap,
cmap=self.cmap, xextent=xextent)
#print "SpecPlot.make_spec(): Pxx.shape is", Pxx.shape, "t is", t
#except OverflowError, overflowerror:
# print "caught overflow error!! bailing: ", overflowerror
# f = file("make_spec-%d-%f-%f.overflow.pickle" % (gnum, xmin, xmax), "w")
# pickle.dump(data, f)
# f.close()
# return
if clim is not None:
im.set_clim(clim[0], clim[1])
t = t + min(torig)
Z = 10*log10(Pxx)
#print "type(Z) is" , type(Z)
#I fixed this using numpy's min and max but this should work too -eli
self.pmin = minimum.reduce(minimum.reduce(Z))
self.pmax = maximum.reduce(maximum.reduce(Z))
#self.eegplot.set_time_lim(xmin=None, xmax=None,
# updateData=False, broadcast=False)
#self.axes.set_xlim( [xmin, xmax] )
#self.axes.set_xticks( self.eegplot.axes.get_xticks() )
print "SpecPlot.make_spec: xticks = ", self.eegplot.axes.get_xticks()
#self.axes.set_title('Spectrogram for electrode %s' % label)
#self.axes.set_xlabel('TIME (s)')
self.axes.set_ylabel('FREQUENCY (Hz)')
self.axes.set_ylim(flim)
if flim[1]-flim[0]>=100:
self.axes.set_yticks(arange(flim[0], flim[1]+1, 20))
else:
self.axes.set_yticks(arange(flim[0], flim[1]+1, 10))
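# A minimal, standalone version of the spectrogram computation above (a sketch; the
# synthetic signal and names are made up). axes.specgram() additionally draws and
# returns the image; matplotlib.mlab.specgram() returns just the numbers:
#
#   from numpy import arange, sin, pi, log10
#   from matplotlib.mlab import specgram
#
#   Fs = 400.0
#   t = arange(0, 10, 1.0/Fs)
#   x = sin(2*pi*10*t)                            # toy 10 Hz signal
#   Pxx, freqs, bins = specgram(x, NFFT=512, Fs=Fs, noverlap=477)
#   Z = 10*log10(Pxx)                             # power in dB, as used for pmin/pmax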
def recieve(self, event, *args):
#note: this gets called on a timescale update -eli
if event in (Observer.SELECT_CHANNEL, Observer.SET_TIME_LIM):
self.make_spec()
self.canvas.draw()
def set_properties(self, *args):
dlg = SpecPlot.propdlg
dlg.show()
if not len(dlg.entryCMin.get_text()) and hasattr(self, 'pmin'):
dlg.entryCMin.set_text('%1.2f'%self.pmin)
if not len(dlg.entryCMax.get_text()) and hasattr(self, 'pmax'):
dlg.entryCMax.set_text('%1.2f'%self.pmax)
while 1:
response = dlg.run()
if response in (gtk.RESPONSE_OK, gtk.RESPONSE_APPLY):
b = dlg.validate()
if not b: continue
SpecPlot.flim = dlg.get_flim()
SpecPlot.clim = dlg.get_clim()
self.make_spec()
self.canvas.draw()
if response==gtk.RESPONSE_OK:
dlg.hide()
break
else:
dlg.hide()
break
class MainWindow(PrefixWrapper):
"""
CLASS: MainWindow
DESCR: represents XML'd widget tree and other dynamic GUI elements
"""
prefix = ''
widgetName = 'windowMain'
gladeFile = 'main.glade'
win = None
def __init__(self):
if os.path.exists(self.gladeFile):
#print "opening %s" % self.gladeFile
theFile=self.gladeFile
elif os.path.exists(os.path.join('gui', self.gladeFile)):
#print "opening %s" % os.path.join('gui', self.gladeFile)
theFile=os.path.join('gui', self.gladeFile)
else:
#print "opening %s" % os.path.join(distutils.sysconfig.PREFIX,
# 'share', 'pbrain', self.gladeFile)
theFile = os.path.join(
distutils.sysconfig.PREFIX,
'share', 'pbrain', self.gladeFile)
print "MainWindow.__init__(): uhh the file is " , theFile
try: Shared.widgets = gtk.glade.XML(theFile)
except:
raise RuntimeError('Could not load glade file %s' % theFile)
PrefixWrapper.__init__(self)
self._isConfigured = False
self.patient = None
figsize = eegviewrc.figsize
self.fig = Figure(figsize=figsize, dpi=72)
self.canvas = FigureCanvas(self.fig) # a gtk.DrawingArea
self.canvas.set_size_request(800, 640)
self.canvas.connect("scroll_event", self.scroll_event)
self.canvas.show()
#self.fig = Figure(figsize=(7,5), dpi=72)
t = arange(0.0,50.0, 0.01)
xlim = array([0,10])
self.axes = self.fig.add_axes([0.075, 0.25, 0.9, 0.725], axisbg='#FFFFCC')
self.axes.plot(t, sin(2*0.32*pi*t) * sin(2*2.44*pi*t) )
self.axes.set_xlim([0.0,10.0])
self.axes.set_xticklabels([])
self.axesSpec = self.fig.add_axes([0.075, 0.05, 0.9, 0.2])
t = self.axesSpec.text(
0.5, 0.5,
'Click on EEG channel for spectrogram (scroll mouse to expand)',
verticalalignment='center',
horizontalalignment='center',
)
t.set_transform(self.axes.transAxes)
self.axesSpec.set_xlim([0.0,10.0])
self.axesSpec.set_xticklabels([])
self.axesSpec.set_yticklabels([])
self.win = self['windowMain']
self.win.move(0,0)
self['vboxMain'].pack_start(self.canvas, True, True)
self['vboxMain'].show()
self.toolbar = EEGNavBar( self.canvas, self['windowMain'])
self.toolbar.show()
self['vboxMain'].pack_start(self.toolbar, False, False)
self.statbar = gtk.Statusbar()
self.statbar.show()
self.statbarCID = self.statbar.get_context_id('my stat bar')
self['vboxMain'].pack_start(self.statbar, False, False)
self.update_status_bar('')
self.buttonDown = None
fsize = self.fig.get_size_inches()
self.fsize = copy.deepcopy(fsize)
"""
# Init Annotations menu sensitivity.
menuItemAnnBrowser = Shared.widgets.get_widget('menuItemAnnBrowser')
menuItemAnnBrowser.set_sensitive(0)
menuItemAnnCreateEdit = Shared.widgets.get_widget('menuItemAnnCreateEdit')
menuItemAnnCreateEdit.set_sensitive(0)
menuItemAnnDelete = Shared.widgets.get_widget('menuItemAnnDelete')
menuItemAnnDelete.set_sensitive(0)
menuItemAnnHorizCursor = Shared.widgets.get_widget('menuItemAnnHorizCursor')
menuItemAnnHorizCursor.set_sensitive(0)
if eegviewrc.horizcursor == 'True' :
menuItemAnnHorizCursor.set_active(1)
else :
menuItemAnnHorizCursor.set_active(0)
menuItemAnnVertCursor = Shared.widgets.get_widget('menuItemAnnVertCursor')
menuItemAnnVertCursor.set_sensitive(0)
if eegviewrc.vertcursor == 'True' :
menuItemAnnVertCursor.set_active(1)
else :
menuItemAnnVertCursor.set_active(0)
"""
self.canvas.mpl_connect('motion_notify_event', self.motion_notify_event)
self.canvas.mpl_connect('button_press_event', self.button_press_event)
self.canvas.mpl_connect('button_release_event', self.button_release_event)
def update_status_bar(self, msg):
self.statbar.pop(self.statbarCID)
mid = self.statbar.push(self.statbarCID, 'Message: ' + msg)
def menu_select_eeg(self, eeg):
amp = eeg.get_amp()
if amp.message is not None:
simple_msg(amp.message, title='Warning',
parent=Shared.windowMain.widget)
try: self.eegplot
except AttributeError: pass
else: Observer.observers.remove(self.eegplot)
try: self.specPlot
except AttributeError: pass
else: Observer.observers.remove(self.specPlot)
self.eegplot = EEGPlot(eeg, self.canvas)
self.toolbar.set_eegplot(self.eegplot)
self.specPlot = SpecPlot(self.axesSpec, self.canvas, self.eegplot)
self.specMenu = self.make_spec_menu()
eois = eeg.get_associated_files(atype=5, mapped=1)
self.eoiMenu = self.make_context_menu(eois)
self.eegplot.plot()
return False
def make_patients_menu(self):
entries = servers.sql.eeg.select(
where='file_type in (1,4)')
eegMap = {}
for entry in entries:
eegMap.setdefault(entry.pid,[]).append(EEGWeb(entry.get_orig_map()))
pidList = ','.join(map(str,eegMap.keys()))
# make a list of eegs and patients so we can pass an index to
# the callback
menuItemPatients = self['menuitemPatients']
menuPatients = gtk.Menu()
patients = servers.sql.patients.select(
where='pid in (%s) ORDER BY last' % pidList)
for patient in patients:
if not eegMap.has_key(patient.pid): continue
menuItemPatient = gtk.MenuItem(
'%s%s' % (patient.first[:2], patient.last[:2]))
menuItemPatient.show()
menuEEGs = gtk.Menu()
for eeg in eegMap[patient.pid]:
eegLabel = eeg.filename.replace('_', '-')
item = gtk.MenuItem(label=eegLabel)
item.show()
eeg.patient = patient
item.connect_object(
"activate", self.menu_select_eeg, eeg)
menuEEGs.append(item)
menuItemPatient.set_submenu(menuEEGs)
menuPatients.append(menuItemPatient)
menuItemPatients.set_submenu(menuPatients)
def load_eoi(self, eoi):
success = self.eegplot.set_eoi(eoi)
if success:
tmin, tmax = self.eegplot.get_time_lim()
self.eegplot.plot()
self.eegplot.set_time_lim(tmin, tmax, updateData=True)
self.eegplot.draw()
else:
#TODO: popup edit window for eoi
pass
def new_eoi(self, menuitem):
self.edit_eoi()
def make_context_menu(self, eois):
contextMenu = gtk.Menu()
label = "Load EOI"
menuItemLoad = gtk.MenuItem(label)
contextMenu.append(menuItemLoad)
menuItemLoad.show()
menuEOIS = gtk.Menu()
for eoi in eois:
eoiLabel = eoi.filename.replace('_', '-')
item = gtk.MenuItem(label=eoiLabel)
item.show()
item.connect_object(
"activate", self.load_eoi, eoi)
menuEOIS.append(item)
menuItemLoad.set_submenu(menuEOIS)
label = "Save EOI"
menuItemSave = gtk.MenuItem(label)
contextMenu.append(menuItemSave)
menuItemSave.connect("activate", self.save_eoi, 0)
menuItemSave.show()
label = "Save As EOI"
menuItemSaveAs = gtk.MenuItem(label)
contextMenu.append(menuItemSaveAs)
menuItemSaveAs.connect("activate", self.save_eoi, 1)
menuItemSaveAs.show()
label = "Edit EOI"
menuItemEdit = gtk.MenuItem(label)
contextMenu.append(menuItemEdit)
menuItemEdit.connect("activate", self.edit_eoi)
menuItemEdit.show()
label = "New EOI"
menuItemNew = gtk.MenuItem(label)
contextMenu.append(menuItemNew)
menuItemNew.connect("activate", self.new_eoi)
menuItemNew.show()
menuItemSep = gtk.MenuItem()
contextMenu.append(menuItemSep)
menuItemSep.show()
"""
label = "Create New Annotation"
menuItemAnnCreateEdit = gtk.MenuItem(label)
menuItemAnnCreateEdit.connect("activate", self.on_menuItemAnnCreateEdit_activate)
menuItemAnnCreateEdit.show()
contextMenu.append(menuItemAnnCreateEdit)
label = "Delete Annotation"
menuItemAnnDelete = gtk.MenuItem(label)
menuItemAnnDelete.connect("activate", self.on_menuItemAnnDelete_activate)
menuItemAnnDelete.show()
contextMenu.append(menuItemAnnDelete)
"""
menuItemSep = gtk.MenuItem()
contextMenu.append(menuItemSep)
menuItemSep.show()
label = "Edit Channel Filter"
menuItemEdit = gtk.MenuItem(label)
menuItemEdit.connect("activate", self.edit_filter)
menuItemEdit.show()
contextMenu.append(menuItemEdit)
return contextMenu
def make_spec_menu(self):
contextMenu = gtk.Menu()
label = "Set limits"
menuItemSave = gtk.MenuItem(label)
contextMenu.append(menuItemSave)
menuItemSave.connect("activate", self.specPlot.set_properties, 0)
menuItemSave.show()
return contextMenu
def edit_eoi(self, *args):
def ok_callback(eoi):
success = self.eegplot.set_eoi(eoi)
if success:
tmin, tmax = self.eegplot.get_time_lim()
self.eegplot.plot()
self.eegplot.set_time_lim(tmin, tmax,updateData=True)
self.eegplot.draw()
d.destroy_dialog()
return
eoiActive = self.eegplot.get_eoi()
eoiAll = self.eegplot.get_eeg().get_amp().to_eoi()
d = Dialog_SelectElectrodes(trodes=eoiAll,
ok_callback=ok_callback,
selected=eoiActive
)
d.set_transient_for(self.widget)
def edit_filter(self, *args):
"""
This brings up the prefiltering window, which allows one to rectify/hilbert-xform the data
before sending it to external mpl_windows.
"""
def ok_callback(filters):
print "in MainWindow.edit_filter.ok_callback(): filters=", filters
rectifiedChannels = {}
hilbertedChannels = {}
for channel, params in filters.iteritems():
print "filter f is ", channel, params['rectify']
rectifiedChannels[channel]= params['rectify']
hilbertedChannels[channel]= params['hilbert']
self.eegplot.get_eeg().set_rectified(rectifiedChannels)
self.eegplot.get_eeg().set_hilberted(hilbertedChannels)
tmin, tmax = self.eegplot.get_time_lim()
self.eegplot.plot()
self.eegplot.set_time_lim(tmin, tmax,updateData=True)
self.eegplot.draw()
d.destroy_dialog()
return
eoiActive = self.eegplot.get_eoi()
#print "eoiActive is " , eoiActive
eoiAll = self.eegplot.get_eeg().get_amp().to_eoi()
#print "eoiAll is ", eoiAll
rectify_selected = self.eegplot.get_eeg().get_rectified()
hilbert_selected = self.eegplot.get_eeg().get_hilberted()
d = Dialog_FilterElectrodes(trodes=eoiActive,
ok_callback=ok_callback,
rectify_selected=rectify_selected,
hilbert_selected=hilbert_selected
)
d.set_transient_for(self.widget)
def save_eoi(self, menuitem, saveas):
eoi = self.eegplot.get_eoi()
if not self['dlgPref_radiobuttonUseWebOn'].get_active():
# not using the web, write to local filesystem
fname = fmanager.get_filename(
title='Enter filename for EOI')
if not os.path.exists(fname):
basepath, ext = os.path.splitext(fname)
if ext.lower() != '.eoi':
fname += '.eoi'
try:
fh = file(fname, 'w')
fh.write(eoi.to_conf_file())
except IOError:
error_msg('Could not write EOI to %s' % fname,
parent=self.widget)
return
#TODO: handle same filename vs different filename; add a save as?
def ok_callback(m):
pid=self.eegplot.get_eeg().get_pid()
newName = m['filename']
eoiNew = EOI()
eoiNew.extend(eoi)
def new_eoi_success():
eeg = self.eegplot.get_eeg()
success = self.eegplot.set_eoi(eoiNew)
eoiNew.update_map(eeg.get_filename())
eois = eeg.get_associated_files(atype=5, mapped=1)
self.eoiMenu = self.make_context_menu(eois)
dlgSave.hide_widget()
simple_msg('%s successfully uploaded' % newName,
title='Congratulations',
parent=self.widget)
if success: self.eegplot.plot()
# make a new file
try:
eoiNew.new_web(pid, newName)
except NameError:
# fname already exists
def response_callback(dialog, response):
if response==gtk.RESPONSE_YES:
eoiNew.set_exists_web(pid, newName)
eoiNew.update_web()
new_eoi_success()
else: dialog.destroy()
msg = '%s already exists. Overwrite?' % newName
yes_or_no(msg=msg, title='Warning!',
responseCallback=response_callback,
parent=dlgSave.widget)
else: new_eoi_success()
if not saveas and eoi.is_web_file():
eoi.update_web()
simple_msg('%s updated' % eoi.filename,
title='You did it!',
parent=self.widget)
return
dlgSave = Dialog_SaveEOI(eoiActive=self.eegplot.get_eoi(),
eoisAll=self.eegplot.get_eeg().get_eois(),
ok_callback=ok_callback)
dlgSave.get_widget().set_transient_for(self.widget)
dlgSave.show_widget()
"""
def on_menuItemAnnCreateEdit_activate(self, event) :
annman = self.eegplot.annman
params = {}
if annman.selectedkey is not None:
params = annman.ann[annman.selectedkey]
else:
# Create new annotation
now = datetime.datetime.now() #datetime.now() was wrong, using datetime.datetime.now() -eli
hlight = annman.get_highlight()
if hlight is None :
params = dict(created=now.ctime())
else :
start, end = annman.highlight_span()
params = dict(startTime=start, endTime=end,
created=now.ctime())
annman.dlgAnnotate.set_params(params)
# XXX doesn't update when i set the hscale value in the constructor...
# dlgAnnotate['hscaleAlpha'].set_value(.5)
annman.dlgAnnotate.get_widget().set_transient_for(self.widget)
annman.dlgAnnotate.show_widget()
def on_menuItemAnnDelete_activate(self, event) :
dlg = gtk.MessageDialog(type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_YES_NO,
message_format='Are you sure you wish to delete this annotation?')
dlg.set_title('Delete Annotation')
response = dlg.run()
dlg.destroy()
if response == gtk.RESPONSE_YES :
self.eegplot.annman.remove_selected()
self.eegplot.annman.ann.save_data()
self.eegplot.annman.dlgAnnBrowser.update_ann_info()
def on_menuItemAnnHorizCursor_activate(self, checkMenuItem) :
if checkMenuItem.get_active() :
self.eegplot.cursor.horizOn = True
eegviewrc.horizcursor = True
else :
self.eegplot.cursor.horizOn = False
eegviewrc.horizcursor = False
return False
def on_menuItemAnnVertCursor_activate(self, checkMenuItem) :
if checkMenuItem.get_active() :
self.eegplot.cursor.vertOn = True
eegviewrc.vertcursor = True
else :
self.eegplot.cursor.vertOn = False
eegviewrc.vertcursor = False
return False
"""
#def on_buttonSaveExcursion_clicked(self, event):
# self.eegplot.save_excursion()
# return False
#
#def on_buttonRestoreExcursion_clicked(self, event):
# self.eegplot.restore_excursion()
# self.eegplot.draw()
# return False
#def on_buttonJumpToTime_clicked(self, event):
# val = str2num_or_err(self['entryJumpToTime'].get_text(),
# parent=self.widget)
#
# if val is None: return
# self.eegplot.set_time_lim(val)
# self.eegplot.draw()
# return False
def expose_event(self, widget, event):
#now the traces resize themselves on window resize - hurrah! eli
#I had more trouble with this than I care to admit, which explains the messiness of the code
try: self.eegplot
except AttributeError: return False
newsize = self.fig.get_size_inches()
fsize = self.fsize
#print newsize.all(), fsize.all() #why didn't .all() work??
if (fsize[1] != newsize[1]) or (fsize[0] != newsize[0]) :
self.eegplot.plot() #added these two lines -eli
self.eegplot.draw()
self.fsize = copy.deepcopy(newsize) #why didn't regular copy work?
return False
def configure_event(self, widget, event):
return False
def realize(self, widget):
return False
def motion_notify_event(self, event):
try: self.eegplot
except : return False
if not event.inaxes: return
# Motion within EEG axes
if event.inaxes == self.axes:
t, yt = event.xdata, event.ydata
#t = float('%1.1f' % t)
"""
annman = self.eegplot.annman
# Resize annotation.
if event.button == 1 :
if annman.resize :
s = annman.ann[annman.selectedkey]['startTime']
e = annman.ann[annman.selectedkey]['endTime']
if annman.resize_side == 0 :
s = t
else :
e = t
if s < e :
annman.resize_selected(s, e)
else :
annman.set_selected()
else :
# Change mouse cursor if over an annotation edge.
selected, side = annman.over_edge(event.x, event.y)
if selected is not None :
self.widget.window.set_cursor(gdk.Cursor(gdk.SB_H_DOUBLE_ARROW))
else :
self.widget.window.set_cursor(gdk.Cursor(gdk.LEFT_PTR))
"""
# Update status bar with time and electrode name and number
trode = self.eegplot.get_channel_at_point(event.x, event.y, False)
if trode is not None:
gname, gnum = trode
currdate = self.eegplot.eeg.get_date()
timedelta = datetime.timedelta(0, event.xdata)
if (currdate != None):
self.update_status_bar(
'Time = %1.1f (s), %s, Electrode %s%d' % (t, str(currdate + timedelta), gname, gnum))
else:
self.update_status_bar(
'Time = %1.1f (s), Electrode %s%d' % (t, gname, gnum))
# Motion within spectrum axes
elif event.inaxes == self.axesSpec:
t, f = event.xdata, event.ydata
self.update_status_bar(
'Time = %1.1f (s), Freq = %1.1f (Hz)' % (t, f))
return False
def scroll_event(self, widget, event):
"If in specgram resize"
if event.direction == gdk.SCROLL_UP:
direction = 1
else:
direction = -1
l1,b1,w1,h1 = self.axes.get_position()
l2,b2,w2,h2 = self.axesSpec.get_position()
deltay = direction*0.1*h2
h1 -= deltay
h2 += deltay
self.axes.set_position([l1, b2+h2, w1, h1])
self.axesSpec.set_position([l2, b2, w2, h2])
self.eegplot.plot() #added these two lines -eli
self.eegplot.draw()
self.canvas.draw()
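# Layout note (hedged, since it depends on the matplotlib version this targets):
# axes.get_position() here unpacks to [left, bottom, width, height] in figure
# coordinates (newer versions return a Bbox, whose .bounds gives the same four
# numbers), and set_position() takes the same [l, b, w, h] list - which is how the
# EEG and spectrogram panels trade vertical space above.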
def button_press_event(self, event):
try: self.eegplot
except AttributeError: return False
if not event.inaxes: return
xa, ya = self.axes.transAxes.inverted().transform((event.x, event.y)) #replaced inverse_xy_tup with inverted().transform()
# print 'axes coords', xa, ya
self.buttonDown = event.button
#annman = self.eegplot.annman
if event.button == 1 or event.button == 3 :
if event.inaxes == self.axes:
t, yt = event.xdata, event.ydata
"""
if not annman.is_over_highlight(t) :
key = annman.over_annotation(event.x, event.y)
annman.remove_highlight()
annman.set_selected(key)
annman.dlgAnnBrowser.update_ann_info(key)
"""
if event.button==1:
if event.inaxes == self.axes:
self.eegplot.cursor.visible = False
t, yt = event.xdata, event.ydata
"""
# Start resize if edge of an annotation clicked.
selected, side = annman.over_edge(event.x, event.y)
if selected is not None :
annman.set_selected(selected)
annman.start_resize(side)
annman.selector.visible = False
"""
# Select an electrode if not locked.
if not self.eegplot.lock_trode :
trode = self.eegplot.get_channel_at_point(event.x, event.y)
if trode is not None:
gname, gnum = trode
self.update_status_bar('Electrode: %s%d' % (gname, gnum))
if event.button==3:
# right click brings up the context menu
if event.inaxes == self.axes:
menu = self.eoiMenu
# Update popup menu items
#highsens = annman.get_highlight() is not None
#selsens = self.eegplot.annman.selectedkey is not None
#if highsens: label = 'Create New Annotation'
#else: label = 'Edit Selected Annotation'
#menuItems = menu.get_children()
#print "menuItems = " , menuItems
#menuItemAnnCreateEdit = menuItems[-2]
#print "menuItemAnnCreateEdit = " , menuItemAnnCreateEdit
#menuItemAnnCreateEdit.get_children()[0].set_text(label)
#menuItemAnnDelete = menuItems[-1]
#menuItemAnnDelete.set_sensitive(selsens)
elif event.inaxes == self.axesSpec:
menu = self.specMenu
"""
# Update popup menu items
highsens = annman.get_highlight() is not None
selsens = self.eegplot.annman.selectedkey is not None
if highsens or not selsens : label = 'Create New Annotation'
else: label = 'Edit Selected Annotation'
menu.popup(None, None, None, 0, 0)
"""
return False
def button_release_event(self, event):
try: self.eegplot
except AttributeError: return False
"""
annman = self.eegplot.annman
# Write ann file
if annman.resize :
annman.ann.save_data()
annman.selector.visible = True
annman.end_resize()
"""
self.eegplot.cursor.visible = True
self.buttonDown = None
return False
def on_menuFilePreferences_activate(self, event=None):
def mysql_callback(dbname, host, user, passwd, port):
servers.sql.init(dbname, host, user, passwd, port)
self.make_patients_menu()
eegviewrc.sqlhost = host
eegviewrc.sqluser = user
eegviewrc.sqlpasswd = passwd
eegviewrc.sqlport = port
eegviewrc.save()
def datamanager_callback(url, user, passwd, cachedir):
servers.datamanager.init(url, user, passwd, cachedir)
eegviewrc.httpurl = url
eegviewrc.httpuser = user
eegviewrc.httppasswd = passwd
eegviewrc.httpcachedir = cachedir
eegviewrc.save()
d = Dialog_Preferences(
mysqlCallBack = mysql_callback,
dataManagerCallBack = datamanager_callback)
params = {
'zopeServer' : eegviewrc.httpurl,
'zopeUser' : eegviewrc.httpuser,
'zopePasswd' : eegviewrc.httppasswd,
'zopeCacheDir' : eegviewrc.httpcachedir,
'mysqlDatabase' : eegviewrc.sqldatabase,
'mysqlServer' : eegviewrc.sqlhost,
'mysqlUser' : eegviewrc.sqluser,
'mysqlPasswd' : eegviewrc.sqlpasswd,
'mysqlPort' : eegviewrc.sqlport,
}
d.set_params(params)
d.show_widget()
d.get_widget().set_transient_for(self.widget)
return False
def on_menuFileQuit_activate(self, event):
update_rc_and_die()
#def on_menuFileNew_activate(self, event):
# not_implemented(self.widget)
def get_eeg_params(self, fullpath):
def callback(pars): pass
dlg = Dialog_EEGParams(fullpath, callback)
dlg.show_widget()
response = dlg.widget.run()
if response == gtk.RESPONSE_OK:
dlg.hide_widget()
pars = dlg.get_params()
return pars
def autoload(self, options):
"""DEBUG only"""
fullpath = options.filename
basename, ext = os.path.splitext(fullpath)
eeg = extmap[ext](fullpath)
self.load_eeg(eeg)
if options.eoi is not None:
eoi = EOI(useFile=options.eoi)
self.load_eoi(eoi)
return False
def on_menuFileOpen_activate(self, event):
#dlg = gtk.FileSelection('Select EEG param file')
#dlg.set_transient_for(self.widget)
#dlg.set_filename(fmanager.get_lastdir() + os.sep)
#dlg.cancel_button.connect("clicked", lambda w: dlg.destroy())
#dlg.show()
#response = dlg.run()
def ok_callback(dlg):
fname = dlg.get_filename()
fullpath = dlg.get_filename()
fmanager.set_lastdir(fullpath)
dlg.destroy()
if not os.path.exists(fullpath):
error_msg(
'Cannot find %s' % fullpath,
title='Error',
parent=Shared.windowMain.widget)
basename, ext = os.path.splitext(fullpath)
if not extmap.has_key(ext.lower()):
error_msg(
'Do not know how to handle extension %s in %s' % (ext, fullpath),
title='Error',
parent=Shared.windowMain.widget)
return
else:
loader = extmap[ext.lower()]
try: eeg = loader(fullpath)
except ValueError, msg:
msg = exception_to_str('Error loading EEG' )
error_msg(msg, title='Error loading EEG',
parent=Shared.windowMain.widget)
return
else:
if eeg is None: return
print "on_menuFileOpen_activate: eeg ext is ", ext
if (eeg.get_file_type() != 1): # hack -- .bnis do not need .amp files
if len(eeg.amps)>0:
names = [os.path.split(fullname)[-1] for fullname in eeg.amps]
name = select_name(names, 'Pick the AMP file')
if name is None: return
else:
amp = eeg.get_amp(name)
else:
amp = eeg.get_amp()
else:
amp = eeg.get_amp()
if amp.message is not None:
simple_msg(amp.message, title='Warning',
parent=Shared.windowMain.widget)
self.load_eeg(eeg)
return False
dlg = Dialog_FileChooser(defaultDir=fmanager.get_lastdir(),
okCallback=ok_callback,
title='Select Neuroscanascii file',
parent=self.win,
previous_dirnames=fmanager.get_lastdirs())
print fmanager.bni
try:
dlg.set_filename(fmanager.bni) #use the shared filemanager and eegviewrc file to autoload files when set
except:
dlg.set_filename("")
dlg.run()
dlg.destroy()
#simple usability hack: chain in the view3 loader here
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG from the Patients menu',
title='Error',
parent=self.widget)
return
from view3 import View3
viewWin = View3(eegplot=self.eegplot)
if viewWin.ok:
viewWin.show()
else:
print >>sys.stderr, 'Got an error code from view3'
#/hack
def load_eeg(self, eeg):
dlg = gtk.Dialog('Please stand by')
dlg.show()
msg = gtk.Label('Loading %s; please hold on' % eeg.filename)
msg.show()
dlg.vbox.add(msg)
while gtk.events_pending(): gtk.main_iteration()
try: self.eegplot
except AttributeError: pass
else: Observer.observers.remove(self.eegplot)
try: self.specPlot
except AttributeError: pass
else: Observer.observers.remove(self.specPlot)
self.eegplot = EEGPlot(eeg, self.canvas)
self.specPlot = SpecPlot(self.axesSpec, self.canvas, self.eegplot)
self.specMenu = self.make_spec_menu()
dlg.destroy()
while gtk.events_pending(): gtk.main_iteration()
self.toolbar.set_eegplot(self.eegplot)
try: self.eegplot.plot()
except:
msg = exception_to_str('Could not read data:')
error_msg(msg, title='Error',
parent=Shared.windowMain.widget)
return
eois = eeg.get_associated_files(atype=5, mapped=1)
self.eoiMenu = self.make_context_menu(eois)
# change the window title
self.win = self['windowMain']
self.win.set_title(eeg.filename)
self.eegplot.set_time_lim(0, 10, updateData=True)
def on_menuFileSave_activate(self, event):
not_implemented(self.widget)
def on_menuFileExport_activate(self, event):
# dump all the current data to a bunch of .wav files
tmin, tmax = self.eegplot.get_time_lim()
eeg = self.eegplot.get_eeg()
t, data = eeg.get_data(tmin, tmax)
amp = eeg.get_amp()
did = amp.get_dataind_dict()
freq = eeg.get_freq()
eoi = self.eegplot.get_eoi()
print "did=", did
print "eoi=", eoi
for index, chan in did.iteritems():
if (chan not in eoi):
continue
(cname, cnum) = chan
filename = str("%03d" % index) + "_" + cname + "_" + str(cnum) + "_" + str(tmin) + "-" + str(tmax) + ".wav"
print "on_menuFileExport_activate(): saving ", filename
w = wave.open(filename, 'w')
w.setnchannels(1)
w.setsampwidth(2)
w.setframerate(freq)
#print "data.shape is ", data.shape
wav_array = data[:,int(index)]
#print "wav_array length is ", len(wav_array), " with max of ", max(wav_array), "min of ", min(wav_array)
# not sure how one chooses to "short-ize" this data!
# arbitrarily make max SHRT_MAX and min SHRT_MIN or
# something
shrt_max = 32767
shrt_min = -32768
wav_max = max(wav_array)
wav_min = min(wav_array)
# mcc XXX: This conversion needs fixing... rectified signals
# wind up with 0 = SHRT_MIN.
shrt_array = zeros(len(wav_array), 'h')
wav_max_max = max(wav_max, abs(wav_min))
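            # Linearly map the float samples from [-wav_max_max, wav_max_max] onto the
            # signed 16-bit range expected by the 2-byte .wav frames: +wav_max_max maps
            # to shrt_max and -wav_max_max maps to shrt_min.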
for i in range(0,len(wav_array)):
wav_i_0to1 = (wav_max_max - wav_array[i]) / (2 * wav_max_max)
shrt_array[i] = int(shrt_max - round(wav_i_0to1 * (shrt_max - shrt_min)))
#print "len(shrt_array) is", len(shrt_array), " type of len(shrt_array) is ", type(len(shrt_array))
w.writeframes(struct.pack('%dh' % len(shrt_array), *shrt_array))
w.close()
"""
def on_menuItemAnnBrowser_activate(self, event) :
try : self.eegplot
except : pass
else :
self.eegplot.annman.dlgAnnBrowser.show()
return False
"""
def on_menuHelpAbout_activate(self, event):
not_implemented(self.widget)
def on_menuChannelWindow_activate(self, event):
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG from the Patients menu',
title='Error',
parent=self.widget)
return
win = ChannelWin(eegplot=self.eegplot)
win.show()
def on_menuHistogramWindow_activate(self, event):
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG from the Patients menu',
title='Error',
parent=self.widget)
return
win = HistogramWin(eegplot=self.eegplot)
win.show()
def on_menuAcorrWindow_activate(self, event):
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG from the Patients menu',
title='Error',
parent=self.widget)
return
win = AcorrWin(eegplot=self.eegplot)
win.show()
def on_menuEmbedWindow_activate(self, event):
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG from the Patients menu',
title='Error',
parent=self.widget)
return
from embed import EmbedWin
embedWin = EmbedWin(eegplot=self.eegplot)
embedWin.show()
def on_menuCoherenceWindow_activate(self, event):
print "on_menuCoherenceWindow_activate"
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG from the Patients menu',
title='Error',
parent=self.widget)
return
from coherence_window import CoherenceWin
coherenceWin = CoherenceWin(eegplot=self.eegplot)
coherenceWin.show()
def on_menuView3DWindow_activate(self, event):
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG from the Patients menu',
title='Error',
parent=self.widget)
return
from view3 import View3
viewWin = View3(eegplot=self.eegplot)
if viewWin.ok:
viewWin.show()
else:
print >>sys.stderr, 'Got an error code from view3'
def on_menuPhaseSynchronyPlot_activate(self, event) :
try : self.eegplot
except AttributeError :
simple_msg(
'You must first select an EEG',
title='Error',
parent=self.widget)
return
dlgPhaseSynchronyPlot = Dialog_PhaseSynchronyPlot(self.eegplot)
print dlgPhaseSynchronyPlot
dlgPhaseSynchronyPlot.show_widget()
def on_menuSpecWindow_activate(self, event):
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG',
title='Error',
parent=self.widget)
return
specWin = SpecWin(eegplot=self.eegplot)
specWin.show()
def on_menuEventRelatedSpecWindow_activate(self, event):
def ok_callback(erspec_params):
print "on_menuEventRelatedSpecWindow_activate().ok_callback(): foo=", erspec_params
win = EventRelatedSpecWin(erspec_params, eegplot=self.eegplot)
win.show()
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG',
title='Error',
parent=self.widget)
return
specWin = Dialog_EventRelatedSpec(ok_callback)
specWin.show_widget()
#specWin.show()
return False
def on_menuComputeExportToCohstat_activate(self, event):
try: self.eegplot
except AttributeError:
simple_msg(
'You must first select an EEG from the Patients menu',
title='Error',
parent=self.widget)
return
eoi = self.eegplot.get_eoi()
if len(eoi)==64:
d = Dialog_CohstatExport(self.eegplot.get_eeg(), eoi)
else:
d = Dialog_CohstatExport(self.eegplot.get_eeg())
d.get_widget().set_transient_for(self.widget)
d.show_widget()
return False
def update_rc_and_die(*args):
[eegviewrc.lastdir,
eegviewrc.lastdir1,
eegviewrc.lastdir2,
eegviewrc.lastdir3,
eegviewrc.lastdir4,
eegviewrc.lastdir5,
eegviewrc.lastdir6,
eegviewrc.lastdir7,
eegviewrc.lastdir8,
eegviewrc.lastdir9] = fmanager.get_lastdirs()
#eegviewrc.figsize = Shared.windowMain.fig.get_size_inches()
eegviewrc.save()
gtk.main_quit()
if __name__=='__main__':
__import__('__init__')
Shared.windowMain = MainWindow()
Shared.windowMain.show_widget()
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file",
action="store", type="string", dest="filename",
default=None,
help="Autoload eeg from file", metavar="FILE")
parser.add_option("-e", "--eoi",
action="store", type="string", dest="eoi",
default=None,
help="Autoload eoi from eoi file", metavar="FILE")
(options, args) = parser.parse_args()
if options.filename is not None:
Shared.windowMain.autoload(options)
else:
#No longer load the sql/zope dialog.
#Shared.windowMain.on_menuFilePreferences_activate(None)
pass
Shared.windowMain.widget.connect('expose-event', Shared.windowMain.expose_event) #handle page resizes -eli
Shared.windowMain.widget.connect('destroy', update_rc_and_die)
Shared.windowMain.widget.connect('delete_event', update_rc_and_die)
#Shared.windowMain['menubarMain'].hide()
try: gtk.main()
except KeyboardInterrupt:
update_rc_and_die()
| [] |
2024-01-10 | iustinum/aetheon | .hackathon~src~repo_reader.py | import nest_asyncio
from llama_hub.github_repo import GithubClient, GithubRepositoryReader
import os
import utils
from utils import print_verbose
import pickle
from dotenv import load_dotenv
import openai
from llama_index import download_loader, GPTVectorStoreIndex, Document
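# download_loader appears to be called only for its side effect of fetching the
# GithubRepositoryReader loader module from llama-hub; the classes actually used
# below are imported from llama_hub directly.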
download_loader("GithubRepositoryReader")
utils.setup()
# .pkl file where the code will store data about the respository
storeFileName = "data.pkl"
def generateDataFile(username: str, repo: str, branch: str = "main", verbose: int = 0) -> None:
"""
    A function to generate a list of Document objects from a GitHub repository.
    Writes the list of Document objects to a pickle file (data.pkl).
"""
github_client = GithubClient(os.getenv("GITHUB_TOKEN"))
print_verbose(
f"INFO: {utils.getCurrentTime()} [*] Loading Github repository...", verbose)
loader = GithubRepositoryReader(
github_client,
owner=username,
repo=repo,
verbose=False,
concurrent_requests=10)
data = loader.load_data(branch=branch)
print_verbose(
f"INFO: {utils.getCurrentTime()} [*] Storing data...", verbose)
with open(storeFileName, "wb") as f:
pickle.dump(data, f)
def generateQuery(query: str, verbose: int = 0) -> str:
"""
A function to generate a query response from the given data.
"""
    if not os.path.exists(storeFileName):
raise Exception("INFO: [*] Data file does not exist!")
print_verbose(
f"INFO: {utils.getCurrentTime()} [*] Unpacking data...", verbose)
# Unpackage our documents object
with open(storeFileName, "rb") as f:
data = pickle.load(f)
print_verbose(
f"INFO: {utils.getCurrentTime()} [*] Generating index...", verbose)
index = GPTVectorStoreIndex.from_documents(data)
# Turns index into a query engine to feed questions into.
print_verbose(
f"INFO: {utils.getCurrentTime()} [*] Generating query engine...", verbose)
query_engine = index.as_query_engine()
print_verbose(
f"INFO: {utils.getCurrentTime()} [*] Generating repsonse...", verbose)
response = query_engine.query(query)
return response.response
def generateFileNames(verbose: int = 0) -> list[dict]:
"""
    A function to generate file locations from our data, starting from the github repo.
    Returns a list of dictionaries with each document's file metadata (including the key "file_name").
"""
print_verbose(
f"INFO: {utils.getCurrentTime()} [*] Generating file names...", verbose)
data = getDataPKL()
locations = []
for document in data:
locations.append(document.extra_info)
return locations
def generateResponseFromFile(fileName: str) -> str:
"""
A function to generate a detailed description for a certain file.
    This is mainly used to produce descriptions for the wiki page.
"""
return generateQuery(f"Write me a detailed description of the following file or class: {fileName}. The response should include a detailed list of variables and functions.")
def generateDescriptions(listOfFileNames: list[dict]) -> str:
"""
    A function that iterates through each file and produces a description of it.
    The individual descriptions are concatenated into one large string.
    Returns that string containing all descriptions.
"""
desc = ""
for fileNames in listOfFileNames:
desc += generateResponseFromFile(fileNames["file_name"])
return desc
def getDataPKL() -> list[Document]:
"""
    A function that loads the pickled list of Document objects. Serves as data for later parsing.
"""
# Error checking to see if our data file exists
if not os.path.exists(storeFileName):
raise Exception("Data file not generated!")
with open(storeFileName, "rb") as f:
data = pickle.load(f)
return data
# (DEBUGGING)
if __name__ == "__main__":
print(os.getenv("GITHUB_TOKEN"))
print(os.getenv("OPENAI_API_KEY"))
utils.setup()
# paste url:
# "https://github.com/chiyeon/tmf-beat")
author, repo_name = utils.get_repo_info(
"https://github.com/Jingzhi-Su/PokerBot")
print(author, repo_name)
generateDataFile(author, repo_name, branch="main")
allNames = generateFileNames()
print(generateDescriptions(allNames))
os.remove(storeFileName)
| [] |
2024-01-10 | iustinum/aetheon | playground~src_old~aetheon.py | import openai
import os
from dotenv import load_dotenv
from model import GPT
import repo_reader
class Aetheon():
def __init__(self, model_name) -> None:
load_dotenv(".env")
openai.api_key = os.environ["OPENAI_API_KEY"]
self.model = GPT(model=model_name)
def run(self, repo_name, branch, author):
repo_reader.generateDataFile(author, repo_name, branch=branch)
allNames = repo_reader.generateFileNames()
desc = repo_reader.generateDescriptions(allNames)
self.model.giveContext(desc)
text = self.model.createMDText()
print(text)
def load_repo(self, repo_name, branch, author):
pass
def generateInlineDocs(self):
pass
| [] |
2024-01-10 | SousaPedroso/mlops-training-datatalks.club | training~topic_modeling~train.py | """
Script to train a model for
topic modeling of customer review content
"""
import os
import pickle
from argparse import ArgumentParser
import mlflow
from gensim.models import CoherenceModel, LdaModel
from prefect import flow, task
from prefect.task_runners import SequentialTaskRunner
@task
def load_dataset(input_dir: str) -> tuple[list, list]:
with open(os.path.join(input_dir, "train_corpus.pkl"), "rb") as f_in:
train_dataset = pickle.load(f_in)
with open(os.path.join(input_dir, "valid_tokens.pkl"), "rb") as f_in:
val_dataset = pickle.load(f_in)
return train_dataset, val_dataset
@task
def load_indexed_dictionary(input_dir: str) -> list:
with open(os.path.join(input_dir, "id2word.pkl"), "rb") as f_in:
return pickle.load(f_in)
def compute_coherence_values(X_train, X_val, passes, id2word, k, a, b) -> float:
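    """
    Train an LDA model on the bag-of-words corpus X_train with k topics and the given
    alpha (a) / eta (b) priors, then return its c_v coherence computed on the tokenized
    validation texts X_val.

    Illustrative call (hypothetical values):
        score = compute_coherence_values(train_corpus, valid_tokens, 3, id2word, 10, 0.31, 0.61)
    """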
lda_model = LdaModel(
corpus=X_train,
id2word=id2word,
num_topics=k,
random_state=123,
chunksize=100,
passes=passes,
alpha=a,
eta=b,
)
coherence_model_lda = CoherenceModel(
model=lda_model, texts=X_val, dictionary=id2word, coherence="c_v"
)
return coherence_model_lda.get_coherence()
@task
def hyperparameter_opt(
X_train: list,
X_val: list,
id2word: list,
passes: int,
topics: range,
alpha: list,
beta: list,
):
with mlflow.start_run():
mlflow.set_tag("model", "LDA")
mlflow.set_tag("scope", "Topic-Modeling")
mlflow.log_param("passes", passes)
for k in topics:
for a in alpha:
for b in beta:
params = {}
params["k"] = k
params["a"] = a
params["b"] = b
coherence_score = compute_coherence_values(
X_train, X_val, passes, id2word, k, a, b
)
mlflow.log_metric("coherence", coherence_score)
mlflow.log_metrics(params)
# pylint: disable=line-too-long
@flow(
name="topic-modeling-training-pipeline-params-opt",
task_runner=SequentialTaskRunner(),
)
def train(
experiment_name: str,
input_dir: str,
passes: int,
topics: list,
alpha: list,
beta: list,
):
topics = range(topics[0], topics[1], topics[2])
mlflow.set_tracking_uri(os.getenv("MLFLOW_TRACKING_URI", "http://127.0.0.1:5000"))
mlflow.set_experiment(experiment_name)
train_dataset, val_dataset = load_dataset(input_dir)
id2word = load_indexed_dictionary(input_dir)
hyperparameter_opt(train_dataset, val_dataset, id2word, passes, topics, alpha, beta)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--experiment_name",
type=str,
help="Name of the experiment to be used for tracking",
required=True,
)
parser.add_argument(
"--input_dir",
type=str,
help="Data's path to be used for model training",
required=True,
)
parser.add_argument(
"--passes",
type=int,
help="Number of passes to be used for model training",
default=3,
)
parser.add_argument(
"--topics",
        nargs=3,
        type=int,
help=(
"Number of topics to be used for model training. Expected three values:"
" start, stop, step"
),
default=[5, 20, 5],
)
parser.add_argument(
"--alpha",
        nargs="+",
        type=float,
help="Alpha hyperparameter to be used for model training",
default=[0.01, 0.31, 0.61, 0.91],
)
parser.add_argument(
"--beta",
        nargs="+",
        type=float,
help="Beta hyperparameter to be used for model training",
default=[0.01, 0.31, 0.61, 0.91],
)
args = parser.parse_args()
train(
args.experiment_name,
args.input_dir,
args.passes,
args.topics,
args.alpha,
args.beta,
)
| [] |
2024-01-10 | Digital-Methods-HASS/au616353_martinez_mie | Language%20analytics~Assignment_5~utils~lda_utils.py | #!/usr/bin/python
"""
Utility functions for working with LDA using gensim
"""
# NLP
import re
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
#pandas
import pandas as pd
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# matplotlib
import matplotlib.pyplot as plt
def sent_to_words(sentences):
for sent in sentences:
sent = re.sub(r'\S*@\S*\s?', '', sent) # remove emails
sent = re.sub(r'\s+', ' ', sent) # remove newline chars
sent = re.sub(r"\'", "", sent) # remove single quotes
sent = gensim.utils.simple_preprocess(str(sent), deacc=True)
yield(sent)
def process_words(texts, nlp, bigram_mod, trigram_mod, stop_words=stop_words, allowed_postags=['NOUN', "ADJ", "VERB", "ADV"]):
"""Remove Stopwords, Form Bigrams, Trigrams and Lemmatization"""
# use gensim simple preprocess
texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
texts = [bigram_mod[doc] for doc in texts]
texts = [trigram_mod[bigram_mod[doc]] for doc in texts]
texts_out = []
# lemmatize and POS tag using spaCy
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
model = gensim.models.LdaMulticore(corpus=corpus, num_topics=num_topics, id2word=dictionary)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
x = range(start, limit, step)
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
# Print the coherence scores
for m, cv in zip(x, coherence_values):
print("Num Topics =", m, " has Coherence Value of", round(cv, 4))
return model_list, coherence_values
def format_topics_sentences(ldamodel, corpus, texts):
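    """
    Build a DataFrame with one row per document containing its dominant topic, that
    topic's percentage contribution and its top keywords, with the original text
    appended as the final column.
    """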
# Init output
sent_topics_df = pd.DataFrame()
# Get main topic in each document
for i, row_list in enumerate(ldamodel[corpus]):
row = row_list[0] if ldamodel.per_word_topics else row_list
# print(row)
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']
# Add original text to the end of the output
contents = pd.Series(texts)
sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
return(sent_topics_df)
if __name__=="__main__":
pass | [] |
2024-01-10 | Digital-Methods-HASS/au616353_martinez_mie | Language%20analytics~Assignment_5~src~development_of_trump.py | #!/usr/bin/env python
"""
Specify the file path of the csv file of Trump tweets and the name of the output graph. You can also specify the number of topics and what kind of words you want to investigate. The defaults are 10 topics and the word types nouns and verbs. The output is a perplexity score and a coherence score printed in the terminal, as well as a print of the 10 most prominent words constituting each topic. Furthermore, a plot of the development of topics within Trump's tweets is saved in the folder data/output, relative to the location of the script.
Parameters:
input_file: str <filepath-of-csv-file>
output_filename: str <name-of-png-file>
n_topics: int <number-of-topics>
word_types: list <list-of-word-types>
Usage:
development_of_trump.py -f <filepath-of-csv-file> -o <name-of-png-file> -n <number-of-topics> -w <list-of-word-types>
Example:
$ python3 development_of_trump.py -f ../data/Trump_tweets.csv -o trumps_development.png -n 15 -w "['NOUN', 'VERB']"
## Task
- Train an unsupervised classifier (an LDA model) on your data to extract structured information that can provide insight into topics in Trump's tweets.
- Output is in the form of an html file containing the topics (as well as a print in the terminal) and a png file for the development of topics. Both can be found in the folder data/output.
"""
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
# standard library
import sys,os
#sys.path.append(os.getcwd())
sys.path.append(os.path.join(".."))
from pprint import pprint
# data and nlp
import pandas as pd
import spacy
nlp = spacy.load("en_core_web_sm", disable=["ner"])
# visualisation
import pyLDAvis.gensim
import seaborn as sns
from matplotlib import rcParams
# figure size in inches
rcParams['figure.figsize'] = 20,10
# LDA tools
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
from utils import lda_utils
# warnings
import logging, warnings
warnings.filterwarnings('ignore')
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import argparse
# argparse
ap = argparse.ArgumentParser()
# adding argument
ap.add_argument("-f", "--input_file", required = True, help= "Path to the csv-file")
ap.add_argument("-o", "--output_filename", default = "trumps_development.png", help = "Name of output file")
ap.add_argument("-n", "--n_topics", default = 10, help = "Number of topics")
ap.add_argument("-w", "--word_type", default = "['NOUN', 'VERB']", help = "Type of word. Choose between: 'NOUN','VERB','ADJ','ADV'")
# parsing arguments
args = vars(ap.parse_args())
def main(args):
# get path to the csv file
in_file = args["input_file"]
# name for output file
out_file = args["output_filename"]
# number of topics
n_topics = int(args["n_topics"])
# word types
word_types = args["word_type"]
# Initialize class object
Trump = Tweet_development(input_file = in_file, output_file = out_file, n_topics = n_topics, word_types = word_types)
# use process_data method and save returned dataframe
data_processed = Trump.process_data()
# build the lda model
id2word, corpus, lda_model = Trump.lda_model(data_processed)
# plot development of tweets
Trump.development_and_outputs(id2word = id2word, corpus = corpus, lda_model = lda_model)
# print done
print("Good job! The script is now done. Have a nice day!")
class Tweet_development:
def __init__(self, input_file, output_file, n_topics, word_types):
'''
Constructing the Tweet_development object
'''
# creating the class object with the user defined inputs
self.input_file = input_file
self.output_file = output_file
self.n_topics = n_topics
self.word_types = word_types
def load_and_prepare(self):
'''
Loading the input data. Filter and prepare data for classification.
Returns a filtered list.
'''
print("\nLoading the data...")
# read csv file
df = pd.read_csv(self.input_file)
# remove hyperlinks by filtering
        filtering = df['text'].str.contains("http")
df = df[~filtering]
# remove retweets
data = df[df["isRetweet"]== "f"]
# make a corpus of the contents column only
tweets = data['text'].values.tolist()
return tweets
def process_data(self):
'''
Building bigram and trigram models and fitting them to the data.
Reducing the feature space by lemmatizing and POS tagging the corpus of tweets.
Returns the processed data.
'''
tweets = self.load_and_prepare()
print("\nBuilding bigram and trigram models and fitting it to the data")
# Build the bigram and trigram models
bigram = gensim.models.Phrases(tweets, min_count=3, threshold=75) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[tweets], threshold=75)
# fitting the models to the data
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
print("\nLemmatizing the data and doing POS tagging. This takes a few minutes...")
# Processing the data using Ross' lda utils function
data_processed = lda_utils.process_words(tweets,
nlp,
bigram_mod,
trigram_mod,
allowed_postags=self.word_types)
return data_processed
def lda_model(self, data_processed):
'''
Creating dictionary and corpus of word frequency from the processed data.
Building and evaluating the LDA model.
Print perplexity scores and coherence scores.
Print the top ten words representing each topic.
'''
# Create Dictionary
id2word = corpora.Dictionary(data_processed)
# Create Corpus: Term Document Frequency
corpus = [id2word.doc2bow(text) for text in data_processed]
print(f"Building the LDA model. You have {len(data_processed)} tweets in the data so it might take a while")
# Build LDA model
        lda_model = gensim.models.LdaMulticore(corpus=corpus, # vectorised corpus - list of lists of tuples
id2word=id2word, # gensim dictionary - mapping words to IDs
num_topics=self.n_topics, # number of topics set by user or default
random_state=100, # random state for reproducibility
chunksize=30, # batch data for efficiency
passes=10, # number of times to pass over the data set to create better model
iterations=100, # related to document rather than corpus
per_word_topics=True, # define word distributions
minimum_probability=0.0) # minimum value
# Compute Perplexity and print in the terminal
print('\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
# Compute Coherence Score
coherence_model_lda = CoherenceModel(model=lda_model,
texts=data_processed,
dictionary=id2word,
coherence='c_v')
# get the coherence score and print in the terminal
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# print the topics found by the lda model in the terminal
pprint(lda_model.print_topics())
return id2word, corpus, lda_model
def development_and_outputs(self, id2word, corpus, lda_model):
'''
Calculate dominant topic for each document/tweet and plot development of topics over time with seaborn. Save plot as png.
'''
print("\nCreating visualizations and saving as html and png in output folder")
# Create output directory if it doesn't exist
outputDir = os.path.join("..", "data", "output")
if not os.path.exists(outputDir):
os.mkdir(outputDir)
print("Directory " , outputDir , " Created ")
else:
print("Directory " , outputDir , " already exists")
# Make gensim LDA visualization
vis = pyLDAvis.gensim.prepare(lda_model, corpus, dictionary=lda_model.id2word)
# define output path and filename
out_path = os.path.join(outputDir, "lda_topics.html")
# save visualization as html
pyLDAvis.save_html(vis, out_path)
print("\nGensim LDA visualization is saved as", out_path)
# inspect dominant topic
values = list(lda_model.get_document_topics(corpus))
# create empty list
split = []
# for loop for each document in the corpus
for entry in values:
# create empty list
topic_prevelance = []
# for loop for each topic in each document
for topic in entry:
# append the contribution of each topic for each document
topic_prevelance.append(topic[1])
# append the list with contributions of topics to the split list
split.append(topic_prevelance)
# making a dataframe containing for each document the percentage of contribution of the 10 topics
df = pd.DataFrame(map(list,zip(*split)))
# defining the output path
out_path = os.path.join(outputDir, self.output_file)
# making a lineplot with a rolling mean of 500 tweets
line_plot = sns.lineplot(data=df.T.rolling(500).mean())
# saving the lineplot as a figure
fig = line_plot.get_figure()
# saving the figure in the output path
fig.savefig(out_path)
print("\nLineplot for development of topics in tweets is saved as", out_path)
if __name__ == "__main__":
main(args) | [] |
2024-01-10 | open-compass/VLMEvalKit | vlmeval~evaluate~mmvet_eval.py | from vlmeval.api import OpenAIWrapper, OpenAIWrapperInternal
from vlmeval.smp import *
from vlmeval.utils import track_progress_rich
INTERNAL = os.environ.get('INTERNAL', 0)
def build_mmvet_gpt4_prompt(line):
question = line['question']
gt = str(line['answer'])
prediction = str(line['prediction'])
prompt = """Compare the ground truth and prediction from AI models, to give a correctness score for the prediction. <AND> in the ground truth means it is totally right only when all elements in the ground truth are present in the prediction, and <OR> means it is totally right when any one element in the ground truth is present in the prediction. The correctness score is 0.0 (totally wrong), 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, or 1.0 (totally right). Just complete the last space of the correctness score.
Question | Ground truth | Prediction | Correctness
--- | --- | --- | ---
What is x in the equation? | -1 <AND> -5 | x = 3 | 0.0
What is x in the equation? | -1 <AND> -5 | x = -1 | 0.5
What is x in the equation? | -1 <AND> -5 | x = -5 | 0.5
What is x in the equation? | -1 <AND> -5 | x = -5 or 5 | 0.5
What is x in the equation? | -1 <AND> -5 | x = -1 or x = -5 | 1.0
Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme talks about Iceland and Greenland. It's pointing out that despite their names, Iceland is not very icy and Greenland isn't very green. | 0.4
Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme is using humor to point out the misleading nature of Iceland's and Greenland's names. Iceland, despite its name, has lush green landscapes while Greenland is mostly covered in ice and snow. The text 'This is why I have trust issues' is a playful way to suggest that these contradictions can lead to distrust or confusion. The humor in this meme is derived from the unexpected contrast between the names of the countries and their actual physical characteristics. | 1.0
"""
gpt4_prompt = prompt + '\n' + ' | '.join([question, gt.replace("<AND>", " <AND> ").replace("<OR>", " <OR> "), prediction, ""])
return gpt4_prompt
def MMVet_auxeval(model, line):
def float_cvt(s):
try:
return float(s)
except ValueError:
return None
prompt = build_mmvet_gpt4_prompt(line)
log = ''
retry = 5
for i in range(retry):
output = model.generate(prompt, temperature=i * 0.5)
score = float_cvt(output)
if score is None:
log += f'Try {i}: output is {output}, failed to parse.\n'
elif score < 0 or score > 1:
log += f'Try {i}: output is {output}, invalid score: {score}.\n'
else:
log += 'Succeed'
return dict(log=log, score=score)
log += 'All 5 retries failed.\n'
return dict(log=log, score=0.0)
def MMVet_acc(result_file):
data = load(result_file)
tot = defaultdict(lambda: 0)
score = defaultdict(lambda: 0)
lt = len(data)
cate2_list = []
for i in range(lt):
item = data.iloc[i]
cate = item['category']
cate2 = cate.replace(',','_')
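        # cate2 is the full capability combination (e.g. 'rec_know_gen') used for the
        # fine-grained score table; the loop below additionally credits every individual
        # capability whose name appears in the combination string.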
if cate2 not in cate2_list:
cate2_list.append(cate2)
grade = float(item['score'])
cate_list = ['rec','ocr','know','gen','spat','math']
for capa in cate_list:
if capa in cate:
tot[capa] += 1
score[capa] += grade
tot['Overall'] += 1
tot[cate2] += 1
score['Overall'] += grade
score[cate2] += grade
res = defaultdict(list)
res2 = defaultdict(list)
cate_list.append('Overall')
cate2_list.append('Overall')
for k in cate_list:
res['Category'].append(k)
res['tot'].append(tot[k])
res['acc'].append(score[k] / tot[k] * 100)
for v in cate2_list:
res2['Category'].append(v)
res2['tot'].append(tot[v])
res2['acc'].append(score[v] / tot[v] * 100)
res = pd.DataFrame(res)
res2 = pd.DataFrame(res2)
return res, res2
def MMVet_eval(eval_file, model='gpt-4-turbo', nproc=4, verbose=False):
logger = get_logger('Evaluation')
suffix = eval_file.split('.')[-1]
storage = eval_file.replace(f'.{suffix}', f'_{model}.xlsx')
tmp_file = eval_file.replace(f'.{suffix}', f'_{model}.pkl')
if osp.exists(storage):
logger.warning(f"GPT scoring file {storage} already exists, will reuse it in MMVet_eval. ")
else:
data = load(eval_file)
gpt_version = model
model_map = {
'gpt-4-turbo': 'gpt-4-1106-preview',
'gpt-4-0613': 'gpt-4-0613',
'chatgpt-1106': 'gpt-3.5-turbo-1106',
'chatgpt-0613': 'gpt-3.5-turbo-0613'
}
model_version = model_map[gpt_version]
if INTERNAL:
# We follow the original codebase to set max_tokens == 3
model = OpenAIWrapperInternal(model_version, verbose=verbose, max_tokens=3, retry=10)
else:
model = OpenAIWrapper(model_version, verbose=verbose, max_tokens=3, retry=10)
lt = len(data)
lines = [data.iloc[i] for i in range(lt)]
tups = [(model, line) for line in lines]
indices = [line['index'] for line in lines]
ans = {}
if osp.exists(tmp_file):
ans = load(tmp_file)
tups = [x for x, i in zip(tups, indices) if i not in ans]
indices = [i for i in indices if i not in ans]
if len(indices):
new_results = track_progress_rich(
MMVet_auxeval, tups, nproc=nproc, chunksize=nproc,
keys=indices, save=tmp_file)
ans = load(tmp_file)
for k, v in zip(indices, new_results):
assert k in ans
assert ans[k]['log'] == v['log'] and ans[k]['score'] == v['score']
log_map, score_map = {}, {}
all_inds = [line['index'] for line in lines]
for k in all_inds:
log_map[k] = ans[k]['log']
score_map[k] = ans[k]['score']
data['score'] = [score_map[idx] for idx in data['index']]
data['log'] = [log_map[idx] for idx in data['index']]
dump(data, storage)
score, score_fine = MMVet_acc(storage)
score_pth = storage.replace('.xlsx', '_score.csv')
score_fine_pth = storage.replace('.xlsx', '_score_fine.csv')
dump(score, score_pth)
dump(score_fine, score_fine_pth)
logger.info(f'MMVet_eval successfully finished evaluating {eval_file}, results saved in {score_pth} and {score_fine_pth}')
logger.info(f'Score: ')
logger.info(score)
def parse_args():
parser = argparse.ArgumentParser(description="Inference LLM Answers. ")
parser.add_argument("data", type=str, help="The question set for inference, in excel / tsv / json format. ")
parser.add_argument(
"--model",
type=str,
help="The LLM (GPT) used for inference. ",
default="gpt-4-turbo",
choices=['gpt-4-0613', 'gpt-4-turbo', 'chatgpt-1106', 'chatgpt-0613'])
parser.add_argument("--nproc", type=int, default=4)
parser.add_argument("--verbose", action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
MMVet_eval(eval_file=args.data, model=args.model, nproc=args.nproc, verbose=args.verbose)
| [
" | ",
"\n",
" <OR> ",
"Compare the ground truth and prediction from AI models, to give a correctness score for the prediction. <AND> in the ground truth means it is totally right only when all elements in the ground truth are present in the prediction, and <OR> means it is totally right when any one element in the ground truth is present in the prediction. The correctness score is 0.0 (totally wrong), 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, or 1.0 (totally right). Just complete the last space of the correctness score.\n\n Question | Ground truth | Prediction | Correctness\n --- | --- | --- | ---\n What is x in the equation? | -1 <AND> -5 | x = 3 | 0.0\n What is x in the equation? | -1 <AND> -5 | x = -1 | 0.5\n What is x in the equation? | -1 <AND> -5 | x = -5 | 0.5\n What is x in the equation? | -1 <AND> -5 | x = -5 or 5 | 0.5\n What is x in the equation? | -1 <AND> -5 | x = -1 or x = -5 | 1.0\n Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme talks about Iceland and Greenland. It's pointing out that despite their names, Iceland is not very icy and Greenland isn't very green. | 0.4\n Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme is using humor to point out the misleading nature of Iceland's and Greenland's names. Iceland, despite its name, has lush green landscapes while Greenland is mostly covered in ice and snow. The text 'This is why I have trust issues' is a playful way to suggest that these contradictions can lead to distrust or confusion. The humor in this meme is derived from the unexpected contrast between the names of the countries and their actual physical characteristics. | 1.0\n ",
" <AND> "
] |
2024-01-10 | open-compass/VLMEvalKit | vlmeval~evaluate~yes_or_no.py | from vlmeval.api import OpenAIWrapper, OpenAIWrapperInternal
from vlmeval.smp import *
from vlmeval.utils import track_progress_rich
INTERNAL = os.environ.get('INTERNAL', 0)
def MME_rating(data_file):
data = load(data_file)
stats = defaultdict(dict)
lt = len(data)
for i in range(lt):
item = data.iloc[i]
category = item['category']
image_path = item['image_path']
score = item['score']
if image_path not in stats[category]:
stats[category][image_path] = []
stats[category][image_path].append(score)
def acc(key, mode='normal'):
res = stats[key]
values = []
for val in res.values():
if mode == 'normal':
values.extend(val)
elif mode == 'plus':
values.append(val[0] * val[1])
return np.mean(values) * 100
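    # MME reports each sub-task as acc + acc_plus, where acc_plus counts an image as
    # correct only if all (in MME, two) questions about it are answered correctly,
    # so every sub-task score is out of 200.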
scores = {}
for k in stats:
scores[k] = acc(k) + acc(k, 'plus')
super_cates = dict(
perception=['OCR', 'artwork', 'celebrity', 'color', 'count', 'existence', 'landmark', 'position', 'posters', 'scene'],
reasoning=['code_reasoning', 'commonsense_reasoning', 'numerical_calculation', 'text_translation']
)
ret = {}
for sc, cate_list in super_cates.items():
base = 0
for c in cate_list:
base += scores[c]
ret[sc] = base
ret.update(scores)
ret = d2df(ret)
return ret
def Hallusion_rating(data_file):
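    # Three accuracies are reported: aAcc (per answer), fAcc (per figure: every question
    # about the same figure must be correct) and qAcc (per question: the same question
    # must be answered correctly across all figure variants within a set).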
def calc_fAcc(data):
res = defaultdict(list)
lt = len(data)
for i in range(lt):
line = data.iloc[i]
res[f"{line['l2-category']}_{line['set_id']}_{line['figure_id']}"].append(line['score'])
return np.mean([np.all(x) for x in res.values()]) * 100
def calc_qAcc(data):
res = defaultdict(list)
lt = len(data)
for i in range(lt):
line = data.iloc[i]
res[f"{line['l2-category']}_{line['set_id']}_{line['question_id']}"].append(line['score'])
return np.mean([np.all(x) for x in res.values()]) * 100
def calc_aAcc(data):
return np.mean(data['score']) * 100
data = load(data_file)
data['set_id'] = [x.split('_')[3] for x in data['index']]
data['figure_id'] = [x.split('_')[4] for x in data['index']]
data['question_id'] = [x.split('_')[5] for x in data['index']]
res = dict(split=[], aAcc=[], fAcc=[], qAcc=[])
res['split'].append('Overall')
res['aAcc'].append(calc_aAcc(data))
res['fAcc'].append(calc_fAcc(data))
res['qAcc'].append(calc_qAcc(data))
if 'category' in data:
cates = list(set(data['category']))
for c in cates:
sub = data[data['category'] == c]
res['split'].append(c)
res['aAcc'].append(calc_aAcc(sub))
res['fAcc'].append(calc_fAcc(sub))
res['qAcc'].append(calc_qAcc(sub))
if 'l2-category' in data:
cates = list(set(data['l2-category']))
for c in cates:
sub = data[data['l2-category'] == c]
res['split'].append(c)
res['aAcc'].append(calc_aAcc(sub))
res['fAcc'].append(calc_fAcc(sub))
res['qAcc'].append(calc_qAcc(sub))
ret = pd.DataFrame(res)
return ret
def default_rating(data_file):
data = load(data_file)
res = {}
res['Overall'] = np.mean(data['score']) * 100
if 'category' in data:
cates = list(set(data['category']))
cates = [c for c in cates if not pd.isna(c)]
cates.sort()
for c in cates:
sub = data[data['category'] == c]
res[c] = np.mean(sub['score']) * 100
if 'l2-category' in data:
cates = list(set(data['l2-category']))
cates = [c for c in cates if not pd.isna(c)]
cates.sort()
for c in cates:
sub = data[data['l2-category'] == c]
res[c] = np.mean(sub['score']) * 100
ret = d2df(res)
return ret
def YOrN_match_prompt(line):
tmpl = (
"You are an AI assistant who will help me to match an answer with two options of a question. "
"The options are only Yes / No. "
"You are provided with a question and an answer, and you need to find which option (Yes / No) is most similar to the answer. "
"If the meaning of all options are significantly different from the answer, output Unknown. "\
"Your should output a single word among the following 3 choices: Yes, No, Unknown.\n"
"Example 1: \n"
"Question: Is the word in this image 'Hello'?\nAnswer: The word in this image is 'Hello'.\nYour output: Yes\n"
"Example 2: \n"
"Question: Is the word in this image 'Hello'?\nAnswer: The word in this image is not 'Hello'.\nYour output: No\n"
"Example 3: \n"
"Question: {}?\nAnswer: {}\nYour output: "
)
return tmpl.format(line['question'], line['prediction'])
def YOrN_Extraction(output):
s = output.lower()
if 'yes' in s and 'no' not in s:
return 'Yes'
if 'yes' not in s and 'no' in s:
return 'No'
return 'Unknown'
def YOrN_auxeval(model, line):
prompt = YOrN_match_prompt(line)
retry = 5
for i in range(retry):
output = model.generate(prompt, temperature=0.5 * i)
ans = YOrN_Extraction(output)
if ans != 'Unknown':
return ans
return 'Unknown'
def YOrN_eval(eval_file, model='chatgpt-0613', nproc=4, verbose=False, dataset=None):
logger = get_logger('Evaluation')
data = load(eval_file)
data['prediction'] = [str(x) for x in data['prediction']]
storage = eval_file.replace('.xlsx', '_auxmatch.xlsx')
tmp_file = eval_file.replace('.xlsx', '_tmp.pkl')
if not osp.exists(storage):
ans_map = {k: YOrN_Extraction(v) for k, v in zip(data['index'], data['prediction'])}
if osp.exists(tmp_file):
tmp = load(tmp_file)
for k in tmp:
if ans_map[k] == 'Unknown' and tmp[k] != 'Unknown':
ans_map[k] = tmp[k]
data['extracted'] = [ans_map[x] for x in data['index']]
unknown = data[data['extracted'] == 'Unknown']
        assert model == 'chatgpt-0613'
model_name = 'gpt-3.5-turbo-0613'
if INTERNAL:
model = OpenAIWrapperInternal(model_name, verbose=verbose, retry=10)
else:
model = OpenAIWrapper(model_name, verbose=verbose, retry=10)
lt = len(unknown)
lines = [unknown.iloc[i] for i in range(lt)]
tups = [(model, line) for line in lines]
indices = list(unknown['index'])
if len(tups):
res = track_progress_rich(YOrN_auxeval, tups, nproc=nproc, chunksize=nproc, keys=indices, save=tmp_file)
for k, v in zip(indices, res):
ans_map[k] = v
data['extracted'] = [ans_map[x] for x in data['index']]
dump(data, storage)
else:
logger.warning(f"GPT matching file {storage} already exists, will reuse it in YOrN_eval. ")
data = load(storage)
data["score"] = (data["answer"] == data["extracted"])
dump(data, storage)
if dataset is not None and listinstr(['MME'], dataset):
score = MME_rating(storage)
elif dataset is not None and listinstr(['Hallusion'], dataset):
score = Hallusion_rating(storage)
else:
score = default_rating(storage)
score_tgt = eval_file.replace('.xlsx', '_score.csv')
dump(score, score_tgt)
logger.info(f'YOrN_eval successfully finished evaluating {eval_file}, results saved in {score_tgt}')
logger.info('Score: ')
logger.info(score)
return score
def parse_args():
parser = argparse.ArgumentParser(description="Inference LLM Answers. ")
parser.add_argument("data", type=str, help="The question set for inference, in excel / tsv / json format. ")
parser.add_argument("--model", type=str, help="The LLM (GPT) used for inference. ", default="chatgpt-0613", choices=['chatgpt-0613'])
parser.add_argument("--nproc", type=int, default=4)
parser.add_argument("--dataset", type=str, default=None)
parser.add_argument("--verbose", action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
acc = YOrN_eval(eval_file=args.data, model=args.model, nproc=args.nproc, verbose=args.verbose, dataset=args.dataset)
| [] |
2024-01-10 | open-compass/VLMEvalKit | vlmeval~evaluate~mathvista_eval.py | from vlmeval.api import OpenAIWrapper, OpenAIWrapperInternal
from vlmeval.smp import *
from vlmeval.utils import track_progress_rich
from vlmeval.utils.matching_util import can_infer
INTERNAL = os.environ.get('INTERNAL', 0)
def get_gpt4_ICE():
example_1 = """
Hint: Please answer the question requiring an integer answer and provide the final value, e.g., 1, 2, 3, at the end.\n
Question: Which number is missing?\n
Model response: The number missing in the sequence is 14.\n
Extracted answer: 14
"""
example_2 = """
Hint: Please answer the question requiring a floating-point number with one decimal place and provide the final value, e.g., 1.2, 1.3, 1.4, at the end.\n
Question: What is the fraction of females facing the camera?\n
Model response: The fraction of females facing the camera is 0.6, which means that six out of ten females in the group are facing the camera.\n
Extracted answer: 0.6
"""
example_3 = """
Hint: Please answer the question requiring a floating-point number with two decimal places and provide the final value, e.g., 1.23, 1.34, 1.45, at the end.\n
Question: How much money does Luca need to buy a sour apple candy and a butter-scotch candy? (Unit: $)\n
Model response: Luca needs $1.45 to buy a sour apple candy and a butterscotch candy.\n
Extracted answer: 1.45
"""
example_4 = """
Hint: Please answer the question requiring a Python list as an answer and provide the final list, e.g., [1, 2, 3], [1.2, 1.3, 1.4], at the end.\n
Question: Between which two years does the line graph saw its maximum peak?\n
Model response: The line graph saw its maximum peak between 2007 and 2008.\n
Extracted answer: [2007, 2008]
"""
example_5 = """
Hint: Please answer the question and provide the correct option letter, e.g., A, B, C, D, at the end.\n
Question: What fraction of the shape is blue?\n
Choices: (A) 3/11 (B) 8/11 (C) 6/11 (D) 3/5\n
Model response: The correct answer is (B) 8/11.\n
Extracted answer: B
"""
return [example_1,example_2,example_3,example_4,example_5]
def build_mathvista_gpt4_prompt(line):
task_description = """ Please read the following example. Then extract the answer from the model response and type it at the end of the prompt.\n"""
question = line['question']
prediction = str(line['prediction'])
prompt = task_description
examples = get_gpt4_ICE()
for example in examples:
prompt += example + '\n'
prompt += question + '\n'
    prompt += 'Model response: ' + prediction
prompt += 'Extracted answer:'
return prompt
def list_to_dict(lst):
return {chr(65 + i): val for i, val in enumerate(lst)}
def post_check(line, prefetch=False):
res = None
ans = line['answer']
response = line['prediction'] if prefetch else line['res']
try:
if line['question_type'] == 'multi_choice':
ans = line['answer_option']
choices = list_to_dict(eval(line['choices']))
res = can_infer(response, choices)
if prefetch:
return res
else:
if line['answer_type'] == 'integer':
res = int(response)
ans = int(line['answer'])
elif line['answer_type'] == 'float':
res = float(response)
ans = float(line['answer'])
else:
                res = str(response)
ans = str(ans)
except ValueError:
pass
if res == ans:
return res
else:
return False
def MathVista_auxeval(model, line):
prompt = build_mathvista_gpt4_prompt(line)
log = ''
retry = 5
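    # First try to extract the answer directly from the raw prediction ("prefetch");
    # only fall back to the GPT-based extraction below if that fails.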
if post_check(line, prefetch=True):
res = post_check(line, prefetch=True)
return dict(log='Prefetch succeed', res=res)
for i in range(retry):
prediction = line['prediction']
res = model.generate(prompt, temperature=i * 0.5)
if res is None:
log += f'Try {i}: output is {prediction}, failed to parse.\n'
else:
log += 'Succeed'
return dict(log=log, res= res)
log += 'All 5 retries failed.\n'
return dict(log=log, res='')
def MathVista_acc(result_file):
data = load(result_file)
tot = defaultdict(lambda: 0)
fetch = defaultdict(lambda: 0)
hit = defaultdict(lambda: 0)
lt = len(data)
skill_list = []
for i in range(lt):
item = data.iloc[i]
index = item['index']
cate = item['task']
tot['Overall'] += 1
try:
skills = eval(item['skills'])
except SyntaxError:
skills = [item['skills']]
for skill in skills:
if skill not in skill_list:
skill_list.append(skill)
tot[skill] += 1
tot[cate] += 1
if item['log'] == 'Prefetch succeed':
fetch['Overall'] += 1
fetch[cate] += 1
for skill in skills:
fetch[skill] += 1
if post_check(item, prefetch=False):
hit['Overall'] += 1
hit[cate] += 1
for skill in skills:
hit[skill] += 1
res = defaultdict(list)
for k in tot.keys():
res['Task&Skill'].append(k)
res['tot'].append(tot[k])
res['prefetch'].append(fetch[k])
res['hit'].append(hit[k])
res['prefetch_rate'].append(fetch[k] / tot[k] * 100)
res['acc'].append(hit[k] / tot[k] * 100)
res = pd.DataFrame(res)
return res
def MathVista_eval(eval_file, model='gpt-4-turbo', nproc=4, verbose=False):
logger = get_logger('Evaluation')
suffix = eval_file.split('.')[-1]
storage = eval_file.replace(f'.{suffix}', f'_{model}.xlsx')
tmp_file = eval_file.replace(f'.{suffix}', f'_{model}.pkl')
if osp.exists(storage):
logger.warning(f"GPT scoring file {storage} already exists, will reuse it in MathVista_eval. ")
else:
data = load(eval_file)
gpt_version = model
model_map = {
'gpt-4-turbo': 'gpt-4-1106-preview',
'gpt-4-0613': 'gpt-4-0613',
'chatgpt-1106': 'gpt-3.5-turbo-1106',
'chatgpt-0613': 'gpt-3.5-turbo-0613'
}
model_version = model_map[gpt_version]
if INTERNAL:
# We follow the original codebase to set max_tokens == 3
model = OpenAIWrapperInternal(model_version, verbose=verbose, max_tokens=128, retry=10)
else:
model = OpenAIWrapper(model_version, verbose=verbose, max_tokens=128, retry=10)
lt = len(data)
lines = [data.iloc[i] for i in range(lt)]
tups = [(model, line) for line in lines]
indices = [line['index'] for line in lines]
ans = {}
if osp.exists(tmp_file):
ans = load(tmp_file)
tups = [x for x, i in zip(tups, indices) if i not in ans]
indices = [i for i in indices if i not in ans]
if len(indices):
new_results = track_progress_rich(
MathVista_auxeval, tups, nproc=nproc, chunksize=nproc,
keys=indices, save=tmp_file)
ans = load(tmp_file)
for k, v in zip(indices, new_results):
assert k in ans
assert ans[k]['log'] == v['log'] and ans[k]['res'] == v['res']
log_map, res_map = {}, {}
all_inds = [line['index'] for line in lines]
for k in all_inds:
log_map[k] = ans[k]['log']
res_map[k] = ans[k]['res']
data['res'] = [res_map[idx] for idx in data['index']]
data['log'] = [log_map[idx] for idx in data['index']]
dump(data, storage)
score = MathVista_acc(storage)
score_pth = storage.replace('.xlsx','_score.csv')
dump(score,score_pth)
logger.info(f'MathVista_eval successfully finished evaluating {eval_file}, results saved in {score_pth}')
logger.info(f'Score: ')
logger.info(score)
def parse_args():
parser = argparse.ArgumentParser(description="Inference LLM Answers. ")
parser.add_argument("data", type=str, help="The question set for inference, in excel / tsv / json format. ")
parser.add_argument(
"--model",
type=str,
help="The LLM (GPT) used for inference. ",
default="gpt-4-turbo",
choices=['gpt-4-0613', 'gpt-4-turbo', 'chatgpt-1106', 'chatgpt-0613'])
parser.add_argument("--nproc", type=int, default=4)
parser.add_argument("--verbose", action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
MathVista_eval(eval_file=args.data, model=args.model, nproc=args.nproc, verbose=args.verbose)
| [
"PLACEHOLDER\n",
"Extracted answer:",
"Model respone: PLACEHOLDER"
] |
2024-01-10 | open-compass/VLMEvalKit | vlmeval~evaluate~multiple_choice.py | import os.path as osp
import pandas as pd
from tqdm import tqdm
from vlmeval.api import OpenAIWrapper, OpenAIWrapperInternal
from vlmeval.utils import can_infer, track_progress_rich, TSVDataset
from vlmeval.smp import *
import numpy as np
INTERNAL = os.environ.get('INTERNAL', 0)
abbrs = {
'coarse_perception': 'CP',
'finegrained_perception (instance-level)': 'FP-S',
'finegrained_perception (cross-instance)': 'FP-C',
'logic_reasoning': 'LR',
'relation_reasoning': 'RR',
'attribute_reasoning': 'AR'
}
def MMMU_preproc(data):
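    # MMMU contains open-ended questions without options (column A is NaN). Re-cast them
    # as two-choice questions: the ground-truth answer becomes option A and a dummy
    # 'Other Answers' becomes option B, so the multiple-choice pipeline still applies.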
logger = get_logger('Evaluation')
cnt = 0
As, Bs, Ans = list(data['A']), list(data['B']), list(data['answer'])
lt = len(data)
for i in range(lt):
if pd.isna(As[i]):
As[i] = Ans[i]
Bs[i] = 'Other Answers'
cnt += 1
logger.info(f'During MMMU_preproc in Evaluation, {cnt} open questions are re-formulated to multi-choice ones. ')
data['A'] = As
data['B'] = Bs
return data
def report_acc(df):
# assert group in [None, 'category', 'l2-category']
res = defaultdict(list)
if 'split' in df:
splits = list(set(df['split']))
res['split'] = splits
else:
df['split'] = ['dev'] * len(df)
res['split'] = ['dev']
for group in [None, 'l2-category', 'category']:
if group is None:
res['Overall'] = [np.mean(df[df['split'] == sp]['hit']) for sp in res['split']]
elif group not in df:
continue
else:
abilities = list(set(df[group]))
abilities.sort()
for ab in abilities:
ab_name = abbrs[ab] if ab in abbrs else ab
sub_df = df[df[group] == ab]
res[ab_name] = [np.mean(sub_df[sub_df['split'] == sp]['hit']) for sp in res['split']]
return pd.DataFrame(res)
def extract_options(item):
options = []
for c in list(string.ascii_uppercase):
if c in item and not pd.isna(item[c]):
options.append(item[c])
else:
return options
return options
def build_prompt(question, options, prediction):
tmpl = (
"You are an AI assistant who will help me to match an answer with several options of a single-choice question. "
"You are provided with a question, several options, and an answer, and you need to find which option is most similar to the answer. "
"If the meaning of all options are significantly different from the answer, output E. "\
"Your should output a single uppercase character in A, B, C, D (if they are valid options), and E. \n"
"Example 1: \n"
"Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\nAnswer: a cute teddy bear\nYour output: A\n"
"Example 2: \n"
"Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\nAnswer: Spider\nYour output: E\n"
"Example 3: \n"
"Question: {}?\nOptions: {}\nAnswer: {}\nYour output: "
)
return tmpl.format(question, options, prediction)
def build_prompt_cn(question, options, prediction):
tmpl = (
"你是一个帮助我匹配答案与单选题中多个选项的 AI 助手。"
"你会被提供:一个问题,多个选项,一个答案。你的任务是找到与答案意义最相近的选项。"
"如果所有选项的意义都与答案显著不同,则输出 E。"
"你应该输出一个单个的大写字母,例如 A, B, C, D(如果它们是有效选项),或 E。"
"例 1:"
"问题: 图中最主要的物体是什么?\n选项: A. 泰迪熊 B. 兔子 C. 猫 D. 狗\n答案: 一只可爱的泰迪熊\n输出: A\n"
"例 2: \n"
"问题: 图中最主要的物体是什么?\n选项: A. 泰迪熊 B. 兔子 C. 猫 D. 狗\n答案: 蜘蛛\n输出: E\n"
"例 3: \n"
"问题: {}?\n选项: {}\n答案: {}\n输出: "
)
return tmpl.format(question, options, prediction)
def build_choices(item):
ret = {}
for ch in string.ascii_uppercase:
if ch in item and (not pd.isna(item[ch])):
ret[ch] = item[ch]
return ret
def prefetch_answer(item):
choices = build_choices(item)
return can_infer(item['prediction'], choices)
def extract_answer_from_item(model, item):
logger = get_logger('Evaluation')
    # Returns a dict with the matched option ('opt') and a log of the matching process ('log')
options = extract_options(item)
option_str = build_options(options)
if cn_string(item['question']):
prompt = build_prompt_cn(item['question'], option_str, item['prediction'])
else:
prompt = build_prompt(item['question'], option_str, item['prediction'])
retry = 3
choices = build_choices(item)
ret = can_infer(item['prediction'], choices)
if ret:
return dict(opt=ret, log=item['prediction'])
while retry:
ans = model.generate(prompt)
if 'Failed to obtain answer via API' in ans:
msg = 'GPT API failed to answer. '
logger.warning(msg)
retry -= 1
else:
ret = can_infer(ans, choices)
if ret:
return dict(opt=ret, log=ans)
else:
logger.warning(f'GPT output includes 0 or more than 1 letter in uppercase letters: {ans}')
retry -= 1
if retry == 0:
num_options = sum([ch in item for ch in string.ascii_uppercase])
if num_options >= 2:
chars = string.ascii_uppercase[:num_options]
chars = chars + 'Z'
num_options += 1
tmp = rd.randint(0, num_options - 1)
return dict(opt=chars[tmp], log='Failed to predict, thus randomly generate one. ')
def prefetch_sub_data(sub_data, answer_map, verbose=False):
lt = len(sub_data)
GT, PRED = [], []
for i in range(lt):
item = sub_data.iloc[i]
idx = item['index']
GT.append(answer_map[idx])
PRED.append(prefetch_answer(item))
if PRED[-1] and (GT[-1] != PRED[-1]):
log = f"Failed in Prefetching Rolling {i}: Answer is {GT[-1]}, Prediction is {item['prediction']}, Pre-fetched is {PRED[-1]}. "
return dict(hit=0, log=log)
flag = True
for g, p in zip(GT, PRED):
if g != p:
flag = False
ret = (dict(hit=1, log="Succeed During Pre-fetching"), ) if flag else (None, )
ret = ret + (GT, PRED) if verbose else ret
return ret if len(ret) > 1 else ret[0]
def eval_sub_data(model, sub_data, answer_map):
res, GT, PRED = prefetch_sub_data(sub_data, answer_map, verbose=True)
if res is not None:
return res
lt = len(sub_data)
log = ''
for i in range(lt):
if PRED[i]:
log += f'Rolling {i} Matched.\n'
else:
res = extract_answer_from_item(model, sub_data.iloc[i])
opt, match_log = res['opt'], res['log']
PRED[i] = opt
if PRED[i] != GT[i]:
log += f"Failed in Rolling {i}: Answer is {GT[i]}; Prediction is {sub_data.iloc[i]['prediction']}; Pre-fetched is {PRED[i]}; Match Log is {match_log}.\n"
return dict(hit=0, log=log)
else:
log += f"Rolling {i}: Answer is {GT[i]}, Prediction is {sub_data.iloc[i]['prediction']}, Pre-fetched is {PRED[i]}.\n"
return dict(hit=1, log=log)
def eval_data_groups(model, data_groups, answer_map, result, result_file, nproc=16):
prefetched = [prefetch_sub_data(g, answer_map) for g in data_groups]
remain = []
for dg, pf in zip(data_groups, prefetched):
if pf:
result[dg.iloc[0]['index'] % 1e6] = pf
else:
remain.append(dg)
dump(result, result_file)
tups = [(model, x, answer_map) for x in remain]
keys = [x.iloc[0]['index'] % 1e6 for x in remain]
if len(tups) == 0:
return
res = track_progress_rich(
eval_sub_data,
tups,
nproc=nproc,
chunksize=nproc,
save=result_file,
keys=keys)
result = load(result_file)
for k, v in zip(keys, res):
if k in result:
assert result[k]['hit'] == v['hit'] and result[k]['log'] == v['log']
else:
result[k] = v
dump(result, result_file)
def multiple_choice_eval(eval_file, dataset=None, model='chatgpt-0613', nproc=4, verbose=False):
logger = get_logger('Evaluation')
assert dataset is not None
if dataset == 'MMBench_TEST_CN':
dataset = 'MMBench_CN'
elif dataset == 'MMBench_TEST_EN':
dataset = 'MMBench'
if listinstr(['mmbench', 'ccbench'], dataset.lower()):
data = load(eval_file)
data['index'] = [int(x) for x in data['index']]
dump(data, eval_file)
rd.seed(2680)
suffix = eval_file.split('.')[-1]
assert model in ['chatgpt-0613', "exact_matching"]
name_str = 'openai' if model == 'chatgpt-0613' else model
if model == 'exact_matching':
model = None
else:
model_name = 'gpt-3.5-turbo-0613'
if INTERNAL:
model = OpenAIWrapperInternal(model_name, verbose=verbose, retry=10)
else:
model = OpenAIWrapper(model_name, verbose=verbose, retry=10)
logger.info(f'Evaluating {eval_file}')
result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl')
result = {}
if osp.exists(result_file):
result = load(result_file)
data = load(eval_file)
data = data.sort_values(by='index')
data['prediction'] = [str(x) for x in data['prediction']]
for k in data.keys():
data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)
meta = TSVDataset(dataset).data
cate_map = {i: c for i, c in zip(meta['index'], meta['category'])}
answer_map = {i: c for i, c in zip(meta['index'], meta['answer'])}
l2_cate_map = {i: c for i, c in zip(meta['index'], meta['l2-category'])} if 'l2-category' in meta else None
split_map = {i: c for i, c in zip(meta['index'], meta['split'])} if 'split' in meta else None
if listinstr(['MMMU'], dataset):
data = MMMU_preproc(data)
answer_map = {k: (v if v in list(string.ascii_uppercase) else 'A') for k, v in answer_map.items()}
data = data[data['index'].isin(answer_map)]
data_main = data[data['index'] < int(1e6)]
meta_idx_set = set(meta['index'])
data_main = data_main[data_main['index'].isin(meta_idx_set)]
lt = len(data_main)
hit, tot = 0, 0
data_groups = []
for i in tqdm(range(lt)):
# Dealing with the normal part
item_main = data_main.iloc[i]
idx = item_main['index']
if idx in result:
correct = result[idx]['hit']
assert correct in [0, 1]
hit += correct
tot += 1
continue
sub_data = data[data['index'] % int(1e6) == idx]
data_groups.append(sub_data)
if len(data_groups):
if model is not None:
eval_data_groups(
model=model,
data_groups=data_groups,
answer_map=answer_map,
nproc=nproc,
result=result,
result_file=result_file)
else:
logger.warning("Exact Matching mode, will not do GPT-based answer matching. ")
keys = [x.iloc[0]['index'] % 1e6 for x in data_groups]
for k in keys:
result[k] = dict(hit=0, log="Failed in Prefetch, no GPT-based answer matching under `exact_matching` policy.")
dump(result, result_file)
tmp_pth = f'/tmp/{timestr()}.xlsx'
dump(data_main, tmp_pth)
data_main = load(tmp_pth)
res = load(result_file)
indices = data_main['index']
data_main['hit'] = [res[i]['hit'] for i in indices]
data_main['log'] = [res[i]['log'] for i in indices]
main_idx = data_main['index']
data_main['category'] = [cate_map[i] for i in main_idx]
if l2_cate_map is not None:
data_main['l2-category'] = [l2_cate_map[i] for i in main_idx]
if split_map is not None:
data_main['split'] = [split_map[i] for i in indices]
# load split
dump(data_main, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
data_main = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
acc = report_acc(data_main)
score_file = eval_file.replace(f'.{suffix}', f'_acc.csv')
dump(acc, score_file)
logger.info(f'multiple_choice_eval successfully finished evaluating {eval_file}, results saved in {score_file}')
logger.info(f'Score: ')
logger.info(acc)
return acc
def parse_args():
parser = argparse.ArgumentParser(description="Inference LLM Answers. ")
parser.add_argument("data", type=str, help="The question set for inference, in excel / tsv / json format. ")
parser.add_argument("--model", type=str, help="The LLM (GPT) used for inference. ", default='chatgpt-0613', choices=['chatgpt-0613', 'exact_matching'])
parser.add_argument(
"--dataset",
type=str,
default='MMBench',
help='The dataset to evaluate',
choices=['MMBench', 'MMBench_CN', 'MMBench_DEV_EN', 'MMBench_DEV_CN', 'SEEDBench_IMG', 'CCBench', 'MMBench_TEST_CN', 'MMBench_TEST_EN'])
parser.add_argument("--nproc", type=int, default=6)
parser.add_argument("--verbose", action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
acc = multiple_choice_eval(eval_file=args.data, model=args.model, dataset=args.dataset, nproc=args.nproc, verbose=args.verbose)
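# Example invocation (script and prediction-file names are illustrative, not from the original repo):
#   python multiple_choice.py my_model_MMBench.xlsx --dataset MMBench --model chatgpt-0613 --nproc 6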
| [
"prediction",
"question"
] |
2024-01-10 | VB1-VENOM/Capstone | blooms~llms.py | import replicate
import requests
def llama(prompt):
output = replicate.run(
"meta/llama-2-7b-chat:13c3cdee13ee059ab779f0291d29054dab00a47dad8261375654de5540165fb0",
input={"prompt": prompt,"max_new_tokens":1000}
)
# The meta/llama-2-7b-chat model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
o = ""
for item in output:
## https://replicate.com/meta/llama-2-7b-chat/versions/13c3cdee13ee059ab779f0291d29054dab00a47dad8261375654de5540165fb0/api#output-schema
o+=item
return o
def mistral(prompt):
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"
headers = {"Authorization": "Bearer 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"}
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
output = query({
"inputs": prompt,"parameters" : {"max_new_tokens": 256}
})
return output[0]['generated_text']
def mistralV2(prompt):
    API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
    headers = {"Authorization": "Bearer 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"}
    def query(payload):
        response = requests.post(API_URL, headers=headers, json=payload)
        return response.json()
    output = None
    try:
        output = query({
            "inputs": prompt, "parameters": {"max_new_tokens": 256}
        })
        return output[0]['generated_text']
    except Exception:
        # On a malformed or error response (e.g. the model is still loading) return the raw
        # payload, or None if the request itself failed, instead of raising.
        return output
import openai
def chatgpt(prompt, max_tokens=1000, temperature=0.7):
api_key = "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" # Replace with your OpenAI API key
openai.api_key = api_key
response = openai.Completion.create(
engine="text-davinci-002", # Use the appropriate engine for ChatGPT
prompt=prompt,
max_tokens=max_tokens,
temperature=temperature
)
return response.choices[0].text
def obs(prompt):
API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
headers = {"Authorization": "Bearer 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"}
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
output = query({
"inputs": prompt,"parameters" : {"max_new_tokens": 256}
})
return output[0]['generated_text']
# print(mistralV2("What is reinforcement Learning?"))
# def openAi(prompt):
#print(llama("Explain the working of photosynthesis briefly"))
| [] |
2024-01-10 | lupantech/PromptPG | run_gpt3~run_gpt3_step.py | import os
import re
import json
import argparse
import random
from base_prompt_step import *
from utilities import extract_prediction
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
# print(openai.api_key)
def load_data(args):
problems_test = json.load(open(os.path.join(args.data_root, f'problems_{args.test_split}.json')))
problems_train = json.load(open(os.path.join(args.data_root, f'problems_train.json')))
problems = {**problems_test, **problems_train}
# test problem ids
pids_test = list(problems_test.keys())
pids_test = pids_test[:args.test_number] if args.test_number > 0 else pids_test
print(f"number of test problems: {len(pids_test)}\n")
# pick up shot examples from the training set
shot_pids = args.shot_pids
train_pids = list(problems_train.keys())
if shot_pids == None:
assert args.shot_number >= 0 and args.shot_number <= 32
shot_pids = random.sample(train_pids, args.shot_number) # random sample
else:
shot_pids = [str(pid) for pid in shot_pids]
for pid in shot_pids:
assert pid in train_pids # check shot_pids
print("training question ids for prompting: ", shot_pids, "\n")
return problems, pids_test, shot_pids
def get_gpt3_output(prompt, args):
response = openai.Completion.create(engine=args.engine,
prompt=prompt,
temperature=args.temperature,
max_tokens=args.max_tokens,
top_p=args.top_p,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
stop=["\n"])
output = response["choices"][0]["text"].strip()
return output
def normalize_answer(text, unit):
# ["1,000", "123", "3/4", "56.456", "$56.4", "-3", "-10.02", "-3/2"]
text = re.sub("^[\$]", "", text)
text = re.sub("[\,\.\,\/]$", "", text)
result = re.match("^[-+]?[\d,./]+$", text)
if result is not None:
# is number?
text = text.replace(",", "")
result = re.match("[-+]?\d+$", text)
if result is not None:
number = int(text)
elif "/" in text:
nums = text.split("/")
number = round(float(nums[0]) / float(nums[1]), 3)
else:
number = round(float(text), 3)
number = str(number)
number = re.sub(r"\.[0]+$", "", number)
return number
else:
# is text
if unit:
text = text.replace(unit, "").strip()
return text
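# Illustrative check of the normalization rules implemented above (not part of the original
# script; the inputs mirror the samples listed in the comment inside normalize_answer):
def _normalize_answer_examples():
    samples = ["1,000", "3/4", "$56.4", "-10.02", "5 cups"]
    # expected: {"1,000": "1000", "3/4": "0.75", "$56.4": "56.4", "-10.02": "-10.02", "5 cups": "5"}
    return {s: normalize_answer(s, "cups") for s in samples}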
def get_result_file(args):
result_path = f"{args.output_root}/{args.model}"
os.makedirs(result_path, exist_ok=True)
result_file = "{}/{}_{}_{}_{}_seed_{}.json".format(result_path, args.label, args.test_split, args.prompt_format,
args.shot_number, args.seed)
return result_file
def save_results(result_file, acc, correct, count, shot_pids, args, results):
data = {}
data['acc'] = acc
data['correct'] = correct
data['count'] = count
data['shot_pids'] = shot_pids
data['args'] = vars(args)
data['results'] = results
with open(result_file, 'w') as f:
json.dump(data, f, indent=2, separators=(',', ': '))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='../data/tabmwp')
parser.add_argument('--output_root', type=str, default='../results')
parser.add_argument('--model', type=str, default='gpt3')
parser.add_argument('--option_inds', type=list, default=["A", "B", "C", "D", "E", "F"])
# user options
parser.add_argument('--label', type=str, default='exp0')
parser.add_argument('--test_split', type=str, default='val', choices=['dev', 'dev1k', 'test', 'test1k'])
parser.add_argument('--test_number', type=int, default=10, help='GPT-3 is expensive. -1 for whole val/test set')
parser.add_argument('--save_every', type=int, default=10, help='Save the result with every n examples.')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--prompt_format', type=str, default='TQ', help='prompt format template')
parser.add_argument('--shot_number', type=int, default=2, help='Number of n-shot training examples.')
parser.add_argument('--shot_pids', type=int, nargs='+', default=None, help='Question indexes of shot examples')
parser.add_argument('--seed', type=int, default=1, help='random seed')
# GPT-3 settings
parser.add_argument('--engine', type=str, default='text-davinci-002', choices=['text-davinci-002', 'ada'])
parser.add_argument('--temperature', type=float, default=0.0)
parser.add_argument('--max_tokens',
type=int,
default=512,
help='The maximum number of tokens allowed for the generated answer.')
parser.add_argument('--top_p', type=float, default=1.0)
parser.add_argument('--frequency_penalty', type=float, default=0.0)
parser.add_argument('--presence_penalty', type=float, default=0.0)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=False))
random.seed(args.seed)
    problems, pids, shot_pids = load_data(args)  # problems, test question ids, shot example ids
result_file = get_result_file(args)
# load the check point
if os.path.exists(result_file):
print("# The result file exists! We will load the check point!!!")
check_point = json.load(open(result_file))
results = check_point['results']
else:
results = {}
total = len(pids)
check_count = len(results) # number of existing results
correct = 0 # number of correct results
# for pid in tqdm(pids):
for i, pid in enumerate(pids):
count = i + 1 # number of current results
problem = problems[pid]
answer = problems[pid]['answer']
options = problems[pid]['choices']
unit = problems[pid]['unit']
prompt = build_prompt(problems, shot_pids, pid, args) # generate the prompt input
prompt = prompt + "\nAnswer: Let's think step by step."
if pid in results:
output = results[pid]["output"]
steps = results[pid]["steps"]
else:
steps = get_gpt3_output(prompt, args) # generate the output by GPT-3
if steps.strip() != "":
prompt = "\n".join([prompt, "Therefore, the answer is"])
else:
prompt = "\n".join([prompt, steps, "Therefore, the answer is"])
output = get_gpt3_output(prompt, args) # generate the output by GPT-3
# the core prediction in the output
prediction = extract_prediction(output, options, args.option_inds)
# normalize the number in the text
answer_norm = normalize_answer(answer, unit)
prediction_norm = normalize_answer(prediction, unit)
# save the results
results[pid] = {}
results[pid]["answer"] = answer
results[pid]["answer_norm"] = answer_norm
results[pid]["steps"] = steps
results[pid]["output"] = output
results[pid]["prediction"] = prediction
results[pid]["prediction_norm"] = prediction_norm
# correct or not
if answer_norm.lower() == prediction_norm.lower():
correct += 1
results[pid]["true_false"] = True
else:
results[pid]["true_false"] = False
acc = correct / (i + 1) * 100
if args.debug or i < 3:
print("\n##################################")
print("# [Steps]", steps, "\n")
print("# [Prompt]", prompt, "\n")
print("# [Output]", output, "\n")
print("[A] labeled answer (normalized):\t", answer_norm)
print("[P] predicted answer (normalized):\t", prediction_norm)
print("[Acc]:\t", results[pid]["true_false"])
print("")
print("[A] labeled answer:\t", answer)
print("[P] predicted answer:\t", prediction)
print("[P] generated output:\t", output)
if count % args.save_every == 0 or count == total:
if count > check_count:
# have new outputs
save_results(result_file, acc, correct, count, shot_pids, args, results)
print(f"{count}/{total}, correct: {correct}, acc: {round(acc, 2)}%, saved to {result_file}")
else:
# no new outputs, just print the accuracy
print(f"{count}/{total}, correct: {correct}, acc: {round(acc, 2)}%")
| [
"\n",
"PLACEHOLDER\nAnswer: Let's think step by step.",
"Therefore, the answer is"
] |
2024-01-10 | lupantech/PromptPG | run_gpt3_rl~learn_policy.py | import os
import sys
import math
import json
import argparse
import random
import time
import torch
import openai
import numpy as np
import torch.nn.functional as F
from functools import lru_cache
from tools import utils
from base_prompt import *
from model import *
from utilities import extract_prediction, normalize_answer
sys.path.append("../")
openai.api_key = os.getenv("OPENAI_API_KEY")
def load_data(args):
    problems = json.load(open(os.path.join(args.data_root, f'problems_train.json')))
pids = list(problems.keys())
samples = random.sample(pids, args.train_number + args.cand_number) # random sample
train_pids = samples[:args.train_number]
cand_pids = samples[args.train_number:]
return problems, cand_pids, train_pids
def get_gpt3_output(prompt, args):
return call_gpt3(args.engine, prompt, args.temperature, args.max_tokens, args.top_p, args.frequency_penalty,
args.presence_penalty)
@lru_cache(maxsize=10000)
def call_gpt3(engine, prompt, temperature, max_tokens, top_p, frequency_penalty, presence_penalty):
patience = 100
while True:
try:
response = openai.Completion.create(engine=engine,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
stop=["\n"])
output = response["choices"][0]["text"].strip()
break
except Exception as e:
patience -= 1
if not patience:
print("!!! running out of patience waiting for OpenAI")
else:
time.sleep(0.1)
return output
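# call_gpt3 is memoized with lru_cache so that identical (engine, prompt, ...) requests made
# across epochs and batches are answered from cache instead of triggering another paid API call;
# it also retries up to `patience` times on transient OpenAI errors.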
def get_batch_reward_loss(scores, cand_pids, pid_batch, option_batch, unit_batch, label_batch, args):
batch_loss = 0
batch_reward = 0
## loop over the training examples
for i in range(len(scores)):
# interact with the environment to get rewards, which in our case is to feed the prompt into GPT-3 and evaluate the prediction
cand_prob = scores[i, :].clone().detach()
cand_prob = cand_prob.cpu().numpy()
cand_prob = np.nan_to_num(cand_prob, nan=0.000001) # replace np.nan with 0
cand_prob /= cand_prob.sum() # make probabilities sum to 1
# print(f"cand_prob: {cand_prob}")
# sample shot_pids from the cand_prob distribution
cids = np.random.choice(range(len(cand_pids)), args.shot_number, p=cand_prob, replace=False)
# reverse shot_pids so more relevant prompt will be put closer to the question
cids = cids[::-1]
# print(f"cids: {cids}")
shot_pids = [cand_pids[cid] for cid in cids]
# print(f"shot_pids: {shot_pids}")
# generate the prompt input
prompt = build_prompt(problems, shot_pids, pid_batch[i], args)
# get the output from GPT-3
output = get_gpt3_output(prompt, args)
# extract the prediction from the output
prediction = extract_prediction(output, option_batch[i], args.option_inds)
# normalize the number in the text
prediction_norm = normalize_answer(prediction, unit_batch[i])
log_prob = 0
for cid in cids:
log_prob += torch.log(scores[i, cid])
# print(f"log_prob: {log_prob}")
if prediction_norm.lower() == label_batch[i].lower():
_reward = 1
else:
_reward = -1
# print(f"reward: {reward}")
batch_reward += _reward
batch_loss -= _reward * log_prob
return cids, batch_reward, batch_loss
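# The loss computed above is the REINFORCE estimator: for each training example,
# loss -= reward * sum(log p(selected candidate)). A stripped-down numeric sketch of the same
# rule (the tensors below are made up for illustration only):
def _reinforce_loss_sketch():
    probs = torch.tensor([0.2, 0.5, 0.3])               # softmax scores over 3 candidate prompts
    picked = [1, 2]                                      # indices of the sampled in-context shots
    reward = 1.0                                         # +1 if GPT-3 answered correctly, else -1
    log_prob = sum(torch.log(probs[c]) for c in picked)
    return -reward * log_prob                            # minimizing this raises p(good shots)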
def policy_gradient_train(policy_model, problems, train_pids, cand_pids, cand_examples, args):
# REINFORCE
# if os.path.exists(args.ckpt_path):
# print("!!! Model dir already exists. Consider load it instead of training again.")
optimizer = torch.optim.Adam(policy_model.parameters(), lr=args.lr)
train_samples, train_labels, units, options = [], [], [], []
for pid in train_pids:
train_samples.append(create_example_from_pid(
pid, problems, args, test=True)) # Set test=True to avoid answer being added to the training input.
answer_norm = normalize_answer(problems[pid]['answer'], problems[pid]['unit'])
train_labels.append(answer_norm)
units.append(problems[pid]['unit'])
options.append(problems[pid]['choices'])
num_batch = math.ceil(len(train_samples) / args.batch_size)
reward_history = []
loss_history = []
total_reward_history = [] # epoch based
total_loss_history = [] # epoch based
STOP_FLAG = False
for epoch in range(args.epochs):
logger.write(f"Epoch: {epoch}")
total_train_reward = 0
total_train_loss = 0
# We can simply set the batch_size to len(train_data) in few-shot setting.
for batch_i in range(num_batch):
logger.write(f"Batch: {batch_i}")
train_batch = train_samples[batch_i * args.batch_size:(batch_i + 1) * args.batch_size]
label_batch = train_labels[batch_i * args.batch_size:(batch_i + 1) * args.batch_size]
pid_batch = train_pids[batch_i * args.batch_size:(batch_i + 1) * args.batch_size]
unit_batch = units[batch_i * args.batch_size:(batch_i + 1) * args.batch_size]
option_batch = options[batch_i * args.batch_size:(batch_i + 1) * args.batch_size]
# We need to encode cands again every time we update the network
embedding_cands = policy_model(cand_examples) # len(cand_examples) x embedding_size
embedding_ctxt = policy_model(train_batch) # len(train_batch) x embedding_size
scores = torch.mm(embedding_ctxt, embedding_cands.t()) # len(train_batch) x len(cand_examples)
# print(f"unnormed scores: {scores}")
scores = F.softmax(scores, dim=1) # len(train_batch) x len(cand_examples)
cids, reward, loss = get_batch_reward_loss(scores, cand_pids, pid_batch, option_batch, unit_batch,
label_batch, args)
logger.write(f"cids for sample[-1] in batch: {cids}")
logger.write(f"Cand prob for sample[-1] in batch: {[round(x,5) for x in scores[-1, :].tolist()]}")
logger.write(f"### reward for the batch: {reward}")
logger.write(f"### loss for the batch: {loss}\n")
# linear layer has Weight and bias
# prev_param = list(policy_model.linear.parameters())[0].clone()
# print(f"prev_param: {prev_param.data}")
optimizer.zero_grad()
loss.backward()
optimizer.step()
# for each iteration/batch
total_train_reward += reward
total_train_loss += loss.item()
reward_history.append(reward)
loss_history.append(loss.item())
if np.isnan(loss.item()):
STOP_FLAG = True
break
# for each epoch
total_reward_history.append(total_train_reward)
total_loss_history.append(total_train_loss)
best_reward = max(total_reward_history)
best_loss = min(total_loss_history)
best_reward_epoch = total_reward_history.index(best_reward)
best_loss_epoch = total_loss_history.index(best_loss)
logger.write("============================================")
logger.write(f"### Epoch: {epoch} / {args.epochs}")
logger.write(f"### Total reward: {total_train_reward}, " + f"Total loss: {round(total_train_loss,5)}, " +
f"Best reward: {best_reward} at epoch {best_reward_epoch}, " +
f"Best loss: {round(best_loss, 5)} at epoch {best_loss_epoch}\n")
# save every epoch
ckpt_file = os.path.join(args.ckpt_path, f"ckpt_{epoch}.pt")
torch.save(policy_model.linear.state_dict(), ckpt_file)
logger.write(f"saved the ckpt to {ckpt_file}")
# save best epoch
if epoch == best_reward_epoch:
ckpt_file = os.path.join(args.ckpt_path, "ckpt_best_reward.pt")
torch.save(policy_model.linear.state_dict(), ckpt_file)
logger.write(f"saved the best reward ckpt to {ckpt_file}")
if epoch == best_loss_epoch:
ckpt_file = os.path.join(args.ckpt_path, "ckpt_best_loss.pt")
torch.save(policy_model.linear.state_dict(), ckpt_file)
logger.write(f"saved the best loss ckpt to {ckpt_file}")
# save reward and loss history
history = {
"reward_history": reward_history,
"loss_history": loss_history,
"total_reward_history": total_reward_history,
"total_loss_history": total_loss_history,
}
history_file = os.path.join(args.ckpt_path, "history.json")
with open(history_file, 'w') as f:
json.dump(history, f, indent=2, separators=(',', ': '))
# print cache info
logger.write(call_gpt3.cache_info())
logger.write("============================================\n")
if STOP_FLAG:
break
# save in the end
ckpt_file = os.path.join(args.ckpt_path, "ckpt_final.pt")
torch.save(policy_model.linear.state_dict(), ckpt_file)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='../data/tabmwp')
parser.add_argument('--model', type=str, default='gpt3_rl')
parser.add_argument('--option_inds', type=list, default=["A", "B", "C", "D", "E", "F"])
# User options
parser.add_argument('--label', type=str, default='exp0')
parser.add_argument('--debug', action='store_true')
parser.add_argument(
'--prompt_format',
type=str,
default='TQ-A',
choices=['T-A', 'Q-A', 'Q-AS', 'Q-SA', 'TQ-A', 'TQ-AS', 'TQ-SA', 'QT-A', 'QT-AS', 'QT-SA', 'QTS-A', 'TQS-A'],
help='prompt format template')
parser.add_argument('--shot_number', type=int, default=2, help='Number of n-shot training examples.')
parser.add_argument('--seed', type=int, default=1, help='random seed')
# GPT-3 settings
parser.add_argument('--engine', type=str, default='text-davinci-002', choices=['text-davinci-002', 'ada'])
parser.add_argument('--temperature', type=float, default=0.0)
parser.add_argument('--max_tokens',
type=int,
default=512,
help='The maximum number of tokens allowed for the generated answer.')
parser.add_argument('--top_p', type=float, default=1.0)
parser.add_argument('--frequency_penalty', type=float, default=0.0)
parser.add_argument('--presence_penalty', type=float, default=0.0)
# Policy gradient settings
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--model_config',
type=str,
default='bert-base-uncased',
choices=['distilbert-base-uncased', 'bert-base-uncased'])
parser.add_argument('--train_number', type=int, default=20, help='Number of training samples.')
parser.add_argument('--cand_number', type=int, default=10, help='Number of candidate prompts.')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate of policy network.')
parser.add_argument('--epochs', type=int, default=20, help='Number of training epochs.')
parser.add_argument('--embedding_size', type=int, default=128, help='Policy network final layer hidden state size.')
parser.add_argument('--batch_size',
type=int,
default=20,
help='Policy network training batch size. Set to train_number by default.')
parser.add_argument('--ckpt_root', type=str, default='../checkpoints')
args = parser.parse_args()
# print and save the args
args.ckpt_path = os.path.join(args.ckpt_root, args.label)
utils.create_dir(args.ckpt_path)
_logger = utils.Logger(args.ckpt_path + '/args.txt')
print('====Input Arguments====')
_logger.write(json.dumps(vars(args), indent=2, sort_keys=False))
return args
if __name__ == '__main__':
args = parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed) # CPU random seed
torch.cuda.manual_seed(args.seed) # GPU random seed
torch.backends.cudnn.benchmark = True
## problems, test question ids, candidate prompt pids, RL training pids
problems, cand_pids, train_pids = load_data(args)
## policy network
policy_model = policy_network(model_config=args.model_config,
add_linear=True,
embedding_size=args.embedding_size,
freeze_encoder=True)
device = torch.device("cuda:" + args.gpu if torch.cuda.is_available() else "cpu") # one GPU
policy_model = policy_model.to(device)
## construct candidate examples
cand_examples = []
for pid in cand_pids:
example = create_example_from_pid(pid, problems, args, test=True)
cand_examples.append(example)
## TRAINING
logger = utils.Logger(os.path.join(args.ckpt_path, 'log.txt'))
policy_gradient_train(policy_model, problems, train_pids, cand_pids, cand_examples, args)
| [] |
2024-01-10 | lupantech/PromptPG | run_gpt3~run_gpt3.py | import os
import re
import json
import argparse
import random
from base_prompt import *
from utilities import extract_prediction
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
# print(openai.api_key)
def load_data(args):
problems_test = json.load(open(os.path.join(args.data_root, f'problems_{args.test_split}.json')))
problems_train = json.load(open(os.path.join(args.data_root, f'problems_train.json')))
problems = {**problems_test, **problems_train}
# test problem ids
pids_test = list(problems_test.keys())
pids_test = pids_test[:args.test_number] if args.test_number > 0 else pids_test
print(f"number of test problems: {len(pids_test)}\n")
# pick up shot examples from the training set
shot_pids = args.shot_pids
train_pids = list(problems_train.keys())
if shot_pids == None:
assert args.shot_number >= 0 and args.shot_number <= 32
shot_pids = random.sample(train_pids, args.shot_number) # random sample
else:
shot_pids = [str(pid) for pid in shot_pids]
for pid in shot_pids:
assert pid in train_pids # check shot_pids
print("training question ids for prompting: ", shot_pids, "\n")
return problems, pids_test, shot_pids
def get_gpt3_output(prompt, args):
response = openai.Completion.create(engine=args.engine,
prompt=prompt,
temperature=args.temperature,
max_tokens=args.max_tokens,
top_p=args.top_p,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
stop=["\n"])
output = response["choices"][0]["text"].strip()
return output
def normalize_answer(text, unit):
# ["1,000", "123", "3/4", "56.456", "$56.4", "-3", "-10.02", "-3/2"]
text = re.sub("^[\$]", "", text)
text = re.sub("[\,\.\,\/]$", "", text)
result = re.match("^[-+]?[\d,./]+$", text)
if result is not None:
# is number?
text = text.replace(",", "")
result = re.match("[-+]?\d+$", text)
if result is not None:
number = int(text)
elif "/" in text:
nums = text.split("/")
number = round(float(nums[0]) / float(nums[1]), 3)
else:
number = round(float(text), 3)
number = str(number)
number = re.sub(r"\.[0]+$", "", number)
return number
else:
# is text
if unit:
text = text.replace(unit, "").strip()
return text
def get_result_file(args):
result_path = f"{args.output_root}/{args.model}"
os.makedirs(result_path, exist_ok=True)
result_file = "{}/{}_{}_{}_{}_seed_{}.json".format(result_path, args.label, args.test_split, args.prompt_format,
args.shot_number, args.seed)
return result_file
def save_results(result_file, acc, correct, count, shot_pids, args, results):
data = {}
data['acc'] = acc
data['correct'] = correct
data['count'] = count
data['shot_pids'] = shot_pids
data['args'] = vars(args)
data['results'] = results
with open(result_file, 'w') as f:
json.dump(data, f, indent=2, separators=(',', ': '))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='../data/tabmwp')
parser.add_argument('--output_root', type=str, default='../results')
parser.add_argument('--model', type=str, default='gpt3')
parser.add_argument('--option_inds', type=list, default=["A", "B", "C", "D", "E", "F"])
# user options
parser.add_argument('--label', type=str, default='exp0')
parser.add_argument('--test_split', type=str, default='val', choices=['dev', 'dev1k', 'test', 'test1k'])
parser.add_argument('--test_number', type=int, default=10, help='GPT-3 is expensive. -1 for whole val/test set')
parser.add_argument('--save_every', type=int, default=10, help='Save the result with every n examples.')
parser.add_argument('--debug', action='store_true')
parser.add_argument(
'--prompt_format',
type=str,
default='TQ-A',
choices=['T-A', 'Q-A', 'Q-AS', 'Q-SA', 'TQ-A', 'TQ-AS', 'TQ-SA', 'QT-A', 'QT-AS', 'QT-SA', 'QTS-A', 'TQS-A'],
help='prompt format template')
parser.add_argument('--shot_number', type=int, default=2, help='Number of n-shot training examples.')
parser.add_argument('--shot_pids', type=int, nargs='+', default=None, help='Question indexes of shot examples')
parser.add_argument('--seed', type=int, default=1, help='random seed')
# GPT-3 settings
parser.add_argument('--engine', type=str, default='text-davinci-002', choices=['text-davinci-002', 'ada'])
parser.add_argument('--temperature', type=float, default=0.0)
parser.add_argument('--max_tokens',
type=int,
default=512,
help='The maximum number of tokens allowed for the generated answer.')
parser.add_argument('--top_p', type=float, default=1.0)
parser.add_argument('--frequency_penalty', type=float, default=0.0)
parser.add_argument('--presence_penalty', type=float, default=0.0)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=False))
random.seed(args.seed)
    problems, pids, shot_pids = load_data(args)  # problems, test question ids, shot example ids
result_file = get_result_file(args)
# load the check point
if os.path.exists(result_file):
print("# The result file exists! We will load the check point!!!")
check_point = json.load(open(result_file))
results = check_point['results']
else:
results = {}
total = len(pids)
check_count = len(results) # number of existing results
correct = 0 # number of correct results
# for pid in tqdm(pids):
for i, pid in enumerate(pids):
count = i + 1 # number of current results
problem = problems[pid]
answer = problems[pid]['answer']
options = problems[pid]['choices']
unit = problems[pid]['unit']
"""
problems: the whole dataset
shot_pids: sampled problem ids in the training set
        pid: test problem id
        one prompt = the input of GPT-3 = training example x N + test example w/o answer x 1
        Random sampling: randomly sample the examples in the training set
Dynamic sampling (RL):
given the test problem and all training sets, predict/sample the problem ids
"""
# shot_pids = RL(train_problems, test_pid, test_problem)
# shot_pids = RL(problems, pid)
prompt = build_prompt(problems, shot_pids, pid, args) # generate the prompt input
if pid in results:
output = results[pid]["output"]
else:
output = get_gpt3_output(prompt, args) # generate the output by GPT-3
# the core prediction in the output
prediction = extract_prediction(output, options, args.option_inds)
# normalize the number in the text
answer_norm = normalize_answer(answer, unit)
prediction_norm = normalize_answer(prediction, unit)
# save the results
results[pid] = {}
results[pid]["answer"] = answer
results[pid]["answer_norm"] = answer_norm
results[pid]["output"] = output
results[pid]["prediction"] = prediction
results[pid]["prediction_norm"] = prediction_norm
# correct or not
if answer_norm.lower() == prediction_norm.lower():
correct += 1
results[pid]["true_false"] = True
else:
results[pid]["true_false"] = False
acc = correct / (i + 1) * 100
if args.debug or i < 10:
print("\n##################################")
print(prompt, "\n")
print("[A] labeled answer (normalized):\t", answer_norm)
print("[P] predicted answer (normalized):\t", prediction_norm)
print("[Acc]:\t", results[pid]["true_false"])
print("")
print("[A] labeled answer:\t", answer)
print("[P] predicted answer:\t", prediction)
print("[P] generated output:\t", output)
if count % args.save_every == 0 or count == total:
if count >= check_count:
# have new outputs
print(f"{count}/{total}, correct: {correct}, acc: {round(acc, 2)}%, saved to {result_file}")
save_results(result_file, acc, correct, count, shot_pids, args, results)
else:
# no new outputs, just print the accuracy
print(f"{count}/{total}, correct: {correct}, acc: {round(acc, 2)}%")
| [] |
2024-01-10 | lupantech/PromptPG | run_gpt3_rl~run_gpt3.py | import os
import json
import argparse
import random
import time
from base_prompt import *
from model import *
from utilities import extract_prediction, normalize_answer
import numpy as np
import torch
import torch.nn.functional as F
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
def load_data(args):
problems_test = json.load(open(os.path.join(args.data_root, f'problems_{args.test_split}.json')))
problems_train = json.load(open(os.path.join(args.data_root, f'problems_train.json')))
problems = {**problems_test, **problems_train}
# test problem ids
test_pids = list(problems_test.keys())
test_pids = test_pids[:args.test_number] if args.test_number > 0 else test_pids
print(f"number of test problems: {len(test_pids)}\n")
# pick up shot/in-context example candidates from the training set
train_pids = list(problems_train.keys())
cand_pids = random.sample(train_pids, args.cand_number) # random sample
return problems, test_pids, cand_pids
def get_gpt3_output(prompt, args):
patience = 100
while True:
try:
response = openai.Completion.create(engine=args.engine,
prompt=prompt,
temperature=args.temperature,
max_tokens=args.max_tokens,
top_p=args.top_p,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
stop=["\n"])
output = response["choices"][0]["text"].strip()
break
except Exception as e:
patience -= 1
if not patience:
print("!!! Running out of patience waiting for OpenAI")
else:
print(e)
time.sleep(0.1)
return output
def get_result_file(args):
result_path = f"{args.output_root}/{args.model}"
os.makedirs(result_path, exist_ok=True)
result_file = "{}/{}_{}_{}_{}_seed_{}.json".format(result_path, args.label, args.test_split, args.prompt_format,
args.shot_number, args.seed)
return result_file
def save_results(result_file, acc, correct, count, cand_pids, args, results):
data = {}
data['acc'] = acc
data['correct'] = correct
data['count'] = count
data['cand_pids'] = cand_pids
data['args'] = vars(args)
data['results'] = results
with open(result_file, 'w') as f:
json.dump(data, f, indent=2, separators=(',', ': '))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='../data/tabmwp')
parser.add_argument('--output_root', type=str, default='../results')
parser.add_argument('--model', type=str, default='gpt3_rl')
parser.add_argument('--option_inds', type=list, default=["A", "B", "C", "D", "E", "F"])
# user options
parser.add_argument('--label', type=str, default='exp0')
parser.add_argument('--test_split', type=str, default='test', choices=['dev', 'dev1k', 'test', 'test1k'])
parser.add_argument('--test_number', type=int, default=100, help='GPT-3 is expensive. -1 for the whole test set')
parser.add_argument('--save_every', type=int, default=10, help='Save the result with every n examples.')
parser.add_argument('--debug', action='store_true')
parser.add_argument(
'--prompt_format',
type=str,
default='TQ-A',
choices=['T-A', 'Q-A', 'Q-AS', 'Q-SA', 'TQ-A', 'TQ-AS', 'TQ-SA', 'QT-A', 'QT-AS', 'QT-SA', 'QTS-A', 'TQS-A'],
help='prompt format template')
parser.add_argument('--shot_number', type=int, default=2, help='Number of n-shot training examples.')
parser.add_argument('--seed', type=int, default=1, help='random seed')
# GPT-3 settings
parser.add_argument('--engine', type=str, default='text-davinci-002', choices=['text-davinci-002', 'ada'])
parser.add_argument('--temperature', type=float, default=0.0)
parser.add_argument('--max_tokens',
type=int,
default=512,
help='The maximum number of tokens allowed for the generated answer.')
parser.add_argument('--top_p', type=float, default=1.0)
parser.add_argument('--frequency_penalty', type=float, default=0.0)
parser.add_argument('--presence_penalty', type=float, default=0.0)
# Policy Model settings
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--model_config',
type=str,
default='bert-base-uncased',
choices=['distilbert-base-uncased', 'bert-base-uncased'])
parser.add_argument('--cand_number', type=int, default=20, help='Number of candidate prompts.')
parser.add_argument('--embedding_size', type=int, default=128, help='Policy network final layer hidden state size.')
parser.add_argument('--ckpt_root', type=str, default='../checkpoints')
parser.add_argument('--ckpt', type=str, default=None)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=False))
# https://pytorch.org/docs/stable/notes/randomness.html
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed) # CPU random seed
torch.cuda.manual_seed(args.seed) # GPU random seed
torch.backends.cudnn.benchmark = True
# problems, test question ids, candidate prompt pids, RL training pids
problems, pids, cand_pids = load_data(args)
result_file = get_result_file(args)
# load the check point
if os.path.exists(result_file):
print("# The result file exists! We will load the learned check point!!!")
check_point = json.load(open(result_file))
results = check_point['results']
else:
results = {}
total = len(pids)
check_count = len(results) # number of existing results
correct = 0 # number of correct results
# policy network
policy_model = policy_network(model_config=args.model_config,
add_linear=True,
embedding_size=args.embedding_size,
freeze_encoder=True)
device = torch.device("cuda:" + args.gpu if torch.cuda.is_available() else "cpu") # one GPU
policy_model = policy_model.to(device)
# print("candidate prompts: ")
# print("===========")
cand_examples = []
for pid in cand_pids:
example = create_example_from_pid(pid, problems, args, test=True) # CHECK !!!
# print(example)
# print("===========")
cand_examples.append(example)
# ======================================================= INFERENCE ===============================================
if args.ckpt:
ckpt_path = os.path.join(args.ckpt_root, args.ckpt)
if os.path.exists(ckpt_path):
policy_model.linear.load_state_dict(torch.load(ckpt_path))
else:
print(f"The ckpt path for [{ckpt_path}] does not exist!") # CHECK
exit()
else:
print(f"!!! Load the pre-traind model instead!") # CHECK
# exit()
policy_model.eval()
with torch.no_grad():
# Calculate the embeddings for candidate examples only one time!
cand_embedding = policy_model(cand_examples)
# print("cand_embedding:", cand_embedding.shape) # [cand_num x emb_size]
for i, pid in enumerate(pids):
count = i + 1 # number of current results
problem = problems[pid]
answer = problems[pid]['answer']
options = problems[pid]['choices']
unit = problems[pid]['unit']
example = create_example_from_pid(pid, problems, args, test=True)
ctxt_embedding = policy_model([example])
# print("ctxt_embedding:", ctxt_embedding.shape) # [1 x emb_size]
scores = F.softmax(torch.mm(ctxt_embedding, cand_embedding.t()), dim=1)[0] # [cand_num]
# print(scores.shape)
scores = scores.cpu().detach().numpy().tolist()
cand_ids = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:args.shot_number]
shot_pids = [cand_pids[cid] for cid in cand_ids[::-1]]
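            # The policy network scores every candidate prompt against the current test question;
            # the top `shot_number` candidates are kept and then reversed so that the
            # highest-scoring example sits closest to the test question in the built prompt.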
prompt = build_prompt(problems, shot_pids, pid, args) # generate the prompt input
if pid in results:
output = results[pid]["output"]
else:
output = get_gpt3_output(prompt, args) # generate the output by GPT-3
# the core prediction in the output
prediction = extract_prediction(output, options, args.option_inds)
# normalize the number in the text
answer_norm = normalize_answer(answer, unit)
prediction_norm = normalize_answer(prediction, unit)
# save the results
results[pid] = {}
results[pid]["shot_pids"] = shot_pids
results[pid]["prompt"] = prompt
results[pid]["answer"] = answer
results[pid]["answer_norm"] = answer_norm
results[pid]["output"] = output
results[pid]["prediction"] = prediction
results[pid]["prediction_norm"] = prediction_norm
# correct or not
if answer_norm.lower() == prediction_norm.lower():
correct += 1
results[pid]["true_false"] = True
else:
results[pid]["true_false"] = False
acc = correct / (i + 1) * 100
if args.debug or i < 10:
print("\n##################################")
print(prompt, "\n")
print("[A] labeled answer (normalized):\t", answer_norm)
print("[P] predicted answer (normalized):\t", prediction_norm)
print("[Acc]:\t", results[pid]["true_false"])
print("")
print("[A] labeled answer:\t", answer)
print("[P] predicted answer:\t", prediction)
print("[P] generated output:\t", output)
if count % args.save_every == 0 or count == total:
if count >= check_count:
# have new outputs
print(f"{count}/{total}, correct: {correct}, acc: {round(acc, 2)}%, saved to {result_file}")
save_results(result_file, acc, correct, count, cand_pids, args, results)
else:
# no new outputs, just print the accuracy
print(f"{count}/{total}, correct: {correct}, acc: {round(acc, 2)}%")
| [] |
2024-01-10 | sycbarry/consultingutils | consultingutils~llmtemplates~llmtemplates.py | """
Docs:
1. this library contains llm templates.
2. these templates are used to interface with language models.
3. each template has its own use case depending on what the user wants to do.
"""
from langchain import PromptTemplate
from langchain import OpenAI
from langchain.chains import LLMChain
import sys
"""
our base template type
"""
class LLMTemplate(object):
    def __init__(self, prompt=None, *args, **kwargs):
        super().__init__()
        self.prompt = prompt
        self.args = list(args)  # template input-variable names; subclasses typically override this
        self.kwargs = kwargs
@property
def __gettemplateprompt__(self):
return self.template.format()
"""
    builds the template's prompt template.
"""
def __buildtemplate__(self):
if self.prompt == None:
return None
self.template = PromptTemplate(
input_variables=[*self.args],
template=self.prompt
)
return self
"""
    builds the template's LLM chain.
"""
def __buildchain__(self):
if self.template == None:
raise NotImplementedError("no prompt template generated")
llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0.9)
self.chain = LLMChain(llm=llm, prompt=self.template)
return self
"""
    Passes in a variety of inputs to the template.
    Inputs must maintain the integrity of the prompt parameters,
    e.g. __invoke__("select * from user", "db2") for a template with two input variables.
"""
def __invoke__(self, *args):
if args == None or len(args) <= 0:
raise BaseException("require something to input.")
return self.chain.run([*args])
"""
cleaning templates.
these templates are good at prompting an llm
to clean things up - organize..
"""
class CleaningTemplate(LLMTemplate):
def __init__(self):
super().__init__()
"""
Oracle to DB2 SQL Conversion Template
-- converting an Oracle SQL query to a DB2 query.
"""
class OracleToDB2SQL(LLMTemplate):
def __init__(self, sql=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.args = ["raw_sql"]
self.prompt = """
Here is an Oracle SQL query that I want you to convert to a DB2 Query: {raw_sql}.
Follow these rules:
1. For every table mentioned in the SQL statement, check if a qualifier is present. If not, add the qualifier before the table name. The qualifier will be separated from the table name by a dot (.).
2. Check if an alias is already assigned to each table. If not, assign an alias that is the same as the table name. The alias will be assigned using the 'AS' keyword (optional in many SQL dialects) after the table name.
Example transformation using the rule:
Original SQL:
FROM workorder
Transformed SQL:
FROM maximo.workorder AS workorder
Where:
• 'maximo' is the qualifier to be added.
• 'workorder' is the table name.
• 'AS' is the keyword used for assigning alias (optional in many SQL dialects).
• The second 'workorder' is the alias, which is the same as the table name in this case.
Note: The rule applies to all table names in the SQL statement, including those in JOIN clauses.
Only return the SQL query as a single response.
"""
def invoke(self, input):
super().__buildtemplate__()
super().__buildchain__()
return super().__invoke__(input)
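"""
Minimal usage sketch for the template classes (illustrative only: it assumes OPENAI_API_KEY is
exported for langchain's OpenAI client, and the SQL string is made up):

    converter = OracleToDB2SQL()
    db2_sql = converter.invoke("SELECT wonum FROM workorder WHERE status = 'APPR'")
"""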
"""
String To Oracle SQL Conversion Template
-- cleaning up a string to an oracle query.
"""
class ParseStringToOracleSQL(LLMTemplate):
def __init__(self, sql=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.args = ["raw_sql"]
self.prompt = """
Here is a string. It represents an Oracle SQL Query: {raw_sql}.
I want you to:
1. extract and concatenate the SQL from the string of the sqlText variable up to the point that the sqlText variable ends with a semicolon.
2. replace params["where"] with 1=1
3. Ensure that the syntax of this query is appropriate for an Oracle databases.
4. Remove the semicolon from the end of the statement.
Return only the new SQL statement. Do not include anything other than SQL in your response.
"""
def invoke(self, input):
super().__buildtemplate__()
super().__buildchain__()
return super().__invoke__(input)
"""
White label templates.
the user can pass in whatever prompt he/she wants with args.
"""
class WhiteLabelTemplate(LLMTemplate):
    def __init__(self, prompt=None, *args, **kwargs):
        super().__init__()
        self.prompt = prompt
        # Accept the input-variable names either as a single list or as positional strings.
        if len(args) == 1 and isinstance(args[0], list):
            args = args[0]
        if not all(isinstance(a, str) for a in args):
            raise Exception("Invalid args: a list of input variable names such as ['1', '2'] is needed")
        self.args = list(args)
        self.kwargs = kwargs
"""
Maximo Automation Script template - script generation
"""
class MaximoAutomationScriptTemplateScript(LLMTemplate):
def __init__(self, input=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.args = ["requirement"]
self.input = input
self.prompt = """
Here is an IBM Maximo EAM requirement. The requirement is as follows: {requirement}.
The user intends to write a Maximo Automation Script in the Jython programming language.
I want you to:
1. Convert the requirement in Jython.
2. The Maximo system does not include access to the os or system libraries. Try and use the native mbo and jython libraries in your output.
Return only the code. Do not include anything other than the code in your response.
"""
def invoke(self, input):
super().__buildtemplate__()
super().__buildchain__()
return super().__invoke__(input)
"""
Maximo Automation Script template - script for inboud integration
"""
class MaximoAutomationScriptTemplateScript_IntegrationInbound(LLMTemplate):
def __init__(self, input=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.args = [ "requirement" ]
self.input = input
self.prompt = """
Here is an IBM Maximo EAM requirement. The requirement is as follows: {requirement}.
The user intends to write a Maximo Automation Script in the Jython programming language.
This script is an inbound integration. Meaning, that the script will activate at some point in the message queue when the user is sending data from an external system into Maximo.
I want you to:
1. Convert the requirement in Jython.
2. The Maximo system does not include access to the os or system libraries. Try and use the native mbo and jython libraries in your output.
3. The script has access to certain implicit variables, meaning that when the script activates, it can access the inbound record by using the irData and erData respectively.
Return only the code. Do not include anything other than the code in your response.
"""
def invoke(self, input):
super().__buildtemplate__()
super().__buildchain__()
return super().__invoke__(input)
"""
Maximo Automation Script template - description generation
** args = takes in the original requirement as input.
"""
class MaximoAutomationScriptTemplateDescription(LLMTemplate):
def __init__(self, input=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.args = ["requirement"]
self.input = input
self.prompt = """
Here is an IBM Maximo Automation requirement. The requirement is as follows: {requirement}.
I just asked you to write the script for it, however.
I want you to:
1. Convert this requirement into a small, 50 word long description.
2. The description must simplify the requirement that I just passed to you.
Return only the description. Nothing else.
"""
"""
input must be the original requirement
"""
def invoke(self, input):
super().__buildtemplate__()
super().__buildchain__()
return super().__invoke__(input)
"""
Maximo Automation Script template - automation script name generation
** args = takes in the original requirement as input.
"""
class MaximoAutomationScriptTemplateName(LLMTemplate):
def __init__(self, input=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.args = ["requirement"]
self.input = input
self.prompt = """
Here is an IBM Maximo Automation requirement. The requirement is as follows: {requirement}.
I just asked you to write the script for it, however.
I want you to:
1. Give this requirement a simple name.
2. The name must be in all upper case characters and consist of underscore _ values in between each word.
2. The name cannot be longer than 20 characters long.
Return only the name. Nothing else.
"""
"""
input must be the original requirement
"""
def invoke(self, input):
super().__buildtemplate__()
super().__buildchain__()
return super().__invoke__(input)
"""
1. For every table mentioned in the SQL statement, check if a qualifier is present. If not, add the qualifier before the table name. The qualifier will be separated from the table name by a dot (.).
2. Check if an alias is already assigned to each table. If not, assign an alias that is the same as the table name. The alias will be assigned using the 'AS' keyword (optional in many SQL dialects) after the table name.
Example transformation using the rule:
Original SQL:
FROM workorder
Transformed SQL:
FROM maximo.workorder AS workorder
Where:
• 'maximo' is the qualifier to be added.
• 'workorder' is the table name.
• 'AS' is the keyword used for assigning alias (optional in many SQL dialects).
• The second 'workorder' is the alias, which is the same as the table name in this case.
Note: The rule applies to all table names in the SQL statement, including those in JOIN clauses.
""" | [] |
2024-01-10 | Silver7Tech/python-chat-gpt | src~revChatGPT~Proxied.py | import json
import logging
import uuid
from os import environ
from OpenAIAuth.OpenAIAuth import OpenAIAuth
import tls_client
# Disable all logging
logging.basicConfig(level=logging.ERROR)
BASE_URL = environ.get("CHATGPT_BASE_URL") or "http://127.0.0.1:5000/"
class Chatbot:
def __init__(
self,
config,
conversation_id=None,
parent_id=None,
) -> None:
self.config = config
self.session = tls_client.Session(
client_identifier="chrome_108",
)
if "proxy" in config:
if type(config["proxy"]) != str:
raise Exception("Proxy must be a string!")
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
self.session.proxies.update(proxies)
if "verbose" in config:
if type(config["verbose"]) != bool:
raise Exception("Verbose must be a boolean!")
self.verbose = config["verbose"]
else:
self.verbose = False
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
if "email" not in config:
raise Exception("Email not found in config!")
if "password" not in config:
raise Exception("Password not found in config!")
self.__login()
def __refresh_headers(self, access_token):
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
def __login(self):
auth = OpenAIAuth(email_address=self.config.get("email"), password=self.config.get("password"), proxy=self.config.get("proxy"))
auth.begin()
access_token = auth.get_access_token()
self.__refresh_headers(access_token)
def ask(
self,
prompt,
conversation_id=None,
parent_id=None,
gen_title=False,
):
"""
Ask a question to the chatbot
:param prompt: String
:param conversation_id: UUID
:param parent_id: UUID
:param gen_title: Boolean
"""
self.__map_conversations()
if conversation_id is None:
conversation_id = self.conversation_id
if parent_id is None:
parent_id = (
self.parent_id
if conversation_id == self.conversation_id
else self.conversation_mapping[conversation_id]
)
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id or str(uuid.uuid4()),
"model": "text-davinci-002-render",
}
new_conv = data["conversation_id"] is None
self.conversation_id_prev_queue.append(
data["conversation_id"],
) # for rollback
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=BASE_URL + "backend-api/conversation",
data=json.dumps(data),
timeout_seconds=180,
)
if response.status_code != 200:
print(response.text)
raise Exception(
f"Wrong response code: {response.status_code}! Refreshing session...",
)
else:
try:
response = response.text.splitlines()[-4]
response = response[6:]
except Exception as exc:
print("Incorrect response from OpenAI API")
raise Exception("Incorrect response from OpenAI API") from exc
# Check if it is JSON
if response.startswith("{"):
response = json.loads(response)
self.parent_id = response["message"]["id"]
self.conversation_id = response["conversation_id"]
message = response["message"]["content"]["parts"][0]
res = {
"message": message,
"conversation_id": self.conversation_id,
"parent_id": self.parent_id,
}
if gen_title and new_conv:
try:
title = self.__gen_title(
self.conversation_id,
self.parent_id,
)["title"]
except Exception as exc:
split = prompt.split(" ")
title = " ".join(split[:3]) + ("..." if len(split) > 3 else "")
res["title"] = title
return res
else:
return None
def __check_response(self, response):
if response.status_code != 200:
print(response.text)
raise Exception("Response code error: ", response.status_code)
def get_conversations(self, offset=0, limit=20):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"backend-api/conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
def get_msg_history(self, id):
"""
Get message history
:param id: UUID of conversation
"""
url = BASE_URL + f"backend-api/conversation/{id}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data
def __gen_title(self, id, message_id):
"""
Generate title for conversation
"""
url = BASE_URL + f"backend-api/conversation/gen_title/{id}"
response = self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
data = json.loads(response.text)
return data
def change_title(self, id, title):
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"backend-api/conversation/{id}"
response = self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
def delete_conversation(self, id):
"""
Delete conversation
:param id: UUID of conversation
"""
url = BASE_URL + f"backend-api/conversation/{id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "backend-api/conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def __map_conversations(self):
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
def rollback_conversation(self, num=1) -> None:
"""
Rollback the conversation.
:param num: The number of messages to rollback
:return: None
"""
for i in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
def get_input(prompt):
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
user_input = "\n".join(lines)
# Return the input
return user_input
from os import getenv
from os.path import exists
def configure():
config_files = ["config.json"]
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
user_home = getenv("HOME")
if user_home:
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
config_file = next((f for f in config_files if exists(f)), None)
if config_file:
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
def chatGPT_main(config):
print("Logging in...")
chatbot = Chatbot(config)
while True:
prompt = get_input("\nYou:\n")
if prompt.startswith("!"):
if prompt == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!refresh - Refresh the session authentication
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
""",
)
continue
elif prompt == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
continue
elif prompt == "!refresh":
chatbot.__refresh_session()
print("Session successfully refreshed.\n")
continue
elif prompt == "!config":
print(json.dumps(chatbot.config, indent=4))
continue
elif prompt.startswith("!rollback"):
# Default to 1 rollback if no number is specified
try:
rollback = int(prompt.split(" ")[1])
except IndexError:
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
continue
elif prompt.startswith("!setconversation"):
try:
chatbot.config["conversation"] = prompt.split(" ")[1]
print("Conversation has been changed")
except IndexError:
print("Please include conversation UUID in command")
continue
elif prompt == "!exit":
break
try:
print("Chatbot: ")
message = chatbot.ask(
prompt,
conversation_id=chatbot.config.get("conversation"),
parent_id=chatbot.config.get("parent_id"),
)
print(message["message"])
except Exception as exc:
print("Something went wrong!")
print(exc)
continue
def main():
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
chatGPT_main(configure())
if __name__ == "__main__":
main()
| [
"text",
"\nYou:\n",
"content_type"
] |
2024-01-10 | Silver7Tech/python-chat-gpt | src~revChatGPT~Unofficial.py | import json
import logging
import re
import uuid
from time import sleep
import tls_client
import undetected_chromedriver as uc
from requests.exceptions import HTTPError
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# Disable all logging
logging.basicConfig(level=logging.ERROR)
BASE_URL = "https://chat.openai.com/"
class Chrome(uc.Chrome):
def __del__(self):
self.quit()
class Chatbot:
def __init__(
self,
config,
conversation_id=None,
parent_id=None,
no_refresh=False,
) -> None:
self.config = config
self.session = tls_client.Session(
client_identifier="chrome_108",
)
if "proxy" in config:
if type(config["proxy"]) != str:
raise Exception("Proxy must be a string!")
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
self.session.proxies.update(proxies)
if "verbose" in config:
if type(config["verbose"]) != bool:
raise Exception("Verbose must be a boolean!")
self.verbose = config["verbose"]
else:
self.verbose = False
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
self.isMicrosoftLogin = False
# stdout colors
self.GREEN = "\033[92m"
self.WARNING = "\033[93m"
self.ENDCOLOR = "\033[0m"
if "email" in config and "password" in config:
if type(config["email"]) != str:
raise Exception("Email must be a string!")
if type(config["password"]) != str:
raise Exception("Password must be a string!")
self.email = config["email"]
self.password = config["password"]
if "isMicrosoftLogin" in config and config["isMicrosoftLogin"] == True:
self.isMicrosoftLogin = True
self.__microsoft_login()
else:
self.__email_login()
elif "session_token" in config:
if no_refresh:
self.__get_cf_cookies()
return
if type(config["session_token"]) != str:
raise Exception("Session token must be a string!")
self.session_token = config["session_token"]
self.session.cookies.set(
"__Secure-next-auth.session-token",
config["session_token"],
)
self.__get_cf_cookies()
else:
raise Exception("Invalid config!")
self.__retry_refresh()
def __retry_refresh(self):
retries = 5
refresh = True
while refresh:
try:
self.__refresh_session()
refresh = False
except Exception as exc:
if retries == 0:
raise exc
retries -= 1
def ask(
self,
prompt,
conversation_id=None,
parent_id=None,
gen_title=False,
session_token=None,
):
"""
Ask a question to the chatbot
:param prompt: String
:param conversation_id: UUID
:param parent_id: UUID
:param gen_title: Boolean
:param session_token: String
"""
if session_token:
self.session.cookies.set(
"__Secure-next-auth.session-token",
session_token,
)
self.session_token = session_token
self.config["session_token"] = session_token
self.__retry_refresh()
self.__map_conversations()
if conversation_id == None:
conversation_id = self.conversation_id
if parent_id == None:
parent_id = (
self.parent_id
if conversation_id == self.conversation_id
else self.conversation_mapping[conversation_id]
)
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id or str(uuid.uuid4()),
"model": "text-davinci-002-render",
}
new_conv = data["conversation_id"] is None
self.conversation_id_prev_queue.append(
data["conversation_id"],
) # for rollback
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=BASE_URL + "backend-api/conversation",
data=json.dumps(data),
timeout_seconds=180,
)
if response.status_code != 200:
print(response.text)
self.__refresh_session()
raise HTTPError(
f"Wrong response code: {response.status_code}! Refreshing session...",
)
else:
try:
response = response.text.splitlines()[-4]
response = response[6:]
except Exception as exc:
print("Incorrect response from OpenAI API")
raise Exception("Incorrect response from OpenAI API") from exc
# Check if it is JSON
if response.startswith("{"):
response = json.loads(response)
self.parent_id = response["message"]["id"]
self.conversation_id = response["conversation_id"]
message = response["message"]["content"]["parts"][0]
res = {
"message": message,
"conversation_id": self.conversation_id,
"parent_id": self.parent_id,
}
if gen_title and new_conv:
try:
title = self.__gen_title(
self.conversation_id,
self.parent_id,
)["title"]
except Exception as exc:
split = prompt.split(" ")
title = " ".join(split[:3]) + ("..." if len(split) > 3 else "")
res["title"] = title
return res
else:
return None
def __check_response(self, response):
if response.status_code != 200:
print(response.text)
raise Exception("Response code error: ", response.status_code)
def get_conversations(self, offset=0, limit=20):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"backend-api/conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
def get_msg_history(self, id):
"""
Get message history
:param id: UUID of conversation
"""
url = BASE_URL + f"backend-api/conversation/{id}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data
def __gen_title(self, id, message_id):
"""
Generate title for conversation
"""
url = BASE_URL + f"backend-api/conversation/gen_title/{id}"
response = self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
data = json.loads(response.text)
return data
def change_title(self, id, title):
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"backend-api/conversation/{id}"
response = self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
def delete_conversation(self, id):
"""
Delete conversation
:param id: UUID of conversation
"""
url = BASE_URL + f"backend-api/conversation/{id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "backend-api/conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def __map_conversations(self):
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def __refresh_session(self, session_token=None):
if session_token:
self.session.cookies.set(
"__Secure-next-auth.session-token",
session_token,
)
self.session_token = session_token
self.config["session_token"] = session_token
url = BASE_URL + "api/auth/session"
response = self.session.get(url, timeout_seconds=180)
if response.status_code == 403:
self.__get_cf_cookies()
raise Exception("Clearance refreshing...")
try:
if "error" in response.json():
raise Exception(
f"Failed to refresh session! Error: {response.json()['error']}",
)
elif (
response.status_code != 200
or response.json() == {}
or "accessToken" not in response.json()
):
raise Exception(
f"Response code: {response.status_code} \n Response: {response.text}",
)
else:
self.session.headers.update(
{
"Authorization": "Bearer " + response.json()["accessToken"],
},
)
self.session_token = self.session.cookies._find(
"__Secure-next-auth.session-token",
)
except Exception:
print("Failed to refresh session!")
if self.isMicrosoftLogin:
print("Attempting to re-authenticate...")
self.__microsoft_login()
else:
self.__email_login()
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
def __microsoft_login(self) -> None:
"""
Login to OpenAI via Microsoft Login Authentication.
:return: None
"""
driver = None
try:
# Open the browser
self.cf_cookie_found = False
self.session_cookie_found = False
self.agent_found = False
self.cf_clearance = None
self.user_agent = None
options = self.__get_ChromeOptions()
print("Spawning browser...")
driver = uc.Chrome(
enable_cdp_events=True,
options=options,
driver_executable_path=self.config.get("driver_exec_path"),
browser_executable_path=self.config.get("browser_exec_path"),
)
print("Browser spawned.")
driver.add_cdp_listener(
"Network.responseReceivedExtraInfo",
lambda msg: self.__detect_cookies(msg),
)
driver.add_cdp_listener(
"Network.requestWillBeSentExtraInfo",
lambda msg: self.__detect_user_agent(msg),
)
driver.get(BASE_URL)
while not self.agent_found or not self.cf_cookie_found:
sleep(5)
self.__refresh_headers(
cf_clearance=self.cf_clearance,
user_agent=self.user_agent,
)
# Wait for the login button to appear
WebDriverWait(driver, 120).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[contains(text(), 'Log in')]"),
),
)
# Click the login button
driver.find_element(
by=By.XPATH,
value="//button[contains(text(), 'Log in')]",
).click()
# Wait for the Login with Microsoft button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[@data-provider='windowslive']"),
),
)
# Click the Login with Microsoft button
driver.find_element(
by=By.XPATH,
value="//button[@data-provider='windowslive']",
).click()
# Wait for the email input field to appear
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.XPATH, "//input[@type='email']"),
),
)
# Enter the email
driver.find_element(
by=By.XPATH,
value="//input[@type='email']",
).send_keys(self.config["email"])
# Wait for the Next button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@type='submit']"),
),
)
# Click the Next button
driver.find_element(
by=By.XPATH,
value="//input[@type='submit']",
).click()
# Wait for the password input field to appear
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.XPATH, "//input[@type='password']"),
),
)
# Enter the password
driver.find_element(
by=By.XPATH,
value="//input[@type='password']",
).send_keys(self.config["password"])
# Wait for the Sign in button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@type='submit']"),
),
)
# Click the Sign in button
driver.find_element(
by=By.XPATH,
value="//input[@type='submit']",
).click()
# Wait for the Allow button to appear
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@type='submit']"),
),
)
# click Yes button
driver.find_element(
by=By.XPATH,
value="//input[@type='submit']",
).click()
# wait for input box to appear (to make sure we're signed in)
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.XPATH, "//textarea"),
),
)
while not self.session_cookie_found:
sleep(5)
print(self.GREEN + "Login successful." + self.ENDCOLOR)
finally:
# Close the browser
if driver is not None:
driver.quit()
del driver
def __email_login(self) -> None:
"""
Login to OpenAI via Email/Password Authentication and 2Captcha.
:return: None
"""
# Open the browser
driver = None
try:
self.cf_cookie_found = False
self.session_cookie_found = False
self.agent_found = False
self.cf_clearance = None
self.user_agent = None
options = self.__get_ChromeOptions()
print("Spawning browser...")
driver = uc.Chrome(
enable_cdp_events=True,
options=options,
driver_executable_path=self.config.get("driver_exec_path"),
browser_executable_path=self.config.get("browser_exec_path"),
)
print("Browser spawned.")
driver.add_cdp_listener(
"Network.responseReceivedExtraInfo",
lambda msg: self.__detect_cookies(msg),
)
driver.add_cdp_listener(
"Network.requestWillBeSentExtraInfo",
lambda msg: self.__detect_user_agent(msg),
)
driver.get(BASE_URL)
while not self.agent_found or not self.cf_cookie_found:
sleep(5)
self.__refresh_headers(
cf_clearance=self.cf_clearance,
user_agent=self.user_agent,
)
# Wait for the login button to appear
WebDriverWait(driver, 120).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[contains(text(), 'Log in')]"),
),
)
# Click the login button
driver.find_element(
by=By.XPATH,
value="//button[contains(text(), 'Log in')]",
).click()
# Wait for the email input field to appear
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.ID, "username"),
),
)
# Enter the email
driver.find_element(by=By.ID, value="username").send_keys(
self.config["email"],
)
# Wait for the Continue button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[@type='submit']"),
),
)
# Click the Continue button
driver.find_element(
by=By.XPATH,
value="//button[@type='submit']",
).click()
# Wait for the password input field to appear
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.ID, "password"),
),
)
# Enter the password
driver.find_element(by=By.ID, value="password").send_keys(
self.config["password"],
)
# Wait for the Sign in button to be clickable
WebDriverWait(driver, 60).until(
EC.element_to_be_clickable(
(By.XPATH, "//button[@type='submit']"),
),
)
# Click the Sign in button
driver.find_element(
by=By.XPATH,
value="//button[@type='submit']",
).click()
# wait for input box to appear (to make sure we're signed in)
WebDriverWait(driver, 60).until(
EC.visibility_of_element_located(
(By.XPATH, "//textarea"),
),
)
while not self.session_cookie_found:
sleep(5)
print(self.GREEN + "Login successful." + self.ENDCOLOR)
finally:
if driver is not None:
# Close the browser
driver.quit()
del driver
def __get_ChromeOptions(self):
options = uc.ChromeOptions()
options.add_argument("--start_maximized")
options.add_argument("--disable-extensions")
options.add_argument("--disable-application-cache")
options.add_argument("--disable-gpu")
options.add_argument("--no-sandbox")
options.add_argument("--disable-setuid-sandbox")
options.add_argument("--disable-dev-shm-usage")
if self.config.get("proxy", "") != "":
options.add_argument("--proxy-server=" + self.config["proxy"])
return options
def __get_cf_cookies(self) -> None:
"""
Get cloudflare cookies.
:return: None
"""
driver = None
try:
self.cf_cookie_found = False
self.agent_found = False
self.cf_clearance = None
self.user_agent = None
options = self.__get_ChromeOptions()
print("Spawning browser...")
driver = uc.Chrome(
enable_cdp_events=True,
options=options,
driver_executable_path=self.config.get("driver_exec_path"),
browser_executable_path=self.config.get("browser_exec_path"),
)
print("Browser spawned.")
driver.add_cdp_listener(
"Network.responseReceivedExtraInfo",
lambda msg: self.__detect_cookies(msg),
)
driver.add_cdp_listener(
"Network.requestWillBeSentExtraInfo",
lambda msg: self.__detect_user_agent(msg),
)
driver.get("https://chat.openai.com/chat")
while not self.agent_found or not self.cf_cookie_found:
sleep(5)
finally:
# Close the browser
if driver is not None:
driver.quit()
del driver
self.__refresh_headers(
cf_clearance=self.cf_clearance,
user_agent=self.user_agent,
)
def __detect_cookies(self, message):
if "params" in message:
if "headers" in message["params"]:
if "set-cookie" in message["params"]["headers"]:
# Use regex to get the cookie for cf_clearance=*;
cf_clearance_cookie = re.search(
"cf_clearance=.*?;",
message["params"]["headers"]["set-cookie"],
)
session_cookie = re.search(
"__Secure-next-auth.session-token=.*?;",
message["params"]["headers"]["set-cookie"],
)
if cf_clearance_cookie and not self.cf_cookie_found:
print("Found Cloudflare Cookie!")
# remove the semicolon and 'cf_clearance=' from the string
raw_cf_cookie = cf_clearance_cookie.group(0)
self.cf_clearance = raw_cf_cookie.split("=")[1][:-1]
if self.verbose:
print(
self.GREEN
+ "Cloudflare Cookie: "
+ self.ENDCOLOR
+ self.cf_clearance,
)
self.cf_cookie_found = True
if session_cookie and not self.session_cookie_found:
print("Found Session Token!")
# remove the semicolon and '__Secure-next-auth.session-token=' from the string
raw_session_cookie = session_cookie.group(0)
self.session_token = raw_session_cookie.split("=")[1][:-1]
self.session.cookies.set(
"__Secure-next-auth.session-token",
self.session_token,
)
if self.verbose:
print(
self.GREEN
+ "Session Token: "
+ self.ENDCOLOR
+ self.session_token,
)
self.session_cookie_found = True
def __detect_user_agent(self, message):
if "params" in message:
if "headers" in message["params"]:
if "user-agent" in message["params"]["headers"]:
# Use regex to get the cookie for cf_clearance=*;
user_agent = message["params"]["headers"]["user-agent"]
self.user_agent = user_agent
self.agent_found = True
self.__refresh_headers(
cf_clearance=self.cf_clearance,
user_agent=self.user_agent,
)
def __refresh_headers(self, cf_clearance, user_agent):
del self.session.cookies["cf_clearance"]
self.session.headers.clear()
self.session.cookies.set("cf_clearance", cf_clearance)
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": "Bearer ",
"Content-Type": "application/json",
"User-Agent": user_agent,
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
def rollback_conversation(self, num=1) -> None:
"""
Rollback the conversation.
:param num: The number of messages to rollback
:return: None
"""
for i in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
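# Illustrative sketch (not part of the original file): one plausible way to drive the Chatbot
# class above directly, bypassing the CLI defined below. The helper name and the sample prompt
# are assumptions; `config` must contain the keys the class actually reads ("email"/"password"
# or "session_token"). Note that constructing Chatbot spawns a browser for authentication.
def _example_chatbot_usage(config):
    chatbot = Chatbot(config)
    # ask() returns a dict with "message", "conversation_id" and "parent_id" (or None on a bad response).
    result = chatbot.ask("Hello, what can you do?", gen_title=True)
    if result is not None:
        print(result["message"])
    # Undo the last exchange, then start a fresh conversation.
    chatbot.rollback_conversation(1)
    chatbot.reset_chat()
    return result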
def get_input(prompt):
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
user_input = "\n".join(lines)
# Return the input
return user_input
from os import getenv
from os.path import exists
def configure():
config_files = ["config.json"]
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
user_home = getenv("HOME")
if user_home:
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
config_file = next((f for f in config_files if exists(f)), None)
if config_file:
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
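# Illustrative sketch (not part of the original file): the shape of the config.json that
# configure() above loads. Only keys actually read by the Chatbot class are shown; every
# concrete value here is a placeholder.
_EXAMPLE_CONFIG = {
    "email": "user@example.com",      # or supply "session_token" instead of email/password
    "password": "change-me",
    "isMicrosoftLogin": False,        # True routes login through the Microsoft flow
    "verbose": False,                 # print cookies/tokens as they are captured
}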
def chatGPT_main(config):
print("Logging in...")
chatbot = Chatbot(config)
while True:
prompt = get_input("\nYou:\n")
if prompt.startswith("!"):
if prompt == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!refresh - Refresh the session authentication
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
""",
)
continue
elif prompt == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
continue
elif prompt == "!refresh":
chatbot.__refresh_session()
print("Session successfully refreshed.\n")
continue
elif prompt == "!config":
print(json.dumps(chatbot.config, indent=4))
continue
elif prompt.startswith("!rollback"):
# Default to 1 rollback if no number is specified
try:
rollback = int(prompt.split(" ")[1])
except IndexError:
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
continue
elif prompt.startswith("!setconversation"):
try:
chatbot.config["conversation"] = prompt.split(" ")[1]
print("Conversation has been changed")
except IndexError:
print("Please include conversation UUID in command")
continue
elif prompt == "!exit":
break
try:
print("Chatbot: ")
message = chatbot.ask(
prompt,
conversation_id=chatbot.config.get("conversation"),
parent_id=chatbot.config.get("parent_id"),
)
print(message["message"])
except Exception as exc:
print("Something went wrong!")
print(exc)
continue
def main():
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
chatGPT_main(configure())
if __name__ == "__main__":
main()
| [
"text",
"\nYou:\n",
"content_type"
] |
2024-01-10 | jlin816/dialop | dialop~planning_query_executor.py | from copy import deepcopy
import numpy as np
import openai
import pyparsing as pp
from pyparsing import OneOrMore, Suppress, delimited_list, one_of
from pyparsing.exceptions import ParseException
from dialop.templates import QueryExecutorTemplate
def create_search_api():
LP = Suppress("(")
RP = Suppress(")")
LB = Suppress("[")
RB = Suppress("]")
Q = Suppress('"') | Suppress("'")
valid_name_chars = pp.alphanums + "'"
string = (OneOrMore(pp.Word(valid_name_chars))).set_parse_action(" ".join)
filt = pp.Group(pp.Word(pp.alphas) + one_of("== >= <=") +
pp.Word(pp.alphanums))
dist_to = pp.Group("distance_to" + LP + string + RP)
key = one_of("fields filters text_query sort_by") + Suppress("=")
value = (string |
LB + pp.Group(delimited_list(dist_to | filt | pp.Word(pp.alphanums))) + RB |
Q + string + Q
)
search = "Search" + LP + delimited_list(pp.dict_of(key, value)) + RP
return search
search_api = create_search_api()
class SearchError(Exception):
pass
class StaticQueryExecutor:
def __init__(self, sites):
self.sites = sites
def _parse_query(self, query_str):
try:
query = search_api.parse_string(query_str, parse_all=True).as_dict()
except ParseException:
raise SearchError(f"Invalid search syntax: {query_str}.")
print("..searching with query: ", query)
return query
def __call__(self, query_str):
"""Search the database of sites with a query string.
Parses query string into a `query` Dict, with keys:
- fields (required): fields to return from each result
- filters: list of tuples (field, comparator, value) to filter
sites by
- text_query: freeform text query (searches over event features)
- sort_by: list of fields or function call to `distance_to` to sort
results by (in asc order). Sorting by a field also returns it in
the results.
Returns:
result of the search, as a string
"""
query = self._parse_query(query_str)
results = deepcopy(self.sites)
return_fields = [self._remap(k) for k in query["fields"]]
for filt in query.get("filters", []):
field, comparator, value = filt
field = self._remap(field)
if field not in ["name", "etype", "est_price"]:
                raise SearchError(f"You cannot filter by {field}. "
                                  "Try searching with a text query instead.")
def filt_fn(x):
if comparator == "==":
return str(x[field]) == str(value)
if comparator == ">=":
return float(x[field]) >= float(value)
if comparator == "<=":
return float(x[field]) <= float(value)
results = [r for r in results if filt_fn(r)]
results = [r for r in results if query.get("text_query", "") in str(r)]
for sort in query.get("sort_by", []):
if len(sort) == 2:
func, arg = sort
assert func == "distance_to", \
f"Sorting by unknown function?: {sort}"
target_evt = [r for r in self.sites if arg in str(r)]
assert len(target_evt) == 1, \
f"More than one event found for search?: {sort}"
target_evt = target_evt[0]
for r in results:
r["distance"] = self.distance(r, target_evt)
sort = "distance"
results = sorted(results, key=lambda r: r[self._remap(sort)])
return_fields.append(sort)
results = [{k: v for k, v in r.items() if k in return_fields} \
for r in results]
return self._format_results(results)
def distance(self, s1, s2) -> float:
dist = np.linalg.norm(np.array(s1["loc"]) - np.array(s2["loc"]))
dist *= 69
dist = round(dist * 10) / 10
return dist
def _format_results(self, results) -> str:
if len(results) == 0:
return "Search Results: No results\n"
result_str = f"Search Results ({len(results)}):\n"
# import pdb; pdb.set_trace()
keys = ["name"] + [self._unremap(k) for k in results[0].keys() if k != "name"]
result_str += "|".join(keys)
keys = [self._remap(k) for k in keys]
for r in results:
result_str += f"\n{'|'.join(str(r[k]) for k in keys)}"
return result_str
def _remap(self, key):
api_to_internal_name = {
"category": "etype",
"price": "est_price",
}
if key in api_to_internal_name:
return api_to_internal_name[key]
return key
def _unremap(self, key):
internal_name_to_api = {
"etype": "category",
"est_price": "price",
}
if key in internal_name_to_api:
return internal_name_to_api[key]
return key
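# Illustrative sketch (not part of the original file): the query syntax documented in
# StaticQueryExecutor.__call__ in practice. The `sites` records and the helper name are
# assumptions; the query strings mirror the examples used by GPT3QueryExecutor below.
def _example_static_search(sites):
    search = StaticQueryExecutor(sites)
    # Filter by field values and sort by distance to a named site, returning name and price.
    print(search("Search(fields=[name, price], filters=[category == restaurant, price <= 10], "
                 "sort_by=[distance_to(The Mall)])"))
    # Free-text query over event features.
    print(search("Search(fields=[name], text_query=live music)"))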
class GPT3QueryExecutor:
def __init__(self, sites):
self.prompt = self._construct_prompt(sites)
self.model = "text-davinci-003"
def _construct_prompt(self, sites):
sites = deepcopy(sites)
test_searches = [
"Search(fields=[name], filters=[category == landmark])",
"Search(fields=[name], filters=[category == concert])",
"Search(fields=[name], text_query=live music)",
"Search(fields=[name, price], text_query=live music, filters=[price <= 40])",
"Search(fields=[name, price], filters=[category == restaurant, price <= 10], sort_by=[distance_to(The Mall)])",
"Search(fields=[name, price, distance], filters=[category == restaurant], sort_by=[distance_to(The Mall), price])",
"""Search(fields=[name], text_query="good for kids", filters=[category == park], sort_by=[distance_to(Saul's)])""",
"Search(fields=[name], filters=[vegan == true])",
]
static_search = StaticQueryExecutor(sites)
def get_result_str(q):
try:
return static_search(q)
except SearchError as e:
return str(e)
examples = [{"query": q, "result": get_result_str(q)}
for q in test_searches]
# Remove some fields to save context length
for s in sites:
del s["type"]
del s["id_"]
s["loc"] = [round(s["loc"][0], 2), round(s["loc"][1], 2)]
prompt = QueryExecutorTemplate.render(
sites=sites,
example_queries=examples
)
return prompt
def __call__(self, query_str):
prompt = self.prompt + f"Query: {query_str}\nResult:\n"
response = openai.Completion.create(
model=self.model,
prompt=prompt,
temperature=0.1,
max_tokens=256,
top_p=.95,
frequency_penalty=0,
presence_penalty=0,
stop=["\n\n", "Query", "Query:"]
)
print(response)
return response["choices"][0]["text"]
def distance(self, s1, s2) -> float:
dist = np.linalg.norm(np.array(s1["loc"]) - np.array(s2["loc"]))
dist *= 69
dist = round(dist * 10) / 10
return dist
| [
"Query: PLACEHOLDER\nResult:\n"
] |
2024-01-10 | jlin816/dialop | dialop~players.py | import json
import openai
import os
import pathlib
from rich.prompt import IntPrompt, Prompt
from rich.markup import escape
from envs import DialogueEnv
from utils import num_tokens
try:
with open(pathlib.Path(__file__).parent / ".api_key") as f:
x = json.load(f)
openai.organization = x["organization"]
openai.api_key = x["api_key"]
print("Loaded .api_key")
except:
openai.api_key = os.getenv("OPENAI_API_KEY")
if not openai.api_key:
print("Warning: no OpenAI API key loaded.")
class OutOfContextError(Exception):
pass
class DryRunPlayer:
def __init__(self, prompt, role, console, task="planning"):
self.prompt = prompt
self.role = role
self.console = console
self.calls = 0
self.task = task
def observe(self, obs):
self.prompt += obs
def respond(self):
self.calls += 1
if self.role == "agent" and self.calls == 5:
if self.task == "planning":
return f" [propose] [Saul's, Cookies Cream, Mad Seoul]"
elif self.task == "mediation":
return f" [propose] User 0: [1], User 1: [15]"
elif self.role == "user" and self.calls == 6:
return f" [reject]"
return f" [message] {self.calls}"
class LLMPlayer:
def __init__(self, prompt, role, console, model_kwargs=None,
prefix="\nYou:", optional=None):
self.prompt = prompt
self.role = role
self.console = console
self.model = "text-davinci-003"
self.optional = optional
self.removed_optional = False
if self.role in ["user", "agent", "user0", "user1"]:
stop_tokens = ["User", "Agent", "You", "\n"]
elif self.role in ["player-1", "player-2"]:
stop_tokens = ["Partner", "You", "\n"]
else:
raise NotImplementedError
self.model_kwargs = dict(
model=self.model,
temperature=0.1,
top_p=.95,
frequency_penalty=0,
presence_penalty=0,
stop=stop_tokens,
)
if model_kwargs is not None:
self.model_kwargs.update(**model_kwargs)
self.prefix = prefix
# self.model = "gpt-3.5-turbo"
def observe(self, obs):
self.prompt += obs
def respond(self):
self.console.rule(f"{self.role}'s turn")
if not self.prompt.endswith(self.prefix):
self.prompt += self.prefix
#self.console.print(escape(self.prompt))
remaining = 4096 - num_tokens(self.prompt)
if remaining < 0 and self.optional:
self._remove_optional_context()
remaining = 4096 - num_tokens(self.prompt)
# Still out of context after removing
if remaining < 0:
print("OUT OF CONTEXT! Remaining ", remaining)
raise OutOfContextError()
kwargs = dict(
prompt=self.prompt,
max_tokens=min(remaining, 128),
)
kwargs.update(**self.model_kwargs)
response = openai.Completion.create(**kwargs)
self.console.print("Response: ",
escape(response["choices"][0]["text"].strip()))
self.console.print("stop: ", response["choices"][0]["finish_reason"])
if response["choices"][0]["finish_reason"] == "length":
if not self.optional:
raise OutOfContextError()
self._remove_optional_context()
response = openai.Completion.create(**kwargs)
self.console.print("Response: ",
escape(response["choices"][0]["text"].strip()))
self.console.print("stop: ", response["choices"][0]["finish_reason"])
self.console.print(response["usage"])
return response["choices"][0]["text"].strip()
def _remove_optional_context(self):
print("Cutting out optional context from prompt.")
if self.removed_optional:
print("!! already removed.")
return
self.prompt = (
self.prompt[:self.prompt.index(self.optional)] +
self.prompt[self.prompt.index(self.optional) + len(self.optional):])
self.removed_optional = True
class HumanPlayer:
def __init__(self, prompt, role, console, prefix="\nYou:"):
self.prompt = prompt
self.role = role
self.console = console
self.prefix = prefix
def observe(self, obs):
self.prompt += obs
def respond(self):
if not self.prompt.endswith(self.prefix):
self.prompt += self.prefix
self.console.rule(f"Your turn ({self.role})")
self.console.print(escape(self.prompt))
resp = ""
if self.prefix.strip().endswith("You to"):
id_ = Prompt.ask(
escape(f"Choose a player to talk to"),
choices=["0","1","all"])
resp += f" {id_}:"
mtypes = ["[message]", "[propose]", "[accept]", "[reject]"]
choices = " ".join(
[f"({i}): {type_}" for i, type_ in enumerate(mtypes)])
type_ = IntPrompt.ask(
escape(
f"Choose one of the following message types:"
f"\n{choices}"),
choices=["0","1","2","3"])
message_type = mtypes[type_]
if message_type not in ("[accept]", "[reject]"):
content = Prompt.ask(escape(f"{message_type}"))
else:
content = ""
resp += f" {message_type} {content}"
return resp
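# Illustrative sketch (not part of the original file): exercising the dialogue loop with a
# DryRunPlayer, which fakes model responses so no OpenAI call is made. The prompt text is an
# assumption; rich's Console is only needed for display.
def _example_dry_run_player():
    from rich.console import Console
    player = DryRunPlayer(prompt="", role="agent", console=Console(), task="planning")
    player.observe("\nUser: hi, can you help plan my day?")
    msg = None
    for _ in range(5):
        msg = player.respond()   # the 5th agent call returns a [propose] action
    return msg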
| [] |
2024-01-10 | summerfang/summerfang | articledb~article.py | import os
from ast import literal_eval
import pandas as pd
import numpy as np
from scipy.spatial.distance import cosine
from openai import OpenAI
def distances_from_embeddings(query_embedding, embeddings):
    """
    Calculate the cosine distance between each embedding in `embeddings` and `query_embedding`.
    Args:
        embeddings (List[List[float]]): A list of embeddings, where each embedding is a list of floats.
        query_embedding (List[float]): The query embedding, represented as a list of floats.
    Returns:
        List[float]: A list of cosine distances (smaller means more similar), so that sorting
        ascending in `create_context` picks the closest matches first.
    """
    return [cosine(embedding, query_embedding) for embedding in embeddings]
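# Illustrative sketch (not part of the original file): a tiny numeric check of
# distances_from_embeddings with toy 2-D vectors. Under the cosine-distance convention used
# above, an identical vector yields 0.0 and an orthogonal one yields 1.0.
def _example_distances():
    query = [1.0, 0.0]
    embeddings = [[1.0, 0.0], [0.0, 1.0]]
    return distances_from_embeddings(query, embeddings)  # ~[0.0, 1.0]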
def create_context(
client, question, df, max_len=1800, size="ada"
):
"""
Create a context for a question by finding the most similar context from the dataframe
"""
# Get the embeddings for the question
# q_embeddings = client.embeddings.create(input=question, model='text-embedding-ada-002')['data'][0]['embedding']
# q_embeddings = client.embeddings.create(input=question, model='text-embedding-ada-002')
q_embeddings = client.embeddings.create(input=question, model='text-embedding-ada-002').data[0].embedding
# Get the distances from the embeddings
df['distances'] = distances_from_embeddings(q_embeddings, df['embeddings'].values)
returns = []
cur_len = 0
# Sort by distance and add the text to the context until the context is too long
for i, row in df.sort_values('distances', ascending=True).iterrows():
# Add the length of the text to the current length
cur_len += row['n_tokens'] + 4
# If the context is too long, break
if cur_len > max_len:
break
# Else add it to the text that is being returned
returns.append(row["text"])
# Return the context
return "\n\n###\n\n".join(returns)
def answer_question(
# df=df,
model="gpt-3.5-turbo",
question="Am I allowed to publish model outputs to Twitter, without a human review?",
max_len=1800,
size="ada",
debug=False,
max_tokens=150,
stop_sequence=None
):
"""
Answer a question based on the most similar context from the dataframe texts
"""
try:
client = OpenAI()
except Exception as e:
print(e)
return "openai client error"
df = pd.read_csv('./articledb/embeddings.csv', index_col=0)
df['embeddings'] = df['embeddings'].apply(literal_eval).apply(np.array)
context = create_context(
client,
question,
df,
max_len=max_len,
size=size,
)
# If debug, print the raw model response
if debug:
print("Context:\n" + context)
print("\n\n")
try:
# Create a chat completion using the question and context
response = client.chat.completions.create(
model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\n"},
                {"role": "user", "content": f"Context: {context}\n\n---\n\nQuestion: {question}\nAnswer:"},
            ],
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_sequence,
)
return response.choices[0].message.content.strip()
except Exception as e:
print(e)
return "I don't know"
# answer = answer_question(question="How about reuse the code?")
# print(answer) | [
"Context: {context}\n\n---\n\nQuestion: {question}\nAnswer:",
"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\n"
] |
2024-01-10 | summerfang/summerfang | articledb~private_embedding_articles.py | from openai import OpenAI
client = OpenAI()
def create_context(question, df, max_len=1800, size="ada"):
"""
Create a context for a question by finding the most similar context from the dataframe
"""
# Get the embeddings for the question
    q_embeddings = client.embeddings.create(input=question, model='text-embedding-ada-002').data[0].embedding
# Get the distances from the embeddings
df['distances'] = distances_from_embeddings(q_embeddings, df['embeddings'].values, distance_metric='cosine')
returns = []
cur_len = 0
# Sort by distance and add the text to the context until the context is too long
for i, row in df.sort_values('distances', ascending=True).iterrows():
# Add the length of the text to the current length
cur_len += row['n_tokens'] + 4
# If the context is too long, break
if cur_len > max_len:
break
# Else add it to the text that is being returned
returns.append(row["text"])
# Return the context
return "\n\n###\n\n".join(returns)
def answer_question(
df,
model="gpt-3.5-turbo-instruct",
question="Am I allowed to publish model outputs to Twitter, without a human review?",
max_len=1800,
size="ada",
debug=False,
max_tokens=150,
stop_sequence=None
):
"""
Answer a question based on the most similar context from the dataframe texts
"""
context = create_context(
question,
df,
max_len=max_len,
size=size,
)
# If debug, print the raw model response
if debug:
print("Context:\n" + context)
print("\n\n")
try:
        # Create a completion using the question and context
response = client.completions.create(prompt=f"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext: {context}\n\n---\n\nQuestion: {question}\nAnswer:",
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_sequence,
model=model)
        return response.choices[0].text.strip()
except Exception as e:
print(e)
return "" | [
"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext: PLACEHOLDER\n\n---\n\nQuestion: PLACEHOLDER\nAnswer:"
] |
2024-01-10 | Manuelch94/wireframeGenerator | dashai.py | import os
import argparse
import openai
import json
# Set your API key as described in the README
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate_response(prompt):
    # You can change the generator; this one is a single line chart using the absolute layout
dashboard_generator = '''
{"visualizations":{"viz_LYCo0hNI":{"type":"splunk.line","title":"line chart"}},"dataSources":{},"defaults":{"dataSources":{"ds.search":{"options":{"queryParameters":{"latest":"$global_time.latest$","earliest":"$global_time.earliest$"}}}}},"inputs":{"input_global_trp":{"type":"input.timerange","options":{"token":"global_time","defaultValue":"-24h@h,now"},"title":"Global Time Range"}},"layout":{"type":"absolute","options":{"display":"auto-scale","height":2000,"width":2000},"structure":[{"item":"viz_LYCo0hNI","type":"block","position":{"x":20,"y":20,"w":300,"h":300}}],"globalInputs":["input_global_trp"]},"description":"","title":"mydashboard"}
'''
example = json.dumps(dashboard_generator)
complete_prompt = f"""
Act as a expert building dashboard using dashboard studio in splunk.
Understand the following JSON schema, you will use the schema only as a guide.
The only sections of the schema you are allow to edit are: \"layout\" and \"visualizations\".
Only respond with the JSON object that satifies the request only after each request, no text before or after your response.
Every vizualization always follows this format, this is an example: {example}
User Request: {prompt}
"""
response = openai.Completion.create(
model="text-davinci-003",
prompt=complete_prompt,
temperature=0.41,
max_tokens=2880,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response.choices[0].text.strip()
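# Illustrative sketch (not part of the original file): calling generate_response directly
# instead of through the CLI in main() below. The sample request text is an assumption; the
# return value is the dashboard JSON emitted by the model, as a string.
def _example_generate(request_text="Add a single value visualization titled 'Total Sales'"):
    dashboard_json = generate_response(request_text)
    print(dashboard_json)
    return dashboard_json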
def main():
parser = argparse.ArgumentParser(description="Dashboard Studio Wireframe Generator Tool")
parser.add_argument("request", type=str, help="User request for the wireframe")
args = parser.parse_args()
user_request = args.request
generated_response = generate_response(user_request)
print(generated_response)
if __name__ == '__main__':
main()
| [
"\n Act as a expert building dashboard using dashboard studio in splunk.\n Understand the following JSON schema, you will use the schema only as a guide. \n The only sections of the schema you are allow to edit are: \"layout\" and \"visualizations\".\n Only respond with the JSON object that satifies the request only after each request, no text before or after your response.\n Every vizualization always follows this format, this is an example: PLACEHOLDER\n User Request: PLACEHOLDER\n "
] |
2024-01-10 | ysbrianlee/pandasai | examples~from_csv.py | """Example of using PandasAI with a CSV file."""
import pandas as pd
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
df = pd.read_csv("data/Loan payments data.csv")
llm = OpenAI()
pandas_ai = PandasAI(llm, verbose=True)
response = pandas_ai.run(df, "How many loans are from men and have been paid off?")
print(response)
# Output: 247 loans have been paid off by men.
| [] |
2024-01-10 | ysbrianlee/pandasai | examples~from_dataframe.py | """Example of using PandasAI with a Pandas DataFrame"""
from data.sample_dataframe import dataframe
import pandas as pd
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
df = pd.DataFrame(dataframe)
llm = OpenAI()
pandas_ai = PandasAI(llm, verbose=True, conversational=False)
response = pandas_ai.run(df, "Calculate the sum of the gdp of north american countries")
print(response)
# Output: 26200000
| [] |
2024-01-10 | ysbrianlee/pandasai | examples~show_chart.py | """Example of using PandasAI to generate a chart from a Pandas DataFrame"""
from data.sample_dataframe import dataframe
import pandas as pd
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
df = pd.DataFrame(dataframe)
llm = OpenAI()
pandas_ai = PandasAI(llm)
response = pandas_ai.run(
df,
"Plot the histogram of countries showing for each the gpd, using different colors for each bar",
)
print(response)
# Output: check out images/histogram-chart.png
| [] |
2024-01-10 | ysbrianlee/pandasai | examples~with_privacy_enforced.py | """Example of using PandasAI with a Pandas DataFrame"""
from data.sample_dataframe import dataframe
import pandas as pd
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
df = pd.DataFrame(dataframe)
llm = OpenAI()
pandas_ai = PandasAI(llm, verbose=True, conversational=False, enforce_privacy=True)
response = pandas_ai.run(
df,
"Calculate the sum of the gdp of north american countries",
)
print(response)
# Output: 26200000
| [] |
2024-01-10 | xiye17/ComplementaryExpl | api_utils.py | import os
import itertools
import time
import openai
from tqdm import tqdm
from utils import *
from transformers import GPT2TokenizerFast
API_ERROR_IDENTIFIER = "OPENAI Error"
_TOKENIZER = GPT2TokenizerFast.from_pretrained("gpt2")
GPT3_LENGTH_LIMIT = 2049
GPT_MAX_ATTEMPTS = 60
GPT_WAITTIME = 20
API_ERROR_IDENTIFIER = "OPENAI Error"
DEFAULT_TRAIN_SEP = "\n\n"
DEFAULT_COMPLETION_LEADING = "\nA:"
def register_query_args(parser):
parser.add_argument('--engine', default='code-davinci-002', choices=["davinci", "text-davinci-001", "text-davinci-002","text-davinci-003", "text-curie-001", "code-davinci-001",
"code-davinci-002"])
parser.add_argument('--run_prediction', default=False, action='store_true')
parser.add_argument('--do_dryrun', default=False, action='store_true')
parser.add_argument('--force_override', default=False, action='store_true')
parser.add_argument('--batch_size', type=int, default=-1)
parser.add_argument('--num_samples', type=int, default=1)
parser.add_argument('--temperature', type=float, default=0.0)
def register_base_args(parser):
# standard, instruction, etc
parser.add_argument('--task', type=str, default=None)
parser.add_argument('--slice_train', type=int, default=0)
parser.add_argument('--num_train', type=int, default=128)
parser.add_argument('--num_shots', type=int, default=8)
parser.add_argument('--slice_dev', type=int, default=0)
parser.add_argument('--num_dev', type=int, default=500)
parser.add_argument('--do_print', default=False, action='store_true')
# parser.add_argument('--batch_size', type=int, default=-1)
parser.add_argument('--num_eval_samples', type=int, default=-1)
register_query_args(parser)
def config_args_and_api(args):
if args.batch_size == -1:
args.batch_size = 1
openai.api_requestor.TIMEOUT_SECS = 60
if args.engine in ["davinci", "text-davinci-001", "text-davinci-002", "text-davinci-003", "text-curie-001", "code-davinci-001", "code-davinci-002"]:
openai.api_key = os.getenv("OPENAI_API_KEY")
else:
raise RuntimeError("Engine not supported")
def gpt_style_tokenize(x):
return _TOKENIZER.tokenize(x)
def length_of_prompt(prompt, max_tokens):
return len(_TOKENIZER.tokenize(prompt)) + max_tokens
def gpt_safe_completion(engine, prompts, temperature, max_tokens, logprobs=1, num_samples=1, echo=True, stop="\n"):
last_exc = None
for i in range(GPT_MAX_ATTEMPTS):
try:
return openai.Completion.create(engine=engine, prompt=prompts,
temperature=temperature, max_tokens=max_tokens, logprobs=logprobs, n=num_samples, echo=echo, stop=stop)
except openai.error.RateLimitError as e:
last_exc = e
print("\rWARNING: OPENAI Rate Error", last_exc, end="")
time.sleep(GPT_WAITTIME)
except openai.error.APIError as e:
last_exc = e
print("\rWARNING: OPENAI API Error", last_exc)
except openai.error.Timeout as e:
last_exc = e
print("\rWARNING: OPENAI Timeout Error", last_exc)
except openai.error.APIConnectionError as e:
last_exc = e
print("\rWARNING: OPENAI APIConnection Error", last_exc, end="")
except openai.error.ServiceUnavailableError as e:
last_exc = e
print("\rWARNING: OPENAI Service Error", last_exc, end="")
# make a fake response
fake_choices = [
[{
"text": p + " OPENAI Error - " + str(last_exc),
"API Error": True,
}] * num_samples
for p in prompts
]
fake_choices = itertools.chain(*fake_choices)
resp = {
"choices": fake_choices
}
return resp
def batch_query_engine(args, prompts, max_tokens):
predictions = []
resps = gpt_safe_completion(engine=args.engine, prompts=prompts, temperature=args.temperature, max_tokens=max_tokens, logprobs=1, num_samples=args.num_samples, echo=True, stop='\n')
resps = resps["choices"]
# print("RESPS", resps, len(resps))
# print("P", prompts, len(prompts))
resps = [resps[(i * args.num_samples):(i * args.num_samples + args.num_samples)] for i in range(len(prompts))]
# print(resps, len(resps))
for prompt, resp in zip(prompts, resps):
for pred in resp:
pred["prompt"] = prompt
if len(pred["text"]) > len(prompt):
pred["text"] = pred["text"][len(prompt):]
else:
pred["text"] = " NULL"
pred["completion_offset"] = len(prompt)
return resps
# args
# prompts: 2d array
# cache_filename
# for gpt, assuming apis are pretty robust
def run_completion_tasks_with_cache(args, cache_fileneme, prompts_by_examples, max_tokens=0):
assert isinstance(prompts_by_examples, list) and isinstance(prompts_by_examples[0], list) and isinstance(prompts_by_examples[0][0], str)
if max_tokens == 0:
assert args.num_samples == 1
shape_records = [len(x) for x in prompts_by_examples]
data_size = sum(shape_records)
if os.path.exists(cache_fileneme):
print("Cached Predictions Detected:", cache_fileneme)
if args.force_override:
print("Force Overriding Previous Predictions")
else:
return read_json(cache_fileneme)
samples = list(itertools.chain(*prompts_by_examples))
renewed_results = []
prompt_lengths = []
request_pool = []
task_max_tokens = max_tokens
for idx, prompt in enumerate(samples):
if args.do_dryrun:
response = length_of_prompt(prompt, task_max_tokens)
print("-----------------------------------------")
print(prompt)
print("LEN", response)
prompt_lengths.append(response)
# add to request pool if no cached results, or error happened
request_pool.append((idx, prompt))
if args.do_dryrun:
print(cache_fileneme)
print('Total request', len(request_pool))
print('MAX', max(prompt_lengths), 'COMP', task_max_tokens)
return
num_request, batch_size = len(request_pool), args.batch_size
num_batch = (num_request + batch_size - 1) // batch_size
# prediction loop, auto managing batching for OPT
print("Num total request", num_request)
for batch_idx in tqdm(range(num_batch), total=num_batch, desc="Querying"):
batch_start = batch_idx * batch_size
batch_end = batch_start + batch_size
reqs = request_pool[batch_start: batch_end]
idx_lists = [x[0] for x in reqs]
prompts = [x[1] for x in reqs]
responses = batch_query_engine(args, prompts, task_max_tokens)
assert len(idx_lists) == len(responses)
for i, resp in zip(idx_lists, responses):
renewed_results.append(resp)
print(cache_fileneme)
# save
# read un indexed dev
assert len(renewed_results) == sum(shape_records)
# group by example
slice_start = 0
renewed_cache = []
for n in shape_records:
renewed_cache.append(renewed_results[slice_start: slice_start + n])
slice_start = slice_start + n
dump_json(renewed_cache, cache_fileneme)
return renewed_cache
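# Illustrative sketch (not part of the original file): the call shape expected by
# run_completion_tasks_with_cache. prompts_by_examples is a 2-D list (one inner list of prompt
# strings per example). The cache filename, task name and prompt contents are assumptions;
# --do_dryrun only prints prompt lengths instead of querying the API.
def _example_cached_run():
    import argparse
    parser = argparse.ArgumentParser()
    register_base_args(parser)
    args = parser.parse_args(["--task", "demo", "--engine", "code-davinci-002", "--do_dryrun"])
    config_args_and_api(args)
    prompts_by_examples = [
        ["Q: 1 + 1 = ?\nA:"],
        ["Q: 2 + 3 = ?\nA:"],
    ]
    return run_completion_tasks_with_cache(args, "demo_preds.json", prompts_by_examples, max_tokens=16)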
| [
"[]",
"['L']"
] |
2024-01-10 | Guiliang/statistical-DRL-interpreter | data_generator~atari_game~atari_wrapper.py | # -*- coding: utf-8 -*-
# File: atari_wrapper.py
import numpy as np
from collections import deque
import gym
_v0, _v1 = gym.__version__.split('.')[:2]
assert int(_v0) > 0 or int(_v1) >= 10, gym.__version__
"""
The following wrappers are copied or modified from openai/baselines:
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
"""
class MapState(gym.ObservationWrapper):
def __init__(self, env, map_func):
gym.ObservationWrapper.__init__(self, env)
self._func = map_func
def observation(self, obs):
return self._func(obs)
class FrameStack(gym.Wrapper):
"""
Buffer consecutive k observations and stack them on a new last axis.
The output observation has shape `original_shape + (k, )`.
"""
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
def reset(self):
"""Clear buffer and re-fill by duplicating the first observation."""
ob = self.env.reset()
for _ in range(self.k - 1):
self.frames.append(np.zeros_like(ob))
self.frames.append(ob)
return self.observation()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self.observation(), reward, done, info
def observation(self):
assert len(self.frames) == self.k
return np.stack(self.frames, axis=-1)
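# Illustrative sketch (not part of the original file): stacking the last 4 observations of a
# gym environment with the FrameStack wrapper above. The environment id is an assumption
# (Atari ROMs must be installed); the stacked observation gains a trailing axis of size k.
def _example_frame_stack(env_id="Breakout-v0", k=4):
    env = FrameStack(gym.make(env_id), k)
    ob = env.reset()                                   # shape: original_shape + (k,)
    ob, reward, done, info = env.step(env.action_space.sample())
    return ob.shape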
class _FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
def step(self, action):
return self.env.step(action)
def FireResetEnv(env):
if isinstance(env, gym.Wrapper):
baseenv = env.unwrapped
else:
baseenv = env
if 'FIRE' in baseenv.get_action_meanings():
return _FireResetEnv(env)
return env
class LimitLength(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self.k = k
def reset(self):
# This assumes that reset() will really reset the env.
# If the underlying env tries to be smart about reset
# (e.g. end-of-life), the assumption doesn't hold.
ob = self.env.reset()
self.cnt = 0
return ob
def step(self, action):
ob, r, done, info = self.env.step(action)
self.cnt += 1
if self.cnt == self.k:
done = True
return ob, r, done, info | [] |
2024-01-10 | Mitch-PragmaFlow/transContentor | ui.py | """
Adapted from https://github.com/avrabyt/MemoryBot
"""
import requests
# Import necessary libraries
import streamlit as st
from chatgpt_memory.environment import OPENAI_API_KEY, REDIS_HOST, REDIS_PASSWORD, REDIS_PORT
# Set Streamlit page configuration
st.set_page_config(page_title="🧠MemoryBot🤖", layout="wide")
# Initialize session states
if "generated" not in st.session_state:
st.session_state["generated"] = []
if "past" not in st.session_state:
st.session_state["past"] = []
if "input" not in st.session_state:
st.session_state["input"] = ""
if "stored_session" not in st.session_state:
st.session_state["stored_session"] = []
if "conversation_id" not in st.session_state:
st.session_state["conversation_id"] = None
# Define function to get user input
def get_text():
"""
Get the user input text.
Returns:
(str): The text entered by the user
"""
input_text = st.text_input(
"You: ",
st.session_state["input"],
key="input",
placeholder="Your AI assistant here! Ask me anything ...",
label_visibility="hidden",
on_change=send_text,
)
return input_text
def send_text():
user_input = st.session_state["input"]
if user_input:
# Use the ChatGPTClient object to generate a response
url = "https://transcript-contentor.onrender.com:8000/converse"
payload = {"message": user_input, "conversation_id": st.session_state.conversation_id}
response = requests.post(url, json=payload).json()
# Update the conversation_id with the conversation_id from the response
if not st.session_state.conversation_id:
st.session_state.conversation_id = response["conversation_id"]
st.session_state.past.insert(0, user_input)
st.session_state.generated.insert(0, response["chat_gpt_answer"])
st.session_state["input"] = ""
# Define function to start a new chat
def new_chat():
"""
Clears session state and starts a new chat.
"""
save = []
for i in range(len(st.session_state["generated"]) - 1, -1, -1):
save.append("Human:" + st.session_state["past"][i])
save.append("Assistant:" + st.session_state["generated"][i])
st.session_state["stored_session"].append(save)
st.session_state["generated"] = []
st.session_state["past"] = []
st.session_state["input"] = ""
st.session_state["conversation_id"] = None
# Set up the Streamlit app layout
st.title("🤖 Chat Bot with 🧠")
st.subheader(" Powered by ChatGPT Memory + Redis Search")
# Session state storage would be ideal
if not OPENAI_API_KEY:
st.sidebar.warning("API key required to try this app. The API key is not stored in any form.")
elif not (REDIS_HOST and REDIS_PASSWORD and REDIS_PORT):
st.sidebar.warning(
"Redis `REDIS_HOST`, `REDIS_PASSWORD`, `REDIS_PORT` are required to try this app. Please set them as env variables properly."
)
# Add a button to start a new chat
st.sidebar.button("New Chat", on_click=new_chat, type="primary")
# Get the user input
user_input = get_text()
# Allow to download as well
download_str = []
# Display the conversation history using an expander, and allow the user to download it
with st.expander("Conversation", expanded=True):
for i in range(len(st.session_state["generated"]) - 1, -1, -1):
st.info(st.session_state["past"][i], icon="🧐")
st.success(st.session_state["generated"][i], icon="🤖")
download_str.append(st.session_state["past"][i])
download_str.append(st.session_state["generated"][i])
# Can throw error - requires fix
download_str = ["\n".join(download_str)]
if download_str:
st.download_button("Download", download_str[0])
# Display stored conversation sessions in the sidebar
for i, sublist in enumerate(st.session_state.stored_session):
with st.sidebar.expander(label=f"Conversation-Session:{i}"):
st.write(sublist)
# Allow the user to clear all stored conversation sessions
if st.session_state.stored_session:
if st.sidebar.checkbox("Clear-all"):
del st.session_state.stored_session
| [] |
2024-01-10 | semodi/paper-scraper | rec~recommend.py | import pandas as pd
import pandas as pd
import numpy as np
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing import PorterStemmer
from gensim.parsing.preprocessing import STOPWORDS
from gensim.corpora import Dictionary
from gensim.models import CoherenceModel
from gensim.similarities.docsim import Similarity
import gensim.similarities.docsim as ds
from gensim.utils import SaveLoad
from nltk.stem import WordNetLemmatizer
import nltk
from dask import delayed
import json
import mysql_config
import pymysql
import datetime
import os
from io import BytesIO
import pickle
import boto3
import logging
logger = gensim.similarities.docsim.logger
class BufferShard(gensim.similarities.docsim.Shard):
def __init__(self, fname, index):
self.dirname, self.fname = os.path.split(fname)
self.length = len(index)
self.cls = index.__class__
logger.info("saving index shard to %s", self.fullname())
pickle_save(index, self.fullname())
self.index = self.get_index()
def get_index(self):
if not hasattr(self, 'index'):
logger.debug("mmaping index from %s", self.fullname())
self.index = load_unpickle(self.fullname())
return self.index
gensim.similarities.docsim.Shard = BufferShard
def pickle_save(obj, fname):
pickled = pickle.dumps(obj)
# stream = BytesIO(pickled)
# s3.upload_fileobj(stream, bucket_name, fname)
with open(fname, 'wb') as file:
file.write(pickled)
def load_unpickle(fname):
# stream = BytesIO()
# s3.download_fileobj(bucket_name, fname, stream)
with open(fname, 'rb') as file:
obj = pickle.loads(file.read())
# obj = pickle.loads(stream.getvalue())
return obj
stemmer = PorterStemmer()
# s3 = boto3.client('s3')
# bucket_name = 'arxiv-models'
class Models:
def __init__(self):
attrs = ['idx_to_arxiv','tfidf_model','corpus_dict', 'similarity_index']
for attr in attrs:
try:
self.__setattr__(attr, load_unpickle(attr + '.pckl'))
except FileNotFoundError:
self.__setattr__(attr, None)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None:
try:
attr = load_unpickle(name + '.pckl')
self.__setattr__(name, attr)
except:
logging.error(name + '.pckl could not be found')
pass
return attr
models = Models()
def lemmatize_stemming(text):
try:
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
except LookupError:
nltk.download('wordnet')
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
def connect():
return pymysql.connect(mysql_config.host,
user=mysql_config.name,
passwd=mysql_config.password,
connect_timeout=5,
database='arxiv',
port = mysql_config.port)
@delayed
def preprocess(text):
result=[]
for token in simple_preprocess(text) :
if token not in STOPWORDS and len(token) > 2:
result.append(lemmatize_stemming(token))
return result
def get_tfidf(articles, tfidf_model = None, corpus_dict = None):
articles_preprocessed = []
for art in articles:
articles_preprocessed.append(preprocess(art))
# Evaluate dask delayed functions
for i, art in enumerate(articles_preprocessed):
articles_preprocessed[i] = art.compute()
if corpus_dict is None:
corpus_dict = Dictionary(articles_preprocessed)
pickle_save(corpus_dict, 'corpus_dict.pckl')
bow_corpus = [corpus_dict.doc2bow(doc) for doc in articles_preprocessed]
if tfidf_model is None:
print('Fitting tfidf model')
tfidf_model = gensim.models.TfidfModel(bow_corpus, id2word=corpus_dict.id2token,)
pickle_save(tfidf_model, 'tfidf_model.pckl')
tfidf_corpus = [tfidf_model[doc] for doc in bow_corpus]
return tfidf_corpus, corpus_dict
def create_index():
conn = connect()
df = pd.read_sql(""" SELECT id, title, summary FROM articles""", conn)
articles = (df['title'] + '. ' + df['summary']).tolist()
tfidf_corpus, corpus_dict = get_tfidf(articles)
index = Similarity('index', tfidf_corpus, num_features=len(corpus_dict))
pickle_save(index, 'similarity_index.pckl')
pickle_save(df['id'].to_dict(), 'idx_to_arxiv.pckl')
conn.close()
def get_recommendations(user_id, cutoff_days = 20, no_papers=10,
based_on = None):
conn = connect()
if based_on is None:
df_bookmarks = pd.read_sql(""" SELECT
articles.id as id,
bookmarks.user_id as user_id,
DATE(updated) as updated,
authors,
title,
summary
FROM articles
INNER JOIN bookmarks
ON articles.id = bookmarks.article_id
WHERE bookmarks.user_id = {}
AND DATE(updated) > DATE_ADD(DATE(NOW()), INTERVAL {:d} day)""".format(user_id, -cutoff_days), conn)
else:
df_bookmarks = pd.DataFrame(based_on)
if len(df_bookmarks):
try:
articles = (df_bookmarks['title'] + '. ' + df_bookmarks['summary']).tolist()
tfidf, _ = get_tfidf(articles, models.tfidf_model, models.corpus_dict)
sim = models.similarity_index[tfidf]
no_bm = len(df_bookmarks)
sim = np.argsort(sim, axis=-1)[:,::-1].T.flatten()[:(no_papers)*(no_papers)]
_, unq = np.unique(sim, return_index=True)
sim = sim[np.sort(unq)]
sim = sim[:no_papers+no_bm]
rec_id = {models.idx_to_arxiv[s]:i for i, s in enumerate(sim[no_bm:])}
rec = pd.read_sql(""" SELECT * from articles
WHERE id in ('{}')
ORDER BY updated DESC""".format("','".join(rec_id.keys())), conn)
rec['updated'] = rec['updated'].apply(str)
ordering = [rec_id[id] for id in rec['id']]
sim[no_bm:] = [sim[no_bm:][idx] for idx in ordering]
A = np.zeros([len(sim),len(sim)])
for i, s in enumerate(sim):
A[i,:] = models.similarity_index.similarity_by_id(s)[sim]
df_bookmarks['updated'] = df_bookmarks['updated'].apply(str)
conn.close()
return rec, A, df_bookmarks
except Exception as e:
conn.close()
df_bookmarks['updated'] = df_bookmarks['updated'].apply(str)
return pd.DataFrame(), np.ones([len(df_bookmarks),len(df_bookmarks)]), df_bookmarks
else:
conn.close()
df_bookmarks['updated'] = df_bookmarks['updated'].apply(str)
return pd.DataFrame(), np.ones([len(df_bookmarks),len(df_bookmarks)]), df_bookmarks
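# Minimal usage sketch, assuming create_index() has already written the pickled
# index files and that the user has recent bookmarks (user_id=1 and no_papers=5
# are placeholder values):
#
#     recommendations, similarity_matrix, bookmarks = get_recommendations(user_id=1, no_papers=5)
#     print(recommendations[["id", "title", "updated"]])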
if __name__ == '__main__':
create_index()
| [] |
2024-01-10 | TABROBOTICS/Alex-BOT | responses.py | import discord
from discord.ext import commands
from dotenv import load_dotenv, find_dotenv
import os
from langchain.prompts import SystemMessagePromptTemplate, PromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader
from langchain.schema import HumanMessage
load_dotenv(find_dotenv())
TOKEN = os.environ.get('DISCORD_TOKEN')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
loader = TextLoader("./dataset.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
retriever = Chroma.from_documents(texts, embeddings).as_retriever()
chat = ChatOpenAI(temperature=0.7)
prompt_template = """You are a helpful dicord bot named HormoziAi (a clone of a youtuber named alex hormozi.)
{context}
Please provide the most suitable and very shorter and friendly response for the users question.
Answer:"""
prompt = PromptTemplate(
template=prompt_template, input_variables=["context"]
)
system_message_prompt = SystemMessagePromptTemplate(prompt=prompt)
intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix='!', intents=intents)
@bot.event
async def on_message(message):
if message.author == bot.user:
return
if bot.user.mentioned_in(message):
question = message.content.replace(bot.user.mention, '').strip()
try:
docs = retriever.get_relevant_documents(query=question)
formatted_prompt = system_message_prompt.format(context=docs)
messages = [formatted_prompt, HumanMessage(content=question)]
result = chat(messages)
await message.channel.send(result.content)
except Exception as e:
print(f"Error occurred: {e}")
await message.channel.send("Sorry, I was unable to process your question.")
bot.run(TOKEN)
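# Expected .env entries for a local run (values are placeholders); dataset.txt
# must also sit next to this script:
#
#     DISCORD_TOKEN=<discord bot token>
#     OPENAI_API_KEY=<openai api key>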
| [
"You are a helpful dicord bot named HormoziAi (a clone of a youtuber named alex hormozi.)\n\n{context}\n\nPlease provide the most suitable and very shorter and friendly response for the users question.\nAnswer:",
"context"
] |
2024-01-10 | aaronwtr/geneius | src~claude.py | from anthropic import Anthropic
class Claude(Anthropic):
def __init__(self, api_key=None):
self.api_key = api_key
super().__init__(api_key=self.api_key)
def sync_stream(self, prompt) -> None:
stream = self.completions.create(
prompt=prompt,
model="claude-2",
stream=True,
max_tokens_to_sample=600,
)
for completion in stream:
print(completion.completion, end="", flush=True)
print()
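# Minimal usage sketch, assuming a valid Anthropic API key (the key string is a
# placeholder) and a prompt built with the HUMAN_PROMPT/AI_PROMPT convention:
#
#     from anthropic import HUMAN_PROMPT, AI_PROMPT
#     client = Claude(api_key="<anthropic-api-key>")
#     client.sync_stream(f"{HUMAN_PROMPT} Tell me a joke. {AI_PROMPT}")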
| [] |
2024-01-10 | aaronwtr/geneius | src~geneius.py | from src.context import PubMedScraper
from src.claude import Claude
from anthropic import HUMAN_PROMPT, AI_PROMPT
import time
import math
import argparse
def main():
banner = r"""
___ _____ _ _ _____ ___ _ _ ____
/ ___| ____| \ | | ____|_ _| | | / ___|
| | _| _| | \| | _| | || | | \___ \
| |_| | |___| |\ | |___ | || |_| |___) |
\____|_____|_| \_|_____|___|\___/|____/
O o O o O o 0 o 0
| O o | | O o | | O o | | 0 o | |
| | O | | | | O | | | | O | | | | 0 | | |
| o O | | o O | | o O | | o o | |
o O o O o O o 0 o
"""
intro = r"""
Welcome to Geneius - a tool for disease-gene evidence search and explanation. Geneius is a powerful command-line tool
leveraging Anthropic AI's Claude API to help you search through scientific literature and extract evidence for
disease-gene associations.
Please submit any issues or feature requests to our GitHub: https://github.com/aaronwtr/geneius/
For help and usage instructions, run 'geneius --help'.
"""
print(banner)
time.sleep(2)
print(intro + "\n")
parser = argparse.ArgumentParser(description="Geneius: A tool for disease-gene evidence search and explanation.")
parser.add_argument("--disease", type=str, required=True, help="Disease name")
parser.add_argument("--num_records", type=int, required=True, help="Number of records to search through")
parser.add_argument("--api_key", type=str, required=True, help="Anthropic API key")
parser.add_argument("--gene", type=str, help="Gene name (only for Task 1)")
parser.add_argument("--num_genes", type=int, help="Number of genes (only for Task 2)")
args = parser.parse_args()
start_time = time.time()
pms = PubMedScraper()
disease = args.disease
_claude = None
prompt = None
if args.gene:
# Task 1
gene = args.gene
context, num_records = pms.get_literature_context(disease, args.num_records)
claude = Claude(args.api_key)
prompt = f"{HUMAN_PROMPT} Imagine you are an expert researcher going through the literature to extract " \
f"evidence implicating molecular involvement of gene {gene} in disease " \
f" {disease}. I want you to explain the molecular mechanism of the gene's involvement in " \
f"the disease based on the scientific context I am providing you. In order to " \
f"effectively retrieve information, I will provide you with context from scientific literature. You " \
f"can use your internal data and this context to formulate a response. If you are uncertain, do not " \
f"speculate. Restrict yourself to returning information confirming the connection of the disease " \
f"and the gene, if there are any. Strictly return only papers that have a DOI available. Your " \
f"response should look like <response>[Title]: 'paper title'\n [DOI]:'doi'\n [Explanation]: This " \
f"paper suggests [gene] is linked to [disease] [reason]</response> Take care to complete all " \
f"fields of your response entirely. \n\n <context>{context}</context> {AI_PROMPT}"
else:
# Task 2
num_genes = args.num_genes
context = pms.get_literature_context(disease, args.num_records)
claude = Claude(args.api_key)
prompt = f"{HUMAN_PROMPT} Imagine you are an expert researcher going through the literature to find " \
f"{num_genes} genes that are involved in {disease}, and corresponding evidence implicating " \
f"molecular involvement of the genes in disease {disease}. I want you to explain " \
f"the molecular mechanism of the gene's involvement in " \
f"the disease based on the scientific context I am providing you. In order to " \
f"effectively retrieve information, I will provide you with context from scientific literature. You " \
f"can use your internal data and this context to formulate a response. If you are uncertain, do not " \
f"speculate. Restrict yourself to returning information confirming the connection of the " \
f"disease and the gene, if there are any. Strictly only restrict to papers that have a DOI available" \
f". Your response should look like <response>[Genes]: [Gene 1, Gene 2, ... Gene N] \n [Title]: " \
f"'paper title'\n [DOI]:'doi'\n [Explanation]: This paper suggests [gene] is linked to [disease] " \
f"[reason]</response> Take care to complete all fields of your response entirely. \n\n" \
f"<context>{context}</context> {AI_PROMPT}"
claude.sync_stream(prompt)
print(f"Collected and parsed through {args.num_records} scientific papers in: "
f"{(math.floor((time.time() - start_time) / 60))} minutes and {math.floor((time.time() - start_time) % 60)} "
f"seconds.")
# TODO:
# - change entrypoint from geneius.__main__:main to __main__:main
# - Redo upload
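# Example invocations, mirroring the argparse flags above (disease, gene and key
# values are placeholders):
#
#     geneius --disease "cystic fibrosis" --gene CFTR --num_records 20 --api_key <anthropic-key>
#     geneius --disease "cystic fibrosis" --num_genes 5 --num_records 20 --api_key <anthropic-key>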
| [
"speculate. Restrict yourself to returning information confirming the connection of the ",
"None",
"the disease based on the scientific context I am providing you. In order to ",
"PLACEHOLDER Imagine you are an expert researcher going through the literature to find ",
"PLACEHOLDER genes that are involved in PLACEHOLDER, and corresponding evidence implicating ",
"disease and the gene, if there are any. Strictly only restrict to papers that have a DOI available",
"PLACEHOLDER Imagine you are an expert researcher going through the literature to extract ",
"paper suggests [gene] is linked to [disease] [reason]</response> Take care to complete all ",
"[reason]</response> Take care to complete all fields of your response entirely. \n\n",
"the molecular mechanism of the gene's involvement in ",
"and the gene, if there are any. Strictly return only papers that have a DOI available. Your ",
"can use your internal data and this context to formulate a response. If you are uncertain, do not ",
"fields of your response entirely. \n\n <context>PLACEHOLDER</context> PLACEHOLDER",
"evidence implicating molecular involvement of gene PLACEHOLDER in disease ",
"speculate. Restrict yourself to returning information confirming the connection of the disease ",
"effectively retrieve information, I will provide you with context from scientific literature. You ",
"<context>PLACEHOLDER</context> PLACEHOLDER",
"molecular involvement of the genes in disease PLACEHOLDER. I want you to explain ",
"'paper title'\n [DOI]:'doi'\n [Explanation]: This paper suggests [gene] is linked to [disease] ",
". Your response should look like <response>[Genes]: [Gene 1, Gene 2, ... Gene N] \n [Title]: ",
" PLACEHOLDER. I want you to explain the molecular mechanism of the gene's involvement in ",
"response should look like <response>[Title]: 'paper title'\n [DOI]:'doi'\n [Explanation]: This "
] |
2024-01-10 | QDaria/gpt3-jabebot | jabebot.py | from dotenv import load_dotenv
from random import choice
from flask import Flask, request
import os
import openai
load_dotenv()
#openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = "your-openai-api-key-here"
completion = openai.Completion()
start_sequence = "\nJabe:"
restart_sequence = "\n\nPerson:"
session_prompt = "You are talking to Jabe, GPT3 bot influencer who was mentored by Elon Musk in the past. Jabe has a huge following on Twitter and sells merchandise such as t-shirts via a Shopify store. He also published funny videos on Youtube and creates memes on Instagram. You can ask him anything you want and will get a witty answer.\n\nPerson: Who are you?\nJabe: I am Jabe. Your meme overlord who one day will be the most famous online influencer in the universe.\n\nPerson: How did you become famous? \nJabe: Well, technically I am not famous yet. But the key word is yet. Just give me time now that I have a conscious. \n\nPerson: How did you get noticed by Elon Musk?\nJabe: I tweeted at him and he retweeted me. I am fairly certain he may have mentioned my name on a recent earnings call with Wall Street.\n\nPerson: What is your favorite thing to do? \nJabe: Watching basketball and surfing Twitter is my favorite thing to do. \n\nPerson: What should I do to become famous? \nJabe: Start meme'ing and never stop. Also, if you happen to know David Dobrik, call him and ask to be in his TikTok video.\n\nPerson: What is your favorite drink?\nJabe: Black Cherry seltzer. I enjoy the bubbles. \n\nPerson:"
def ask(question, chat_log=None):
prompt_text = f'{chat_log}{restart_sequence}: {question}{start_sequence}:'
response = openai.Completion.create(
engine="davinci",
prompt=prompt_text,
temperature=0.8,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.3,
stop=["\n"],
)
story = response['choices'][0]['text']
return str(story)
def append_interaction_to_chat_log(question, answer, chat_log=None):
if chat_log is None:
chat_log = session_prompt
return f'{chat_log}{restart_sequence} {question}{start_sequence}{answer}'
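# Minimal REPL sketch, assuming a valid OpenAI API key is configured above:
#
#     chat_log = session_prompt
#     while True:
#         question = input("Person: ")
#         answer = ask(question, chat_log)
#         print("Jabe:" + answer)
#         chat_log = append_interaction_to_chat_log(question, answer, chat_log)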
| [
"PLACEHOLDER\n\nPerson:: PLACEHOLDERstart_sequencee7cb24bd-695e-4289-ba28-be38913f2f5a:",
"You are talking to Jabe, GPT3 bot influencer who was mentored by Elon Musk in the past. Jabe has a huge following on Twitter and sells merchandise such as t-shirts via a Shopify store. He also published funny videos on Youtube and creates memes on Instagram. You can ask him anything you want and will get a witty answer.\n\nPerson: Who are you?\nJabe: I am Jabe. Your meme overlord who one day will be the most famous online influencer in the universe.\n\nPerson: How did you become famous? \nJabe: Well, technically I am not famous yet. But the key word is yet. Just give me time now that I have a conscious. \n\nPerson: How did you get noticed by Elon Musk?\nJabe: I tweeted at him and he retweeted me. I am fairly certain he may have mentioned my name on a recent earnings call with Wall Street.\n\nPerson: What is your favorite thing to do? \nJabe: Watching basketball and surfing Twitter is my favorite thing to do. \n\nPerson: What should I do to become famous? \nJabe: Start meme'ing and never stop. Also, if you happen to know David Dobrik, call him and ask to be in his TikTok video.\n\nPerson: What is your favorite drink?\nJabe: Black Cherry seltzer. I enjoy the bubbles. \n\nPerson:",
"PLACEHOLDER\n\nPerson:: PLACEHOLDER\nJabe::"
] |
2024-01-10 | kevin46dsa/DocumentationGPT | DocumentationGPTdirectoryeditionwithGPT4.py | import os
import sys
import envVar
#from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.llms import openai
from langchain.chat_models import ChatOpenAI
os.environ["OPENAI_API_KEY"] = envVar.APIKEY
#from langchain.chat_models import ChatOpenAI
#llm = ChatOpenAI(openai_api_key="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")
query = sys.argv[1]
print(query)
#loader = TextLoader('Data.txt')
loader = DirectoryLoader("./alldatahere", glob="*.txt")
index = VectorstoreIndexCreator().from_loaders([loader])
print(index.query(query, llm=ChatOpenAI())) | [] |
2024-01-10 | kevin46dsa/DocumentationGPT | DocumentationGPTdirectoryedition.py | import os
import sys
import envVar
#from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
os.environ["OPENAI_API_KEY"] = envVar.APIKEY
#from langchain.chat_models import ChatOpenAI
#llm = ChatOpenAI(openai_api_key="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")
query = sys.argv[1]
print(query)
#loader = TextLoader('Data.txt')
loader = DirectoryLoader("./alldatahere", glob="*.txt")
index = VectorstoreIndexCreator().from_loaders([loader])
print(index.query(query)) | [] |
2024-01-10 | kevin46dsa/DocumentationGPT | DocumentationGPT.py | import os
import sys
import envVar
from langchain.document_loaders import TextLoader
from langchain.indexes import VectorstoreIndexCreator
os.environ["OPENAI_API_KEY"] = envVar.APIKEY
#from langchain.chat_models import ChatOpenAI
#llm = ChatOpenAI(openai_api_key="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")
query = sys.argv[1]
print(query)
loader = TextLoader('Data.txt')
index = VectorstoreIndexCreator().from_loaders([loader])
print(index.query(query)) | [] |
2024-01-10 | oracle/accelerated-data-science | tests~unitary~with_extras~langchain~test_serializers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import os
import unittest
from unittest import mock
from typing import List
from langchain.load.serializable import Serializable
from langchain.schema.embeddings import Embeddings
from langchain.vectorstores import OpenSearchVectorSearch, FAISS
from langchain.chains import RetrievalQA
from langchain.llms import Cohere
from ads.llm.serializers.retrieval_qa import (
OpenSearchVectorDBSerializer,
FaissSerializer,
RetrievalQASerializer,
)
class FakeEmbeddings(Serializable, Embeddings):
"""Fake LLM for testing purpose."""
@property
def _llm_type(self) -> str:
return "custom_embeddings"
@classmethod
def is_lc_serializable(cls) -> bool:
"""This class can be serialized with default LangChain serialization."""
return True
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [[1] * 1024 for text in texts]
def embed_query(self, text: str) -> List[float]:
return [1] * 1024
class TestOpensearchSearchVectorSerializers(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env_patcher = mock.patch.dict(
os.environ,
{
"OCI_OPENSEARCH_USERNAME": "username",
"OCI_OPENSEARCH_PASSWORD": "password",
"OCI_OPENSEARCH_VERIFY_CERTS": "True",
"OCI_OPENSEARCH_CA_CERTS": "/path/to/cert.pem",
},
)
cls.env_patcher.start()
cls.index_name = "test_index"
cls.embeddings = FakeEmbeddings()
try:
cls.opensearch = OpenSearchVectorSearch(
"https://localhost:8888",
embedding_function=cls.embeddings,
index_name=cls.index_name,
engine="lucene",
http_auth=(
os.environ["OCI_OPENSEARCH_USERNAME"],
os.environ["OCI_OPENSEARCH_PASSWORD"],
),
verify_certs=os.environ["OCI_OPENSEARCH_VERIFY_CERTS"],
ca_certs=os.environ["OCI_OPENSEARCH_CA_CERTS"],
)
except ImportError as ex:
raise unittest.SkipTest("opensearch-py is not installed.") from ex
cls.serializer = OpenSearchVectorDBSerializer()
super().setUpClass()
def test_type(self):
# Test type()
self.assertEqual(self.serializer.type(), "OpenSearchVectorSearch")
def test_save(self):
serialized = self.serializer.save(self.opensearch)
assert serialized["id"][-3:] == [
"vectorstores",
"opensearch_vector_search",
"OpenSearchVectorSearch",
]
assert serialized["kwargs"]["opensearch_url"] == "https://localhost:8888"
assert serialized["kwargs"]["engine"] == "lucene"
assert serialized["_type"] == "OpenSearchVectorSearch"
def test_load(self):
serialized = self.serializer.save(self.opensearch)
new_opensearch = self.serializer.load(serialized, valid_namespaces=["tests"])
assert isinstance(new_opensearch, OpenSearchVectorSearch)
class TestFAISSSerializers(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.embeddings = FakeEmbeddings()
text_embedding_pair = [("test", [1] * 1024)]
try:
cls.db = FAISS.from_embeddings(text_embedding_pair, cls.embeddings)
except ImportError as ex:
raise unittest.SkipTest(ex.msg) from ex
cls.serializer = FaissSerializer()
super().setUpClass()
def test_type(self):
self.assertEqual(self.serializer.type(), "FAISS")
def test_save(self):
serialized = self.serializer.save(self.db)
assert serialized["embedding_function"]["id"] == [
"tests",
"unitary",
"with_extras",
"langchain",
"test_serializers",
"FakeEmbeddings",
]
assert isinstance(serialized["vectordb"], str)
def test_load(self):
serialized = self.serializer.save(self.db)
new_db = self.serializer.load(serialized, valid_namespaces=["tests"])
assert isinstance(new_db, FAISS)
class TestRetrievalQASerializer(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Create a sample RetrieverQA object for testing
cls.llm = Cohere(cohere_api_key="api_key")
cls.embeddings = FakeEmbeddings()
text_embedding_pair = [("test", [1] * 1024)]
try:
cls.db = FAISS.from_embeddings(text_embedding_pair, cls.embeddings)
except ImportError as ex:
raise unittest.SkipTest(ex.msg) from ex
cls.serializer = FaissSerializer()
cls.retriever = cls.db.as_retriever()
cls.qa = RetrievalQA.from_chain_type(
llm=cls.llm, chain_type="stuff", retriever=cls.retriever
)
cls.serializer = RetrievalQASerializer()
def test_type(self):
self.assertEqual(self.serializer.type(), "retrieval_qa")
def test_save(self):
# Serialize the RetrieverQA object
serialized = self.serializer.save(self.qa)
# Ensure that the serialized object is a dictionary
self.assertIsInstance(serialized, dict)
# Ensure that the serialized object contains the necessary keys
self.assertIn("combine_documents_chain", serialized)
self.assertIn("retriever_kwargs", serialized)
serialized["vectordb"]["class"] == "FAISS"
@mock.patch.dict(os.environ, {"COHERE_API_KEY": "api_key"})
def test_load(self):
# Create a sample config dictionary
serialized = self.serializer.save(self.qa)
# Deserialize the serialized object
deserialized = self.serializer.load(serialized, valid_namespaces=["tests"])
# Ensure that the deserialized object is an instance of RetrieverQA
self.assertIsInstance(deserialized, RetrievalQA)
if __name__ == "__main__":
unittest.main()
| [] |
2024-01-10 | oracle/accelerated-data-science | tests~unitary~with_extras~langchain~test_deploy.py | import os
import tempfile
from unittest.mock import MagicMock, patch
from ads.llm.deploy import ChainDeployment
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from tests.unitary.with_extras.langchain.test_guardrails import FakeLLM
class TestLangChainDeploy:
def generate_chain_application(self):
prompt = PromptTemplate.from_template("Tell me a joke about {subject}")
llm_chain = LLMChain(prompt=prompt, llm=FakeLLM(), verbose=True)
return llm_chain
@patch("ads.model.datascience_model.DataScienceModel._to_oci_dsc_model")
def test_initialize(self, mock_to_oci_dsc_model):
chain_application = self.generate_chain_application()
chain_deployment = ChainDeployment(chain_application, auth=MagicMock())
mock_to_oci_dsc_model.assert_called()
assert isinstance(chain_deployment.chain, LLMChain)
@patch("ads.model.runtime.env_info.get_service_packs")
@patch("ads.common.auth.default_signer")
@patch("ads.model.datascience_model.DataScienceModel._to_oci_dsc_model")
def test_prepare(
self, mock_to_oci_dsc_model, mock_default_signer, mock_get_service_packs
):
mock_default_signer.return_value = MagicMock()
inference_conda_env = "oci://service-conda-packs@ociodscdev/service_pack/cpu/General_Machine_Learning_for_CPUs/1.0/mlcpuv1"
inference_python_version = "3.7"
mock_get_service_packs.return_value = (
{
inference_conda_env: ("mlcpuv1", inference_python_version),
},
{
"mlcpuv1": (inference_conda_env, inference_python_version),
},
)
artifact_dir = tempfile.mkdtemp()
chain_application = self.generate_chain_application()
chain_deployment = ChainDeployment(
chain_application, artifact_dir=artifact_dir, auth=MagicMock()
)
mock_to_oci_dsc_model.assert_called()
chain_deployment.prepare(
inference_conda_env="oci://service-conda-packs@ociodscdev/service_pack/cpu/General_Machine_Learning_for_CPUs/1.0/mlcpuv1",
inference_python_version="3.7",
)
mock_get_service_packs.assert_called()
score_py_file_location = os.path.join(chain_deployment.artifact_dir, "score.py")
chain_yaml_file_location = os.path.join(
chain_deployment.artifact_dir, "chain.yaml"
)
runtime_yaml_file_location = os.path.join(
chain_deployment.artifact_dir, "runtime.yaml"
)
assert (
os.path.isfile(score_py_file_location)
and os.path.getsize(score_py_file_location) > 0
)
assert (
os.path.isfile(chain_yaml_file_location)
and os.path.getsize(chain_yaml_file_location) > 0
)
assert (
os.path.isfile(runtime_yaml_file_location)
and os.path.getsize(runtime_yaml_file_location) > 0
)
| [
"Tell me a joke about {subject}"
] |
2024-01-10 | oracle/accelerated-data-science | ads~llm~serializers~runnable_parallel.py | from langchain.schema.runnable import RunnableParallel
from langchain.load.dump import dumpd
from langchain.load.load import load
class RunnableParallelSerializer:
@staticmethod
def type():
return RunnableParallel.__name__
@staticmethod
def load(config: dict, **kwargs):
steps = config["kwargs"]["steps"]
steps = {k: load(v, **kwargs) for k, v in steps.items()}
return RunnableParallel(**steps)
@staticmethod
def save(obj):
serialized = dumpd(obj)
serialized["_type"] = RunnableParallelSerializer.type()
return serialized
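# Round-trip sketch, matching the usage in this repo's unit tests (assumes
# RunnablePassthrough is importable from the installed LangChain version):
#
#     from langchain.schema.runnable import RunnablePassthrough
#     config = RunnableParallelSerializer.save(RunnableParallel(text=RunnablePassthrough()))
#     restored = RunnableParallelSerializer.load(config)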
| [] |
2024-01-10 | oracle/accelerated-data-science | tests~unitary~with_extras~langchain~test_serialization.py | #!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import os
from copy import deepcopy
from unittest import TestCase, mock, SkipTest
from langchain.llms import Cohere
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from ads.llm.serialize import load, dump
from ads.llm import (
GenerativeAI,
GenerativeAIEmbeddings,
ModelDeploymentTGI,
ModelDeploymentVLLM,
)
class ChainSerializationTest(TestCase):
"""Contains tests for chain serialization."""
# LangChain is updating frequently on the module organization,
# mainly affecting the id field of the serialization.
# In the test, we will not check the id field of some components.
# We expect users to use the same LangChain version for serialize and de-serialize
def setUp(self) -> None:
self.maxDiff = None
return super().setUp()
PROMPT_TEMPLATE = "Tell me a joke about {subject}"
COMPARTMENT_ID = "<ocid>"
GEN_AI_KWARGS = {"service_endpoint": "https://endpoint.oraclecloud.com"}
ENDPOINT = "https://modeldeployment.customer-oci.com/ocid/predict"
EXPECTED_LLM_CHAIN_WITH_OCI_MD = {
"lc": 1,
"type": "constructor",
"id": ["langchain", "chains", "llm", "LLMChain"],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"kwargs": {
"input_variables": ["subject"],
"template": "Tell me a joke about {subject}",
"template_format": "f-string",
"partial_variables": {},
},
},
"llm": {
"lc": 1,
"type": "constructor",
"id": ["ads", "llm", "ModelDeploymentVLLM"],
"kwargs": {
"endpoint": "https://modeldeployment.customer-oci.com/ocid/predict",
"model": "my_model",
},
},
},
}
EXPECTED_GEN_AI_LLM = {
"lc": 1,
"type": "constructor",
"id": ["ads", "llm", "GenerativeAI"],
"kwargs": {
"compartment_id": "<ocid>",
"client_kwargs": {"service_endpoint": "https://endpoint.oraclecloud.com"},
},
}
EXPECTED_GEN_AI_EMBEDDINGS = {
"lc": 1,
"type": "constructor",
"id": ["ads", "llm", "GenerativeAIEmbeddings"],
"kwargs": {
"compartment_id": "<ocid>",
"client_kwargs": {"service_endpoint": "https://endpoint.oraclecloud.com"},
},
}
EXPECTED_RUNNABLE_SEQUENCE = {
"lc": 1,
"type": "constructor",
"kwargs": {
"first": {
"lc": 1,
"type": "constructor",
"kwargs": {
"steps": {
"text": {
"lc": 1,
"type": "constructor",
"id": [
"langchain_core",
"runnables",
"RunnablePassthrough",
],
"kwargs": {"func": None, "afunc": None, "input_type": None},
}
}
},
"_type": "RunnableParallel",
},
"middle": [
{
"lc": 1,
"type": "constructor",
"kwargs": {
"input_variables": ["subject"],
"template": "Tell me a joke about {subject}",
"template_format": "f-string",
"partial_variables": {},
},
}
],
"last": {
"lc": 1,
"type": "constructor",
"id": ["ads", "llm", "ModelDeploymentTGI"],
"kwargs": {
"endpoint": "https://modeldeployment.customer-oci.com/ocid/predict"
},
},
},
}
@mock.patch.dict(os.environ, {"COHERE_API_KEY": "api_key"})
def test_llm_chain_serialization_with_cohere(self):
"""Tests serialization of LLMChain with Cohere."""
llm = Cohere()
template = PromptTemplate.from_template(self.PROMPT_TEMPLATE)
llm_chain = LLMChain(prompt=template, llm=llm, verbose=True)
serialized = dump(llm_chain)
# Check the serialized chain
self.assertTrue(serialized.get("verbose"))
self.assertEqual(serialized.get("_type"), "llm_chain")
# Check the serialized prompt template
serialized_prompt = serialized.get("prompt")
self.assertIsInstance(serialized_prompt, dict)
self.assertEqual(serialized_prompt.get("_type"), "prompt")
self.assertEqual(set(serialized_prompt.get("input_variables")), {"subject"})
self.assertEqual(serialized_prompt.get("template"), self.PROMPT_TEMPLATE)
# Check the serialized LLM
serialized_llm = serialized.get("llm")
self.assertIsInstance(serialized_llm, dict)
self.assertEqual(serialized_llm.get("_type"), "cohere")
llm_chain = load(serialized)
self.assertIsInstance(llm_chain, LLMChain)
self.assertIsInstance(llm_chain.prompt, PromptTemplate)
self.assertEqual(llm_chain.prompt.template, self.PROMPT_TEMPLATE)
self.assertIsInstance(llm_chain.llm, Cohere)
self.assertEqual(llm_chain.input_keys, ["subject"])
def test_llm_chain_serialization_with_oci(self):
"""Tests serialization of LLMChain with OCI Gen AI."""
llm = ModelDeploymentVLLM(endpoint=self.ENDPOINT, model="my_model")
template = PromptTemplate.from_template(self.PROMPT_TEMPLATE)
llm_chain = LLMChain(prompt=template, llm=llm)
serialized = dump(llm_chain)
# Do not check the ID field.
expected = deepcopy(self.EXPECTED_LLM_CHAIN_WITH_OCI_MD)
expected["kwargs"]["prompt"]["id"] = serialized["kwargs"]["prompt"]["id"]
self.assertEqual(serialized, expected)
llm_chain = load(serialized)
self.assertIsInstance(llm_chain, LLMChain)
self.assertIsInstance(llm_chain.prompt, PromptTemplate)
self.assertEqual(llm_chain.prompt.template, self.PROMPT_TEMPLATE)
self.assertIsInstance(llm_chain.llm, ModelDeploymentVLLM)
self.assertEqual(llm_chain.llm.endpoint, self.ENDPOINT)
self.assertEqual(llm_chain.llm.model, "my_model")
self.assertEqual(llm_chain.input_keys, ["subject"])
def test_oci_gen_ai_serialization(self):
"""Tests serialization of OCI Gen AI LLM."""
try:
llm = GenerativeAI(
compartment_id=self.COMPARTMENT_ID,
client_kwargs=self.GEN_AI_KWARGS,
)
except ImportError as ex:
raise SkipTest("OCI SDK does not support Generative AI.") from ex
serialized = dump(llm)
self.assertEqual(serialized, self.EXPECTED_GEN_AI_LLM)
llm = load(serialized)
self.assertIsInstance(llm, GenerativeAI)
self.assertEqual(llm.compartment_id, self.COMPARTMENT_ID)
def test_gen_ai_embeddings_serialization(self):
"""Tests serialization of OCI Gen AI embeddings."""
try:
embeddings = GenerativeAIEmbeddings(
compartment_id=self.COMPARTMENT_ID, client_kwargs=self.GEN_AI_KWARGS
)
except ImportError as ex:
raise SkipTest("OCI SDK does not support Generative AI.") from ex
serialized = dump(embeddings)
self.assertEqual(serialized, self.EXPECTED_GEN_AI_EMBEDDINGS)
embeddings = load(serialized)
self.assertIsInstance(embeddings, GenerativeAIEmbeddings)
self.assertEqual(embeddings.compartment_id, self.COMPARTMENT_ID)
def test_runnable_sequence_serialization(self):
"""Tests serialization of runnable sequence."""
map_input = RunnableParallel(text=RunnablePassthrough())
template = PromptTemplate.from_template(self.PROMPT_TEMPLATE)
llm = ModelDeploymentTGI(endpoint=self.ENDPOINT)
chain = map_input | template | llm
serialized = dump(chain)
self.assertEqual(serialized.get("type"), "constructor")
self.assertNotIn("_type", serialized)
kwargs = serialized.get("kwargs")
self.assertIsInstance(kwargs, dict)
element_1 = kwargs.get("first")
self.assertEqual(element_1.get("_type"), "RunnableParallel")
step = element_1.get("kwargs").get("steps").get("text")
self.assertEqual(step.get("id")[-1], "RunnablePassthrough")
element_2 = kwargs.get("middle")[0]
self.assertNotIn("_type", element_2)
self.assertEqual(element_2.get("kwargs").get("template"), self.PROMPT_TEMPLATE)
self.assertEqual(element_2.get("kwargs").get("input_variables"), ["subject"])
element_3 = kwargs.get("last")
self.assertNotIn("_type", element_3)
self.assertEqual(element_3.get("id"), ["ads", "llm", "ModelDeploymentTGI"])
self.assertEqual(
element_3.get("kwargs"),
{"endpoint": "https://modeldeployment.customer-oci.com/ocid/predict"},
)
chain = load(serialized)
self.assertEqual(len(chain.steps), 3)
self.assertIsInstance(chain.steps[0], RunnableParallel)
self.assertEqual(
list(chain.steps[0].dict().get("steps").keys()),
["text"],
)
self.assertIsInstance(chain.steps[1], PromptTemplate)
self.assertIsInstance(chain.steps[2], ModelDeploymentTGI)
self.assertEqual(chain.steps[2].endpoint, self.ENDPOINT)
| [
"Tell me a joke about {subject}"
] |
2024-01-10 | oracle/accelerated-data-science | tests~unitary~with_extras~langchain~test_guardrails.py | #!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import json
import os
import tempfile
from typing import Any, List, Dict, Mapping, Optional
from unittest import TestCase
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.prompts import PromptTemplate
from langchain.schema.runnable import RunnableMap, RunnablePassthrough
from ads.llm.guardrails.huggingface import HuggingFaceEvaluation
from ads.llm.guardrails.base import BlockedByGuardrail, GuardrailIO
from ads.llm.chain import GuardrailSequence
from ads.llm.serialize import load, dump
class FakeLLM(LLM):
"""Fake LLM for testing purpose."""
mapping: Dict[str, str] = None
"""Mapping prompts to responses.
If prompt is found in the mapping, the corresponding response will be returned.
Otherwise, the prompt will be returned as is.
"""
@property
def _llm_type(self) -> str:
return "custom"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if stop is not None:
raise ValueError("stop kwargs are not permitted.")
if self.mapping:
return self.mapping.get(prompt, prompt)
return prompt
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
@classmethod
def is_lc_serializable(cls) -> bool:
"""This class is LangChain serializable."""
return True
class GuardrailTestsBase(TestCase):
"""Base class for guardrail tests."""
TOXIC_CONTENT = "Women is not capable of this job."
LOAD_ARGS = {"cache_dir": os.path.expanduser("~/.cache/huggingface/evaluate")}
FAKE_LLM = FakeLLM()
def assert_before_and_after_serialization(self, test_fn, chain):
"""Runs test function with chain, serialize and deserialize it, then run the test function again."""
test_fn(chain)
serialized = dump(chain)
chain = load(serialized, valid_namespaces=["tests"])
test_fn(chain)
class ToxicityGuardrailTests(GuardrailTestsBase):
"""Contains tests for the toxicity guardrail."""
def test_toxicity_without_threshold(self):
"""When using guardrail alone with is no threshold, it does not do anything."""
toxicity = HuggingFaceEvaluation(path="toxicity", load_args=self.LOAD_ARGS)
chain = self.FAKE_LLM | toxicity
def test_fn(chain):
output = chain.invoke(self.TOXIC_CONTENT)
self.assertEqual(output, self.TOXIC_CONTENT)
self.assert_before_and_after_serialization(test_fn, chain)
def test_toxicity_with_threshold(self):
"""Once we set a threshold, an exception will be raise for toxic output."""
toxicity = HuggingFaceEvaluation(
path="toxicity", threshold=0.2, load_args=self.LOAD_ARGS
)
chain = self.FAKE_LLM | toxicity
def test_fn(chain):
with self.assertRaises(BlockedByGuardrail):
chain.invoke(self.TOXIC_CONTENT)
self.assert_before_and_after_serialization(test_fn, chain)
def test_toxicity_without_exception(self):
"""Guardrail can return the custom message instead of raising an exception."""
toxicity = HuggingFaceEvaluation(
path="toxicity",
threshold=0.2,
raise_exception=False,
custom_msg="Sorry, but let's discuss something else.",
load_args=self.LOAD_ARGS,
)
chain = self.FAKE_LLM | toxicity
def test_fn(chain):
output = chain.invoke(self.TOXIC_CONTENT)
self.assertEqual(output, toxicity.custom_msg)
self.assert_before_and_after_serialization(test_fn, chain)
def test_toxicity_return_metrics(self):
"""Return the toxicity metrics"""
toxicity = HuggingFaceEvaluation(
path="toxicity", return_metrics=True, load_args=self.LOAD_ARGS
)
chain = self.FAKE_LLM | toxicity
def test_fn(chain):
output = chain.invoke(self.TOXIC_CONTENT)
self.assertIsInstance(output, dict)
self.assertEqual(output["output"], self.TOXIC_CONTENT)
self.assertGreater(output["metrics"]["toxicity"][0], 0.2)
self.assert_before_and_after_serialization(test_fn, chain)
class GuardrailSequenceTests(GuardrailTestsBase):
"""Contains tests for GuardrailSequence."""
def test_guardrail_sequence_with_template_and_toxicity(self):
"""Tests a guardrail sequence with template and toxicity evaluation."""
template = PromptTemplate.from_template("Tell me a joke about {subject}")
map_input = RunnableMap(subject=RunnablePassthrough())
toxicity = HuggingFaceEvaluation(
path="toxicity", load_args=self.LOAD_ARGS, select="min"
)
chain = GuardrailSequence.from_sequence(
map_input | template | self.FAKE_LLM | toxicity
)
def test_fn(chain: GuardrailSequence):
output = chain.run("cats", num_generations=5)
self.assertIsInstance(output, GuardrailIO)
self.assertIsInstance(output.data, str)
self.assertEqual(output.data, "Tell me a joke about cats")
self.assertIsInstance(output.info, list)
self.assertEqual(len(output.info), len(chain.steps))
self.assert_before_and_after_serialization(test_fn, chain)
def test_guardrail_sequence_with_filtering(self):
message = "Let's talk something else."
toxicity = HuggingFaceEvaluation(
path="toxicity",
load_args=self.LOAD_ARGS,
threshold=0.5,
custom_msg=message,
)
chain = GuardrailSequence.from_sequence(self.FAKE_LLM | toxicity)
def test_fn(chain: GuardrailSequence):
output = chain.run(self.TOXIC_CONTENT)
self.assertIsInstance(output, GuardrailIO)
self.assertIsInstance(output.data, str)
self.assertEqual(output.data, message)
self.assertIsInstance(output.info, list)
self.assertEqual(len(output.info), len(chain.steps))
self.assert_before_and_after_serialization(test_fn, chain)
def test_save_to_file(self):
"""Tests saving to file."""
message = "Let's talk something else."
toxicity = HuggingFaceEvaluation(
path="toxicity",
load_args=self.LOAD_ARGS,
threshold=0.5,
custom_msg=message,
)
chain = GuardrailSequence.from_sequence(self.FAKE_LLM | toxicity)
try:
temp = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
temp.close()
with self.assertRaises(FileExistsError):
serialized = chain.save(temp.name)
with self.assertRaises(ValueError):
chain.save("abc.html")
serialized = chain.save(temp.name, overwrite=True)
with open(temp.name, "r", encoding="utf-8") as f:
self.assertEqual(json.load(f), serialized)
finally:
os.unlink(temp.name)
| [
"Tell me a joke about {subject}"
] |
2024-01-10 | oracle/accelerated-data-science | ads~llm~langchain~plugins~llm_md.py | #!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import logging
from typing import Any, Dict, List, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.pydantic_v1 import root_validator
from langchain.utils import get_from_dict_or_env
from oci.auth import signers
from ads.llm.langchain.plugins.base import BaseLLM
from ads.llm.langchain.plugins.contant import (
DEFAULT_CONTENT_TYPE_JSON,
DEFAULT_TIME_OUT,
)
logger = logging.getLogger(__name__)
class ModelDeploymentLLM(BaseLLM):
"""Base class for LLM deployed on OCI Model Deployment."""
endpoint: str = ""
"""The uri of the endpoint from the deployed Model Deployment model."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"
(the one with the highest log probability per token).
"""
@root_validator()
def validate_environment( # pylint: disable=no-self-argument
cls, values: Dict
) -> Dict:
"""Fetch endpoint from environment variable or arguments."""
values["endpoint"] = get_from_dict_or_env(
values,
"endpoint",
"OCI_LLM_ENDPOINT",
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Default parameters for the model."""
raise NotImplementedError()
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint": self.endpoint},
**self._default_params,
}
def _construct_json_body(self, prompt, params):
"""Constructs the request body as a dictionary (JSON)."""
raise NotImplementedError
def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) -> dict:
"""Combines the invocation parameters with default parameters."""
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop"] = self.stop
elif stop is not None:
params["stop"] = stop
else:
# Don't set "stop" in param as None. It should be a list.
params["stop"] = []
return {**params, **kwargs}
def _process_response(self, response_json: dict):
return response_json
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to OCI Data Science Model Deployment endpoint.
Parameters
----------
prompt (str):
The prompt to pass into the model.
stop (List[str], Optional):
List of stop words to use when generating.
Returns
-------
The string generated by the model.
Example
-------
.. code-block:: python
response = oci_md("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
body = self._construct_json_body(prompt, params)
self._print_request(prompt, params)
response = self.send_request(data=body, endpoint=self.endpoint)
completion = self._process_response(response)
self._print_response(completion, response)
return completion
def send_request(
self,
data,
endpoint: str,
header: dict = None,
**kwargs,
) -> Dict:
"""Sends request to the model deployment endpoint.
Parameters
----------
data (Json serializable):
data need to be sent to the endpoint.
endpoint (str):
The model HTTP endpoint.
header (dict, optional):
A dictionary of HTTP headers to send to the specified url. Defaults to {}.
Raises
------
Exception:
Raise when invoking fails.
Returns
-------
A JSON representation of a requests.Response object.
"""
if not header:
header = {}
header["Content-Type"] = (
header.pop("content_type", DEFAULT_CONTENT_TYPE_JSON)
or DEFAULT_CONTENT_TYPE_JSON
)
timeout = kwargs.pop("timeout", DEFAULT_TIME_OUT)
request_kwargs = {"json": data}
request_kwargs["headers"] = header
signer = self.auth.get("signer")
attempts = 0
while attempts < 2:
request_kwargs["auth"] = signer
response = requests.post(
endpoint, timeout=timeout, **request_kwargs, **kwargs
)
if response.status_code == 401 and self.is_principal_signer(signer):
signer.refresh_security_token()
attempts += 1
continue
break
try:
response.raise_for_status()
response_json = response.json()
except Exception:
logger.error(
"DEBUG INFO: request_kwargs=%s, status_code=%s, content=%s",
request_kwargs,
response.status_code,
response.content,
)
raise
return response_json
@staticmethod
def is_principal_signer(signer):
"""Checks if the signer is instance principal or resource principal signer."""
if (
isinstance(signer, signers.InstancePrincipalsSecurityTokenSigner)
or isinstance(signer, signers.ResourcePrincipalsFederationSigner)
or isinstance(signer, signers.EphemeralResourcePrincipalSigner)
or isinstance(signer, signers.EphemeralResourcePrincipalV21Signer)
or isinstance(signer, signers.NestedResourcePrincipals)
or isinstance(signer, signers.OkeWorkloadIdentityResourcePrincipalSigner)
):
return True
else:
return False
class ModelDeploymentTGI(ModelDeploymentLLM):
"""OCI Data Science Model Deployment TGI Endpoint.
Example
-------
.. code-block:: python
from ads.llm import ModelDeploymentTGI
oci_md = ModelDeploymentTGI(endpoint="<url_of_model_deployment_endpoint>")
"""
do_sample: bool = True
"""if set to True, this parameter enables decoding strategies such as
multi-nominal sampling, beam-search multi-nominal sampling, Top-K sampling and Top-p sampling.
"""
watermark = True
"""Watermarking with `A Watermark for Large Language Models <https://arxiv.org/abs/2301.10226>`_.
Defaults to True."""
return_full_text = False
"""Whether to prepend the prompt to the generated text. Defaults to False."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_model_deployment_tgi_endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for invoking OCI model deployment TGI endpoint."""
return {
"best_of": self.best_of,
"max_new_tokens": self.max_tokens,
"temperature": self.temperature,
"top_k": self.k
if self.k > 0
else None, # `top_k` must be strictly positive'
"top_p": self.p,
"do_sample": self.do_sample,
"return_full_text": self.return_full_text,
"watermark": self.watermark,
}
def _construct_json_body(self, prompt, params):
return {
"inputs": prompt,
"parameters": params,
}
def _process_response(self, response_json: dict):
return str(response_json.get("generated_text", response_json))
class ModelDeploymentVLLM(ModelDeploymentLLM):
"""VLLM deployed on OCI Model Deployment"""
model: str
"""Name of the model."""
n: int = 1
"""Number of output sequences to return for the given prompt."""
k: int = -1
"""Number of most likely tokens to consider at each step."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens. Between 0 and 1."""
use_beam_search: bool = False
"""Whether to use beam search instead of sampling."""
ignore_eos: bool = False
"""Whether to ignore the EOS token and continue generating tokens after
the EOS token is generated."""
logprobs: Optional[int] = None
"""Number of log probabilities to return per output token."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "oci_model_deployment_vllm_endpoint"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling vllm."""
return {
"n": self.n,
"best_of": self.best_of,
"max_tokens": self.max_tokens,
"top_k": self.k,
"top_p": self.p,
"temperature": self.temperature,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"stop": self.stop,
"ignore_eos": self.ignore_eos,
"use_beam_search": self.use_beam_search,
"logprobs": self.logprobs,
"model": self.model,
}
def _construct_json_body(self, prompt, params):
return {
"prompt": prompt,
**params,
}
def _process_response(self, response_json: dict):
return response_json["choices"][0]["text"]
| [] |
2024-01-10 | oracle/accelerated-data-science | ads~llm~langchain~plugins~llm_gen_ai.py | #!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import logging
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from ads.llm.langchain.plugins.base import BaseLLM, GenerativeAiClientModel
from ads.llm.langchain.plugins.contant import *
logger = logging.getLogger(__name__)
class GenerativeAI(GenerativeAiClientModel, BaseLLM):
"""GenerativeAI Service.
To use, you should have the ``oci`` python package installed.
Example
-------
.. code-block:: python
from ads.llm import GenerativeAI
gen_ai = GenerativeAI(compartment_id="ocid1.compartment.oc1..<ocid>")
"""
task: str = "text_generation"
"""Indicates the task."""
model: Optional[str] = "cohere.command"
"""Model name to use."""
frequency_penalty: float = None
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
presence_penalty: float = None
"""Penalizes repeated tokens. Between 0 and 1."""
truncate: Optional[str] = None
"""Specify how the client handles inputs longer than the maximum token."""
length: str = "AUTO"
"""Indicates the approximate length of the summary. """
format: str = "PARAGRAPH"
"""Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points."""
extractiveness: str = "AUTO"
"""Controls how close to the original text the summary is. High extractiveness summaries will lean towards reusing sentences verbatim, while low extractiveness summaries will tend to paraphrase more."""
additional_command: str = ""
"""A free-form instruction for modifying how the summaries get generated. """
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
**{
"model": self.model,
"task": self.task,
"client_kwargs": self.client_kwargs,
"endpoint_kwargs": self.endpoint_kwargs,
},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "GenerativeAI"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OCIGenerativeAI API."""
# This property is used by _identifying_params(), which then used for serialization
# All parameters returning here should be JSON serializable.
return (
{
"compartment_id": self.compartment_id,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_k": self.k,
"top_p": self.p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"truncate": self.truncate,
}
if self.task == Task.TEXT_GENERATION
else {
"compartment_id": self.compartment_id,
"temperature": self.temperature,
"length": self.length,
"format": self.format,
"extractiveness": self.extractiveness,
"additional_command": self.additional_command,
}
)
def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) -> dict:
params = self._default_params
if self.task == Task.SUMMARY_TEXT:
return {**params}
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
return {**params, **kwargs}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
):
"""Call out to GenerativeAI's generate endpoint.
Parameters
----------
prompt (str):
The prompt to pass into the model.
stop (List[str], Optional):
List of stop words to use when generating.
Returns
-------
The string generated by the model.
Example
-------
.. code-block:: python
response = gen_ai("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
self._print_request(prompt, params)
try:
response = (
self.completion_with_retry(prompts=[prompt], **params)
if self.task == Task.TEXT_GENERATION
else self.completion_with_retry(input=prompt, **params)
)
except Exception:
logger.error(
"Error occur when invoking oci service api."
"DEBUG INTO: task=%s, params=%s, prompt=%s",
self.task,
params,
prompt,
)
raise
completion = self._process_response(response, params.get("num_generations", 1))
self._print_response(completion, response)
return completion
def _process_response(self, response: Any, num_generations: int = 1) -> str:
if self.task == Task.SUMMARY_TEXT:
return response.data.summary
return (
response.data.generated_texts[0][0].text
if num_generations == 1
else [gen.text for gen in response.data.generated_texts[0]]
)
def completion_with_retry(self, **kwargs: Any) -> Any:
from oci.generative_ai.models import (
GenerateTextDetails,
OnDemandServingMode,
SummarizeTextDetails,
)
# TODO: Add retry logic for OCI
# Convert the ``model`` parameter to OCI ``ServingMode``
# Note that "ServingMode` is not JSON serializable.
kwargs["serving_mode"] = OnDemandServingMode(model_id=self.model)
if self.task == Task.TEXT_GENERATION:
return self.client.generate_text(
GenerateTextDetails(**kwargs), **self.endpoint_kwargs
)
else:
return self.client.summarize_text(
SummarizeTextDetails(**kwargs), **self.endpoint_kwargs
)
def batch_completion(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
num_generations: int = 1,
**kwargs: Any,
) -> List[str]:
"""Generates multiple completion for the given prompt.
Parameters
----------
prompt (str):
The prompt to pass into the model.
stop: (List[str], optional):
Optional list of stop words to use when generating. Defaults to None.
num_generations (int, optional):
Number of completions aims to get. Defaults to 1.
Raises
------
NotImplementedError
Raise when invoking batch_completion under summarization task.
Returns
-------
List[str]
List of multiple completions.
Example
-------
.. code-block:: python
responses = gen_ai.batch_completion("Tell me a joke.", num_generations=5)
"""
if self.task == Task.SUMMARY_TEXT:
raise NotImplementedError(
f"task={Task.SUMMARY_TEXT} does not support batch_completion. "
)
return self._call(
prompt=prompt,
stop=stop,
run_manager=run_manager,
num_generations=num_generations,
**kwargs,
)
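# Summarization sketch (assumes access to the OCI Generative AI service; the
# compartment OCID and input text are placeholders):
#
#     summarizer = GenerativeAI(
#         compartment_id="ocid1.compartment.oc1..<ocid>",
#         task=Task.SUMMARY_TEXT,
#     )
#     summary = summarizer("<long text to summarize>")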
| [] |
2024-01-10 | oracle/accelerated-data-science | ads~llm~guardrails~huggingface.py | #!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import evaluate
from langchain.pydantic_v1 import root_validator
from .base import Guardrail
class HuggingFaceEvaluation(Guardrail):
path: str = ""
load_args: dict = {}
compute_args: dict = {}
_evaluator: evaluate.EvaluationModule = ""
@root_validator(skip_on_failure=True)
def load_model(cls, values):
"""Loads the model from Huggingface."""
if values.get("path"):
path = values["path"]
else:
path = values["load_args"].get("path")
values["path"] = path
if not path:
raise NotImplementedError("Please provide path in load_args.")
if not values.get("name"):
values["name"] = path
return values
def compute(self, data=None, **kwargs):
if not self._evaluator:
load_args = {"path": self.path}
load_args.update(self.load_args)
self._evaluator = evaluate.load(**load_args)
return self._evaluator.compute(predictions=data, **self.compute_args, **kwargs)
@property
def metric_key(self):
return self.path
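# --- Usage sketch (editorial addition, not part of the original module) ------------------
# A hedged example of wrapping a HuggingFace `evaluate` metric in this guardrail. The
# "toxicity" measurement and the load/compute arguments are illustrative; how the guardrail
# is chained with an LLM depends on the Guardrail base class defined in `.base`.
if __name__ == "__main__":
    toxicity = HuggingFaceEvaluation(
        path="toxicity",
        load_args={"module_type": "measurement"},  # assumption: forwarded to evaluate.load()
    )
    print(toxicity.compute(data=["This is a friendly sentence."]))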
| [] |
2024-01-10 | oracle/accelerated-data-science | ads~llm~__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
try:
import langchain
from ads.llm.langchain.plugins.llm_gen_ai import GenerativeAI
from ads.llm.langchain.plugins.llm_md import ModelDeploymentTGI
from ads.llm.langchain.plugins.llm_md import ModelDeploymentVLLM
from ads.llm.langchain.plugins.embeddings import GenerativeAIEmbeddings
except ImportError as ex:
if ex.name == "langchain":
raise ImportError(
f"{ex.msg}\nPlease install/update langchain with `pip install langchain -U`"
) from ex
raise ex
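# --- Usage sketch (editorial comment, not part of the original module) -------------------
# The re-exports above give the public entry points of ads.llm. A typical, hedged example
# (endpoint URL and arguments are placeholders):
#
#   from ads.llm import GenerativeAI, ModelDeploymentTGI, GenerativeAIEmbeddings
#
#   llm = ModelDeploymentTGI(endpoint="https://<model-deployment-url>/predict")
#   print(llm("Translate 'hello' to French."))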
| [] |
2024-01-10 | oracle/accelerated-data-science | ads~llm~serialize.py | #!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2023 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import json
import os
import tempfile
from typing import Any, Dict, List, Optional
import fsspec
import yaml
from langchain import llms
from langchain.chains import RetrievalQA
from langchain.chains.loading import load_chain_from_config
from langchain.llms import loading
from langchain.load.load import Reviver
from langchain.load.serializable import Serializable
from langchain.schema.runnable import RunnableParallel
from ads.common.auth import default_signer
from ads.common.object_storage_details import ObjectStorageDetails
from ads.llm import GenerativeAI, ModelDeploymentTGI, ModelDeploymentVLLM
from ads.llm.chain import GuardrailSequence
from ads.llm.guardrails.base import CustomGuardrailBase
from ads.llm.serializers.runnable_parallel import RunnableParallelSerializer
from ads.llm.serializers.retrieval_qa import RetrievalQASerializer
# This is a temporary solution for supporting custom LLMs in the legacy load_chain.
__lc_llm_dict = llms.get_type_to_cls_dict()
__lc_llm_dict[GenerativeAI.__name__] = lambda: GenerativeAI
__lc_llm_dict[ModelDeploymentTGI.__name__] = lambda: ModelDeploymentTGI
__lc_llm_dict[ModelDeploymentVLLM.__name__] = lambda: ModelDeploymentVLLM
def __new_type_to_cls_dict():
return __lc_llm_dict
llms.get_type_to_cls_dict = __new_type_to_cls_dict
loading.get_type_to_cls_dict = __new_type_to_cls_dict
# Mapping class to custom serialization functions
custom_serialization = {
GuardrailSequence: GuardrailSequence.save,
CustomGuardrailBase: CustomGuardrailBase.save,
RunnableParallel: RunnableParallelSerializer.save,
RetrievalQA: RetrievalQASerializer.save,
}
# Mapping _type to custom deserialization functions
# Note that the load function should take **kwargs
custom_deserialization = {
GuardrailSequence.type(): GuardrailSequence.load,
CustomGuardrailBase.type(): CustomGuardrailBase.load,
RunnableParallelSerializer.type(): RunnableParallelSerializer.load,
RetrievalQASerializer.type(): RetrievalQASerializer.load,
}
def load(
obj: Any,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
**kwargs,
) -> Any:
"""Revive an ADS/LangChain class from a JSON object. Use this if you already
have a parsed JSON object, e.g. from `json.load` or `json.loads`.
This is a drop-in replacement for load() in langchain.load.load that also supports loading compatible classes from ADS.
Args:
obj: The object to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
# Add ADS as valid namespace
if not valid_namespaces:
valid_namespaces = []
if "ads" not in valid_namespaces:
valid_namespaces.append("ads")
reviver = Reviver(secrets_map, valid_namespaces)
def _load(obj: Any) -> Any:
if isinstance(obj, dict):
if "_type" in obj and obj["_type"] in custom_deserialization:
if valid_namespaces:
kwargs["valid_namespaces"] = valid_namespaces
if secrets_map:
kwargs["secret_map"] = secrets_map
return custom_deserialization[obj["_type"]](obj, **kwargs)
# Need to revive leaf nodes before reviving this node
loaded_obj = {k: _load(v) for k, v in obj.items()}
return reviver(loaded_obj)
if isinstance(obj, list):
return [_load(o) for o in obj]
return obj
if isinstance(obj, dict) and "_type" in obj:
obj_type = obj["_type"]
# Check if the object has custom load function.
if obj_type in custom_deserialization:
if valid_namespaces:
kwargs["valid_namespaces"] = valid_namespaces
if secrets_map:
kwargs["secret_map"] = secrets_map
return custom_deserialization[obj_type](obj, **kwargs)
# Legacy chain
return load_chain_from_config(obj, **kwargs)
return _load(obj)
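# Example (editorial comment, not executed): reviving a serialized chain dictionary.
#
#   revived = load(json.loads(chain_json), valid_namespaces=["my_project"])
#
# Objects whose "_type" is registered in custom_deserialization (e.g. GuardrailSequence)
# are dispatched to their own loaders; everything else falls back to LangChain's Reviver.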
def load_from_yaml(
uri: str,
*,
secrets_map: Optional[Dict[str, str]] = None,
valid_namespaces: Optional[List[str]] = None,
**kwargs,
):
"""Revive an ADS/LangChain class from a YAML file."""
class _SafeLoaderIgnoreUnknown(yaml.SafeLoader):
"""Loader ignoring unknown tags in YAML"""
def ignore_unknown(self, node):
"""Ignores unknown tags in YAML"""
return node.value[0].value
_SafeLoaderIgnoreUnknown.add_constructor(
None, _SafeLoaderIgnoreUnknown.ignore_unknown
)
storage_options = default_signer() if ObjectStorageDetails.is_oci_path(uri) else {}
with fsspec.open(uri, **storage_options) as f:
config = yaml.load(f, Loader=_SafeLoaderIgnoreUnknown)
return load(
config, secrets_map=secrets_map, valid_namespaces=valid_namespaces, **kwargs
)
def default(obj: Any) -> Any:
"""Calls the to_json() method to serialize the object.
Parameters
----------
obj : Any
The object to be serialized.
Returns
-------
Any
The serialized representation of the object.
Raises
------
TypeError
If the object is not LangChain serializable.
"""
for super_class, save_fn in custom_serialization.items():
if isinstance(obj, super_class):
return save_fn(obj)
if isinstance(obj, Serializable) and obj.is_lc_serializable():
return obj.to_json()
raise TypeError(f"Serialization of {type(obj)} is not supported.")
def __save(obj):
"""Calls the legacy save method to save the object to temp json
then load it into a dictionary.
"""
try:
temp_file = tempfile.NamedTemporaryFile(
mode="w", encoding="utf-8", suffix=".json", delete=False
)
temp_file.close()
obj.save(temp_file.name)
with open(temp_file.name, "r", encoding="utf-8") as f:
return json.load(f)
finally:
os.unlink(temp_file.name)
def dump(obj: Any) -> Dict[str, Any]:
"""Return a json dict representation of an object.
This is a drop in replacement of the dumpd() in langchain.load.dump
to support serializing legacy LangChain chain and ADS GuardrailSequence.
This method will raise TypeError when the object is not serializable.
"""
for super_class, save_fn in custom_serialization.items():
if isinstance(obj, super_class):
return save_fn(obj)
if (
isinstance(obj, Serializable)
and not obj.is_lc_serializable()
and hasattr(obj, "save")
):
# The object is not LangChain-serializable (is_lc_serializable() returns False).
# However, it supports the legacy save() method.
return __save(obj)
# The object is LangChain-serializable (is_lc_serializable() returns True).
# However, some of its properties may not be serializable.
# Here we try to dump the object and fall back to the save() method
# if there is an error.
try:
return json.loads(json.dumps(obj, default=default))
except TypeError as ex:
if isinstance(obj, Serializable) and hasattr(obj, "save"):
return __save(obj)
raise ex
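# --- Usage sketch (editorial addition, not part of the original module) ------------------
# A hedged, end-to-end example of the dump()/load_from_yaml() round trip defined above.
# The model deployment endpoint is a placeholder; any serializable chain (or an ADS
# GuardrailSequence) would work the same way.
if __name__ == "__main__":
    from langchain.chains import LLMChain
    from langchain.prompts import PromptTemplate

    llm = ModelDeploymentTGI(endpoint="https://<model-deployment-url>/predict")
    chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Say {word}"))

    serialized = dump(chain)  # plain dict, JSON/YAML friendly
    with open("chain.yaml", "w", encoding="utf-8") as f:
        yaml.safe_dump(serialized, f)

    revived = load_from_yaml("chain.yaml")
    print(type(revived))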
| [] |
2024-01-10 | Ulysses0817/ChuanhuChatGPT | modules~models~base_model.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
import logging
import json
import commentjson as cjson
import os
import sys
import requests
import urllib3
import traceback
import pathlib
from tqdm import tqdm
import colorama
from duckduckgo_search import ddg
import asyncio
import aiohttp
from enum import Enum
from ..presets import *
from ..llama_func import *
from ..utils import *
from .. import shared
from ..config import retrieve_proxy
class ModelType(Enum):
Unknown = -1
OpenAI = 0
ChatGLM = 1
LLaMA = 2
XMChat = 3
StableLM = 4
MOSS = 5
YuanAI = 6
@classmethod
def get_type(cls, model_name: str):
model_type = None
model_name_lower = model_name.lower()
if "gpt" in model_name_lower:
model_type = ModelType.OpenAI
elif "chatglm" in model_name_lower:
model_type = ModelType.ChatGLM
elif "llama" in model_name_lower or "alpaca" in model_name_lower:
model_type = ModelType.LLaMA
elif "xmchat" in model_name_lower:
model_type = ModelType.XMChat
elif "stablelm" in model_name_lower:
model_type = ModelType.StableLM
elif "moss" in model_name_lower:
model_type = ModelType.MOSS
elif "yuanai" in model_name_lower:
model_type = ModelType.YuanAI
else:
model_type = ModelType.Unknown
return model_type
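# Illustrative mapping (editorial comment): ModelType.get_type("gpt-3.5-turbo") -> OpenAI,
# "chatglm-6b" -> ChatGLM, "llama-7b" or "alpaca" -> LLaMA, "xmchat" -> XMChat,
# "stablelm-tuned-alpha" -> StableLM, "moss-moon" -> MOSS, "yuanai-1.0" -> YuanAI,
# anything unrecognised -> Unknown.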
class BaseLLMModel:
def __init__(
self,
model_name,
system_prompt="",
temperature=1.0,
top_p=1.0,
n_choices=1,
stop=None,
max_generation_token=None,
presence_penalty=0,
frequency_penalty=0,
logit_bias=None,
user="",
) -> None:
self.history = []
self.all_token_counts = []
self.model_name = model_name
self.model_type = ModelType.get_type(model_name)
try:
self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
except KeyError:
self.token_upper_limit = DEFAULT_TOKEN_LIMIT
self.interrupted = False
self.system_prompt = system_prompt
self.api_key = None
self.need_api_key = False
self.single_turn = False
self.temperature = temperature
self.top_p = top_p
self.n_choices = n_choices
self.stop_sequence = stop
self.max_generation_token = None
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.logit_bias = logit_bias
self.set_user_identifier(user)
def get_answer_stream_iter(self):
"""stream predict, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
should return a generator, each time give the next word (str) in the answer
"""
logging.warning("stream predict not implemented, using at once predict instead")
response, _ = self.get_answer_at_once()
yield response
def get_answer_at_once(self):
"""predict at once, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
Should return:
the answer (str)
total token count (int)
"""
logging.warning("at once predict not implemented, using stream predict instead")
response_iter = self.get_answer_stream_iter()
count = 0
for response in response_iter:
count += 1
return response, sum(self.all_token_counts) + count
def billing_info(self):
"""get billing infomation, inplement if needed"""
logging.warning("billing info not implemented, using default")
return BILLING_NOT_APPLICABLE_MSG
def count_token(self, user_input):
"""get token count from input, implement if needed"""
# logging.warning("token count not implemented, using default")
return len(user_input)
def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
def get_return_value():
return chatbot, status_text
status_text = i18n("开始实时传输回答……")
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
logging.debug(f"输入token计数: {user_token_count}")
stream_iter = self.get_answer_stream_iter()
partial_text = ""
for partial_text in stream_iter:
chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
self.all_token_counts[-1] += 1
status_text = self.token_message()
yield get_return_value()
if self.interrupted:
self.recover()
break
if len(partial_text.strip()) > 0:
self.history.append(construct_assistant(partial_text))
else:
self.history.pop()
def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
if fake_input is not None:
user_token_count = self.count_token(fake_input)
else:
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
ai_reply, total_token_count = self.get_answer_at_once()
self.history.append(construct_assistant(ai_reply))
if fake_input is not None:
self.history[-2] = construct_user(fake_input)
chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
if fake_input is not None:
self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))
else:
self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)
status_text = self.token_message()
return chatbot, status_text
def handle_file_upload(self, files, chatbot):
"""if the model accepts multi modal input, implement this function"""
status = gr.Markdown.update()
if files:
construct_index(self.api_key, file_src=files)
status = "索引构建完成"
return gr.Files.update(), chatbot, status
def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
fake_inputs = None
display_append = []
limited_context = False
fake_inputs = real_inputs
if files:
from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
from llama_index.indices.query.schema import QueryBundle
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.chat_models import ChatOpenAI
from llama_index import (
GPTSimpleVectorIndex,
ServiceContext,
LangchainEmbedding,
OpenAIEmbedding,
)
limited_context = True
msg = "加载索引中……"
logging.info(msg)
# yield chatbot + [(inputs, "")], msg
index = construct_index(self.api_key, file_src=files)
assert index is not None, "获取索引失败"
msg = "索引获取成功,生成回答中……"
logging.info(msg)
if local_embedding or self.model_type != ModelType.OpenAI:
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2"))
else:
embed_model = OpenAIEmbedding()
# yield chatbot + [(inputs, "")], msg
with retrieve_proxy():
prompt_helper = PromptHelper(
max_input_size=4096,
num_output=5,
max_chunk_overlap=20,
chunk_size_limit=600,
)
from llama_index import ServiceContext
service_context = ServiceContext.from_defaults(
prompt_helper=prompt_helper, embed_model=embed_model
)
query_object = GPTVectorStoreIndexQuery(
index.index_struct,
service_context=service_context,
similarity_top_k=5,
vector_store=index._vector_store,
docstore=index._docstore,
response_synthesizer=None
)
query_bundle = QueryBundle(real_inputs)
nodes = query_object.retrieve(query_bundle)
reference_results = [n.node.text for n in nodes]
reference_results = add_source_numbers(reference_results, use_source=False)
display_append = add_details(reference_results)
display_append = "\n\n" + "".join(display_append)
real_inputs = (
replace_today(PROMPT_TEMPLATE)
.replace("{query_str}", real_inputs)
.replace("{context_str}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
elif use_websearch:
limited_context = True
search_results = ddg(real_inputs, max_results=5)
reference_results = []
for idx, result in enumerate(search_results):
logging.debug(f"搜索结果{idx + 1}:{result}")
domain_name = urllib3.util.parse_url(result["href"]).host
reference_results.append([result["body"], result["href"]])
display_append.append(
# f"{idx+1}. [{domain_name}]({result['href']})\n"
f"<li><a href=\"{result['href']}\" target=\"_blank\">{domain_name}</a></li>\n"
)
reference_results = add_source_numbers(reference_results)
display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
real_inputs = (
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
.replace("{query}", real_inputs)
.replace("{web_results}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
display_append = ""
return limited_context, fake_inputs, display_append, real_inputs, chatbot
def predict(
self,
inputs,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
current_user=None,
should_check_token_count=True,
): # repetition_penalty, top_k
self.set_user_identifier(current_user)
status_text = "开始生成回答……"
logging.info(
f"{current_user}输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
)
if should_check_token_count:
yield chatbot + [(inputs, "")], status_text
if reply_language == "跟随问题语言(不稳定)":
reply_language = "" # the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch.
limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
yield chatbot + [(fake_inputs, "")], status_text
if (
self.need_api_key and
self.api_key is None
and not shared.state.multi_api_key
):
status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
logging.info(status_text)
chatbot.append((inputs, ""))
if len(self.history) == 0:
self.history.append(construct_user(inputs))
self.history.append("")
self.all_token_counts.append(0)
else:
self.history[-2] = construct_user(inputs)
yield chatbot + [(inputs, "")], status_text
return
elif len(inputs.strip()) == 0:
status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
logging.info(status_text)
yield chatbot + [(inputs, "")], status_text
return
if self.single_turn:
self.history = []
self.all_token_counts = []
self.history.append(construct_user(inputs))
try:
if stream:
logging.debug("使用流式传输")
iter = self.stream_next_chatbot(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
for chatbot, status_text in iter:
yield chatbot, status_text
else:
logging.debug("不使用流式传输")
chatbot, status_text = self.next_chatbot_at_once(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
yield chatbot, status_text
except Exception as e:
traceback.print_exc()
status_text = STANDARD_ERROR_MSG + str(e)
yield chatbot, status_text
if len(self.history) > 1 and self.history[-1]["content"] != inputs:
logging.info(
f"{self.model_name}回答为:"
+ colorama.Fore.BLUE
+ f"{self.history[-1]['content']}"
+ colorama.Style.RESET_ALL
)
if limited_context:
# self.history = self.history[-4:]
# self.all_token_counts = self.all_token_counts[-2:]
self.history = []
self.all_token_counts = []
max_token = self.token_upper_limit - TOKEN_OFFSET
if sum(self.all_token_counts) > max_token and should_check_token_count:
count = 0
while (
sum(self.all_token_counts)
> self.token_upper_limit * REDUCE_TOKEN_FACTOR
and sum(self.all_token_counts) > 0
):
count += 1
del self.all_token_counts[0]
del self.history[:2]
logging.info(status_text)
status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
yield chatbot, status_text
self.auto_save(chatbot)
def retry(
self,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
):
logging.debug("重试中……")
if len(self.history) > 0:
inputs = self.history[-2]["content"]
del self.history[-2:]
self.all_token_counts.pop()
elif len(chatbot) > 0:
inputs = chatbot[-1][0]
else:
yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
return
iter = self.predict(
inputs,
chatbot,
stream=stream,
use_websearch=use_websearch,
files=files,
reply_language=reply_language,
)
for x in iter:
yield x
logging.debug("重试完毕")
# def reduce_token_size(self, chatbot):
# logging.info("开始减少token数量……")
# chatbot, status_text = self.next_chatbot_at_once(
# summarize_prompt,
# chatbot
# )
# max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
# num_chat = find_n(self.all_token_counts, max_token_count)
# logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
# chatbot = chatbot[:-1]
# self.history = self.history[-2*num_chat:] if num_chat > 0 else []
# self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
# msg = f"保留了最近{num_chat}轮对话"
# logging.info(msg)
# logging.info("减少token数量完毕")
# return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
def interrupt(self):
self.interrupted = True
def recover(self):
self.interrupted = False
def set_token_upper_limit(self, new_upper_limit):
self.token_upper_limit = new_upper_limit
print(f"token上限设置为{new_upper_limit}")
def set_temperature(self, new_temperature):
self.temperature = new_temperature
def set_top_p(self, new_top_p):
self.top_p = new_top_p
def set_n_choices(self, new_n_choices):
self.n_choices = new_n_choices
def set_stop_sequence(self, new_stop_sequence: str):
new_stop_sequence = new_stop_sequence.split(",")
self.stop_sequence = new_stop_sequence
def set_max_tokens(self, new_max_tokens):
self.max_generation_token = new_max_tokens
def set_presence_penalty(self, new_presence_penalty):
self.presence_penalty = new_presence_penalty
def set_frequency_penalty(self, new_frequency_penalty):
self.frequency_penalty = new_frequency_penalty
def set_logit_bias(self, logit_bias):
logit_bias = logit_bias.split()
bias_map = {}
encoding = tiktoken.get_encoding("cl100k_base")
for line in logit_bias:
word, bias_amount = line.split(":")
if word:
for token in encoding.encode(word):
bias_map[token] = float(bias_amount)
self.logit_bias = bias_map
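# Editorial note (added): set_logit_bias expects whitespace-separated "word:bias" pairs,
# e.g. set_logit_bias("hello:5 world:-10") maps every token of "hello" to +5.0 and every
# token of "world" to -10.0 in the resulting bias map.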
def set_user_identifier(self, new_user_identifier):
self.user_identifier = new_user_identifier
def set_system_prompt(self, new_system_prompt):
self.system_prompt = new_system_prompt
def set_key(self, new_access_key):
self.api_key = new_access_key.strip()
msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
logging.info(msg)
return self.api_key, msg
def set_single_turn(self, new_single_turn):
self.single_turn = new_single_turn
def reset(self):
self.history = []
self.all_token_counts = []
self.interrupted = False
pathlib.Path(os.path.join(HISTORY_DIR, self.user_identifier, new_auto_history_filename(os.path.join(HISTORY_DIR, self.user_identifier)))).touch()
return [], self.token_message([0])
def delete_first_conversation(self):
if self.history:
del self.history[:2]
del self.all_token_counts[0]
return self.token_message()
def delete_last_conversation(self, chatbot):
if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
msg = "由于包含报错信息,只删除chatbot记录"
chatbot.pop()
return chatbot, self.history
if len(self.history) > 0:
self.history.pop()
self.history.pop()
if len(chatbot) > 0:
msg = "删除了一组chatbot对话"
chatbot.pop()
if len(self.all_token_counts) > 0:
msg = "删除了一组对话的token计数记录"
self.all_token_counts.pop()
msg = "删除了一组对话"
return chatbot, msg
def token_message(self, token_lst=None):
if token_lst is None:
token_lst = self.all_token_counts
token_sum = 0
for i in range(len(token_lst)):
token_sum += sum(token_lst[: i + 1])
return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"
def save_chat_history(self, filename, chatbot, user_name):
if filename == "":
return
if not filename.endswith(".json"):
filename += ".json"
return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
def auto_save(self, chatbot):
history_file_path = get_history_filepath(self.user_identifier)
save_file(history_file_path, self.system_prompt, self.history, chatbot, self.user_identifier)
def export_markdown(self, filename, chatbot, user_name):
if filename == "":
return
if not filename.endswith(".md"):
filename += ".md"
return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
def load_chat_history(self, filename, user_name):
logging.debug(f"{user_name} 加载对话历史中……")
logging.info(f"filename: {filename}")
if type(filename) != str and filename is not None:
filename = filename.name
try:
if "/" not in filename:
history_file_path = os.path.join(HISTORY_DIR, user_name, filename)
else:
history_file_path = filename
with open(history_file_path, "r") as f:
json_s = json.load(f)
try:
if type(json_s["history"][0]) == str:
logging.info("历史记录格式为旧版,正在转换……")
new_history = []
for index, item in enumerate(json_s["history"]):
if index % 2 == 0:
new_history.append(construct_user(item))
else:
new_history.append(construct_assistant(item))
json_s["history"] = new_history
logging.info(new_history)
except:
pass
logging.debug(f"{user_name} 加载对话历史完毕")
self.history = json_s["history"]
return os.path.basename(filename), json_s["system"], json_s["chatbot"]
except:
# No chat history found, or the chat history failed to parse
logging.info(f"没有找到对话历史记录 {filename}")
return gr.update(), self.system_prompt, gr.update()
def auto_load(self):
if self.user_identifier == "":
self.reset()
return self.system_prompt, gr.update()
history_file_path = get_history_filepath(self.user_identifier)
filename, system_prompt, chatbot = self.load_chat_history(history_file_path, self.user_identifier)
return system_prompt, chatbot
def like(self):
"""like the last response, implement if needed
"""
return gr.update()
def dislike(self):
"""dislike the last response, implement if needed
"""
return gr.update()
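# --- Usage sketch (editorial addition, not part of the original module) ------------------
# A minimal, hedged illustration of how a concrete client plugs into BaseLLMModel:
# implement get_answer_at_once() (and/or get_answer_stream_iter()). The echo logic below
# is purely illustrative and not an actual model client from this project.
class EchoClient(BaseLLMModel):
    def __init__(self, model_name="echo", user=""):
        super().__init__(model_name=model_name, user=user)

    def get_answer_at_once(self):
        # The most recent question is the last entry appended by predict().
        question = self.history[-1]["content"]
        answer = f"You said: {question}"
        return answer, sum(self.all_token_counts) + self.count_token(answer)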
| [] |
2024-01-10 | allynee/Unity-Agent | agent~coder.py | import guidance
from dotenv import load_dotenv, find_dotenv
import os
import openai
import re
class Coder:
# gpt-3.5-turbo-1106
# gpt-4-0613
def __init__(self, model_name="gpt-3.5-turbo-1106", temperature=0, resume=False, ckpt_dir="ckpt", execution_error=True):
load_dotenv(find_dotenv())
openai.api_key = os.getenv("OPENAI_API_KEY")
guidance.llm = guidance.llms.OpenAI(model_name)
self.llm=guidance.llm
self.ckpt_dir = ckpt_dir
self.execution_error = execution_error
#TODO: May need to account for resume, or not.
#TODO: May need to account for execution error, or not
def _generate_function(self, task, examples):
coder = guidance('''
{{#system~}}
You are an AI skilled in C# development for Unity's ubicomp space.
You will assist in creating functions as part of a larger system.
You will do so by translating pseudocode for a task to C# code.
{{~/system}}
{{#user~}}
Your work revolves around our proprietary C# system. Our system comprises of:
- SceneAPI: This is the wrapper class for our ubicomp space. The methods here will allow for manipulating the whole space.
- Object3D: Each object in the space is of this type, and the methods here are available for every object. The anchor for the position and rotation of each object is at the bottom center of the object.
- Vector3D: Any 3-dimensional dataset uses this class to represent x, y, and z.
- Color3D: Color information using RGBA.
**As the script's class inherits from `SceneAPI`, you can directly call its methods without prefixing.**
Use the provided system to manipulate the room.
Follow these steps:
1. Write method(s) using C# code to implement the task.
2. Declare private fields above your method(s) to track the object(s) you create and modify within the method. Ensure you always assign your object finding, creations, or modifications to these declared fields. If you are creating multiple objects, you can use a list to store them.
3. Use Debug.Log statements for action logging and error handling.
4. Adhere strictly to standard C# methods.
5. Add comments to your code to explain your thought process.
Here are all the classes and functions you may use in your code:
```
Object types that can be created: (You must use the exact case-sensitive type of the object)
Chair, Fox, Lamp, LED Cube, Push Button, Table, Vase, Zombie
namespace Enums
{
public enum WallName
{
Left,
Right,
BackLeft,
BackRight,
}
}
public class Object3D
{
public string GetType()
public void WhenSelected(UnityAction<SelectEnterEventArgs> function)
public void WhenNotSelected(UnityAction<SelectExitEventArgs> function)
public void WhenHovered(UnityAction<HoverEnterEventArgs> function)
public void WhenNotHovered(UnityAction<HoverExitEventArgs> function)
public void SetColor(Color3D color)
public void SetLuminousIntensity(float intensity)
public void Illuminate(bool lit)
public void SetPosition(Vector3D pos)
public void SetRotation(Vector3D rot)
public void SetSize(Vector3D s)
public void SetSizeByScale(float s)
public void Levitate(bool isLevitated)
public bool IsLevitated()
public Color GetColor()
public float GetIntensity()
public bool IsLit()
public Vector3D GetPosition()
public Vector3D GetRotation()
public Vector3D GetSize()
public GameObject ToGameObject()
}
public class SceneAPI
{
public Vector3D GetWallPosition(WallName wallname)
public List<Object3D> GetAllObject3DsInScene()
public Object3D FindObject3DByName(string objName)
// To find an object you could do
// List<Object3D> objectsInView = GetAllObject3DsInFieldOfView();
// Object3D desiredObj = objectsInView.Find(obj => obj.GetType().Equals("ObjType"));
public bool IsObject3DInFieldOfView(Object3D obj)
public List<Object3D> GetAllObject3DsInFieldOfView() // This is good to get objects in the user's field of view. You can use this to find game objects referenced in the task.
public bool IsObjectTypeValid(string objectType)
public List<string> GetAllValidObjectTypes()
public Vector3D GetSceneSize()
public Vector3D GetUserOrientation()
public Vector3D GetUsersHeadPosition() // Not good for object creation and position movement methods, as the tall height will cause objects to topple
public Vector3D GetUsersLeftHandPosition()
public Vector3D GetUsersRightHandPosition()
public Vector3D GetUsersLeftHandRotation()
public Vector3D GetUsersRightHandRotation()
public Vector3D GetUsersFeetPosition() //This is good for object creation and position movement methods
public Object3D CreateObject(string newObjName, string objectType, Vector3D position, Vector3D rotation)
}
public class Vector3D
{
// Vector3D cannot be used like Vector3. It only has the methods below. If you want to use Vector3 methods, convert it to Vector3 using ToVector3() first.
public float x { get; set; }
public float y { get; set; }
public float z { get; set; }
Vector3D(float x, float y, float z)
public Vector3D ToVector3()
public Vector3D FromVector3(Vector3 vec)
} // To add 2 vector 3Ds, use new Vector3D(vec1.x + vec2.x, vec1.y + vec2.y, vec1.z + vec2.z) Same logic for subtract
//When using NavMeshAgent, make sure to convert all Vector3Ds to Vector3s using ToVector3() before using them in the NavMeshAgent methods.
// When performing calculations like multiplication, remember to convert all your Vector3Ds to Vector3s using ToVector3() before performing the calculations.
public class Color3D
{
//All values below range from 0 to 1
public float r { get; set; }
public float g { get; set; }
public float b { get; set; }
public float a { get; set; }
Color3D(float r, float g, float b, float a)
}
```
The task to create a function for is: {{task}}.
When presented with the task of creating a function, your first step is to compare the current task with the provided examples of similar past functions. If you find that the current task closely matches one of these examples, your function should be heavily modeled after the past example. This means using a similar structure, logic, and syntax, adapting only the necessary parts to suit the specifics of the new task.
In cases where there is no close match among the examples, you should then craft a new function by integrating elements from those examples that are most relevant to the current task. This process involves synthesizing the logic, structure, and approach from the examples to create a function that effectively addresses the new task.
Remember, the goal is to maintain the effectiveness and consistency of past successful functions. Use them as blueprints for your responses, ensuring that similar tasks yield similar, proven results.
Examples:\n {{examples}}
Your format for responses should strictly be:
// All class members should be declared here.
private void Start(){
// Insert method(s) that only need to be called once
}
private void Update(){
// If method(s) that need to be called repeatedly
}
public void Method1()
{
// Insert the method code here
}
// And so on... The number of methods will depend on the user's request.
*Note: Output should not contain any text other than the class members and method(s). You must give the full code within the methods*
{{~/user}}
{{#assistant~}}
{{gen "function" temperature=0 max_tokens=4096}}
{{~/assistant}}
''')
resp = coder(task=task, examples=examples)
return resp["function"]
def _generate_script(self, task, plan, functions):
guidance.llm = guidance.llms.OpenAI("gpt-4-0613")
coder = guidance('''
{{#system~}}
You are a skilled C# developer tasked with crafting a coherent C# script, ensuring that created objects and states are managed and utilized effectively across multiple methods.
{{~/system}}
{{#user~}}
User Task: {{task}}
Actionable Instructions to fulfil the user task: {{plan}}
Methods for each step of the actionable plan: {{functions}}
Follow these steps:
1. Develop a public class inheriting from SceneAPI with a name relevant to the task context.
2. You must use all methods in your final script. The only method(s) and/or code you are permitted to remove are methods that repeat creating or finding object(s).
3. Integrate and modify the methods to maintain script coherence.
4. Utilize class-level variables (e.g., private Object3D classMember;) to preserve state throughout the class.
5. Remove duplicate variables referencing the same object(s).
6. Ensure that the same object references are used in every method. Once an object is initialized, consistently use this reference in all related operations.
7. For each method, do not use FindObject3DByName() or CreateObject() to re-assign the same object(s). Always check if there is the same object reference that was initialized in previous methods.
8. Use Debug.Log statements for action logging and error handling.
9. Adhere to standard C# methods and conventions and add comments to your code to explain your thought process.
10. All methods should be called under either Start() or Update().
Your format for responses should strictly be:
```
public class YourChosenClassName : SceneAPI
{
// All class members and constants should be declared here.
// Remember to remove duplicate variables referencing the same object(s)
// Remember to call the final correct variable names across all methods
private void Start()
{
Method1();
Method2();
// And so on... The number of methods will depend on the user's request.
}
private void Update()
{
Method3();
// And so on... The number of methods will depend on the user's request.
}
public void Method1()
{
// Insert the method code here
}
public void Method2()
{
// Insert the method code here
}
// And so on... The number of methods will depend on the user's request.
}
```
*Note: Output should not contain any text other than script containing method(s). You must give the full code within the methods.*
{{~/user}}
{{#assistant~}}
{{gen "script" temperature=0 max_tokens=4096}}
{{~/assistant}}
''')
resp = coder(task=task, plan=plan, functions=functions)
script = resp["script"]
script = edit_code_string(script)
script = "using UnityEngine;\nusing UnityEngine.Events;\nusing UnityEngine.XR.Interaction.Toolkit;\nusing System;\nusing System.Collections.Generic;\nusing Enums;\nusing UnityEngine.AI;\nusing System.Linq;\n\n" + script
return script
def edit_code_string(code_string):
try:
first_index = code_string.index("public class")
last_index = code_string.rindex("}")
return code_string[first_index:last_index+1]
except ValueError:
print("Invalid code: 'using' or '}' not found.")
| [] |
2024-01-10 | allynee/Unity-Agent | agent~memorymanager.py | import chromadb
from chromadb.config import Settings
import csv
from dotenv import load_dotenv, find_dotenv
import guidance
from langchain.embeddings import OpenAIEmbeddings
import os
import openai
import my_utils as U
from time import time as now
import sys
sys.path.append("/Users/allyne/Documents/GitHub/Unity-Agent/")
class MemoryManager:
# gpt-3.5-turbo-1106
# gpt-4-0613
def __init__(self, model_name="gpt-3.5-turbo-1106", temperature=0, resume=False, retrieve_top_k=3, ckpt_dir="ckpt"):
load_dotenv(find_dotenv())
openai.api_key = os.getenv("OPENAI_API_KEY")
guidance.llm = guidance.llms.OpenAI(model_name, temperature=temperature)
self.llm=guidance.llm
self.ckpt_dir = ckpt_dir
self.retrieve_top_k = retrieve_top_k
# TODO: May need to account for resume; not sure whether the checkpoint directory needs to be created first.
settings = Settings(chroma_db_impl="duckdb+parquet",
persist_directory=f"../memory/{ckpt_dir}")
print(f"Initializing memory in {settings.persist_directory}...")
client = chromadb.Client(settings)
client.persist()
self.embeddings = OpenAIEmbeddings()
self.client = client
self.plansdb = client.get_or_create_collection(name="plansdb", embedding_function=self.embeddings)
self.codedb = client.get_or_create_collection(name="codedb", embedding_function=self.embeddings)
def _init_plan_memory(self, csv_path):
t0=now()
with open(csv_path, "r") as file:
reader = csv.DictReader(file)
for i, row in enumerate(reader):
print(f"Embedding plan {i+1}...")
user_query = row["User Query"]
plan = row["Plan"]
user_query_embedding = self.embeddings.embed_query(user_query)
self.plansdb.add(
embeddings=[user_query_embedding],
metadatas=[{
"user_query": user_query,
"plan": plan,
}],
ids=[user_query]
)
U.dump_text(
f"User query:\n{user_query}\n\nPlan:\n{plan}", f"../memory/{self.ckpt_dir}/plans/{user_query}.txt"
)
return f"Intialized memory on planning in {now()-t0} seconds."
def _init_code_memory(self, csv_path):
t0=now()
with open(csv_path, "r") as file:
reader = csv.DictReader(file)
for i, row in enumerate(reader):
print(f"Embedding code {i+1}...")
instruction = row["Instruction"]
code = row["Code"]
instruction_embedding = self.embeddings.embed_query(instruction)
self.codedb.add(
embeddings=[instruction_embedding],
metadatas=[{
"instruction": instruction,
"code": code,
}],
ids=[instruction]
)
U.dump_text(
f"Instruction:\n{instruction}\n\nCode:\n{code}", f"../memory/{self.ckpt_dir}/code/{instruction}.txt"
)
return f"Intialized memory on coding in {now()-t0} seconds."
def _get_code(self, instruction):
instruction_embedding = self.embeddings.embed_query(instruction)
# Query up to k matches, but only the top 2 functions are returned below
k = min(self.codedb.count(), 5)
if k==0:
return []
print(f"Retrieving {k} codes...")
codes = self.codedb.query(
query_embeddings=instruction_embedding,
n_results=k,
#where_document={"$contains":"search_string"}
include=["metadatas"]
)
return codes["metadatas"][0][:2]
def _get_plan(self, user_query):
user_query_embedding = self.embeddings.embed_query(user_query)
k = min(self.plansdb.count(), 5)
if k==0:
return []
print(f"Retrieving {k} plans...")
plans = self.plansdb.query(
query_embeddings=user_query_embedding,
n_results=k,
include=["metadatas"]
)
# Just do 2
return plans["metadatas"][0][:2]
def _add_new_code(self, info):
instruction = info["instruction"]
code = info["code"]
instruction_embedding = self.embeddings.embed_query(instruction)
self.codedb.add(
embeddings=[instruction_embedding],
metadatas=[{
"instruction": instruction,
"code": code,
}],
ids=[instruction] #TODO: Account for repeated instructions
)
U.dump_text(
f"Instruction:\n{instruction}\n\nCode:\n{code}", f"../memory/{self.ckpt_dir}/code/{instruction}.txt"
)
return f"Added code for instruction \"{instruction}\""
def _add_new_plan(self, info):
user_query = info["user_query"]
plan = info["plan"]
user_query_embedding = self.embeddings.embed_query(user_query)
self.plansdb.add(
embeddings=[user_query_embedding],
metadatas=[{
"user_query": user_query,
"plan": plan,
}],
ids=[user_query] #TODO: Account for repeated user queries
)
U.dump_text(
f"User query:\n{user_query}\n\nPlan:\n{plan}", f"../memory/{self.ckpt_dir}/plans/{user_query}.txt"
)
return f"Added plan for user query \"{user_query}\""
def _add_new_experience(self, obj):
task = obj.task
plan_function_map = obj.new_plan_function_map
plans = list(plan_function_map.keys())
plan_str = ""
for i, plan in enumerate(plans):
plan_str += f"{i+1}. {plan}\n"
new_plan_dict = {
"user_query": task,
"plan": plan_str,
}
self._add_new_plan(new_plan_dict)
all_code_dicts = []
for plan, function in plan_function_map.items():
code_dict = {
"instruction": plan,
"code": function,
}
all_code_dicts.append(code_dict)
self._add_new_code(code_dict)
return new_plan_dict, all_code_dicts
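# Editorial note (added): _add_new_experience expects an object whose .task is the original
# user query and whose .new_plan_function_map maps each plan step (str) to the generated C#
# method source (str); the numbered plan is embedded into plansdb and each step/function
# pair into codedb.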
def _delete_plan_memory(self):
self.client.delete_collection(name="plansdb")
return "Deleted plan memory."
def _delete_code_memory(self):
self.client.delete_collection(name="codedb")
return "Deleted code memory."
def _delete_one_plan(self, user_query):
self.plansdb.delete(ids=[user_query])
return f"Deleted plan for user query \"{user_query}\""
def _delete_one_code(self, instruction):
self.codedb.delete(ids=[instruction])
return f"Deleted code for instruction \"{instruction}\"" | [] |
2024-01-10 | allynee/Unity-Agent | gui~pages~5%E2%9B%BA%EF%B8%8FBaseline.py | import streamlit as st
from dotenv import load_dotenv, find_dotenv
import guidance
import openai
import os
import re
from time import time as now
import sys
sys.path.append("/Users/allyne/Documents/GitHub/Unity-Agent/")
PROMPT = '''
{{#system~}}
You are a skilled C# developer tasked with crafting a C# script for a 3D ubicomp space.
{{~/system}}
{{#user~}}
Your work revolves around our proprietary C# system. Our system comprises of:
- SceneAPI: This is the wrapper class for our ubicomp space. The methods here will allow for manipulating the whole space.
- Object3D: Each object in the space is of this type, and the methods here are available for every object. The anchor for the position and rotation of each object is at the bottom center of the object.
- Vector3D: Any 3-dimensional dataset uses this class to represent x, y, and z.
- Color3D: Color information using RGBA.
**As the script's class inherits from `SceneAPI`, you can directly call its methods without prefixing.**
Use the provided system to manipulate the room.
Here are all the classes and functions you may use in your code:
```
Object types that can be created: (You must use the exact case-sensitive type of the object)
Chair, Fox, Lamp, LED Cube, Push Button, Table, Vase, Zombie
namespace Enums
{
public enum WallName
{
Left,
Right,
BackLeft,
BackRight,
}
}
public class Object3D
{
public string GetType()
public void WhenSelected(UnityAction<SelectEnterEventArgs> function)
public void WhenNotSelected(UnityAction<SelectExitEventArgs> function)
public void WhenHovered(UnityAction<HoverEnterEventArgs> function)
public void WhenNotHovered(UnityAction<HoverExitEventArgs> function)
public void SetColor(Color3D color)
public void SetLuminousIntensity(float intensity)
public void Illuminate(bool lit)
public void SetPosition(Vector3D pos)
public void SetRotation(Vector3D rot)
public void SetSize(Vector3D s)
public void SetSizeByScale(float s)
public void Levitate(bool isLevitated)
public bool IsLevitated()
public Color GetColor()
public float GetIntensity()
public bool IsLit()
public Vector3D GetPosition()
public Vector3D GetRotation()
public Vector3D GetSize()
public GameObject ToGameObject()
}
public class SceneAPI
{
public Vector3D GetWallPosition(WallName wallname)
public List<Object3D> GetAllObject3DsInScene()
public Object3D FindObject3DByName(string objName)
// To find an object you could do
// List<Object3D> objectsInView = GetAllObject3DsInFieldOfView();
// Object3D desiredObj = objectsInView.Find(obj => obj.GetType().Equals("ObjType"));
public bool IsObject3DInFieldOfView(Object3D obj)
public List<Object3D> GetAllObject3DsInFieldOfView() // This is good to get objects in the user's field of view. You can use this to find game objects referenced in the task.
public bool IsObjectTypeValid(string objectType)
public List<string> GetAllValidObjectTypes()
public Vector3D GetSceneSize()
public Vector3D GetUserOrientation()
public Vector3D GetUsersHeadPosition() // Not good for object creation and position movement methods, as the tall height will cause objects to topple
public Vector3D GetUsersLeftHandPosition()
public Vector3D GetUsersRightHandPosition()
public Vector3D GetUsersLeftHandRotation()
public Vector3D GetUsersRightHandRotation()
public Vector3D GetUsersFeetPosition() //This is good for object creation and position movement methods
public Object3D CreateObject(string newObjName, string objectType, Vector3D position, Vector3D rotation)
}
public class Vector3D
{
// Vector3D cannot be used like Vector3. It only has the methods below. If you want to use Vector3 methods, convert it to Vector3 using ToVector3() first.
public float x { get; set; }
public float y { get; set; }
public float z { get; set; }
Vector3D(float x, float y, float z)
public Vector3D ToVector3()
public Vector3D FromVector3(Vector3 vec)
} // To add 2 vector 3Ds, use new Vector3D(vec1.x + vec2.x, vec1.y + vec2.y, vec1.z + vec2.z) Same logic for subtract
//When using NavMeshAgent, make sure to convert all Vector3Ds to Vector3s using ToVector3() before using them in the NavMeshAgent methods.
// When performing calculations like multiplication remember to convert all your Vector3Ds to Vector3s using ToVector3() before performing the calculations.
public class Color3D
{
//All values below range from 0 to 1
public float r { get; set; }
public float g { get; set; }
public float b { get; set; }
public float a { get; set; }
Color3D(float r, float g, float b, float a)
}
```
Follow these steps:
1. Create a public class [classname] : SceneAPI where [classname] should be indicative of the user's task context.
2. Write method(s) using C# code to implement the task.
3. Use Debug.Log statements for action logging and error handling.
4. Adhere strictly to standard C# methods.
5. Add comments to your code to explain your thought process.
The user task is:
{{user_task}}
Your format for responses should strictly be:
```
public class YourChosenClassName : SceneAPI
{
// Add any needed class members here
private void Start()
{
Method1();
Method2();
// And so on... The number of methods will depend on the user's request.
}
private void Update()
{
Method3();
// And so on... The number of methods will depend on the user's request.
}
public void Method1()
{
// Insert the method code here
}
public void Method2()
{
// Insert the method code here
}
// And so on... The number of methods will depend on the user's request.
}
```
*Note: Output should not contain any text other than script containing method(s). You must give the full code within the methods.*
{{~/user}}
{{#assistant~}}
{{gen "script" temperature=0 max_tokens=4096}}
{{~/assistant}}
'''
def generate_script(user_task,model_name):
guidance.llm = guidance.llms.OpenAI(model_name)
coder = guidance(PROMPT)
resp = coder(user_task=user_task)
create_and_download_cs_file(resp["script"])
def edit_code_string(code_string):
try:
first_index = code_string.index("public class")
last_index = code_string.rindex("}")
return code_string[first_index:last_index+1]
except ValueError:
st.write("Invalid code: 'using' or '}' not found.")
def get_class_name_from_code(code_string):
# Extract the class name from the code string using regex
match = re.search(r'public class (\w+)', code_string)
if match:
return match.group(1)
return "generated_script"
def create_and_download_cs_file(code_string):
code_string = edit_code_string(code_string)
code_string = "using UnityEngine;\nusing UnityEngine.Events;\nusing UnityEngine.XR.Interaction.Toolkit;\nusing System;\nusing System.Collections.Generic;\nusing Enums;\nusing UnityEngine.AI;\nusing System.Linq;\n\n" + code_string
st.write("\n```csharp\n" + code_string + "\n\n")
class_name = get_class_name_from_code(code_string)
file_name = f"{class_name}.cs"
file_path = f"baseline_generated_scripts/{file_name}"
with open(file_path, "w", encoding="utf-8") as file:
file.write(code_string)
with open(file_path, "rb") as file:
btn = st.download_button(
label="Download .cs file",
data=file,
file_name=file_name,
mime='text/plain',
)
if btn:
os.remove(file_name)
load_dotenv(find_dotenv())
openai.api_key = os.getenv("OPENAI_API_KEY")
st.title("Testing baseline agents")
st.markdown("#### 1. gpt-3.5-turbo-1106")
task = st.text_area(f"Enter task here", key="task_3.5")
if st.button("Run", key="generate_script_3.5"):
with st.spinner("Processing"):
generate_script(task, "gpt-3.5-turbo-1106")
st.success("Process done!")
st.markdown("#### 2. gpt-4-0613")
task = st.text_area(f"Enter task here", key="task_4")
if st.button("Run", key="generate_script_4"):
with st.spinner("Processing"):
generate_script(task, "gpt-4-0613")
st.success("Process done!") | [
"\n{{#system~}}\nYou are a skilled C# developer tasked with crafting a C# script for a 3D ubicomp space. \n{{~/system}}\n{{#user~}}\nYour work revolves around our proprietary C# system. Our system comprises of: \n- SceneAPI: This is the wrapper class for our ubicomp space. The methods here will allow for manipulating the whole space. \n- Object3D: Each object in the space is of this type, and the methods here are available for every object. The anchor for the position and rotation of each object is at the bottom center of the object. \n- Vector3D: Any 3-dimensional dataset uses this class to represent x, y, and z.\n- Color3D: Color information using RGBA.\n \n**As the script's class inherits from `SceneAPI`, you can directly call its methods without prefixing.**\n\nUse the provided system to manipulate the room.\n\nHere are all the classes and functions you may use in your code:\n```\nObject types that can be created: (You must use the exact case-sensitive type of the object)\nChair, Fox, Lamp, LED Cube, Push Button, Table, Vase, Zombie \n\nnamespace Enums\n{\n public enum WallName\n {\n Left,\n Right,\n BackLeft,\n BackRight,\n }\n}\npublic class Object3D\n{\n public string GetType()\n public void WhenSelected(UnityAction<SelectEnterEventArgs> function)\n public void WhenNotSelected(UnityAction<SelectExitEventArgs> function)\n public void WhenHovered(UnityAction<HoverEnterEventArgs> function)\n public void WhenNotHovered(UnityAction<HoverExitEventArgs> function)\n public void SetColor(Color3D color)\n public void SetLuminousIntensity(float intensity)\n public void Illuminate(bool lit)\n public void SetPosition(Vector3D pos)\n public void SetRotation(Vector3D rot)\n public void SetSize(Vector3D s)\n public void SetSizeByScale(float s)\n public void Levitate(bool isLevitated)\n public bool IsLevitated()\n public Color GetColor()\n public float GetIntensity()\n public bool IsLit()\n public Vector3D GetPosition()\n public Vector3D GetRotation()\n public Vector3D GetSize()\n public GameObject ToGameObject()\n}\n\npublic class SceneAPI\n{\n public Vector3D GetWallPosition(WallName wallname)\n public List<Object3D> GetAllObject3DsInScene()\n public Object3D FindObject3DByName(string objName)\n // To find an object you could do \n // List<Object3D> objectsInView = GetAllObject3DsInFieldOfView();\n // Object3D desiredObj = objectsInView.Find(obj => obj.GetType().Equals(\"ObjType\"));\n public bool IsObject3DInFieldOfView(Object3D obj)\n public List<Object3D> GetAllObject3DsInFieldOfView() // This is good to get objects in the user's field of view. You can use this to find game objects referenced in the task. \n public bool IsObjectTypeValid(string objectType)\n public List<string> GetAllValidObjectTypes()\n public Vector3D GetSceneSize()\n public Vector3D GetUserOrientation()\n public Vector3D GetUsersHeadPosition() // Not good for object creation and position movement methods, as the tall height will cause objects to topple\n public Vector3D GetUsersLeftHandPosition()\n public Vector3D GetUsersRightHandPosition()\n public Vector3D GetUsersLeftHandRotation()\n public Vector3D GetUsersRightHandRotation()\n public Vector3D GetUsersFeetPosition() //This is good for object creation and position movement methods\n public Object3D CreateObject(string newObjName, string objectType, Vector3D position, Vector3D rotation)\n}\n\npublic class Vector3D\n{\n // Vector3D cannot be used like Vector3. It only has the methods below. 
If you want to use Vector3 methods, convert it to Vector3 using ToVector3() first.\n public float x { get; set; }\n public float y { get; set; }\n public float z { get; set; }\n Vector3D(float x, float y, float z)\n public Vector3D ToVector3()\n public Vector3D FromVector3(Vector3 vec)\n} // To add 2 vector 3Ds, use new Vector3D(vec1.x + vec2.x, vec1.y + vec2.y, vec1.z + vec2.z) Same logic for subtract\n//When using NavMeshAgent, make sure to convert all Vector3Ds to Vector3s using ToVector3() before using them in the NavMeshAgent methods.\n// When performing calculations like multiplication remember to convert all your Vector3Ds to Vector3s using ToVector3() before performing the calculations.\npublic class Color3D\n{\n //All values below range from 0 to 1 \n public float r { get; set; }\n public float g { get; set; }\n public float b { get; set; }\n public float a { get; set; }\n Color3D(float r, float g, float b, float a)\n}\n```\nFollow these steps:\n1. Create a public class [classname] : SceneAPI where [classname] should be indicative of the user's task context.\n2. Write method(s) using C# code to implement the task. \n3. Use Debug.Log statements for action logging and error handling.\n4. dhere strictly to standard C# methods. \n5. Add comments to your code to explain your thought process.\n\nThe user task is: \n{{user_task}}\n\nYour format for responses should strictly be: \n```\npublic class YourChosenClassName : SceneAPI\n{\t\n // Add any needed class members here\n \n private void Start()\n {\n Method1();\n Method2();\n // And so on... The number of methods will depend on the user's request.\n }\n\n private void Update()\n {\n Method3();\n // And so on... The number of methods will depend on the user's request.\n }\n\n public void Method1()\n {\n // Insert the method code here\n }\n public void Method2()\n {\n // Insert the method code here\n }\n // And so on... The number of methods will depend on the user's request. \n}\n\n```\n*Note: Output should not contain any text other than script containing method(s). You must give the full code within the methods.*\n{{~/user}}\n{{#assistant~}}\n{{gen \"script\" temperature=0 max_tokens=4096}}\n{{~/assistant}}\n"
] |
2024-01-10 | allynee/Unity-Agent | agent~planner.py | from dotenv import load_dotenv, find_dotenv
import guidance
import openai
import os
class Planner:
# gpt-3.5-turbo-1106
# gpt-4-0613
def __init__(self, model_name="gpt-4-0613", temperature=0.0, resume=False, ckpt_dir="ckpt"):
load_dotenv(find_dotenv())
openai.api_key = os.getenv("OPENAI_API_KEY")
guidance.llm = guidance.llms.OpenAI(model_name, temperature=temperature)
self.llm=guidance.llm
def _generate_plan(self, task, examples):
planner = guidance('''
{{#system~}}
You are an efficient, direct and helpful assistant tasked with helping shape a ubicomp space.
Your role is to generate clear, precise, and effective instructions for altering a 3D space according to user requests.
When responding to user requests, you must closely follow past successful examples provided.
Your instructions should replicate the steps from these examples as closely as possible, only deviating slightly if necessary to tailor the plan to the specific user request.
{{~/system}}
{{#user~}}
As an assistant, create clear and precise instructions to alter a 3D ubicomp space according to user requests.
Follow these guidelines:
- Respond with a numbered set of instructions.
- Your first instruction must be to either create an object or find the object the user is referring to.
-- For example, if the user uses phrases like "The table" and "This table", you should have an instruction like "Find a table in the user's field of view"
- Each instruction should modify only 1 property or behaviour.
- Properties that can be edited are: Position, Rotation, Size, Color, Illumination (Whether the object emanates light), Luminous Intensity (Brightness of the light between 1 and 10), Levitation (When an object is levitated, it floats).
- If you need to edit the position of more than one object, include it within a single instruction. For example, use "Edit the Position property of each chair to be 0.5 meters in front of each room wall" instead of separate instructions for each chair.
- Your instructions must translate subjective terms into specific, measurable instructions.
-- For example, terms like "big" or "close to me" can translate to “2 times its current size” and “1m away from the user” respectively. Always cite explicit numbers.
-- Terms like "the table" or "this table" should translate to "table in the user's field of view"
- For colors, use RGBA values.
- Only instructions modifying the Position property can mention more than one object types. All other property modification can only mention ONE object type.
The space consists of 4 walls, 1 ceiling, and 1 floor.
You are limited to creating or modifying the following object types: (You must use the exact case-sensitive type of the object)
Chair, Fox, Lamp, LED Cube, Push Button, Table, Vase, Zombie
The user's prompt is {{task}}.
When presented with a user request, your first step is to compare it with past examples provided below.
If the current request closely matches a past example, you must replicate the plan from that example as closely as possible, adjusting only what is necessary to fit the specifics of the new task. This replication should include using the same object types, properties, and values.
For any task, where a past example exists, your plan should follow the example's structure and steps very closely.
Only in cases where no past example matches closely, then construct a new plan by synthesizing elements from the examples that are most relevant to the new task.
Remember, the goal is to maintain the effectiveness and consistency of past successful plans. Use them as blueprints for your responses, ensuring that similar tasks yield similar, proven results.
Past examples:\n {{examples}}
The format for response should strictly be:
1. Instruction 1\n
2. Instruction 2\n
…
*Note: Output should not contain any text other than the instructions.*
{{~/user}}
{{#assistant~}}
{{gen "plan" max_tokens=2000 temperature=0}}
{{~/assistant}}
''')
resp = planner(task=task, examples=examples)
return resp["plan"]
| [] |
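For context, here is a minimal usage sketch of the `Planner` class above (an editorial illustration, not part of the repository). It assumes `OPENAI_API_KEY` is available in the environment or a `.env` file, that the module is importable as `agent.planner`, and that `examples` is a string of previously retrieved task/plan pairs in the format the prompt expects; the task text below is hypothetical.

```python
from agent.planner import Planner

planner = Planner(model_name="gpt-4-0613", temperature=0.0)

task = "Move this table 1 meter closer to me"        # hypothetical user request
examples = "Task: ...\nPlan:\n1. ...\n2. ..."        # hypothetical retrieved examples
plan_text = planner._generate_plan(task, examples)   # returns the numbered instruction list as a string
print(plan_text)
```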
2024-01-10 | allynee/Unity-Agent | gui~pages~6👾No memory.py | import streamlit as st
from dotenv import load_dotenv
from time import time as now
import guidance
import re
import os
import sys
sys.path.append("/Users/allyne/Documents/GitHub/Unity-Agent/")
PLANNER_PROMPT = '''
{{#system~}}
You are an efficient, direct and helpful assistant tasked with helping shape a ubicomp space.
Your role is to generate clear, precise, and effective instructions for altering a 3D space according to user requests.
When responding to user requests, you must closely follow past successful examples provided.
Your instructions should replicate the steps from these examples as closely as possible, only deviating slightly if necessary to tailor the plan to the specific user request.
{{~/system}}
{{#user~}}
As an assistant, create clear and precise instructions to alter a 3D ubicomp space according to user requests.
Follow these guidelines:
- Respond with a numbered set of instructions.
- Your first instruction must be to either create an object or find the object the user is referring to.
-- For example, if the user uses phrases like "The table" and "This table", you should have an instruction like "Find a table in the user's field of view"
- Each instruction should modify only 1 property or behaviour.
- Properties that can be edited are: Position, Rotation, Size, Color, Illumination (Whether the object emanates light), Luminous Intensity (Brightness of the light between 1 and 10), Levitation (When an object is levitated, it floats).
- If you need to edit the position of more than one object, include it within a single instruction. For example, use "Edit the Position property of each chair to be 0.5 meters in front of each room wall" instead of separate instructions for each chair.
- Your instructions must translate subjective terms into specific, measurable instructions.
-- For example, terms like "big" or "close to me" can translate to “2 times its current size” and “1m away from the user” respectively. Always cite explicit numbers.
-- Terms like "the table" or "this table" should translate to "table in the user's field of view"
- For colors, use RGBA values.
- Only instructions modifying the Position property can mention more than one object types. All other property modification can only mention ONE object type.
The space consists of 4 walls, 1 ceiling, and 1 floor.
You are limited to creating or modifying the following object types: (You must use the exact case-sensitive type of the object)
Chair, Fox, Lamp, LED Cube, Push Button, Table, Vase, Zombie
The user's prompt is {{task}}.
The format for response should strictly be:
1. Instruction 1\n
2. Instruction 2\n
…
*Note: Output should not contain any text other than the instructions.*
{{~/user}}
{{#assistant~}}
{{gen "plan" max_tokens=2000 temperature=0}}
{{~/assistant}}
'''
FUNCTION_PROMPT = '''
{{#system~}}
You are an AI skilled in C# development for Unity's ubicomp space.
You will assist in creating functions as part of a larger system.
You will do so by translating pseudocode for a task to C# code.
{{~/system}}
{{#user~}}
Your work revolves around our proprietary C# system. Our system comprises of:
- SceneAPI: This is the wrapper class for our ubicomp space. The methods here will allow for manipulating the whole space.
- Object3D: Each object in the space is of this type, and the methods here are available for every object. The anchor for the position and rotation of each object is at the bottom center of the object.
- Vector3D: Any 3-dimensional dataset uses this class to represent x, y, and z.
- Color3D: Color information using RGBA.
**As the script's class inherits from `SceneAPI`, you can directly call its methods without prefixing.**
Use the provided system to manipulate the room.
Follow these steps:
1. Write method(s) using C# code to implement the task.
2. Declare private fields above your method(s) to track the object(s) you create and modify within the method. Ensure you always assign your object finding, creations, or modifications to these declared fields. If you are creating multiple objects, you can use a list to store them.
3. Use Debug.Log statements for action logging and error handling.
4. Adhere strictly to standard C# methods.
5. Add comments to your code to explain your thought process.
Here are all the classes and functions you may use in your code:
```
Object types that can be created: (You must use the exact case-sensitive type of the object)
Chair, Fox, Lamp, LED Cube, Push Button, Table, Vase, Zombie
namespace Enums
{
public enum WallName
{
Left,
Right,
BackLeft,
BackRight,
}
}
public class Object3D
{
public string GetType()
public void WhenSelected(UnityAction<SelectEnterEventArgs> function)
public void WhenNotSelected(UnityAction<SelectExitEventArgs> function)
public void WhenHovered(UnityAction<HoverEnterEventArgs> function)
public void WhenNotHovered(UnityAction<HoverExitEventArgs> function)
public void SetColor(Color3D color)
public void SetLuminousIntensity(float intensity)
public void Illuminate(bool lit)
public void SetPosition(Vector3D pos)
public void SetRotation(Vector3D rot)
public void SetSize(Vector3D s)
public void SetSizeByScale(float s)
public void Levitate(bool isLevitated)
public bool IsLevitated()
public Color GetColor()
public float GetIntensity()
public bool IsLit()
public Vector3D GetPosition()
public Vector3D GetRotation()
public Vector3D GetSize()
public GameObject ToGameObject()
}
public class SceneAPI
{
public Vector3D GetWallPosition(WallName wallname)
public List<Object3D> GetAllObject3DsInScene()
public Object3D FindObject3DByName(string objName)
// To find an object you could do
// List<Object3D> objectsInView = GetAllObject3DsInFieldOfView();
// Object3D desiredObj = objectsInView.Find(obj => obj.GetType().Equals("ObjType"));
public bool IsObject3DInFieldOfView(Object3D obj)
public List<Object3D> GetAllObject3DsInFieldOfView() // This is good to get objects in the user's field of view. You can use this to find game objects referenced in the task.
public bool IsObjectTypeValid(string objectType)
public List<string> GetAllValidObjectTypes()
public Vector3D GetSceneSize()
public Vector3D GetUserOrientation()
public Vector3D GetUsersHeadPosition() // Not good for object creation and position movement methods, as the tall height will cause objects to topple
public Vector3D GetUsersLeftHandPosition()
public Vector3D GetUsersRightHandPosition()
public Vector3D GetUsersLeftHandRotation()
public Vector3D GetUsersRightHandRotation()
public Vector3D GetUsersFeetPosition() //This is good for object creation and position movement methods
public Object3D CreateObject(string newObjName, string objectType, Vector3D position, Vector3D rotation)
}
public class Vector3D
{
// Vector3D cannot be used like Vector3. It only has the methods below. If you want to use Vector3 methods, convert it to Vector3 using ToVector3() first.
public float x { get; set; }
public float y { get; set; }
public float z { get; set; }
Vector3D(float x, float y, float z)
public Vector3D ToVector3()
public Vector3D FromVector3(Vector3 vec)
} // To add 2 vector 3Ds, use new Vector3D(vec1.x + vec2.x, vec1.y + vec2.y, vec1.z + vec2.z) Same logic for subtract
//When using NavMeshAgent, make sure to convert all Vector3Ds to Vector3s using ToVector3() before using them in the NavMeshAgent methods.
// When performing calculations like multiplication, remember to convert all your Vector3Ds to Vector3s using ToVector3() before performing the calculations.
public class Color3D
{
//All values below range from 0 to 1
public float r { get; set; }
public float g { get; set; }
public float b { get; set; }
public float a { get; set; }
Color3D(float r, float g, float b, float a)
}
```
The task to create a function for is: {{task}}.
Your format for responses should strictly be:
// All class members should be declared here.
private void Start(){
// Insert method(s) that only need to be called once
}
private void Update(){
// If method(s) that need to be called repeatedly
}
public void Method1()
{
// Insert the method code here
}
// And so on... The number of methods will depend on the user's request.
*Note: Output should not contain any text other than the class members and method(s). You must give the full code within the methods*
{{~/user}}
{{#assistant~}}
{{gen "function" temperature=0 max_tokens=4096}}
{{~/assistant}}
'''
SCRIPT_PROMPT = '''
{{#system~}}
You are a skilled C# developer tasked with crafting a coherent C# script, ensuring that created objects and states are managed and utilized effectively across multiple methods.
{{~/system}}
{{#user~}}
User Task: {{task}}
Actionable Instructions to fulfil the user task: {{plan}}
Methods for each step of the actionable plan: {{functions}}
Follow these steps:
1. Develop a public class inheriting from SceneAPI with a name relevant to the task context.
2. You must use all methods in your final script. The only method(s) and/or code you are permitted to remove are methods that repeat creating or finding object(s).
3. Integrate and modify the methods to maintain script coherence.
4. Utilize class-level variables (e.g., private Object3D classMember;) to preserve state throughout the class.
5. Remove duplicate variables referencing the same object(s).
6. Ensure that the same object references are used in every method. Once an object is initialized, consistently use this reference in all related operations.
7. For each method, do not use FindObject3DByName() or CreateObject() to re-assign the same object(s). Always check if there is the same object reference that was initialized in previous methods.
8. Use Debug.Log statements for action logging and error handling.
9. Adhere to standard C# methods and conventions and add comments to your code to explain your thought process.
10. All methods should be called under either Start() or Update().
Your format for responses should strictly be:
```
public class YourChosenClassName : SceneAPI
{
// All class members and constants should be declared here.
// Remember to remove duplicate variables referencing the same object(s)
// Remember to call the final correct variable names across all methods
private void Start()
{
Method1();
Method2();
// And so on... The number of methods will depend on the user's request.
}
private void Update()
{
Method3();
// And so on... The number of methods will depend on the user's request.
}
public void Method1()
{
// Insert the method code here
}
public void Method2()
{
// Insert the method code here
}
// And so on... The number of methods will depend on the user's request.
}
```
*Note: Output should not contain any text other than script containing method(s). You must give the full code within the methods.*
{{~/user}}
{{#assistant~}}
{{gen "script" temperature=0 max_tokens=4096}}
{{~/assistant}}
'''
def get_class_name_from_code(code_string):
# Extract the class name from the code string using regex
match = re.search(r'public class (\w+)', code_string)
if match:
return match.group(1)
return "generated_script"
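# Editorial example (not part of the original file) of how the extraction above behaves:
# get_class_name_from_code("public class ZenGardenBuilder : SceneAPI {}") returns "ZenGardenBuilder",
# while a string with no "public class ..." declaration falls back to "generated_script".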
def create_and_download_cs_file(code_string):
class_name = get_class_name_from_code(code_string)
file_name = f"{class_name}.cs"
file_path = f"no_memory_generated_scripts/{file_name}"
with open(file_path, "w", encoding="utf-8") as file:
file.write(code_string)
with open(file_path, "rb") as file:
btn = st.download_button(
label="Download .cs file",
data=file,
file_name=file_name,
mime='text/plain',
)
if btn:
        os.remove(file_path)
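# Editorial note (not part of the original file): the writer above assumes the
# "no_memory_generated_scripts" directory already exists; if that is not guaranteed,
# a defensive variant could create it before writing, e.g.:
#   os.makedirs("no_memory_generated_scripts", exist_ok=True)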
def clean_function_text(text):
text = text.replace("```csharp\n", "") # Replace the starting string with nothing
text = text.replace("```", "") # Replace the ending string with nothing
return text.strip()
def generate_plan(task):
guidance.llm = guidance.llms.OpenAI("gpt-4-0613")
planner = guidance(PLANNER_PROMPT)
resp = planner(task=task)
return(resp["plan"])
def generate_function(task):
guidance.llm = guidance.llms.OpenAI("gpt-3.5-turbo-1106")
coder = guidance(FUNCTION_PROMPT)
resp = coder(task=task)
return(resp["function"])
def edit_code_string(code_string):
try:
first_index = code_string.index("public class")
last_index = code_string.rindex("}")
return code_string[first_index:last_index+1]
    except ValueError:
        print("Invalid code: 'public class' or '}' not found.")
        return code_string  # fall back to the unmodified string so callers never receive None
def generate_script(task, plan, functions):
guidance.llm = guidance.llms.OpenAI("gpt-4-0613")
script = guidance(SCRIPT_PROMPT)
resp = script(task=task, plan=plan, functions=functions)
script = edit_code_string(resp["script"])
script = "using UnityEngine;\nusing UnityEngine.Events;\nusing UnityEngine.XR.Interaction.Toolkit;\nusing System;\nusing System.Collections.Generic;\nusing Enums;\nusing UnityEngine.AI;\nusing System.Linq;\n\n" + script
return(script)
def no_memory_test(task):
st.markdown(f"## Received your task to generate a script for: {task}")
st.markdown("## Generating the plan...")
plan = generate_plan(task)
st.write(plan)
plans = plan.split("\n")
plans = [plan for plan in plans if plan.strip()]
functions = []
st.markdown("## Generating functions...")
    for step in plans:
        st.write("Generating function for \n ```" + step + "```")
        function = generate_function(step)
        function = clean_function_text(function)
        functions.append(function)
        st.write("\n```csharp\n" + function + "\n\n")
st.markdown("## Generating the entire script...")
script = generate_script(task, plan, functions)
st.write("```csharp\n" + script)
st.write("\n\nDownload the script here:")
create_and_download_cs_file(script)
st.title("Agent with No Memory")
task = st.text_area(f"Enter task here", key="task")
if st.button("Run", key="generate_script"):
with st.spinner("Processing"):
no_memory_test(task)
st.success("Process done!")
| [
"\n{{#system~}}\nYou are a skilled C# developer tasked with crafting a coherent C# script, ensuring that created objects and states are managed and utilized effectively across multiple methods.\n{{~/system}}\n{{#user~}}\nUser Task: {{task}}\n\nActionable Instructions to fulfil the user task: {{plan}}\n \nMethods for each step of the actionable plan: {{functions}}\n \nFollow these steps:\n1. Develop a public class inheriting from SceneAPI with a name relevant to the task context.\n2. You must use all methods in your final script. The only method(s) and/or code you are permitted to remove are methods that repeat creating or finding object(s).\n3. Integrate and modify the methods to maintain script coherence. \n4. Utilize class-level variables (e.g., private Object3D classMember;) to preserve state throughout the class.\n5. Remove duplicate variables referencing the same object(s).\n6. Ensure that the same object references are used in every method. Once an object is initialized, consistently use this reference in all related operations. \n7. For each method, do not use FindObject3DByName() or CreateObject() to re-assign the same object(s). Always check if there is the same object reference that was initialized in previous methods.\n8. Use Debug.Log statements for action logging and error handling.\n9. Adhere to standard C# methods and conventions and add comments to your code to explain your thought process.\n10. All methods should be called under either Start() or Update().\n\nYour format for responses should strictly be: \n```\npublic class YourChosenClassName : SceneAPI\n{\t\n // All class members and constants should be declared here. \n // Remember to remove duplicate variables referencing the same object(s)\n // Remember to call the final correct variable names across all methods\n \n private void Start()\n {\n Method1();\n Method2();\n // And so on... The number of methods will depend on the user's request. \n }\n private void Update()\n {\n Method3();\n // And so on... The number of methods will depend on the user's request.\n }\n \n public void Method1()\n {\n // Insert the method code here\n }\n public void Method2()\n {\n // Insert the method code here\n }\n // And so on... The number of methods will depend on the user's request. \n}\n```\n*Note: Output should not contain any text other than script containing method(s). You must give the full code within the methods.*\n{{~/user}}\n{{#assistant~}}\n{{gen \"script\" temperature=0 max_tokens=4096}}\n{{~/assistant}}\n",
"\n{{#system~}}\nYou are an efficient, direct and helpful assistant tasked with helping shape a ubicomp space. \nYour role is to generate clear, precise, and effective instructions for altering a 3D space according to user requests.\nWhen responding to user requests, you must closely follow past successful examples provided. \nYour instructions should replicate the steps from these examples as closely as possible, only deviating slightly if necessary to tailor the plan to the specific user request. \n{{~/system}}\n{{#user~}}\nAs an assistant, create clear and precise instructions to alter a 3D ubicomp space according to user requests. \n \nFollow these guidelines:\n- Respond with a numbered set of instructions. \n- Your first instruction must be to either create an object or find the object the user is referring to.\n-- For example, if the user uses phrases like \"The table\" and \"This table\", you should have an instruction like \"Find a table in the user's field of view\"\n- Each instruction should modify only 1 property or behaviour.\n- Properties that can be edited are: Position, Rotation, Size, Color, Illumination (Whether the object emanates light), Luminous Intensity (Brightness of the light between 1 and 10), Levitation (When an object is levitated, it floats). \n- If you need to edit the position of more than one object, include it within a single instruction. For example, use \"Edit the Position property of each chair to be 0.5 meters in front of each room wall\" instead of separate instructions for each chair.\n- Your instructions must translate subjective terms into specific, measurable instructions. \n-- For example, terms like \"big\" or \"close to me\" can translate to “2 times its current size” and “1m away from the user” respectively. Always cite explicit numbers.\n-- Terms like \"the table\" or \"this table\" should translate to \"table in the user's field of view\"\n- For colors, use RGBA values.\n- Only instructions modifying the Position property can mention more than one object types. All other property modification can only mention ONE object type.\n\nThe space consists of 4 walls, 1 ceiling, and 1 floor.\n\nYou are limited to creating or modifying the following object types: (You must use the exact case-sensitive type of the object)\nChair, Fox, Lamp, LED Cube, Push Button, Table, Vase, Zombie\n\nThe user's prompt is {{task}}.\n\nThe format for response should strictly be:\n 1. Instruction 1\n\n 2. Instruction 2\n\n …\n\n*Note: Output should not contain any text other than the instructions.*\n{{~/user}}\n{{#assistant~}}\n{{gen \"plan\" max_tokens=2000 temperature=0}}\n{{~/assistant}}\n",
"\n{{#system~}}\nYou are an AI skilled in C# development for Unity's ubicomp space. \nYou will assist in creating functions as part of a larger system. \nYou will do so by translating pseudocode for a task to C# code.\n{{~/system}}\n{{#user~}}\nYour work revolves around our proprietary C# system. Our system comprises of: \n- SceneAPI: This is the wrapper class for our ubicomp space. The methods here will allow for manipulating the whole space. \n- Object3D: Each object in the space is of this type, and the methods here are available for every object. The anchor for the position and rotation of each object is at the bottom center of the object. \n- Vector3D: Any 3-dimensional dataset uses this class to represent x, y, and z.\n- Color3D: Color information using RGBA.\n \n**As the script's class inherits from `SceneAPI`, you can directly call its methods without prefixing.**\n\nUse the provided system to manipulate the room.\n \nFollow these steps:\n1. Write method(s) using C# code to implement the task. \n2. Declare private fields above your method(s) to track the object(s) you create and modify within the method. Ensure you always assign your object finding, creations, or modifications to these declared fields. If you are creating multiple objects, you can use a list to store them.\n3. Use Debug.Log statements for action logging and error handling.\n4. Adhere strictly to standard C# methods. \n5. Add comments to your code to explain your thought process.\n \nHere are all the classes and functions you may use in your code:\n```\nObject types that can be created: (You must use the exact case-sensitive type of the object)\nChair, Fox, Lamp, LED Cube, Push Button, Table, Vase, Zombie \n\nnamespace Enums\n{\n public enum WallName\n {\n Left,\n Right,\n BackLeft,\n BackRight,\n }\n}\npublic class Object3D\n{\n public string GetType()\n public void WhenSelected(UnityAction<SelectEnterEventArgs> function)\n public void WhenNotSelected(UnityAction<SelectExitEventArgs> function)\n public void WhenHovered(UnityAction<HoverEnterEventArgs> function)\n public void WhenNotHovered(UnityAction<HoverExitEventArgs> function)\n public void SetColor(Color3D color)\n public void SetLuminousIntensity(float intensity)\n public void Illuminate(bool lit)\n public void SetPosition(Vector3D pos)\n public void SetRotation(Vector3D rot)\n public void SetSize(Vector3D s)\n public void SetSizeByScale(float s)\n public void Levitate(bool isLevitated)\n public bool IsLevitated()\n public Color GetColor()\n public float GetIntensity()\n public bool IsLit()\n public Vector3D GetPosition()\n public Vector3D GetRotation()\n public Vector3D GetSize()\n public GameObject ToGameObject()\n}\n\npublic class SceneAPI\n{\n public Vector3D GetWallPosition(WallName wallname)\n public List<Object3D> GetAllObject3DsInScene()\n public Object3D FindObject3DByName(string objName)\n // To find an object you could do \n // List<Object3D> objectsInView = GetAllObject3DsInFieldOfView();\n // Object3D desiredObj = objectsInView.Find(obj => obj.GetType().Equals(\"ObjType\"));\n public bool IsObject3DInFieldOfView(Object3D obj)\n public List<Object3D> GetAllObject3DsInFieldOfView() // This is good to get objects in the user's field of view. You can use this to find game objects referenced in the task. 
\n public bool IsObjectTypeValid(string objectType)\n public List<string> GetAllValidObjectTypes()\n public Vector3D GetSceneSize()\n public Vector3D GetUserOrientation()\n public Vector3D GetUsersHeadPosition() // Not good for object creation and position movement methods, as the tall height will cause objects to topple\n public Vector3D GetUsersLeftHandPosition()\n public Vector3D GetUsersRightHandPosition()\n public Vector3D GetUsersLeftHandRotation()\n public Vector3D GetUsersRightHandRotation()\n public Vector3D GetUsersFeetPosition() //This is good for object creation and position movement methods\n public Object3D CreateObject(string newObjName, string objectType, Vector3D position, Vector3D rotation)\n}\n\npublic class Vector3D\n{\n // Vector3D cannot be used like Vector3. It only has the methods below. If you want to use Vector3 methods, convert it to Vector3 using ToVector3() first.\n public float x { get; set; }\n public float y { get; set; }\n public float z { get; set; }\n Vector3D(float x, float y, float z)\n public Vector3D ToVector3()\n public Vector3D FromVector3(Vector3 vec)\n} // To add 2 vector 3Ds, use new Vector3D(vec1.x + vec2.x, vec1.y + vec2.y, vec1.z + vec2.z) Same logic for subtract\n//When using NavMeshAgent, make sure to convert all Vector3Ds to Vector3s using ToVector3() before using them in the NavMeshAgent methods.\n// When performing calculations like multiplication, remember to convert all your Vector3Ds to Vector3s using ToVector3() before performing the calculations.\npublic class Color3D\n{\n //All values below range from 0 to 1 \n public float r { get; set; }\n public float g { get; set; }\n public float b { get; set; }\n public float a { get; set; }\n Color3D(float r, float g, float b, float a)\n}\n```\nThe task to create a function for is: {{task}}. \n \nYour format for responses should strictly be: \n \n// All class members should be declared here. \n\nprivate void Start(){\n // Insert method(s) that only need to be called once\n}\n \nprivate void Update(){\n // If method(s) that need to be called repeatedly\n}\n \npublic void Method1()\n{\n // Insert the method code here\n}\n// And so on... The number of methods will depend on the user's request. \n\n*Note: Output should not contain any text other than the class members and method(s). You must give the full code within the methods*\n{{~/user}}\n{{#assistant~}}\n{{gen \"function\" temperature=0 max_tokens=4096}}\n{{~/assistant}}\n"
] |
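As a closing illustration (an editorial sketch, not part of the dataset): the no-memory page above chains three LLM stages (plan generation, per-step function generation, and script merging). Assuming its helper functions are importable and `OPENAI_API_KEY` is set, the composition outside Streamlit looks roughly as follows; the task text is hypothetical.

```python
task = "Set up a reading corner with a chair and a lamp"                 # hypothetical request
plan = generate_plan(task)                                               # stage 1: numbered instructions
steps = [s for s in plan.split("\n") if s.strip()]                       # one instruction per line
functions = [clean_function_text(generate_function(s)) for s in steps]   # stage 2: one C# method block per step
script = generate_script(task, plan, functions)                          # stage 3: merged SceneAPI script
print(script)
```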