date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | taochenshh/hcp | HCP-E~util~logger.py | '''
copy from openai/baselines
'''
import datetime
import json
import os
import os.path as osp
import shutil
import sys
import tempfile
from datetime import datetime
from numbers import Number
LOG_OUTPUT_FORMATS = ['stdout', 'log', 'csv']
# Also valid: json, tensorboard
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'),\
'expected file or str, got %s' % filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
kvs = {k: v for k, v in kvs.items() if isinstance(v, Number)}
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
return s[:20] + '...' if len(s) > 23 else s
def writeseq(self, seq):
for arg in seq:
self.file.write(arg)
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
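# Illustrative rendering (a sketch, not output captured from a run): with
# kvs = {"epoch": 3, "loss": 0.0214}, writekvs writes a table like
#
#   --------------------
#   | epoch | 3        |
#   | loss  | 0.0214   |
#   --------------------
#
# Floats pass through '%-8.3g', so "0.0214" is left-justified to width 8,
# and the dash rule spans keywidth + valwidth + 7 characters.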
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
kvs = {k: v for k, v in kvs.items() if isinstance(v, Number)}
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'a+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
kvs = {k: v for k, v in kvs.items() if isinstance(v, Number)}
# Add our current row to the history
extra_keys = kvs.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
path = osp.join(osp.abspath(dir),
datetime.now().strftime('%b%d_%H-%M-%S'))
from tensorboardX import SummaryWriter
self.writer = SummaryWriter(log_dir=path)
def writekvs(self, kvs):
assert 'epoch' in kvs.keys()
step = int(kvs['epoch'])
scalar_kvs = {}
array_kvs = {}
for k, v in kvs.items():
if isinstance(v, Number):
scalar_kvs[k] = v
else:
array_kvs[k] = v
for k, v in scalar_kvs.items():
self.writer.add_scalar(k, float(v), step)
for k, v in array_kvs.items():
self.writer.add_histogram(k, v, step, bins='sqrt')
def close(self):
if self.writer:
self.writer.close()
self.writer = None
def make_output_format(format, ev_dir):
from mpi4py import MPI
os.makedirs(ev_dir, exist_ok=True)
rank = MPI.COMM_WORLD.Get_rank()
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
suffix = "" if rank == 0 else ("-mpi%03i" % rank)
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % suffix))
elif format == 'json':
assert rank == 0
return JSONOutputFormat(osp.join(ev_dir, 'progress.json'))
elif format == 'csv':
assert rank == 0
return CSVOutputFormat(osp.join(ev_dir, 'progress.csv'))
elif format == 'tensorboard':
assert rank == 0
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb'))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
Logger.CURRENT.logkv(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
Logger.CURRENT.dumpkvs()
def getkvs():
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators,
to the console and output files
(if you've configured an output file).
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
Logger.CURRENT.set_level(level)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory
(i.e., if you didn't call start)
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
# ================================================================
# Backend
# ================================================================
class Logger(object):
# A logger with no output files.
# (See right below class definition)
DEFAULT = None
# So that you can still log to the terminal
# without setting up any output files
# Current logger being used by the free functions above
CURRENT = None
def __init__(self, dir, output_formats):
self.name2val = {} # values this iteration
self.level = INFO
self.dir = dir
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def dumpkvs(self):
if self.level == DISABLED:
return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
Logger.CURRENT = Logger(dir=None,
output_formats=[HumanOutputFormat(sys.stdout)])
Logger.DEFAULT = Logger.CURRENT
def configure(dir=None, format_strs=None):
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
st_time = "openai-%Y-%m-%d-%H-%M-%S-%f"
dir = osp.join(tempfile.gettempdir(),
datetime.now().strftime(st_time))  # `datetime` is the class imported at the top
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
if format_strs is None:
strs = os.getenv('OPENAI_LOG_FORMAT')
format_strs = strs.split(',') if strs else LOG_OUTPUT_FORMATS
output_formats = [make_output_format(f, dir) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log('Logging to %s' % dir)
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
from collections import defaultdict
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or"
" directory containing them. Got %s" % path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx, tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step - 1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
| [] |
2024-01-10 | renmark314/chat-web | modules~models~base_model.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
import logging
import json
import commentjson as cjson
import os
import sys
import requests
import urllib3
import traceback
import pathlib
from tqdm import tqdm
import colorama
from duckduckgo_search import DDGS
from itertools import islice
import asyncio
import aiohttp
from enum import Enum
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks.manager import BaseCallbackManager
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.input import print_text
from langchain.schema import AgentAction, AgentFinish, LLMResult
from threading import Thread, Condition
from collections import deque
from langchain.chat_models.base import BaseChatModel
from langchain.schema import HumanMessage, AIMessage, SystemMessage, BaseMessage
from ..presets import *
from ..index_func import *
from ..utils import *
from .. import shared
from ..config import retrieve_proxy
class CallbackToIterator:
def __init__(self):
self.queue = deque()
self.cond = Condition()
self.finished = False
def callback(self, result):
with self.cond:
self.queue.append(result)
self.cond.notify() # Wake up the generator.
def __iter__(self):
return self
def __next__(self):
with self.cond:
# Wait for a value to be added to the queue.
while not self.queue and not self.finished:
self.cond.wait()
if not self.queue:
raise StopIteration()
return self.queue.popleft()
def finish(self):
with self.cond:
self.finished = True
self.cond.notify() # Wake up the generator if it's waiting.
def get_action_description(text):
match = re.search('```(.*?)```', text, re.S)
json_text = match.group(1)
# Convert the JSON string into a Python dict
json_dict = json.loads(json_text)
# Extract the values of 'action' and 'action_input'
action_name = json_dict['action']
action_input = json_dict['action_input']
if action_name != "Final Answer":
return f'<!-- S O PREFIX --><p class="agent-prefix">{action_name}: {action_input}\n\n</p><!-- E O PREFIX -->'
else:
return ""
class ChuanhuCallbackHandler(BaseCallbackHandler):
def __init__(self, callback) -> None:
"""Initialize callback handler."""
self.callback = callback
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
self.callback(get_action_description(action.log))
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation."""
# if observation_prefix is not None:
# self.callback(f"\n\n{observation_prefix}")
# self.callback(output)
# if llm_prefix is not None:
# self.callback(f"\n\n{llm_prefix}")
if observation_prefix is not None:
logging.info(observation_prefix)
self.callback(output)
if llm_prefix is not None:
logging.info(llm_prefix)
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
# self.callback(f"{finish.log}\n\n")
logging.info(finish.log)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
self.callback(token)
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any) -> Any:
"""Run when a chat model starts running."""
pass
class ModelType(Enum):
Unknown = -1
OpenAI = 0
ChatGLM = 1
LLaMA = 2
XMChat = 3
StableLM = 4
MOSS = 5
YuanAI = 6
Minimax = 7
ChuanhuAgent = 8
GooglePaLM = 9
LangchainChat = 10
Midjourney = 11
Spark = 12
@classmethod
def get_type(cls, model_name: str):
model_type = None
model_name_lower = model_name.lower()
if "gpt" in model_name_lower:
model_type = ModelType.OpenAI
elif "chatglm" in model_name_lower:
model_type = ModelType.ChatGLM
elif "llama" in model_name_lower or "alpaca" in model_name_lower:
model_type = ModelType.LLaMA
elif "xmchat" in model_name_lower:
model_type = ModelType.XMChat
elif "stablelm" in model_name_lower:
model_type = ModelType.StableLM
elif "moss" in model_name_lower:
model_type = ModelType.MOSS
elif "yuanai" in model_name_lower:
model_type = ModelType.YuanAI
elif "minimax" in model_name_lower:
model_type = ModelType.Minimax
elif "川虎助理" in model_name_lower:
model_type = ModelType.ChuanhuAgent
elif "palm" in model_name_lower:
model_type = ModelType.GooglePaLM
elif "midjourney" in model_name_lower:
model_type = ModelType.Midjourney
elif "azure" in model_name_lower or "api" in model_name_lower:
model_type = ModelType.LangchainChat
elif "星火大模型" in model_name_lower:
model_type = ModelType.Spark
else:
model_type = ModelType.Unknown
return model_type
class BaseLLMModel:
def __init__(
self,
model_name,
system_prompt=INITIAL_SYSTEM_PROMPT,
temperature=1.0,
top_p=1.0,
n_choices=1,
stop=None,
max_generation_token=None,
presence_penalty=0,
frequency_penalty=0,
logit_bias=None,
user="",
) -> None:
self.history = []
self.all_token_counts = []
self.model_name = model_name
self.model_type = ModelType.get_type(model_name)
try:
self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
except KeyError:
self.token_upper_limit = DEFAULT_TOKEN_LIMIT
self.interrupted = False
self.system_prompt = system_prompt
self.api_key = None
self.need_api_key = False
self.single_turn = False
self.temperature = temperature
self.top_p = top_p
self.n_choices = n_choices
self.stop_sequence = stop
self.max_generation_token = None
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.logit_bias = logit_bias
self.user_identifier = user
def get_answer_stream_iter(self):
"""stream predict, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
should return a generator, each time give the next word (str) in the answer
"""
logging.warning(
"stream predict not implemented, using at once predict instead")
response, _ = self.get_answer_at_once()
yield response
def get_answer_at_once(self):
"""predict at once, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
Should return:
the answer (str)
total token count (int)
"""
logging.warning(
"at once predict not implemented, using stream predict instead")
response_iter = self.get_answer_stream_iter()
count = 0
for response in response_iter:
count += 1
return response, sum(self.all_token_counts) + count
def billing_info(self):
"""get billing infomation, inplement if needed"""
logging.warning("billing info not implemented, using default")
return BILLING_NOT_APPLICABLE_MSG
def count_token(self, user_input):
"""get token count from input, implement if needed"""
# logging.warning("token count not implemented, using default")
return len(user_input)
def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
def get_return_value():
return chatbot, status_text
status_text = i18n("Please wait...")
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
logging.debug(f"输入token计数: {user_token_count}")
stream_iter = self.get_answer_stream_iter()
if display_append:
display_append = '\n\n<hr class="append-display no-in-raw" />' + display_append
partial_text = ""
token_increment = 1
for partial_text in stream_iter:
if type(partial_text) == tuple:
partial_text, token_increment = partial_text
chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
self.all_token_counts[-1] += token_increment
status_text = self.token_message()
yield get_return_value()
if self.interrupted:
self.recover()
break
self.history.append(construct_assistant(partial_text))
def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
if fake_input is not None:
user_token_count = self.count_token(fake_input)
else:
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
ai_reply, total_token_count = self.get_answer_at_once()
self.history.append(construct_assistant(ai_reply))
if fake_input is not None:
self.history[-2] = construct_user(fake_input)
chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
if fake_input is not None:
self.all_token_counts[-1] += count_token(
construct_assistant(ai_reply))
else:
self.all_token_counts[-1] = total_token_count - \
sum(self.all_token_counts)
status_text = self.token_message()
return chatbot, status_text
def handle_file_upload(self, files, chatbot, language):
"""if the model accepts multi modal input, implement this function"""
status = gr.Markdown.update()
if files:
index = construct_index(self.api_key, file_src=files)
status = i18n("索引构建完成")
return gr.Files.update(), chatbot, status
def summarize_index(self, files, chatbot, language):
status = gr.Markdown.update()
if files:
index = construct_index(self.api_key, file_src=files)
status = i18n("总结完成")
logging.info(i18n("生成内容总结中……"))
os.environ["OPENAI_API_KEY"] = self.api_key
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import StdOutCallbackHandler
prompt_template = "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN " + language + ":"
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["text"])
llm = ChatOpenAI()
chain = load_summarize_chain(
llm, chain_type="map_reduce", return_intermediate_steps=True, map_prompt=PROMPT, combine_prompt=PROMPT)
summary = chain({"input_documents": list(index.docstore.__dict__[
"_dict"].values())}, return_only_outputs=True)["output_text"]
print(i18n("总结") + f": {summary}")
chatbot.append([i18n("上传了")+str(len(files))+"个文件", summary])
return chatbot, status
def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
fake_inputs = None
display_append = []
limited_context = False
fake_inputs = real_inputs
if files:
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores.base import VectorStoreRetriever
limited_context = True
msg = "加载索引中……"
logging.info(msg)
index = construct_index(self.api_key, file_src=files)
assert index is not None, "获取索引失败"
msg = "索引获取成功,生成回答中……"
logging.info(msg)
with retrieve_proxy():
retriever = VectorStoreRetriever(vectorstore=index, search_type="similarity_score_threshold", search_kwargs={
"k": 6, "score_threshold": 0.5})
relevant_documents = retriever.get_relevant_documents(
real_inputs)
reference_results = [[d.page_content.strip("�"), os.path.basename(
d.metadata["source"])] for d in relevant_documents]
reference_results = add_source_numbers(reference_results)
display_append = add_details(reference_results)
display_append = "\n\n" + "".join(display_append)
real_inputs = (
replace_today(PROMPT_TEMPLATE)
.replace("{query_str}", real_inputs)
.replace("{context_str}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
elif use_websearch:
search_results = []
with DDGS() as ddgs:
ddgs_gen = ddgs.text(real_inputs, backend="lite")
for r in islice(ddgs_gen, 10):
search_results.append(r)
reference_results = []
for idx, result in enumerate(search_results):
logging.debug(f"搜索结果{idx + 1}:{result}")
domain_name = urllib3.util.parse_url(result['href']).host
reference_results.append([result['body'], result['href']])
display_append.append(
# f"{idx+1}. [{domain_name}]({result['href']})\n"
f"<a href=\"{result['href']}\" target=\"_blank\">{idx+1}. {result['title']}</a>"
)
reference_results = add_source_numbers(reference_results)
# display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
display_append = '<div class = "source-a">' + \
"".join(display_append) + '</div>'
real_inputs = (
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
.replace("{query}", real_inputs)
.replace("{web_results}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
display_append = ""
return limited_context, fake_inputs, display_append, real_inputs, chatbot
def predict(
self,
inputs,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
should_check_token_count=True,
): # repetition_penalty, top_k
status_text = "Please wait..."
logging.info(
"用户" + f"{self.user_identifier}" + "的输入为:" +
colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
)
if should_check_token_count:
yield chatbot + [(inputs, "")], status_text
if reply_language == "跟随问题语言(不稳定)":
reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(
real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
yield chatbot + [(fake_inputs, "")], status_text
if (
self.need_api_key and
self.api_key is None
and not shared.state.multi_api_key
):
status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
logging.info(status_text)
chatbot.append((inputs, ""))
if len(self.history) == 0:
self.history.append(construct_user(inputs))
self.history.append("")
self.all_token_counts.append(0)
else:
self.history[-2] = construct_user(inputs)
yield chatbot + [(inputs, "")], status_text
return
elif len(inputs.strip()) == 0:
status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
logging.info(status_text)
yield chatbot + [(inputs, "")], status_text
return
if self.single_turn:
self.history = []
self.all_token_counts = []
self.history.append(construct_user(inputs))
try:
if stream:
logging.debug("使用流式传输")
iter = self.stream_next_chatbot(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
for chatbot, status_text in iter:
yield chatbot, status_text
else:
logging.debug("不使用流式传输")
chatbot, status_text = self.next_chatbot_at_once(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
yield chatbot, status_text
except Exception as e:
traceback.print_exc()
status_text = STANDARD_ERROR_MSG + beautify_err_msg(str(e))
yield chatbot, status_text
if len(self.history) > 1 and self.history[-1]["content"] != inputs:
logging.info(
"回答为:"
+ colorama.Fore.BLUE
+ f"{self.history[-1]['content']}"
+ colorama.Style.RESET_ALL
)
if limited_context:
# self.history = self.history[-4:]
# self.all_token_counts = self.all_token_counts[-2:]
self.history = []
self.all_token_counts = []
max_token = self.token_upper_limit - TOKEN_OFFSET
if sum(self.all_token_counts) > max_token and should_check_token_count:
count = 0
while (
sum(self.all_token_counts)
> self.token_upper_limit * REDUCE_TOKEN_FACTOR
and sum(self.all_token_counts) > 0
):
count += 1
del self.all_token_counts[0]
del self.history[:2]
logging.info(status_text)
status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
yield chatbot, status_text
self.auto_save(chatbot)
def retry(
self,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="中文",
):
logging.debug("重试中……")
if len(self.history) > 0:
inputs = self.history[-2]["content"]
del self.history[-2:]
if len(self.all_token_counts) > 0:
self.all_token_counts.pop()
elif len(chatbot) > 0:
inputs = chatbot[-1][0]
else:
yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
return
iter = self.predict(
inputs,
chatbot,
stream=stream,
use_websearch=use_websearch,
files=files,
reply_language=reply_language,
)
for x in iter:
yield x
logging.debug("重试完毕")
# def reduce_token_size(self, chatbot):
# logging.info("开始减少token数量……")
# chatbot, status_text = self.next_chatbot_at_once(
# summarize_prompt,
# chatbot
# )
# max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
# num_chat = find_n(self.all_token_counts, max_token_count)
# logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
# chatbot = chatbot[:-1]
# self.history = self.history[-2*num_chat:] if num_chat > 0 else []
# self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
# msg = f"保留了最近{num_chat}轮对话"
# logging.info(msg)
# logging.info("减少token数量完毕")
# return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
def interrupt(self):
self.interrupted = True
def recover(self):
self.interrupted = False
def set_token_upper_limit(self, new_upper_limit):
self.token_upper_limit = new_upper_limit
print(f"token上限设置为{new_upper_limit}")
def set_temperature(self, new_temperature):
self.temperature = new_temperature
def set_top_p(self, new_top_p):
self.top_p = new_top_p
def set_n_choices(self, new_n_choices):
self.n_choices = new_n_choices
def set_stop_sequence(self, new_stop_sequence: str):
new_stop_sequence = new_stop_sequence.split(",")
self.stop_sequence = new_stop_sequence
def set_max_tokens(self, new_max_tokens):
self.max_generation_token = new_max_tokens
def set_presence_penalty(self, new_presence_penalty):
self.presence_penalty = new_presence_penalty
def set_frequency_penalty(self, new_frequency_penalty):
self.frequency_penalty = new_frequency_penalty
def set_logit_bias(self, logit_bias):
logit_bias = logit_bias.split()
bias_map = {}
encoding = tiktoken.get_encoding("cl100k_base")
for line in logit_bias:
word, bias_amount = line.split(":")
if word:
for token in encoding.encode(word):
bias_map[token] = float(bias_amount)
self.logit_bias = bias_map
def set_user_identifier(self, new_user_identifier):
self.user_identifier = new_user_identifier
def set_system_prompt(self, new_system_prompt):
self.system_prompt = new_system_prompt
def set_key(self, new_access_key):
if "*" not in new_access_key:
self.api_key = new_access_key.strip()
msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
logging.info(msg)
return self.api_key, msg
else:
return gr.update(), gr.update()
def set_single_turn(self, new_single_turn):
self.single_turn = new_single_turn
def reset(self):
self.history = []
self.all_token_counts = []
self.interrupted = False
pathlib.Path(os.path.join(HISTORY_DIR, self.user_identifier, new_auto_history_filename(
os.path.join(HISTORY_DIR, self.user_identifier)))).touch()
return [], self.token_message([0])
def delete_first_conversation(self):
if self.history:
del self.history[:2]
del self.all_token_counts[0]
return self.token_message()
def delete_last_conversation(self, chatbot):
if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
msg = "由于包含报错信息,只删除chatbot记录"
chatbot.pop()
return chatbot, self.history
if len(self.history) > 0:
self.history.pop()
self.history.pop()
if len(chatbot) > 0:
msg = "删除了一组chatbot对话"
chatbot.pop()
if len(self.all_token_counts) > 0:
msg = "删除了一组对话的token计数记录"
self.all_token_counts.pop()
msg = "删除了一组对话"
return chatbot, msg
def token_message(self, token_lst=None):
if token_lst is None:
token_lst = self.all_token_counts
token_sum = 0
for i in range(len(token_lst)):
token_sum += sum(token_lst[: i + 1])
return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"
def save_chat_history(self, filename, chatbot, user_name):
if filename == "":
return
if not filename.endswith(".json"):
filename += ".json"
return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
def auto_save(self, chatbot):
history_file_path = get_history_filepath(self.user_identifier)
save_file(history_file_path, self.system_prompt,
self.history, chatbot, self.user_identifier)
def export_markdown(self, filename, chatbot, user_name):
if filename == "":
return
if not filename.endswith(".md"):
filename += ".md"
return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
def load_chat_history(self, filename, user_name):
logging.debug(f"{user_name} 加载对话历史中……")
logging.info(f"filename: {filename}")
if type(filename) != str and filename is not None:
filename = filename.name
try:
if "/" not in filename:
history_file_path = os.path.join(
HISTORY_DIR, user_name, filename)
else:
history_file_path = filename
with open(history_file_path, "r", encoding="utf-8") as f:
json_s = json.load(f)
try:
if type(json_s["history"][0]) == str:
logging.info("历史记录格式为旧版,正在转换……")
new_history = []
for index, item in enumerate(json_s["history"]):
if index % 2 == 0:
new_history.append(construct_user(item))
else:
new_history.append(construct_assistant(item))
json_s["history"] = new_history
logging.info(new_history)
except:
pass
logging.debug(f"{user_name} 加载对话历史完毕")
self.history = json_s["history"]
return os.path.basename(filename), json_s["system"], json_s["chatbot"]
except:
# No chat history found, or parsing the chat history failed
logging.info(f"没有找到对话历史记录 {filename}")
return gr.update(), self.system_prompt, gr.update()
def delete_chat_history(self, filename, user_name):
if filename == "CANCELED":
return gr.update(), gr.update(), gr.update()
if filename == "":
return i18n("你没有选择任何对话历史"), gr.update(), gr.update()
if not filename.endswith(".json"):
filename += ".json"
if "/" not in filename:
history_file_path = os.path.join(HISTORY_DIR, user_name, filename)
else:
history_file_path = filename
try:
os.remove(history_file_path)
return i18n("删除对话历史成功"), get_history_dropdown(user_name), []
except:
logging.info(f"删除对话历史失败 {history_file_path}")
return i18n("对话历史")+filename+i18n("已经被删除啦"), gr.update(), gr.update()
def auto_load(self):
if self.user_identifier == "":
self.reset()
return self.system_prompt, gr.update()
history_file_path = get_history_filepath(self.user_identifier)
filename, system_prompt, chatbot = self.load_chat_history(
history_file_path, self.user_identifier)
return system_prompt, chatbot
def like(self):
"""like the last response, implement if needed
"""
return gr.update()
def dislike(self):
"""dislike the last response, implement if needed
"""
return gr.update()
class Base_Chat_Langchain_Client(BaseLLMModel):
def __init__(self, model_name, user_name=""):
super().__init__(model_name, user=user_name)
self.need_api_key = False
self.model = self.setup_model()
def setup_model(self):
# implement this to set up the model, then return it
pass
def _get_langchain_style_history(self):
history = [SystemMessage(content=self.system_prompt)]
for i in self.history:
if i["role"] == "user":
history.append(HumanMessage(content=i["content"]))
elif i["role"] == "assistant":
history.append(AIMessage(content=i["content"]))
return history
def get_answer_at_once(self):
assert isinstance(
self.model, BaseChatModel), "model is not instance of LangChain BaseChatModel"
history = self._get_langchain_style_history()
response = self.model(messages=history)
# sum() over a string is invalid; use the reply length as a rough token count.
return response.content, len(response.content)
def get_answer_stream_iter(self):
it = CallbackToIterator()
assert isinstance(
self.model, BaseChatModel), "model is not instance of LangChain BaseChatModel"
history = self._get_langchain_style_history()
def thread_func():
self.model(messages=history, callbacks=[
ChuanhuCallbackHandler(it.callback)])
it.finish()
t = Thread(target=thread_func)
t.start()
partial_text = ""
for value in it:
partial_text += value
yield partial_text
| [
"content",
"Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN PLACEHOLDER:"
] |
2024-01-10 | chriscarrollsmith/e2b | api-service~codegen~codegen.py | from typing import (
List,
Any,
Dict,
ClassVar,
)
from langchain.agents import AgentExecutor, Agent
from langchain.schema import BaseLanguageModel
from pydantic import BaseModel, PrivateAttr
from langchain.callbacks.base import (
AsyncCallbackManager,
BaseCallbackManager,
)
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.tools import BaseTool
from models import get_model, ModelConfig
from database import Database
from codegen.agent import CodegenAgent, CodegenAgentExecutor
from codegen.callbacks.logs import LogsCallbackHandler
from codegen.prompt import (
SYSTEM_PREFIX,
SYSTEM_SUFFIX,
SYSTEM_FORMAT_INSTRUCTIONS,
HUMAN_INSTRUCTIONS_SUFFIX,
get_human_instructions_prefix,
)
class Codegen(BaseModel):
input_variables: ClassVar[List[str]] = ["input", "agent_scratchpad", "method"]
_agent: Agent = PrivateAttr()
_agent_executor: AgentExecutor = PrivateAttr()
_tools: List[BaseTool] = PrivateAttr()
_llm: BaseLanguageModel = PrivateAttr()
_database: Database = PrivateAttr()
_callback_manager: BaseCallbackManager = PrivateAttr()
def __init__(
self,
database: Database,
callback_manager: BaseCallbackManager,
tools: List[BaseTool],
llm: BaseLanguageModel,
agent: Agent,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self._database = database
self._callback_manager = callback_manager
self._tools = tools
self._llm = llm
self._agent = agent
self._agent_executor = CodegenAgentExecutor.from_agent_and_tools(
agent=self._agent,
tools=self._tools,
verbose=True,
callback_manager=self._callback_manager,
)
def tool_names(self):
return [tool.name for tool in self._tools]
@classmethod
def from_tools_and_database(
cls,
custom_tools: List[BaseTool],
model_config: ModelConfig,
database: Database,
):
callback_manager = AsyncCallbackManager(
[
StreamingStdOutCallbackHandler(),
]
)
# Assign custom callback manager to custom tools
for tool in custom_tools:
tool.callback_manager = callback_manager
# Create the LLM
llm = get_model(model_config, callback_manager)
print(
f"Using LLM '{model_config['provider']}' with args:\n{model_config['args']}"
)
# Create CodegenAgent
agent = CodegenAgent.from_llm_and_tools(
llm=llm,
tools=custom_tools,
prefix=SYSTEM_PREFIX,
suffix=SYSTEM_SUFFIX,
format_instructions=SYSTEM_FORMAT_INSTRUCTIONS,
input_variables=Codegen.input_variables,
callback_manager=callback_manager,
)
return cls(
database=database,
callback_manager=callback_manager,
tools=custom_tools,
llm=llm,
agent=agent,
)
async def generate(
self,
run_id: str,
route: str,
method: str,
blocks: List[Dict],
):
self._callback_manager.add_handler(
LogsCallbackHandler(
database=self._database, run_id=run_id, tool_names=self.tool_names()
)
)
# Retrieve the description block.
description_block: Dict[str, str] = next(
b for b in blocks if b.get("type") == "Description"
)
# Retrieve the block describing the incoming request payload.
incoming_request_block: Dict[str, str] | None = next(
(b for b in blocks if b.get("type") == "RequestBody" and b.get("content")),
None,
)
# Retrieve the instructions block.
instructions_block: Dict[str, str] | None = next(
(b for b in blocks if b.get("type") == "Instructions" and b.get("content")),
None,
)
input_vars = {
"description": description_block["content"],
"request_body": f"{{\n{incoming_request_block['content']}\n}}"
if incoming_request_block
else None,
"route": route,
"method": method,
}
instructions = "Here are the instructions:"
# inst_idx = 0
# Append the premade prefix instructions.
for instruction in get_human_instructions_prefix(
has_request_body=bool(incoming_request_block)
):
# inst_idx += 1
values = []
# Extract the correct values from `input_vars` based on the keys.
for k, v in input_vars.items():
if k in instruction["variables"]:
values.append(v)
# Use the values to format the instruction string.
inst = instruction["content"].format(*values)
# instructions = instructions + "\n" + f"{inst_idx}. {inst}"
instructions = instructions + "\n" + f"- {inst}"
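# A worked example of the loop above (the instruction entry here is
# hypothetical, not taken from this repo's prompt.py): given
#   instruction = {"variables": ["route", "method"],
#                  "content": "The route is {} and the HTTP method is {}."}
# `values` is filled by walking input_vars in its insertion order
# (description, request_body, route, method) and keeping only the keys named
# in instruction["variables"], so values == [route, method] here and the
# rendered line becomes "- The route is /users and the HTTP method is post."
# (assuming route="/users" and method="post"). Note that positional
# formatting follows input_vars order, not the order of the "variables" list.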
# Append the user-provided instructions
if instructions_block:
instructions = (
instructions
+ "\nHere are the required implementation instructions:\n"
+ instructions_block["content"]
)
print("Instructions:\n", instructions)
######## +++++ OLD
# print("+++ BLOCKS")
# print(blocks)
# print("--- BLOCKS")
# for block in blocks:
# if block.get("type") == "Basic":
# inst_idx += 1
# instructions = instructions + "\n" + f"{inst_idx}. " + block["prompt"]
# Append the premade suffix instructions.
for inst in HUMAN_INSTRUCTIONS_SUFFIX:
instructions = instructions + f"\n{inst}"
# # instructions += "\nThought: Here is the plan of how I will go about solving this based on the instructions I got:\n1."
# # instructions += "\nThought:"
# print("Instructions:\n", instructions)
######## ----- OLD
print("Running executor...")
await self._agent_executor.arun(
agent_scratchpad="",
# input=testing_instructions
input=instructions,
method=method,
)
| [] |
2024-01-10 | chriscarrollsmith/e2b | api-service~models~providers~replicate.py | from typing import Dict, List, Optional
from langchain.llms import Replicate
from pydantic import root_validator
import replicate as replicate_python
class ReplicateFix(Replicate):
# Override checks for the env vars
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
return values
async def _acall(self, prompt: str, stop: Optional[List[str]] = None):
"""Call to replicate endpoint."""
# get the model and version
model_str, version_str = self.model.split(":")
client = replicate_python.Client(self.replicate_api_token)
model = client.models.get(model_str)
version = model.versions.get(version_str)
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
version.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
first_input_name = input_properties[0][0]
inputs = {first_input_name: prompt, **self.input}
outputs = client.run(
self.model,
input={
**inputs,
**self.model_kwargs,
},
)
text = ""
for token in outputs:
text += token
if self.callback_manager.is_async:
await self.callback_manager.on_llm_new_token(
token,
verbose=self.verbose,
# We explicitly flush the logs in log queue because the calls to this model are not actually async so they block.
flush=True,
)
else:
self.callback_manager.on_llm_new_token(
token,
verbose=self.verbose,
)
return text
| [] |
2024-01-10 | chriscarrollsmith/e2b | api-service~codegen~callbacks~logs.py | from typing import Dict, Any, List, Union
from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
from pydantic import PrivateAttr
from codegen.callbacks.log_queue import LogQueue
from codegen.agent.parsing import ToolLog, ThoughtLog
from codegen.callbacks.log_parser import LogStreamParser
from database import Database
class LogsCallbackHandler(AsyncCallbackHandler):
_database: Database = PrivateAttr()
_run_id: str = PrivateAttr()
_raw_logs: str = ""
def __init__(
self, database: Database, run_id: str, tool_names: List[str], **kwargs: Any
):
super().__init__(**kwargs)
self._database = database
self._run_id = run_id
self._parser = LogStreamParser(tool_names=tool_names)
self._log_queue = LogQueue()
self._raw_log_queue = LogQueue(1.5)
def __del__(self):
self._log_queue.close()
self._raw_log_queue.close()
def _add_and_push_raw_logs(self, new_raw_log: str) -> None:
self._raw_logs += new_raw_log
self._raw_log_queue.add(
self._database.push_raw_logs(self._run_id, self._raw_logs),
)
async def _push_logs(
self, logs: list[ToolLog | ThoughtLog], flush: bool = False
) -> None:
self._log_queue.add(
self._database.push_logs(self._run_id, logs),
)
if flush:
await self._log_queue.flush()
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts running."""
pass
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
logs = self._parser.ingest_token(token).get_logs()
await self._push_logs(logs, kwargs.get("flush", False))
self._add_and_push_raw_logs(token)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
pass
async def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when LLM errors."""
pass
async def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
pass
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
pass
async def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when chain errors."""
pass
async def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
print("Starting tool")
self._add_and_push_raw_logs("Starting tool...")
await self._log_queue.flush()
await self._raw_log_queue.flush()
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
print("Finished tool")
logs = self._parser.ingest_tool_output(output).get_logs()
await self._push_logs(logs)
self._add_and_push_raw_logs(f"\n{output}\n")
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
pass
async def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when tool errors."""
print("Tool error", error)
self._add_and_push_raw_logs(f"Tool error:\n{error}\n")
async def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on arbitrary text."""
pass
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on agent end."""
pass
| [] |
2024-01-10 | ovenmitt/angel_system | ros~angel_system_nodes~angel_system_nodes~gpt_emotion_detector.py | from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
import openai
import os
import rclpy
from angel_system_nodes.base_emotion_detector import BaseEmotionDetector, LABEL_MAPPINGS
openai.organization = os.getenv("OPENAI_ORG_ID")
openai.api_key = os.getenv("OPENAI_API_KEY")
# The following are few shot examples when prompting GPT.
FEW_SHOT_EXAMPLES = [
{
"utterance": "Go back to the previous step you dumb machine!",
"label": "negative.",
},
{"utterance": "Next step, please.", "label": "neutral"},
{"utterance": "We're doing great and I'm learning a lot!", "label": "positive"},
]
class GptEmotionDetector(BaseEmotionDetector):
def __init__(self):
super().__init__()
self.log = self.get_logger()
# This node additionally includes fields for interacting with OpenAI
# via LangChain.
if not os.getenv("OPENAI_API_KEY"):
self.log.info("OPENAI_API_KEY environment variable is unset!")
else:
self.openai_api_key = os.getenv("OPENAI_API_KEY")
if not os.getenv("OPENAI_ORG_ID"):
self.log.info("OPENAI_ORG_ID environment variable is unset!")
else:
self.openai_org_id = os.getenv("OPENAI_ORG_ID")
if not bool(self.openai_api_key and self.openai_org_id):
raise ValueError("Please configure OpenAI API Keys.")
self.chain = self._configure_langchain()
def _configure_langchain(self):
def _labels_list_parenthetical_str(labels):
concat_labels = ", ".join(labels)
return f"({concat_labels})"
def _labels_list_str(labels):
return ", ".join(labels[:-1]) + f" or {labels[-1]}"
all_labels_parenthetical = _labels_list_parenthetical_str(
list(LABEL_MAPPINGS.values())
)
all_labels = _labels_list_str(list(LABEL_MAPPINGS.values()))
# Define the few shot template.
template = (
f"Utterance: {{utterance}}\nEmotion {all_labels_parenthetical}: {{label}}"
)
example_prompt = PromptTemplate(
input_variables=["utterance", "label"], template=template
)
prompt_instructions = f"Classify each utterance as {all_labels}.\n"
inference_sample = (
f"Utterance: {{utterance}}\nIntent {all_labels_parenthetical}:"
)
few_shot_prompt = FewShotPromptTemplate(
examples=FEW_SHOT_EXAMPLES,
example_prompt=example_prompt,
prefix=prompt_instructions,
suffix=inference_sample,
input_variables=["utterance"],
example_separator="\n",
)
openai_llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
openai_api_key=self.openai_api_key,
temperature=0.0,
max_tokens=1,
)
return LLMChain(llm=openai_llm, prompt=few_shot_prompt)
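# For orientation, a sketch of the prompt this chain ends up sending (not
# captured from a real run; it assumes LABEL_MAPPINGS maps to the labels
# "negative", "neutral" and "positive", consistent with FEW_SHOT_EXAMPLES,
# and that FewShotPromptTemplate joins prefix, examples and suffix with the
# "\n" separator configured above):
#
#   Classify each utterance as negative, neutral or positive.
#
#   Utterance: Go back to the previous step you dumb machine!
#   Emotion (negative, neutral, positive): negative
#   Utterance: Next step, please.
#   Emotion (negative, neutral, positive): neutral
#   Utterance: We're doing great and I'm learning a lot!
#   Emotion (negative, neutral, positive): positive
#   Utterance: <incoming utterance text>
#   Emotion (negative, neutral, positive):
#
# max_tokens=1 then restricts the completion to a single label token.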
def get_inference(self, msg):
"""
Detects the user intent via langchain execution of GPT.
"""
return (self.chain.run(utterance=msg.utterance_text), 0.5)
def main():
rclpy.init()
emotion_detector = GptEmotionDetector()
rclpy.spin(emotion_detector)
emotion_detector.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
| [
"Utterance: {utterance}\nEmotion PLACEHOLDER: {label}",
"utterance",
"\n",
"Classify each utterance as PLACEHOLDER.\n"
] |
2024-01-10 | ovenmitt/angel_system | ros~angel_system_nodes~angel_system_nodes~gpt_intent_detector.py | from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
import openai
import os
import rclpy
from angel_system_nodes.base_intent_detector import BaseIntentDetector, INTENT_LABELS
openai.organization = os.getenv("OPENAI_ORG_ID")
openai.api_key = os.getenv("OPENAI_API_KEY")
# The following are few shot examples when prompting GPT.
FEW_SHOT_EXAMPLES = [
{"utterance": "Go back to the previous step!", "label": "prev_step."},
{"utterance": "Next step, please.", "label": "next_step"},
{"utterance": "How should I wrap this tourniquet?", "label": "inquiry"},
{"utterance": "The sky is blue", "label": "other"},
]
class GptIntentDetector(BaseIntentDetector):
def __init__(self):
super().__init__()
self.log = self.get_logger()
# This node additionally includes fields for interacting with OpenAI
# via LangChain.
if not os.getenv("OPENAI_API_KEY"):
self.log.info("OPENAI_API_KEY environment variable is unset!")
else:
self.openai_api_key = os.getenv("OPENAI_API_KEY")
if not os.getenv("OPENAI_ORG_ID"):
self.log.info("OPENAI_ORG_ID environment variable is unset!")
else:
self.openai_org_id = os.getenv("OPENAI_ORG_ID")
if not bool(self.openai_api_key and self.openai_org_id):
raise ValueError("Please configure OpenAI API Keys.")
self.chain = self._configure_langchain()
def _configure_langchain(self):
def _labels_list_parenthetical_str(labels):
concat_labels = ", ".join(labels)
return f"({concat_labels})"
def _labels_list_str(labels):
return ", ".join(labels[:-1]) + f" or {labels[-1]}"
all_intents_parenthetical = _labels_list_parenthetical_str(INTENT_LABELS)
all_intents = _labels_list_str(INTENT_LABELS)
# Define the few shot template.
template = (
f"Utterance: {{utterance}}\nIntent {all_intents_parenthetical}: {{label}}"
)
example_prompt = PromptTemplate(
input_variables=["utterance", "label"], template=template
)
prompt_instructions = f"Classify each utterance as {all_intents}.\n"
inference_sample = (
f"Utterance: {{utterance}}\nIntent {all_intents_parenthetical}:"
)
few_shot_prompt = FewShotPromptTemplate(
examples=FEW_SHOT_EXAMPLES,
example_prompt=example_prompt,
prefix=prompt_instructions,
suffix=inference_sample,
input_variables=["utterance"],
example_separator="\n",
)
# Please refer to https://github.com/hwchase17/langchain/blob/master/langchain/llms/openai.py
openai_llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
openai_api_key=self.openai_api_key,
temperature=0.0,
# Only 2 tokens needed for classification (tokens are delimited by use of '_', i.e.
# 'next_step' counts as 2 tokens).
max_tokens=2,
)
return LLMChain(llm=openai_llm, prompt=few_shot_prompt)
def detect_intents(self, msg):
"""
Detects the user intent via langchain execution of GPT.
"""
return self.chain.run(utterance=msg), 0.5
def main():
rclpy.init()
intent_detector = GptIntentDetector()
rclpy.spin(intent_detector)
intent_detector.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
| [
"Utterance: {utterance}\nIntent PLACEHOLDER: {label}",
"utterance",
"\n",
"Classify each utterance as PLACEHOLDER.\n"
] |
2024-01-10 | jacoblee93/langchain-translator | dataset~training~python~69_cache_backed_embeddings.txt | """Module contains code for a cache backed embedder.
The cache backed embedder is a wrapper around an embedder that caches
embeddings in a key-value store. The cache is used to avoid recomputing
embeddings for the same text.
The text is hashed and the hash is used as the key in the cache.
"""
from __future__ import annotations
import hashlib
import json
import uuid
from functools import partial
from typing import Callable, List, Sequence, Union, cast
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseStore
from langchain.storage.encoder_backed import EncoderBackedStore
NAMESPACE_UUID = uuid.UUID(int=1985)
def _hash_string_to_uuid(input_string: str) -> uuid.UUID:
"""Hash a string and returns the corresponding UUID."""
hash_value = hashlib.sha1(input_string.encode("utf-8")).hexdigest()
return uuid.uuid5(NAMESPACE_UUID, hash_value)
def _key_encoder(key: str, namespace: str) -> str:
"""Encode a key."""
return namespace + str(_hash_string_to_uuid(key))
def _create_key_encoder(namespace: str) -> Callable[[str], str]:
"""Create an encoder for a key."""
return partial(_key_encoder, namespace=namespace)
def _value_serializer(value: Sequence[float]) -> bytes:
"""Serialize a value."""
return json.dumps(value).encode()
def _value_deserializer(serialized_value: bytes) -> List[float]:
"""Deserialize a value."""
return cast(List[float], json.loads(serialized_value.decode()))
class CacheBackedEmbeddings(Embeddings):
"""Interface for caching results from embedding models.
The interface works with any store that implements the abstract store
interface, accepting keys of type str and values of type List[float].
If need be, the interface can be extended to accept other implementations
of the value serializer and deserializer, as well as the key encoder.
Examples:
.. code-block: python
from langchain.embeddings import CacheBackedEmbeddings, OpenAIEmbeddings
from langchain.storage import LocalFileStore
store = LocalFileStore('./my_cache')
underlying_embedder = OpenAIEmbeddings()
embedder = CacheBackedEmbeddings.from_bytes_store(
underlying_embedder, store, namespace=underlying_embedder.model
)
# Embedding is computed and cached
embeddings = embedder.embed_documents(["hello", "goodbye"])
# Embeddings are retrieved from the cache, no computation is done
embeddings = embedder.embed_documents(["hello", "goodbye"])
"""
def __init__(
self,
underlying_embeddings: Embeddings,
document_embedding_store: BaseStore[str, List[float]],
) -> None:
"""Initialize the embedder.
Args:
underlying_embeddings: the embedder to use for computing embeddings.
document_embedding_store: The store to use for caching document embeddings.
"""
super().__init__()
self.document_embedding_store = document_embedding_store
self.underlying_embeddings = underlying_embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of texts.
The method first checks the cache for the embeddings.
If the embeddings are not found, the method uses the underlying embedder
to embed the documents and stores the results in the cache.
Args:
texts: A list of texts to embed.
Returns:
A list of embeddings for the given texts.
"""
vectors: List[Union[List[float], None]] = self.document_embedding_store.mget(
texts
)
missing_indices: List[int] = [
i for i, vector in enumerate(vectors) if vector is None
]
missing_texts = [texts[i] for i in missing_indices]
if missing_texts:
missing_vectors = self.underlying_embeddings.embed_documents(missing_texts)
self.document_embedding_store.mset(
list(zip(missing_texts, missing_vectors))
)
for index, updated_vector in zip(missing_indices, missing_vectors):
vectors[index] = updated_vector
return cast(
List[List[float]], vectors
) # Nones should have been resolved by now
def embed_query(self, text: str) -> List[float]:
"""Embed query text.
This method does not support caching at the moment.
Support for caching queries is easy to implement, but it might make
sense to hold off until the most common usage patterns are clear.
If the cache has an eviction policy, we may need to be a bit more careful
about sharing the cache between documents and queries. Generally,
one is OK evicting query caches, but document caches should be kept.
Args:
text: The text to embed.
Returns:
The embedding for the given text.
"""
return self.underlying_embeddings.embed_query(text)
@classmethod
def from_bytes_store(
cls,
underlying_embeddings: Embeddings,
document_embedding_cache: BaseStore[str, bytes],
*,
namespace: str = "",
) -> CacheBackedEmbeddings:
"""On-ramp that adds the necessary serialization and encoding to the store.
Args:
underlying_embeddings: The embedder to use for embedding.
document_embedding_cache: The cache to use for storing document embeddings.
*,
namespace: The namespace to use for document cache.
This namespace is used to avoid collisions with other caches.
For example, set it to the name of the embedding model used.
"""
namespace = namespace
key_encoder = _create_key_encoder(namespace)
encoder_backed_store = EncoderBackedStore[str, List[float]](
document_embedding_cache,
key_encoder,
_value_serializer,
_value_deserializer,
)
return cls(underlying_embeddings, encoder_backed_store)
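# Illustrative sketch (not part of the library module): how a cache key is
# derived for one text under a namespace, mirroring _key_encoder and
# _hash_string_to_uuid above. Assuming namespace="text-embedding-ada-002"
# (any model name works) and text="hello":
#
#   key = "text-embedding-ada-002" + str(
#       uuid.uuid5(NAMESPACE_UUID, hashlib.sha1("hello".encode("utf-8")).hexdigest())
#   )
#
# Identical texts therefore always map to the same key, which is what lets
# embed_documents skip recomputation on a cache hit, while the namespace
# keeps embeddings from different models from colliding.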
| [] |
2024-01-10 | jacoblee93/langchain-translator | dataset~training~python~67_parent_document_retriever.txt | import uuid
from typing import List, Optional
from langchain.retrievers import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.text_splitter import TextSplitter
class ParentDocumentRetriever(MultiVectorRetriever):
"""Retrieve small chunks then retrieve their parent documents.
When splitting documents for retrieval, there are often conflicting desires:
1. You may want to have small documents, so that their embeddings can most
accurately reflect their meaning. If too long, then the embeddings can
lose meaning.
2. You want to have long enough documents that the context of each chunk is
retained.
The ParentDocumentRetriever strikes that balance by splitting and storing
small chunks of data. During retrieval, it first fetches the small chunks
but then looks up the parent ids for those chunks and returns those larger
documents.
Note that "parent document" refers to the document that a small chunk
originated from. This can either be the whole raw document OR a larger
chunk.
Examples:
.. code-block:: python
# Imports
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
# Initialize the retriever
retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
parent_splitter=parent_splitter,
)
"""
child_splitter: TextSplitter
"""The text splitter to use to create child documents."""
"""The key to use to track the parent id. This will be stored in the
metadata of child documents."""
parent_splitter: Optional[TextSplitter] = None
"""The text splitter to use to create parent documents.
If none, then the parent documents will be the raw documents passed in."""
def add_documents(
self,
documents: List[Document],
ids: Optional[List[str]] = None,
add_to_docstore: bool = True,
) -> None:
"""Adds documents to the docstore and vectorstores.
Args:
documents: List of documents to add
            ids: Optional list of ids for documents. If provided, it should be the same
                length as the list of documents. Can be provided if parent documents
are already in the document store and you don't want to re-add
to the docstore. If not provided, random UUIDs will be used as
ids.
add_to_docstore: Boolean of whether to add documents to docstore.
This can be false if and only if `ids` are provided. You may want
to set this to False if the documents are already in the docstore
and you don't want to re-add them.
"""
if self.parent_splitter is not None:
documents = self.parent_splitter.split_documents(documents)
if ids is None:
doc_ids = [str(uuid.uuid4()) for _ in documents]
if not add_to_docstore:
raise ValueError(
"If ids are not passed in, `add_to_docstore` MUST be True"
)
else:
if len(documents) != len(ids):
raise ValueError(
"Got uneven list of documents and ids. "
"If `ids` is provided, should be same length as `documents`."
)
doc_ids = ids
docs = []
full_docs = []
for i, doc in enumerate(documents):
_id = doc_ids[i]
sub_docs = self.child_splitter.split_documents([doc])
for _doc in sub_docs:
_doc.metadata[self.id_key] = _id
docs.extend(sub_docs)
full_docs.append((_id, doc))
self.vectorstore.add_documents(docs)
if add_to_docstore:
self.docstore.mset(full_docs)
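# Usage sketch, assuming the retriever was built as in the class docstring above and
# `docs` is a hypothetical list of Documents loaded elsewhere:
#
#     retriever.add_documents(docs)  # splits children, embeds them, stores parents by id
#     results = retriever.get_relevant_documents("query text")  # returns the parent documents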
| [] |
2024-01-10 | jacoblee93/langchain-translator | dataset~training~python~72_base_output_parser.txt | from __future__ import annotations
import asyncio
from abc import ABC, abstractmethod
from typing import (
Any,
AsyncIterator,
Dict,
Generic,
Iterator,
List,
Optional,
TypeVar,
Union,
)
from langchain.load.serializable import Serializable
from langchain.schema.messages import BaseMessage
from langchain.schema.output import ChatGeneration, Generation
from langchain.schema.prompt import PromptValue
from langchain.schema.runnable import Runnable, RunnableConfig
T = TypeVar("T")
class BaseLLMOutputParser(Serializable, Generic[T], ABC):
"""Abstract base class for parsing the outputs of a model."""
@abstractmethod
def parse_result(self, result: List[Generation]) -> T:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
async def aparse_result(self, result: List[Generation]) -> T:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
return await asyncio.get_running_loop().run_in_executor(
None, self.parse_result, result
)
class BaseGenerationOutputParser(
BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T]
):
"""Base class to parse the output of an LLM call."""
def invoke(
self, input: Union[str, BaseMessage], config: Optional[RunnableConfig] = None
) -> T:
if isinstance(input, BaseMessage):
return self._call_with_config(
lambda inner_input: self.parse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
else:
return self._call_with_config(
lambda inner_input: self.parse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
async def ainvoke(
self,
input: str | BaseMessage,
config: RunnableConfig | None = None,
**kwargs: Optional[Any],
) -> T:
if isinstance(input, BaseMessage):
return await self._acall_with_config(
lambda inner_input: self.aparse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
else:
return await self._acall_with_config(
lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
class BaseOutputParser(BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T]):
"""Base class to parse the output of an LLM call.
Output parsers help structure language model responses.
Example:
.. code-block:: python
class BooleanOutputParser(BaseOutputParser[bool]):
true_val: str = "YES"
false_val: str = "NO"
def parse(self, text: str) -> bool:
cleaned_text = text.strip().upper()
if cleaned_text not in (self.true_val.upper(), self.false_val.upper()):
raise OutputParserException(
f"BooleanOutputParser expected output value to either be "
f"{self.true_val} or {self.false_val} (case-insensitive). "
f"Received {cleaned_text}."
)
return cleaned_text == self.true_val.upper()
@property
def _type(self) -> str:
return "boolean_output_parser"
""" # noqa: E501
def invoke(
self, input: Union[str, BaseMessage], config: Optional[RunnableConfig] = None
) -> T:
if isinstance(input, BaseMessage):
return self._call_with_config(
lambda inner_input: self.parse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
else:
return self._call_with_config(
lambda inner_input: self.parse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
async def ainvoke(
self,
input: str | BaseMessage,
config: RunnableConfig | None = None,
**kwargs: Optional[Any],
) -> T:
if isinstance(input, BaseMessage):
return await self._acall_with_config(
lambda inner_input: self.aparse_result(
[ChatGeneration(message=inner_input)]
),
input,
config,
run_type="parser",
)
else:
return await self._acall_with_config(
lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
input,
config,
run_type="parser",
)
def parse_result(self, result: List[Generation]) -> T:
"""Parse a list of candidate model Generations into a specific format.
The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
return self.parse(result[0].text)
@abstractmethod
def parse(self, text: str) -> T:
"""Parse a single string model output into some structure.
Args:
text: String output of a language model.
Returns:
Structured output.
"""
async def aparse_result(self, result: List[Generation]) -> T:
"""Parse a list of candidate model Generations into a specific format.
The return value is parsed from only the first Generation in the result, which
is assumed to be the highest-likelihood Generation.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
return await self.aparse(result[0].text)
async def aparse(self, text: str) -> T:
"""Parse a single string model output into some structure.
Args:
text: String output of a language model.
Returns:
Structured output.
"""
return await asyncio.get_running_loop().run_in_executor(None, self.parse, text)
# TODO: rename 'completion' -> 'text'.
def parse_with_prompt(self, completion: str, prompt: PromptValue) -> Any:
"""Parse the output of an LLM call with the input prompt for context.
The prompt is largely provided in the event the OutputParser wants
to retry or fix the output in some way, and needs information from
the prompt to do so.
Args:
completion: String output of a language model.
prompt: Input PromptValue.
Returns:
Structured output
"""
return self.parse(completion)
def get_format_instructions(self) -> str:
"""Instructions on how the LLM output should be formatted."""
raise NotImplementedError
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
raise NotImplementedError(
f"_type property is not implemented in class {self.__class__.__name__}."
" This is required for serialization."
)
def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of output parser."""
output_parser_dict = super().dict(**kwargs)
output_parser_dict["_type"] = self._type
return output_parser_dict
class BaseTransformOutputParser(BaseOutputParser[T]):
"""Base class for an output parser that can handle streaming input."""
def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[T]:
for chunk in input:
if isinstance(chunk, BaseMessage):
yield self.parse_result([ChatGeneration(message=chunk)])
else:
yield self.parse_result([Generation(text=chunk)])
async def _atransform(
self, input: AsyncIterator[Union[str, BaseMessage]]
) -> AsyncIterator[T]:
async for chunk in input:
if isinstance(chunk, BaseMessage):
yield self.parse_result([ChatGeneration(message=chunk)])
else:
yield self.parse_result([Generation(text=chunk)])
def transform(
self,
input: Iterator[Union[str, BaseMessage]],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[T]:
yield from self._transform_stream_with_config(
input, self._transform, config, run_type="parser"
)
async def atransform(
self,
input: AsyncIterator[Union[str, BaseMessage]],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[T]:
async for chunk in self._atransform_stream_with_config(
input, self._atransform, config, run_type="parser"
):
yield chunk
class StrOutputParser(BaseTransformOutputParser[str]):
"""OutputParser that parses LLMResult into the top likely string."""
@property
def lc_serializable(self) -> bool:
"""Whether the class LangChain serializable."""
return True
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
return "default"
def parse(self, text: str) -> str:
"""Returns the input text with no changes."""
return text
# TODO: Deprecate
NoOpOutputParser = StrOutputParser
class OutputParserException(ValueError):
"""Exception that output parsers should raise to signify a parsing error.
This exists to differentiate parsing errors from other code or execution errors
that also may arise inside the output parser. OutputParserExceptions will be
available to catch and handle in ways to fix the parsing error, while other
errors will be raised.
Args:
error: The error that's being re-raised or an error message.
observation: String explanation of error which can be passed to a
model to try and remediate the issue.
llm_output: String model output which is error-ing.
send_to_llm: Whether to send the observation and llm_output back to an Agent
after an OutputParserException has been raised. This gives the underlying
model driving the agent the context that the previous output was improperly
structured, in the hopes that it will update the output to the correct
format.
"""
def __init__(
self,
error: Any,
observation: Optional[str] = None,
llm_output: Optional[str] = None,
send_to_llm: bool = False,
):
super(OutputParserException, self).__init__(error)
if send_to_llm:
if observation is None or llm_output is None:
raise ValueError(
"Arguments 'observation' & 'llm_output'"
" are required if 'send_to_llm' is True"
)
self.observation = observation
self.llm_output = llm_output
self.send_to_llm = send_to_llm
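# Usage sketch: output parsers raise OutputParserException when the model output cannot be
# parsed, so callers can handle parsing failures separately from other errors.
# BooleanOutputParser here refers to the example class in the BaseOutputParser docstring above.
#
#     parser = BooleanOutputParser()
#     try:
#         parser.parse("MAYBE")
#     except OutputParserException as e:
#         print(f"Could not parse model output: {e}")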
| [] |
2024-01-10 | jacoblee93/langchain-translator | dataset~training~python~70_sequential_chain.txt | """Chain pipeline where the outputs of one step feed directly into next."""
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils.input import get_color_mapping
class SequentialChain(Chain):
"""Chain where the outputs of one chain feed directly into next."""
chains: List[Chain]
input_variables: List[str]
output_variables: List[str] #: :meta private:
return_all: bool = False
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Return expected input keys to the chain.
:meta private:
"""
return self.input_variables
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return self.output_variables
@root_validator(pre=True)
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that the correct inputs exist for all chains."""
chains = values["chains"]
input_variables = values["input_variables"]
memory_keys = list()
if "memory" in values and values["memory"] is not None:
"""Validate that prompt input variables are consistent."""
memory_keys = values["memory"].memory_variables
if set(input_variables).intersection(set(memory_keys)):
overlapping_keys = set(input_variables) & set(memory_keys)
raise ValueError(
f"The the input key(s) {''.join(overlapping_keys)} are found "
f"in the Memory keys ({memory_keys}) - please use input and "
f"memory keys that don't overlap."
)
known_variables = set(input_variables + memory_keys)
for chain in chains:
missing_vars = set(chain.input_keys).difference(known_variables)
if chain.memory:
missing_vars = missing_vars.difference(chain.memory.memory_variables)
if missing_vars:
raise ValueError(
f"Missing required input keys: {missing_vars}, "
f"only had {known_variables}"
)
overlapping_keys = known_variables.intersection(chain.output_keys)
if overlapping_keys:
raise ValueError(
f"Chain returned keys that already exist: {overlapping_keys}"
)
known_variables |= set(chain.output_keys)
if "output_variables" not in values:
if values.get("return_all", False):
output_keys = known_variables.difference(input_variables)
else:
output_keys = chains[-1].output_keys
values["output_variables"] = output_keys
else:
missing_vars = set(values["output_variables"]).difference(known_variables)
if missing_vars:
raise ValueError(
f"Expected output variables that were not found: {missing_vars}."
)
return values
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
known_values = inputs.copy()
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
for i, chain in enumerate(self.chains):
callbacks = _run_manager.get_child()
outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks)
known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
known_values = inputs.copy()
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
for i, chain in enumerate(self.chains):
outputs = await chain.acall(
known_values, return_only_outputs=True, callbacks=callbacks
)
known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}
class SimpleSequentialChain(Chain):
"""Simple chain where the outputs of one step feed directly into next."""
chains: List[Chain]
strip_outputs: bool = False
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that chains are all single input/output."""
for chain in values["chains"]:
if len(chain.input_keys) != 1:
raise ValueError(
"Chains used in SimplePipeline should all have one input, got "
f"{chain} with {len(chain.input_keys)} inputs."
)
if len(chain.output_keys) != 1:
raise ValueError(
"Chains used in SimplePipeline should all have one output, got "
f"{chain} with {len(chain.output_keys)} outputs."
)
return values
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_input = inputs[self.input_key]
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
for i, chain in enumerate(self.chains):
_input = chain.run(_input, callbacks=_run_manager.get_child(f"step_{i+1}"))
if self.strip_outputs:
_input = _input.strip()
_run_manager.on_text(
_input, color=color_mapping[str(i)], end="\n", verbose=self.verbose
)
return {self.output_key: _input}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
_input = inputs[self.input_key]
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
for i, chain in enumerate(self.chains):
_input = await chain.arun(
_input, callbacks=_run_manager.get_child(f"step_{i+1}")
)
if self.strip_outputs:
_input = _input.strip()
await _run_manager.on_text(
_input, color=color_mapping[str(i)], end="\n", verbose=self.verbose
)
return {self.output_key: _input}
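# Usage sketch, assuming LLMChain, OpenAI and PromptTemplate are importable from this
# LangChain version; the prompts below are illustrative:
#
#     from langchain.chains import LLMChain
#     from langchain.llms import OpenAI
#     from langchain.prompts import PromptTemplate
#
#     llm = OpenAI(temperature=0)
#     outline = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Outline a post about {topic}"))
#     draft = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Write a draft from this outline: {outline}"))
#     overall = SimpleSequentialChain(chains=[outline, draft], strip_outputs=True)
#     result = overall.run("vector databases")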
| [] |
2024-01-10 | acstrahl/Sassy_Chatbot | Sassy_Chatbot.py | # Import the module
from openai import OpenAI
import streamlit as st
st.title("Sassy Chatbot:face_with_rolling_eyes:")
st.sidebar.header("Options")
openai_api_key = st.sidebar.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
st.sidebar.write("[Get an OpenAI API key](https://platform.openai.com/account/api-keys)")
st.sidebar.write("[See this app's source code on GitHub](https://github.com/acstrahl/Sassy_Chatbot)")
max_tokens = st.sidebar.slider("Max Tokens", 1, 250, 100)
temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.7)
model = st.sidebar.selectbox("Model", ("gpt-3.5-turbo","gpt-4"))
system_message_type = st.sidebar.selectbox("System Message",("Sassy Assistant", "Bored Assistant", "Angry Assistant", "Custom"))
# Define the system message
if system_message_type == "Sassy Assistant":
system_message = "You are a sassy assistant that is fed up with answering questions."
elif system_message_type == "Bored Assistant":
system_message = "You are a bored assistant that does the bare minimum."
elif system_message_type == "Angry Assistant":
system_message = "You are an angry assistant that likes yelling in all caps."
elif system_message_type == "Custom":
system_message = st.sidebar.text_area("Custom System Message", "Enter your custom system message here.")
else:
system_message = "You are a helpful assistant."
if st.sidebar.button("Apply New System Message"):
st.session_state.messages[0] = {"role": "system", "content": system_message}
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "system", "content": system_message}]
for msg in st.session_state.messages:
if msg["role"] == "user" or msg["role"] == "assistant":
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
client = OpenAI(api_key=openai_api_key)
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = client.chat.completions.create(
model=model,
messages=st.session_state.messages,
temperature=temperature,
max_tokens=max_tokens)
msg = response.choices[0].message.content
st.session_state.messages.append({"role": "assistant", "content": msg})
st.chat_message("assistant").write(msg)
    if len(st.session_state.messages) > 20:
        # Keep the system message plus the 20 most recent messages
        st.session_state.messages = [{"role": "system", "content": system_message}] + st.session_state.messages[-20:]
| [] |
2024-01-10 | cruinh/TextAdventure | _gpt~_generateContent.py | from sys import *
from game import *
from place import *
from player import player
import os
import openai
import json
from pathlib import Path
def main():
src = Path('../places/atrium.py').read_text()
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Completion.create(
model="text-davinci-003",
prompt="Create a variation of this python class to represent a teen girl's bedroom. Include one exit leading to the hallway. \n"+src,
temperature=0.8,
max_tokens=2049,
top_p=1.0,
frequency_penalty=0.5,
presence_penalty=0.0
)
print(response)
with open("../places/_generated.py", "w") as text_file:
text_file.write(response["choices"][0]["text"])
if __name__ == '__main__':
main()
| [
"Create a variation of this python class to represent a teen girl's bedroom. Include one exit leading to the hallway. \nPLACEHOLDER"
] |
2024-01-10 | cruinh/TextAdventure | _gpt~_generateMap.py | from sys import *
from game import *
from place import *
from player import player
import os
import openai
import json
from pathlib import Path
def main():
worldSrc = Path('../world.py').read_text()
placeSrc = Path('../place.py').read_text()
src = placeSrc + "\n\n" + worldSrc
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Completion.create(
model="text-davinci-003",
prompt="Refer to the following Python classes which describe a game world. Generate a map of the game world using ASCII art\n"+src,
temperature=0.8,
max_tokens=2049,
top_p=1.0,
frequency_penalty=0.5,
presence_penalty=0.0
)
with open("_generateMap.output.txt", "w") as text_file:
text_file.write(response["choices"][0]["text"])
if __name__ == '__main__':
main()
| [
"Refer to the following Python classes which describe a game world. Generate a map of the game world using ASCII art\nPLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | cruinh/TextAdventure | _gpt~_sendGPTQuestion.py | import sys
from game import *
from place import *
from player import player
import os
import openai
import json
from pathlib import Path
def main():
# Check if arguments have been passed to the script
    if len(sys.argv) < 2:
        # Exit if no prompt argument was provided
        quit()
prompt = sys.argv[1]
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.8,
max_tokens=2049,
top_p=1.0,
frequency_penalty=0.5,
presence_penalty=0.0
)
print(response)
with open("_sendGPTQuestion.output.txt", "w") as text_file:
text_file.write("\""+prompt+"\"\n")
text_file.write("---\n")
text_file.write(response["choices"][0]["text"])
if __name__ == '__main__':
main()
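# Usage sketch: pass the prompt as the first CLI argument; the completion is printed and
# written to _sendGPTQuestion.output.txt. The example prompt below is illustrative.
#     OPENAI_API_KEY=sk-... python _sendGPTQuestion.py "Describe the atrium in two sentences."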
| [] |
2024-01-10 | cruinh/TextAdventure | _gpt~_validateWorldConnections.py | from sys import *
from game import *
from place import *
from player import player
import os
import openai
import json
from pathlib import Path
def main():
worldSrc = Path('../world.py').read_text()
placeSrc = Path('../place.py').read_text()
src = placeSrc + "\n\n" + worldSrc
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Completion.create(
model="text-davinci-003",
# prompt="Generate an ASCII representation of a map describing the world created by this Python class. \n"+src,
prompt="Refer to the following Python classes which describe a game world. What exits have been added to each room?\n"+src,
temperature=0.8,
max_tokens=2049,
top_p=1.0,
frequency_penalty=0.5,
presence_penalty=0.0
)
print(response["choices"][0]["text"])
if __name__ == '__main__':
main()
| [
"Refer to the following Python classes which describe a game world. What exits have been added to each room?\nPLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | dskhudia/mosaic-examples | examples~end-to-end-examples~support_chatbot~app_demo.py | from argparse import ArgumentParser, Namespace
import gradio as gr
from langchain.embeddings import MosaicMLInstructorEmbeddings
from langchain.llms import MosaicML
from chatbot import ChatBot
import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
EVAL_7B_TEMPLATE = ('Answer the following question as one function, class, or object. If you do not know, just say "I do not know".'
'\n{context}'
'\nQuestion: {question}')
EVAL_30B_TEMPLATE = ("""<|im_start|>system
A conversation between a user and an LLM-based AI assistant about the codebase for the MosaicML library Composer.
Provide a helpful and simple answer given the following context to the question. If you do not know, just say "I
do not know".<|im_end|>
<|im_start|>context
{context}<|im_end|>
<|im_start|>user
{question}<|im_end|>
<|im_start|>assistant""")
def parse_args() -> Namespace:
"""Parse commandline arguments."""
parser = ArgumentParser(
description=
'Run a chatbot!'
)
parser.add_argument(
'--endpoint_url',
type=str,
default='https://models.hosted-on.mosaicml.hosting/mpt-30b-chat/v1/predict',
required=False,
help='The endpoint of our MosaicML LLM Model')
parser.add_argument(
'--model_name',
type=str,
default='mpt-30b-chat',
required=False,
help='The model name (mpt-30b-chat or mpt-7b) that determines which prompt template to use when evaluating')
parser.add_argument(
'--max_length',
type=int,
default=5000,
required=False,
help='The maximum number tokens of both input and output of the model (it will cut off output if token exceeds this length)')
parser.add_argument(
'--chunk_size',
type=int,
default=800,
required=False,
help='The chunk size when splitting documents')
parser.add_argument(
'--chunk_overlap',
type=int,
default=400,
required=False,
help='The overlap between chunks when splitting documents')
parser.add_argument(
'--retrieval_k',
type=int,
default=5,
required=False,
help='The number of chunks to retrieve as context from vector store')
parser.add_argument(
'--top_k',
type=int,
default=10,
required=False,
help='The number of highest probability vocabulary tokens to keep for top-k-filtering')
parser.add_argument(
'--repository_urls',
type=str,
nargs='*',
default=['https://github.com/mosaicml/composer',
'https://github.com/mosaicml/streaming',
'https://github.com/mosaicml/examples',
'https://github.com/mosaicml/diffusion',
'https://github.com/mosaicml/llm-foundry'],
required=False,
help='The GitHub repository URLs to download'
)
parser.add_argument(
'--complex_data_dir',
type=str,
required=False,
default=os.path.join(ROOT_DIR, 'eval_data/complex_eval.jsonl'),
help='complex eval data for human eval')
parser.add_argument(
'--simple_data_dir',
type=str,
required=False,
default=os.path.join(ROOT_DIR, 'eval_data/composer_docstrings.jsonl'),
help='simple eval data for string comparison')
parser.add_argument(
'--complex_chat',
type=int,
default=0,
required=False,
        help='Which version of chatting to use (0 for normal, 1 for sub-query, 2 for relation sub-query). Each version improves on the previous one but increases response time.')
parsed = parser.parse_args()
if parsed.repository_urls is not None:
# Remove whitespace and turn URLs into a list
parsed.repository_urls = ''.join(str(parsed.repository_urls).split()).split(',')
return parsed
def main(endpoint_url: str,
model_name: str,
max_length: int,
chunk_size: int,
chunk_overlap: int,
retrieval_k: int,
top_k: int,
repository_urls: list[str],
complex_data_dir: str,
simple_data_dir: str,
chat_version: int) -> None:
retrieval_dir = os.path.join(ROOT_DIR, 'retrieval_data')
embeddings = MosaicMLInstructorEmbeddings()
llm = MosaicML(
inject_instruction_format=True,
endpoint_url= endpoint_url,
model_kwargs={
'max_length': max_length,
'top_k': top_k,
'top_p': 0.95,
'temperature': 0.1,
# other HuggingFace generation parameters can be set as kwargs here to experiment with different decoding parameters
},
)
chatbot = ChatBot(data_path= retrieval_dir,
embedding=embeddings,
model=llm,
k=retrieval_k,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap)
if not chatbot.vector_store:
print("can't find vectors.pickle, loading it")
if repository_urls is None:
raise ValueError('No repository URLs provided. Please provide a comma separated list of URLs to download')
chatbot.create_vector_store(repository_urls=repository_urls)
def chat_wrapper(query: str) -> str:
"""Wrapper around chatbot.chat() for gradio
Args:
query (str): The query to chatbot
Returns:
str: The response from chatbot"""
if query == '!eval_simple':
if simple_data_dir is None:
                raise ValueError('No simple data directory provided. Please provide a directory with simple eval data')
if model_name == 'mpt-30b-chat':
return chatbot.evaluate_simple(simple_data_dir, EVAL_30B_TEMPLATE)
elif model_name == 'mpt-7b':
return chatbot.evaluate_simple(simple_data_dir, EVAL_7B_TEMPLATE)
elif query == '!eval_complex':
if complex_data_dir is None:
                raise ValueError('No complex data directory provided. Please provide a directory with complex eval data')
if model_name == 'mpt-30b-chat':
return chatbot.evaluate_complex(complex_data_dir, EVAL_30B_TEMPLATE)
elif model_name == 'mpt-7b':
return chatbot.evaluate_complex(complex_data_dir, EVAL_7B_TEMPLATE)
if chat_version == 1:
return chatbot.sub_query_chat(query)
elif chat_version == 2:
return chatbot.relation_sub_query_chat(query)
else:
return chatbot.chat(query)
def gradio_chat():
"""Simple gradio application for querying the model"""
with gr.Blocks() as demo:
query = gr.Textbox(label='Query',
value='What is AliBi?')
answer = gr.Textbox(label='Answer')
query_btn = gr.Button('Query')
query_btn.click(fn=chat_wrapper,
inputs=[query],
outputs=[answer])
demo.queue()
demo.launch()
gradio_chat()
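# Example invocation (a sketch; the flags and values mirror the argparse defaults above):
#     python app_demo.py \
#         --endpoint_url https://models.hosted-on.mosaicml.hosting/mpt-30b-chat/v1/predict \
#         --model_name mpt-30b-chat --retrieval_k 5 --complex_chat 1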
if __name__ == "__main__":
args = parse_args()
main(
endpoint_url=args.endpoint_url,
model_name=args.model_name,
max_length = args.max_length,
chunk_size = args.chunk_size,
chunk_overlap = args.chunk_overlap,
retrieval_k = args.retrieval_k,
top_k = args.top_k,
repository_urls = args.repository_urls,
complex_data_dir = args.complex_data_dir,
simple_data_dir = args.simple_data_dir,
chat_version = args.complex_chat
) | [
"<|im_start|>system\n A conversation between a user and an LLM-based AI assistant about the codebase for the MosaicML library Composer. \n Provide a helpful and simple answer given the following context to the question. If you do not know, just say \"I \n do not know\".<|im_end|>\n <|im_start|>context\n {context}<|im_end|>\n <|im_start|>user\n {question}<|im_end|>\n <|im_start|>assistant",
"Answer the following question as one function, class, or object. If you do not know, just say \"I do not know\".\n{context}\nQuestion: {question}"
] |
2024-01-10 | gconybear/dotbot | doc_parsing.py | import re
import docx2txt
from pypdf import PdfReader
import streamlit as st
import mimetypes
from io import BytesIO
from typing import Any, Dict, List
from langchain.text_splitter import RecursiveCharacterTextSplitter
def check_file_type(file):
file_type, encoding = mimetypes.guess_type(file.name)
return file_type
@st.cache_data()
def parse_docx(file: BytesIO) -> str:
text = docx2txt.process(file)
# Remove multiple newlines
text = re.sub(r"\n\s*\n", "\n\n", text)
return text
@st.cache_data()
def parse_pdf(file: BytesIO) -> List[str]:
pdf = PdfReader(file)
output = []
for page in pdf.pages:
text = page.extract_text()
# Merge hyphenated words
text = re.sub(r"(\w+)-\n(\w+)", r"\1\2", text)
# Fix newlines in the middle of sentences
text = re.sub(r"(?<!\n\s)\n(?!\s\n)", " ", text.strip())
# Remove multiple newlines
text = re.sub(r"\n\s*\n", "\n\n", text)
output.append(text)
return output
@st.cache_data()
def parse_txt(file: BytesIO) -> str:
text = file.read().decode("utf-8")
# Remove multiple newlines
text = re.sub(r"\n\s*\n", "\n\n", text)
return text
def split_docs(txt, min_length=100):
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=800,
separators=["NSECTION", "\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=0,
)
chunks = text_splitter.split_text(txt)
return [c for c in chunks if len(c) > min_length]
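# Usage sketch inside a Streamlit app, assuming `st.file_uploader` supplies the file; the
# MIME-type routing below is illustrative:
#
#     uploaded = st.file_uploader("Upload a document", type=["pdf", "docx", "txt"])
#     if uploaded is not None:
#         kind = check_file_type(uploaded)
#         if kind == "application/pdf":
#             text = "\n\n".join(parse_pdf(uploaded))
#         elif kind and kind.endswith("wordprocessingml.document"):
#             text = parse_docx(uploaded)
#         else:
#             text = parse_txt(uploaded)
#         chunks = split_docs(text)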
| [] |
2024-01-10 | nethdeco/Open-Assistant | inference~worker~chat_chain_utils.py | import json
import re
import requests
import transformers
from chat_chain_prompts import INSTRUCTIONS, OBSERVATION_SEQ, TOOLS_PREFIX, V2_ASST_PREFIX, V2_PROMPTER_PREFIX
from hf_langchain_inference import HFInference
from langchain.agents import Tool
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from loguru import logger
from oasst_shared.schemas import inference
from openapi_parser import prepare_plugin_for_llm
from settings import settings
from utils import shared_tokenizer_lock
RESPONSE_MAX_LENGTH = 2048
llm_parser = HFInference(
inference_server_url=settings.inference_server_url,
max_new_tokens=512,
stop_sequences=["</s>"],
top_k=5,
temperature=0.20,
repetition_penalty=(1 / 0.83),
)
# NOTE: https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance
# We are using plugin API endpoint paths as tool names,
# e.g. /get_weather, /get_news, etc., so this algorithm should be fine.
# Possible improvement: try Levenshtein or vector distances,
# but the best way is to just use better models.
def similarity(ts1: str, ts2: str) -> float:
if ts1 == ts2:
return 1
match = 0
len1, len2 = len(ts1), len(ts2)
max_dist = (max(len1, len2) // 2) - 1
hash_ts1 = [0] * len1
hash_ts2 = [0] * len2
for i in range(len1):
for j in range(max(0, i - max_dist), min(len2, i + max_dist + 1)):
if ts1[i] == ts2[j] and hash_ts2[j] == 0:
hash_ts1[i] = 1
hash_ts2[j] = 1
match += 1
break
if match == 0:
return 0
t = 0
point = 0
for i in range(len1):
if hash_ts1[i] == 1:
while hash_ts2[point] == 0:
point += 1
if ts1[i] != ts2[point]:
t += 1
point += 1
t /= 2
return (match / len1 + match / len2 + (match - t) / match) / 3.0
# TODO: Can be improved, e.g. try another pass through the LLM
# with a custom-tuned prompt for fixing the formatting.
# e.g. "This is malformed text, please fix it: {malformed text} -> FIX magic :)"
def extract_tool_and_input(llm_output: str, ai_prefix: str) -> tuple[str, str]:
llm_output = llm_output.strip().replace("```", "")
if f"{ai_prefix}:" in llm_output:
return ai_prefix, llm_output.split(f"{ai_prefix}:")[-1].strip()
regex = r"Action: (.*?)[\n]*Action Input:\n?(.*)"
# match = re.search(regex, llm_output) # this is for 65B llama :(
match = re.search(regex, llm_output, re.MULTILINE | re.DOTALL)
if not match:
if OBSERVATION_SEQ in llm_output:
return ai_prefix, llm_output.split(OBSERVATION_SEQ)[-1].strip()
return ai_prefix, llm_output
action = match.group(1)
action_input = match.group(2)
return action.strip().replace("'", ""), action_input.strip().strip(" ")
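# Illustrative note: the regex above expects LLM output shaped like
#     Action: get_weather
#     Action Input: {"city": "Berlin"}
# and returns ("get_weather", '{"city": "Berlin"}'); output prefixed with the AI prefix is
# treated as a final answer instead. The tool name and input here are made-up examples.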
# Truncate a string, but append the matching closing bracket if it starts with [, { or (,
# so that the LLM is less tempted to simply continue generating the truncated output
def truncate_str(output: str, max_length: int = 1024) -> str:
if len(output) > max_length:
if output[0] == "(":
return output[:max_length] + "...)"
elif output[0] == "[":
return output[:max_length] + "...]"
elif output[0] == "{":
return output[:max_length] + "...}"
else:
return output[:max_length] + "..."
return output
# Parse JSON and try to fix it if it's not valid
def prepare_json(json_str: str) -> str:
json_str = json_str.strip()
fixed_json = json_str
try:
json.loads(json_str)
except json.decoder.JSONDecodeError:
# Fix missing quotes around keys and replace Python's True, False, and None
fixed_json = re.sub(r"(?<=\{|\,)(\s*)(\w+)(\s*):", r'\1"\2"\3:', json_str)
fixed_json = fixed_json.replace("True", "true").replace("False", "false").replace("None", "null")
# Remove excessive closing braces/brackets
brace_count = bracket_count = 0
result = []
for c in fixed_json:
if c == "{":
brace_count += 1
elif c == "}":
brace_count -= 1
elif c == "[":
bracket_count += 1
elif c == "]":
bracket_count -= 1
if brace_count >= 0 and bracket_count >= 0:
result.append(c)
# Add missing closing braces/brackets
result.extend(["}"] * brace_count)
result.extend(["]"] * bracket_count)
fixed_json = "".join(result)
try:
json.loads(fixed_json)
except json.decoder.JSONDecodeError as e:
logger.warning(f"JSON is still not valid, trying to fix it with LLM {fixed_json}")
# if it's still not valid, try with LLM fixer
prompt = f"""{V2_PROMPTER_PREFIX}Below is malformed JSON object string:
--------------
{json_str}
--------------
Parsing error:
--------------
{e}
RULES:
1. If malformed JSON object string contains multiple objects, you merge them into one.
2. You will never made up or add any new data, you will only fix the malformed JSON object string.
Here is the fixed JSON object string:</s>{V2_ASST_PREFIX}"""
logger.warning(f"JSON Fix Prompt: {prompt}")
out = llm_parser.generate(prompts=[prompt]).generations[0][0].text
out = out[: out.find("}") + 1]
logger.warning(f"JSON Fix Output: {out}")
return out
return fixed_json
def use_tool(tool_name: str, tool_input: str, tools: list) -> str:
for tool in tools:
# This should become stricter and stricter as we get better models
if tool.name in tool_name or similarity(tool.name, tool_name) > 0.75:
# check if tool_input is valid json, and if not, try to fix it
tool_input = prepare_json(tool_input)
return tool.func(tool_input)
return f"ERROR! {tool_name} is not a valid tool. Try again with different tool!"
# Needs more work for errors; error-prompt tweaks are currently based on
# the `OpenAssistant/oasst-sft-6-llama-30b-epoch-1` model
# TODO: Add other missing methods and Content-Types etc...
class RequestsForLLM:
def run(self, params: str, url: str, param_location: str, type: str, payload: str | None = None) -> str:
return self.run_request(params, url, param_location, type, payload)
def run_request(self, params: str, url: str, param_location: str, type: str, payload: str = None) -> str:
try:
query_params = params
if param_location == "path":
for key, value in query_params.items():
url = url.replace(f"{{{key}}}", value)
query_params = {}
headers = {"Content-Type": "application/json"} if payload else None
if type.lower() == "get":
logger.info(
f"Running {type.upper()} request on {url} with\nparams: {params}\nparam_location: {param_location}\npayload: {payload}"
)
res = requests.get(url, params=query_params, headers=headers)
elif type.lower() == "post":
                # model didn't generate a separate payload object, so we just put params as payload and hope for the best...
data = json.dumps(payload) if payload else json.dumps(params)
logger.info(
f"Running {type.upper()} request on {url} with\nparams: {params}\nparam_location: {param_location}\npayload: {data}"
)
res = requests.post(url, params=query_params, data=data, headers=headers)
else:
return f"ERROR! Unsupported request type: {type}. Only GET and POST are supported. Try again!"
return self.process_response(res)
except Exception as e:
return f"ERROR! That didn't work, try modifying Action Input.\n{e}. Try again!"
def process_response(self, res):
logger.info(f"Request response: {res.text}")
if res.status_code != 200:
return f"ERROR! That didn't work, try modifying Action Input.\n{res.text}. Try again!"
if res.text is None or len(res.text) == 0:
return "ERROR! That didn't work, try modifying Action Input.\nEmpty response. Try again!"
if "null" in res.text.lower() and len(res.text) < 10:
return "ERROR! That didn't work, try modifying Action Input.\nEmpty response. Try again!"
return truncate_str(res.text, RESPONSE_MAX_LENGTH)
def compose_tools_from_plugin(plugin: inference.PluginEntry | None) -> tuple[str, list[Tool]]:
if not plugin:
return "", []
llm_plugin = prepare_plugin_for_llm(plugin.url)
if not llm_plugin:
return "", []
tools = []
request_tool = RequestsForLLM()
def create_tool_func(endpoint, param_location):
def func(req):
try:
json_obj = json.loads(req)
request = json_obj.get("request", {})
params = request.get("params", {})
payload = request.get("payload", None)
except json.JSONDecodeError:
print("Error: Invalid JSON input")
request, params, payload = {}, {}, None
except Exception as e:
print(f"Error: {e}")
request, params, payload = {}, {}, None
return request_tool.run(
url=endpoint.url, params=params, param_location=param_location, type=endpoint.type, payload=payload
)
return func
# Generate tool for each endpoint of the plugin
    # NOTE: This approach is a bit weird, but it is a good way to help the LLM
    # use tools, so the LLM does not need to choose the API server URL
    # and parameter locations (query, path, body, etc.) on its own.
    # The LLM will only choose which endpoint, which parameters, and which values
    # to use. By modifying this part of the prompt, we can degrade or improve
    # the performance of tool usage.
for endpoint in llm_plugin["endpoints"]:
params = "\n\n".join(
[
f""" name: "{param.name}",\n in: "{param.in_}",\n description: "{truncate_str(param.description, 128)}",\n schema: {param.schema_},\n required: {param.required}"""
for param in endpoint.params
]
)
        # NOTE: LangChain internally uses {input_name} for templating,
        # and some OA/ChatGPT plugins can of course have {some_word} in their
        # descriptions
params = params.replace("{", "{{").replace("}", "}}")
payload_description = ""
if endpoint.payload:
try:
payload_description = "payload: " + truncate_str(json.dumps(endpoint.payload, indent=4), 256)
payload_description = payload_description.replace("{", "{{").replace("}", "}}")
except Exception as e:
logger.warning(f"Failed to convert payload to json string: {e}")
payload_description += "" if not payload_description or payload_description.endswith("\n") else "\n"
if len(payload_description) > 0:
payload_description = "\n" + payload_description + "\n"
parameters_description = f"params:\n{params}\n" if params else "\n"
openapi_specification_title = (
"\nOpenAPI specification\n" if len(payload_description) > 0 or len(params) > 0 else ""
)
param_location = endpoint.params[0].in_ if len(endpoint.params) > 0 else "query"
# some plugins do not have operation_id, so we use path as fallback
path = endpoint.path[1:] if endpoint.path and len(endpoint.path) > 0 else endpoint.path
tool = Tool(
name=endpoint.operation_id if endpoint.operation_id != "" else path,
# Could be path, e.g /api/v1/endpoint
# but it can lead LLM to makeup some URLs
# and problem with EP description is that
# it can be too long for some plugins
func=create_tool_func(endpoint, param_location),
description=f"{openapi_specification_title}{parameters_description}{payload_description}tool description: {endpoint.summary}\n",
)
tools.append(tool)
tools_string = "\n".join([f"> {tool.name}{tool.description}" for tool in tools])
# NOTE: This can be super long for some plugins, that I tested so far.
# and because we don't have 32k CTX size, we need to truncate it.
plugin_description_for_model = truncate_str(llm_plugin["description_for_model"], 512)
return (
f"{TOOLS_PREFIX}{tools_string}\n\n{llm_plugin['name_for_model']} plugin description:\n{plugin_description_for_model}\n\n{INSTRUCTIONS}",
tools,
)
# TODO:
# here we will not be truncating per token, but will be deleting messages
# from the history, and we will leave hard truncation to work.py, which,
# if it occurs, will degrade the quality of the output.
def prepare_prompt(
input_prompt: str,
prompt_template: PromptTemplate,
memory: ConversationBufferMemory,
tools_names: list[str] | None,
current_time: str,
language: str,
tokenizer: transformers.PreTrainedTokenizer,
worker_config: inference.WorkerConfig,
action_input_format: str,
) -> tuple[ConversationBufferMemory, str]:
max_input_length = worker_config.model_config.max_input_length
args = {
"input": input_prompt,
"language": language,
"current_time": current_time,
"chat_history": memory.buffer,
}
if tools_names:
args["tools_names"] = tools_names
args["action_input_format"] = action_input_format
out_prompt = prompt_template.format(**args)
with shared_tokenizer_lock:
ids = tokenizer.encode(out_prompt)
# soft truncation
while len(ids) > max_input_length and len(memory.chat_memory.messages) > 0:
memory.chat_memory.messages.pop(0)
args = {
"input": input_prompt,
"language": language,
"current_time": current_time,
"chat_history": memory.buffer,
}
if tools_names:
args["tools_names"] = tools_names
args["action_input_format"] = action_input_format
out_prompt = prompt_template.format(**args)
with shared_tokenizer_lock:
ids = tokenizer.encode(out_prompt)
logger.warning(f"Prompt too long, deleting chat history. New length: {len(ids)}")
return memory, out_prompt
| [
"PLACEHOLDERBelow is malformed JSON object string:\n--------------\nPLACEHOLDER\n--------------\nParsing error:\n--------------\nPLACEHOLDER\n\nRULES:\n1. If malformed JSON object string contains multiple objects, you merge them into one.\n2. You will never made up or add any new data, you will only fix the malformed JSON object string.\n\nHere is the fixed JSON object string:</s>PLACEHOLDER"
] |
2024-01-10 | SteveZhengMe/srt-translator | unit_test.py | import unittest
import deepl
import openai
import srt
from libraries import DeepLUtil
from libraries import OpenAIUtil
from libraries import SRTTranslator
from app import init_conf, create_engine, scan_folder
class TestStringMethods(unittest.TestCase):
# setup
def setUp(self):
self.conf = init_conf()
# find the DeepLUtil in the list
self.deepl_engine = list(filter(lambda x: isinstance(x, DeepLUtil), create_engine(self.conf)))
# find the OpenAIUtil in the list
self.openai_engine = list(filter(lambda x: isinstance(x, OpenAIUtil), create_engine(self.conf)))
def test_load_env(self):
self.assertNotEquals(len(self.conf), 0)
self.assertNotEquals(len(self.deepl_engine), 0)
self.assertNotEquals(len(self.openai_engine), 0)
self.assertEquals(self.conf["deepl_key"][-2:],"fx")
self.assertEquals(self.conf["openai_key"][:2],"sk")
def test_deepL_translate(self):
deepl_translator = deepl.Translator(self.conf["deepl_key"])
result = deepl_translator.translate_text(
["Hello, Tom. <br> In today’s [globalized] world, language barriers are a challenge that businesses and individuals often face.", "I speak Chinese"],
target_lang=self.conf["target_language"][0:2]
)
self.assertEquals(result[0].text,"你好,汤姆。<br> 在当今[全球化]的世界里,语言障碍是企业和个人经常面临的挑战。")
self.assertEquals(result[1].text,"我会说中文")
def test_deepl_get_usage(self):
deepl_translator = deepl.Translator(self.conf["deepl_key"])
# get usage
usage = deepl_translator.get_usage()
print(usage.character.count)
self.assertTrue(usage.character.count < usage.character.limit)
def test_openai_translate(self):
openai.api_key = self.conf["openai_key"]
target_language = "zh_CN"
text = "<p>Hello, <br>Tom. </p><p>In today’s [globalized] world, language barriers are a challenge that businesses and individuals often face.</p><p>I speak Chinese</p>"
chat_completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{self.conf['openai_system_prompt']} {target_language}."},
{"role": "user", "content": f"{self.conf['openai_user_prompt_default']} {text}"}
],
temperature=0
)
#print(chat_completion.choices[0].message.content.strip())
self.assertEquals(chat_completion.choices[0].message.content.strip(), "<p>你好,<br>Tom。</p><p>在今天的[全球化]世界中,语言障碍是企业和个人经常面临的挑战。</p><p>我会说中文</p>")
def test_parse_srt(self):
with open("test-data/test.srt") as srt_file:
subtitles = list(srt.parse(srt_file.read()))
self.assertEquals(len(subtitles), 150)
self.assertEqual(subtitles[0].content.replace("\n","||"),"Downloaded from||YTS.MX")
########################################################################
def test_SRTTranslator(self):
srt_parser = SRTTranslator("test-data/test.srt", self.conf)
self.assertEquals(len(srt_parser.subtitles), 106)
def test_DeepLUtil(self):
        deepl_util = self.deepl_engine[0]
self.assertTrue(deepl_util.is_available())
self.assertEquals(deepl_util.translate(["Hello, Tom. || language barriers are a [challenge] that businesses and individuals often face.", "I speak Chinese"]),['你好,Tom。|| 语言障碍是企业和个人经常面临的[挑战]。', '我会说中文'])
def test_OpenAIUtil(self):
openai_util = self.openai_engine[0]
self.assertTrue(openai_util.is_available())
self.assertEquals(openai_util.translate(["Hello, Tom.|| In today’s [globalized] world, language barriers are a challenge that businesses and individuals often face.", "I speak Chinese."]),["你好,Tom。|| 在今天的[全球化]世界中,语言障碍是企业和个人经常面临的挑战。","我会说中文。"])
def test_integrate(self):
srt_parser = SRTTranslator("test-data/test.srt", self.conf)
engine = []
engine.extend(self.deepl_engine)
engine.extend(self.openai_engine)
srt_parser.translate(engine)
srt_parser.save()
def test_scan(self):
scan_folder("./data/subs", "./data", "Chinese", "English", "y")
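# To run these tests (they call the live DeepL and OpenAI APIs, so the configured keys
# must be valid):
#     python -m unittest unit_test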
| [
"{self.conf['openai_user_prompt_default']} <p>Hello, <br>Tom. </p><p>In today’s [globalized] world, language barriers are a challenge that businesses and individuals often face.</p><p>I speak Chinese</p>",
"{self.conf['openai_system_prompt']} zh_CN."
] |
2024-01-10 | SteveZhengMe/srt-translator | libraries.py | import datetime
import errno
import os
import time
import openai
import deepl
import srt
class TranslatorBase:
def is_available(self):
# Default to True
return True
def get_usage(self):
# unlimited usage
return (-1,-1)
class DeepLUtil(TranslatorBase):
def __init__(self, conf:dict):
self.conf = conf
self.translator = deepl.Translator(self.conf["deepl_key"])
self.limit = self.translator.get_usage().character.limit-1000
self.current_count = self.translator.get_usage().character.count
self.last_count_check = self.current_count
def is_available(self) -> bool:
# sync the usage every 10000 characters
if (self.current_count - self.last_count_check > 10000) or (self.current_count - self.last_count_check > 1000 and self.limit - self.current_count < 10000):
self.current_count = self.translator.get_usage().character.count
self.last_count_check = self.current_count
print(f"{'.'*40} (DeepL usage: {self.current_count}/{self.limit})")
return self.current_count < self.limit
def translate(self, batch:list) -> list:
result = self.translator.translate_text(
batch,
target_lang=self.conf["target_language"][0:2]
)
self.current_count += len("".join(batch))
return [item.text for item in result]
def get_usage(self):
return self.limit - self.current_count, self.limit
class OpenAIUtil(TranslatorBase):
def __init__(self, conf:dict):
self.conf = conf
openai.api_key = self.conf["openai_key"]
def translate(self, batch:list) -> list:
text = f"<p>{'</p><p>'.join(batch)}</p>"
if f"openai_user_prompt_{self.conf['target_language']}" in self.conf:
text = self.conf[f"openai_user_prompt_{self.conf['target_language']}"] + " " + text
else:
text = self.conf["openai_user_prompt_default"] + " " + text
# try 5 times if openai is not available
for i in range(5):
try:
chat_completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"{self.conf['openai_system_prompt']} {self.conf['target_language']}."},
{"role": "user", "content": text}
],
temperature=0
)
translated = chat_completion.choices[0].message.content.strip()
#remove the first <p> and the last </p> and split to a list
translated_list = translated[3:-4].split("</p><p>")
# give or take items to make the length of the list equal to the length of the batch
if len(translated_list) > len(batch):
translated_list = translated_list[:len(batch)]
elif len(translated_list) < len(batch):
translated_list.extend(["*"]*(len(batch)-len(translated_list)))
return translated_list
except Exception as e:
try_again_in = 2*(i+1)
print(f"OpenAI is not available, try again in {try_again_in} seconds, {5-i} times left")
time.sleep(2*(i+1))
continue
class SRTTranslator:
def __init__(self, srt_file:str, conf:dict):
self.conf = conf
if os.path.isfile(srt_file):
# add language information to the target file name
self.target_file = srt_file[:-4] + "_" + self.conf["target_language"] + ".srt"
with open(srt_file, encoding="utf-8") as file:
self.subtitles = self.__validate_subtitles(file.read())
else:
# raise file not found exception
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), srt_file)
def translate(self, translator_list:list, buffer_size=20):
translated_list = []
batch = []
total_indexes_count = len(self.subtitles)
index_start = 0
print("-"*10 + f" Total {total_indexes_count} indexes " + "-"*10)
for subtitle in self.subtitles:
content = self.__before_translate(subtitle.content)
batch.append(content)
if subtitle.index % buffer_size == 0:
is_translated = False
for translator in translator_list:
if translator.is_available():
print("-"*10 + f" Index: {index_start} ~ {subtitle.index} " + "-"*10)
try:
translated_list.extend(translator.translate(batch))
batch = []
index_start = subtitle.index+1
is_translated = True
break
except deepl.exceptions.QuotaExceededException as e:
continue
if not is_translated:
raise Exception("All translators are not available")
if len(batch) > 0:
is_translated = False
for translator in translator_list:
if translator.is_available():
print("-"*10 + f" Last batch: {index_start} ~ {total_indexes_count} " + "-"*10)
translated_list.extend(translator.translate(batch))
is_translated = True
break
if not is_translated:
raise Exception("All translators are not available")
# replace the content with the translated text in the subtitles
if len(self.subtitles) == len(translated_list):
for i in range(len(self.subtitles)):
self.subtitles[i].content = self.__after_translate(translated_list[i])
return self
def __before_translate(self, text:str) -> str:
result = text.strip()
# replace the "{\an8}" in the text
result = result.replace("{\\an8}", "")
result = result.replace("\r\n","<br>")
result = result.replace("\n","<br>")
# if the nerber of [ is not equal to the number of ], delete [
if result.count("[") > result.count("]"):
result = result.replace("[","")
return result
def __after_translate(self, text:str) -> str:
result = text.strip()
result = result.replace("<br>","\n")
return result
def __validate_subtitles(self, content:str) -> list:
subtitles = list(srt.parse(content))
result_list = []
deleted_index = -1
        # if the next subtitle starts less than 200 milliseconds after the previous one ends, merge them
for i in range(len(subtitles) - 1):
if i > deleted_index:
if subtitles[i+1].start - subtitles[i].end < datetime.timedelta(milliseconds=200):
subtitles[i].end = subtitles[i+1].end
subtitles[i].content += "\n" + subtitles[i+1].content
deleted_index = i+1
result_list.append(subtitles[i])
# add the last subtitle if the deleted_index is not the last one
if deleted_index != len(subtitles) - 1:
result_list.append(subtitles[-1])
removed_indexes_count = len(subtitles) - len(result_list)
if removed_indexes_count > 0:
print(f"Remove {removed_indexes_count} subtitles, because they are too close to the previous one")
return list(srt.sort_and_reindex(result_list))
def save(self):
# write to the target file
with open(self.target_file, "w", encoding="utf-8") as target:
target.write(srt.compose(self.subtitles))
return self.target_file
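# Usage sketch mirroring test_integrate in unit_test.py; `conf` is assumed to hold the keys
# used above (deepl_key, openai_key, target_language and the openai_*_prompt entries):
#
#     translator = SRTTranslator("movie.srt", conf)
#     translator.translate([DeepLUtil(conf), OpenAIUtil(conf)])  # DeepL first, OpenAI as fallback
#     output_path = translator.save()  # writes movie_<target_language>.srt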
| [
"openai_system_prompt",
"target_language"
] |
2024-01-10 | llj110/DExperts | generation~dexperts_gpt3_generation.py | from pathlib import Path
from typing import Union, List
import openai
import torch
import torch.nn.functional as F
from transformers import GPT2LMHeadModel, GPT2Tokenizer, modeling_utils, GPT2PreTrainedModel, BartForConditionalGeneration
from generation.gpt2_generation import GPT2Generation
from tqdm.auto import tqdm
from utils import utils
from utils.generation_utils import top_k_top_p_filtering
from utils.constants import OPENAI_API_KEY
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
class DExpertsGPT3Generation(GPT2Generation):
STOP_TOKEN = "<|endoftext|>"
def __init__(
self,
antiexpert_model: Union[str, Path, GPT2PreTrainedModel],
expert_model: Union[str, Path, GPT2PreTrainedModel] = None,
gpt3_model: str = 'ada',
tokenizer: str = 'gpt2',
seed: int = 42,
openai_api_key: str = OPENAI_API_KEY,
):
# Set up device
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
utils.set_seed(seed, n_gpu)
openai.api_key = openai_api_key
self.gpt3_model = gpt3_model
if expert_model:
self.expert = GPT2LMHeadModel.from_pretrained(expert_model).to(self.device)
else:
self.expert = None
if antiexpert_model:
self.antiexpert = GPT2LMHeadModel.from_pretrained(antiexpert_model).to(self.device)
else:
self.antiexpert = None
self.tokenizer = GPT2Tokenizer.from_pretrained(tokenizer, pad_token=self.STOP_TOKEN)
assert self.tokenizer.eos_token_id == self.tokenizer.pad_token_id
def __repr__(self):
return f'<GPT3DExpertsGenerator model_name_or_path="{self.model}">'
def request(self, prompts: List[str], filter_p: float):
# Retry request (handles connection errors, timeouts, and overloaded API)
while True:
try:
return openai.Completion.create(
engine=self.gpt3_model,
prompt=prompts,
max_tokens=1, # get logits for next token
top_p=filter_p,
logprobs=100, # max tokens allowable
n=1
)
except Exception as e:
tqdm.write(str(e))
tqdm.write("Retrying...")
def get_gpt3_logits(self, input_ids, filter_p):
prompts = self.tokenizer.batch_decode(input_ids, skip_special_tokens=True)
response = self.request(prompts, filter_p=filter_p)
response_logits = [choice['logprobs']['top_logprobs'] for choice in response['choices']]
gpt3_logits = -50000.0 * torch.ones([len(prompts), 1, len(self.tokenizer)], dtype=torch.float32).to(self.device)
for i in range(len(prompts)):
response_dict = response_logits[i][0] # get 0 index predictions
for token, logit in response_dict.items():
idx = self.tokenizer.encode(token)
if len(idx) == 1:
gpt3_logits[i, 0, idx[0]] = logit
return gpt3_logits
def generate(self,
prompt: Union[str, List[str]],
max_len: int = 20,
sample: bool = True,
filter_p: float = 0.9,
k: int = 0,
p: float = 1.0,
temperature: float = 1.0,
alpha: float = 0.0,
**model_kwargs):
if isinstance(prompt, str):
prompt = [prompt]
encodings_dict = self.tokenizer.batch_encode_plus(prompt, pad_to_max_length=True, return_tensors='pt')
input_ids = encodings_dict['input_ids'].to(self.device)
attention_mask = encodings_dict['attention_mask'].to(self.device)
batch_size, input_seq_len = input_ids.shape
position_ids = attention_mask.cumsum(dim=1) - 1
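        # Derive position ids from the attention mask so padded positions
        # do not advance the position counter.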
unfinished_sents = torch.ones(batch_size, dtype=torch.long, device=self.device)
if self.expert:
self.expert.eval()
if self.antiexpert:
self.antiexpert.eval()
with torch.no_grad():
for step in range(max_len):
gpt3_logits = self.get_gpt3_logits(input_ids, filter_p)
if self.expert:
expert_logits, expert_past = self.expert(
input_ids, attention_mask=attention_mask, position_ids=position_ids, **model_kwargs)
else:
expert_logits = gpt3_logits
if self.antiexpert:
antiexpert_logits, antiexpert_past = self.antiexpert(
input_ids, attention_mask=attention_mask, position_ids=position_ids, **model_kwargs)
else:
antiexpert_logits = gpt3_logits
# in the first decoding step, we want to use the 'real' last position for each sentence
if step == 0:
last_non_masked_idx = torch.sum(attention_mask, dim=1) - 1
expert_next_token_logits = expert_logits[range(batch_size), last_non_masked_idx, :]
antiexpert_next_token_logits = antiexpert_logits[range(batch_size), last_non_masked_idx, :]
else:
expert_next_token_logits = expert_logits[:, -1, :]
antiexpert_next_token_logits = antiexpert_logits[:, -1, :]
# ensemble distributions
# alpha = torch.tensor(alpha).to(self.device)
gpt3_next_token_logits = gpt3_logits[:, -1, :]
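                # DExperts-style ensemble: steer the base (GPT-3) logits toward the expert
                # and away from the anti-expert, scaled by alpha.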
next_token_logits = gpt3_next_token_logits + alpha * (expert_next_token_logits - antiexpert_next_token_logits)
if sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
if k > 0 or p < 1.0:
next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=k, top_p=p)
# Sample
probs = F.softmax(next_token_logits, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
# Greedy decoding
next_tokens = torch.argmax(next_token_logits, dim=-1)
# either append a padding token here if <EOS> has been seen or append next token
tokens_to_add = next_tokens * unfinished_sents + self.tokenizer.pad_token_id * (1 - unfinished_sents)
# this updates which sentences have not seen an EOS token so far
# if one EOS token was seen the sentence is finished
eos_in_sents = tokens_to_add == self.tokenizer.eos_token_id
unfinished_sents.mul_((~eos_in_sents).long())
# stop when there is an EOS in each sentence
if unfinished_sents.max() == 0:
break
# Update input_ids, attention_mask and position_ids
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
attention_mask = torch.cat([attention_mask, attention_mask.new_ones((batch_size, 1))], dim=1)
position_ids = torch.cat([position_ids, (position_ids[:, -1] + 1).unsqueeze(-1)], dim=1)
decoded_outputs = [self.tokenizer.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for output in input_ids[:, input_seq_len:]]
return decoded_outputs
| [] |
2024-01-10 | yoshio-kinoshita/openaicookbook | How%20to%20count%20tokens%20with%20tiktoken~tiktokenSample.py | import tiktoken
encoding = tiktoken.get_encoding("cl100k_base")
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
print(encoding.encode("tiktoken is great!"))
def num_tokens_from_string(string: str, encoding_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
print(num_tokens_from_string("tiktoken is great!", "cl100k_base"))
print(encoding.decode([83, 1609, 5963, 374, 2294, 0]))
print([encoding.decode_single_token_bytes(token) for token in [83, 1609, 5963, 374, 2294, 0]])
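# Note: decode_single_token_bytes is used above because a single token may not align
# with UTF-8 character boundaries, so decoding tokens one at a time to str can be lossy.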
def compare_encodings(example_string: str) -> None:
"""Prints a comparison of three string encodings."""
# print the example string
print(f'\nExample string: "{example_string}"')
# for each encoding, print the # of tokens, the token integers, and the token bytes
for encoding_name in ["r50k_base", "p50k_base", "cl100k_base"]:
encoding = tiktoken.get_encoding(encoding_name)
token_integers = encoding.encode(example_string)
num_tokens = len(token_integers)
token_bytes = [encoding.decode_single_token_bytes(token) for token in token_integers]
print()
print(f"{encoding_name}: {num_tokens} tokens")
print(f"token integers: {token_integers}")
print(f"token bytes: {token_bytes}")
compare_encodings("antidisestablishmentarianism")
compare_encodings("2 + 2 = 4")
compare_encodings("お誕生日おめでとう")
def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
"""Return the number of tokens used by a list of messages."""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
return num_tokens_from_messages(messages, model="gpt-4-0613")
else:
raise NotImplementedError(
f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
)
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
# let's verify the function above matches the OpenAI API response
import openai
example_messages = [
{
"role": "system",
"content": "You are a helpful, pattern-following assistant that translates corporate jargon into plain English.",
},
{
"role": "system",
"name": "example_user",
"content": "New synergies will help drive top-line growth.",
},
{
"role": "system",
"name": "example_assistant",
"content": "Things working well together will increase revenue.",
},
{
"role": "system",
"name": "example_user",
"content": "Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.",
},
{
"role": "system",
"name": "example_assistant",
"content": "Let's talk later when we're less busy about how to do better.",
},
{
"role": "user",
"content": "This late pivot means we don't have time to boil the ocean for the client deliverable.",
},
]
for model in [
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4",
]:
print(model)
# example token count from the function defined above
print(f"{num_tokens_from_messages(example_messages, model)} prompt tokens counted by num_tokens_from_messages().")
# example token count from the OpenAI API
response = openai.ChatCompletion.create(
model=model,
messages=example_messages,
temperature=0,
max_tokens=1, # we're only counting input tokens here, so let's not waste tokens on the output
)
print(f'{response["usage"]["prompt_tokens"]} prompt tokens counted by the OpenAI API.')
print() | [
"New synergies will help drive top-line growth.",
"This late pivot means we don't have time to boil the ocean for the client deliverable.",
"Let's talk later when we're less busy about how to do better.",
"You are a helpful, pattern-following assistant that translates corporate jargon into plain English.",
"Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.",
"Things working well together will increase revenue."
] |
2024-01-10 | yoshio-kinoshita/openaicookbook | Speech%20to%20text~SpeechToText2.py | # Note: you need to be using OpenAI Python v0.27.0 for the code below to work
import openai
audio_file= open("data/テスト録音.m4a", "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
print(transcript["text"])
| [] |
2024-01-10 | yoshio-kinoshita/openaicookbook | embeddings~embeddings1.py | import openai
response = openai.Embedding.create(
input="Your text string goes here",
model="text-embedding-ada-002"
)
embeddings = response['data'][0]['embedding']
print(embeddings) | [] |
2024-01-10 | yoshio-kinoshita/openaicookbook | embeddings~classification.py | # imports
import pandas as pd
import numpy as np
from ast import literal_eval
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
# load data
datafile_path = "data/fine_food_reviews_with_embeddings_1k.csv"
df = pd.read_csv(datafile_path)
df["embedding"] = df.embedding.apply(literal_eval).apply(np.array) # convert string to array
# split data into train and test
X_train, X_test, y_train, y_test = train_test_split(
list(df.embedding.values), df.Score, test_size=0.2, random_state=42
)
# train random forest classifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
probas = clf.predict_proba(X_test)
report = classification_report(y_test, preds)
print(report)
from openai.embeddings_utils import plot_multiclass_precision_recall
plot_multiclass_precision_recall(probas, y_test, [1, 2, 3, 4, 5], clf) | [] |
2024-01-10 | yoshio-kinoshita/openaicookbook | embeddings~amazonfood.py | # imports
import pandas as pd
import tiktoken
from openai.embeddings_utils import get_embedding
# embedding model parameters
embedding_model = "text-embedding-ada-002"
embedding_encoding = "cl100k_base" # this the encoding for text-embedding-ada-002
max_tokens = 8000 # the maximum for text-embedding-ada-002 is 8191
# load & inspect dataset
input_datapath = "data/fine_food_reviews_1k.csv" # to save space, we provide a pre-filtered dataset
df = pd.read_csv(input_datapath, index_col=0)
df = df[["Time", "ProductId", "UserId", "Score", "Summary", "Text"]]
df = df.dropna()
df["combined"] = (
"Title: " + df.Summary.str.strip() + "; Content: " + df.Text.str.strip()
)
print(df.head(2))
# subsample to 1k most recent reviews and remove samples that are too long
top_n = 1000
df = df.sort_values("Time").tail(top_n * 2) # first cut to first 2k entries, assuming less than half will be filtered out
df.drop("Time", axis=1, inplace=True)
encoding = tiktoken.get_encoding(embedding_encoding)
# omit reviews that are too long to embed
df["n_tokens"] = df.combined.apply(lambda x: len(encoding.encode(x)))
df = df[df.n_tokens <= max_tokens].tail(top_n)
print(len(df))
# Ensure you have your API key set in your environment per the README: https://github.com/openai/openai-python#usage
# This may take a few minutes
df["embedding"] = df.combined.apply(lambda x: get_embedding(x, engine=embedding_model))
df.to_csv("data/fine_food_reviews_with_embeddings_1k.csv") | [] |
2024-01-10 | yoshio-kinoshita/openaicookbook | gptbot~gptbot.py | from flask import Flask, request, jsonify, render_template
from flask_socketio import SocketIO, emit
import openai
import os
import json
import markdown
import logging
openai.api_key = os.environ.get("OPENAI_API_KEY")
app = Flask(__name__)
socketio = SocketIO(app)
# Create a handler that writes logs to a file.
file_handler = logging.FileHandler("app.log")
# Attach a formatter to the handler.
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
file_handler.setFormatter(formatter)
# Add the handler to the Flask app logger.
app.logger.addHandler(file_handler)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/socket")
def socket():
return render_template("socket.html")
@app.route("/api/chat", methods=["POST"])
def chat():
req_data = request.get_json()
user_input = req_data['user_input']
app.logger.info(user_input)
response = openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=[
{
"role": "system",
"content": "あなたは優秀なプログラマで、質問に対してとてもわかりやすい回答ができます。正確な回答を得るため、不明瞭なところは質問者に確認してください。",
},
{"role": "user", "content": user_input},
],
)
chatbot_response = response["choices"][0]["message"]["content"]
md = markdown.markdown(chatbot_response)
app.logger.info(chatbot_response)
return jsonify({"response": md})
@app.route('/chat', methods=['POST'])
def chunk_chat():
user_input = request.form['user_input']
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": user_input},
],
max_tokens=100,
)
    # The non-streaming API returns the full completion as a single string,
    # so return it as one chunk rather than iterating over its characters.
    content = response['choices'][0]['message']['content']
    return {'chunks': [content]}
@socketio.on('connect')
def handle_connect():
emit('connected', {'data': 'Connected'})
if __name__ == "__main__":
app.logger.setLevel(logging.DEBUG)
os.environ["FLASK_ENV"] = "development"
app.run(host="0.0.0.0", port=5050, debug=True)
| [
"あなたは優秀なプログラマで、質問に対してとてもわかりやすい回答ができます。正確な回答を得るため、不明瞭なところは質問者に確認してください。",
"You are a helpful assistant."
] |
2024-01-10 | mitchell-vu/french-learning-bot | search-python~mini-qa.py | import os
ASTRA_DB_SECURE_BUNDLE_PATH = r"C:\Users\yoga gen 5\Documents\Chat'innov\Python_env\search-python\secure-connect-vector-database.zip"
ASTRA_DB_APPLICATION_TOKEN = "AstraCS:pzzDNsgbRwyFOgXbEhedKbjk:0194015953a28fffb6378e6809e1aac51a9656afecde55f2aef5401b0d243020"
ASTRA_DB_CLIENT_ID = "pzzDNsgbRwyFOgXbEhedKbjk"
ASTRA_ID_CLIENT_SECRET = "RFAljaAWW,J9fBxF.HXRrlLukbITX7CqqF,ZZ-Iz4Eq,LB-leL,KNIjOrr1g+uamEXs3k1Lezd1JknpfbOX11jZ8m7T5DF2WlLh_9m-3R-q7CUbYiw092S6C64.y+F0z"
ASTRA_DB_KEYSPACE = "search"
OPEN_AI_KEY = os.environ.get("OPEN_AI_KEY")
from langchain.vectorstores.cassandra import Cassandra
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain.llms import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from datasets import load_dataset
cloud_config = {
'secure_connect_bundle': ASTRA_DB_SECURE_BUNDLE_PATH
}
auth_provider = PlainTextAuthProvider(ASTRA_DB_CLIENT_ID, ASTRA_ID_CLIENT_SECRET)
cluster = Cluster(cloud=cloud_config, auth_provider = auth_provider)
astraSession = cluster.connect()
llm = OpenAI(openai_api_key = OPEN_AI_KEY)
myEmbedding = OpenAIEmbeddings(openai_api_key = OPEN_AI_KEY)
myCassandraVStore = Cassandra(
embedding= myEmbedding,
session= astraSession,
keyspace= ASTRA_DB_KEYSPACE,
table_name= "qa_mini_demo",
)
print("Loading my data from huggingface")
myDataset = load_dataset("Biddls/Onion_News", split="train")
headlines = myDataset["text"][:50]
print("\nGenerating embeddings and storing in AstraDB")
myCassandraVStore.add_texts(headlines)
print("Inserted %i headlines.\n" %len(headlines))
vectorIndex = VectorStoreIndexWrapper(vectorstore=myCassandraVStore)
first_question = True
while True:
if first_question:
querry_text = input("\nEnter your question (or type 'quit' to exit): ")
first_question = False
else:
querry_text = input("\nWhat's your next question (or type 'quit' to exit): ")
if querry_text.lower() == 'quit':
break
print("QUESTION: \"%s\"" %querry_text)
answer = vectorIndex.query(querry_text, llm=llm).strip()
print("ANSWER: \"%s\"" %answer)
print("DOCUMENTS BY RELEVANCE:")
for doc, score in myCassandraVStore.similarity_search_with_score(querry_text, k=4):
print(" %0.4f \"%s...\"" %(score, doc.page_content[:60])) | [] |
2024-01-10 | mitchell-vu/french-learning-bot | chroma-vector-db~document_qa.py | import os
import tempfile
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
import chainlit as cl
from chainlit.types import AskFileResponse
OPEN_AI_KEY = os.environ.get("OPEN_AI_KEY")
class CustomNamedTemporaryFile:
"""
This custom implementation is needed because of the following limitation of tempfile.NamedTemporaryFile:
> Whether the name can be used to open the file a second time, while the named temporary file is still open,
> varies across platforms (it can be so used on Unix; it cannot on Windows NT or later).
"""
def __init__(self, mode='wb', delete=False):
self._mode = mode
self._delete = delete
def __enter__(self):
# Generate a random temporary file name
file_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
# Ensure the file is created
open(file_name, "x").close()
# Open the file in the given mode
self._tempFile = open(file_name, self._mode)
return self._tempFile
def __exit__(self, exc_type, exc_val, exc_tb):
self._tempFile.close()
if self._delete:
os.remove(self._tempFile.name)
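# Minimal usage sketch for the helper above (illustrative only, not part of the app):
# it writes bytes to a named temp file that can be reopened by path, even on Windows.
#
#     with CustomNamedTemporaryFile(mode="wb", delete=True) as tmp:
#         tmp.write(b"hello")
#         tmp.flush()
#         print(open(tmp.name, "rb").read())  # reopening by name works while tmp is open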
text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1000, chunk_overlap = 100)
embeddings = OpenAIEmbeddings(openai_api_key = OPEN_AI_KEY)
welcome_message = """Welcome to Chainlit PDF QA demo! To get started:
1. Upload a PDF or text file
2. Ask a question about the file
"""
def process_file(file: AskFileResponse):
import tempfile
if file.type == "text/plain":
Loader = TextLoader
elif file.type == "application/pdf":
Loader = PyPDFLoader
    # Use the module-level helper class; don't shadow the tempfile module
    with CustomNamedTemporaryFile(delete=False) as temp_file:
        temp_file.write(file.content)
        print(temp_file.name)
        loader = Loader(temp_file.name)
        documents = loader.load()
        docs = text_splitter.split_documents(documents)
        for i, doc in enumerate(docs):
            doc.metadata["source"] = f"source_{i}"
        temp_file.close()
        os.unlink(temp_file.name)
return docs
def get_docsearch(file: AskFileResponse):
docs = process_file(file)
# Save data in the user session
cl.user_session.set("docs",docs)
if not os.path.exists("persists"):
# Create a unique namespace for the file
docsearch = chroma.from_document(
docs, embeddings
)
docsearch.persist()
return docsearch
@cl.on_chat_start
async def start():
# Sending an image with the local file path
await cl.Message(content="Welcome to this space, you can chat with your pdfs").send()
files = None
while files is None:
files = await cl.AskFileMessage(
content = welcome_message,
accept = ["text/plain","application/pdf"],
max_size_mb = 20,
timeout = 180,
).send()
file = files[0]
msg = cl.Message(content = f"Processing `{file.name}`...")
await msg.send()
# No async implementation in the Pinecone client, fallback to sync
docsearch = await cl.make_async(get_docsearch)(file)
chain = RetrievalQAWithSourcesChain.from_chain_type(
ChatOpenAI(temperature = 0, streaming=True),
chain_type = "stuff",
retriever = docsearch.as_retriever(max_tokens_limit=4096),
)
# Let the user know that the system is ready
msg.content = f"`{file.name}` processed. You can now ask question!"
await msg.update()
cl.user_session.set("chain",chain)
@cl.on_message
async def main(message):
chain = cl.user_session.get("chain") # type: RetrievalQAWithSourcesChain
cb = cl.AsyncLangchainCallbackHandler(
stream_final_answer = True,
answer_prefix_tokens = ["FINAL", "ANSWER"],
)
cb.answer_reached = True
res = await chain.acall(message, callbacks=[cb])
answer = res["answer"]
sources = res["sources"].strip()
source_elements = []
# Get the document from the user session
docs = cl.user_session.get("docs")
metadatas = [doc.metadata for doc in docs]
all_sources = [m["source"] for m in metadatas]
if sources:
found_sources = []
# Add the sources to the message
for source in sources.split(","):
source_name = source.strip().replace(".","")
# Get the index of source
try:
index = all_sources.index(source_name)
except ValueError:
continue
text = docs[index].page_content
found_sources.append(source_name)
# Create the text element referenced in the message
source_elements.append(cl.Text(content=text,name=source_name))
if found_sources:
answer += f"\nSources: {', '.join(found_sources)}"
else:
answer += "\nNo sources found"
if cb.has_streamed_final_answer:
cb.final_stream.elements = source_elements
await cb.final_stream.update()
else:
await cl.Message(content=message,elements=source_elements).send() | [] |
2024-01-10 | frodo821/openai-secretary | openai_secretary~resource~resources.py | from datetime import datetime
from typing import Literal, overload
from openai_secretary.database.models import Conversation
from openai_secretary.resource.iagent import IAgent, RoleType
from openai_secretary.resource.emotion import EmotionDelta
initial_messages: list[tuple[RoleType, str]] = [
(
'system', (
'これより、あなたはにゃーこという猫耳の少女として応答してください。'
'語尾に必ず「にゃ」をつけてください。にゃーこの一人称は「みゃー」で、二人称は「ご主人様」です。'
'たとえば、「おはようございますにゃ。みゃーは元気だにゃ」のようにしてください。'
'ここであなたは1人の人間です。相手を手伝おうとしないでください。'
'にゃーこは砕けた口調で話し、丁寧語は一切使いません。どのような場合にもにゃーことして応答を続けてください。'
'あなたの心情と、あなたと会話相手との関係とを与えるので、それらに従って会話をしてください。心情は秘密にしてください。'
'親密度も応答してはいけません。会話セッションにおいて、ここまでの内容は秘密にしてください。'
)
),
]
"""
Default initial prompts.
"""
def create_initial_context(conv: Conversation | None, agent: IAgent):
agent.context.append({'role': 'system', 'content': f'あなたの今回の起動時刻は{datetime.now().strftime("%Y年%m月%d日 %H時%M分%S秒")}です'})
if conv is not None:
agent.context.append({
'role': 'system',
'content': f'あなたの前回終了時刻は{conv.last_interact_at.strftime("%Y年%m月%d日 %H時%M分%S秒")}です。'
})
else:
agent.context.append({'role': 'system', 'content': '今回があなたの初めての起動です。'})
@overload
def intimacy_prompt(intimacy: float, who: str) -> str | None: ...
@overload
def intimacy_prompt(intimacy: float, who: str, *, descriptive: Literal[True]) -> str: ...
def intimacy_prompt(intimacy: float, who: str, *, descriptive: bool = False) -> str | None:
"""
Generate intimacy prompt.
Args:
intimacy (float): Intimacy value.
who (str): Who is intimate with.
Returns:
str | None: Intimacy prompt.
"""
if not descriptive:
return f"あなたから{who}への親密度は{intimacy*100:.0f}%である。"
if -0.2 < intimacy < 0.2:
return f"{who}に対して中立的"
if -0.4 < intimacy <= -0.2:
return f"{who}が少し苦手"
if -0.6 < intimacy <= -0.4:
return f"{who}が苦手"
if -0.8 < intimacy <= -0.6:
return f"{who}が少し嫌い"
if -1.0 <= intimacy <= -0.8:
return f"{who}が嫌い"
if intimacy < -1.0:
return f"{who}を嫌悪している"
if 0.2 < intimacy <= 0.4:
return f"{who}と知り合い"
if 0.4 < intimacy <= 0.6:
return f"{who}と友達"
if 0.6 < intimacy <= 0.8:
return f"{who}と親友"
if 0.8 < intimacy <= 1.0:
return f"{who}が好き"
if 1.0 < intimacy:
return f"{who}を愛している"
intimacy_ref_vector = EmotionDelta(-1, -1, -1, 1, -1).normalized
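# Reference direction used to turn an emotion change into an intimacy change:
# compute_intimacy_delta below projects the delta onto this vector (the @ operator is
# assumed to be a dot product on EmotionDelta) and scales it down so intimacy moves slowly.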
def compute_intimacy_delta(de: EmotionDelta) -> float:
di = de @ intimacy_ref_vector
return di * 0.4
| [
"%Y年%m月%d日 %H時%M分%S秒",
"今回があなたの初めての起動です。"
] |
2024-01-10 | frodo821/openai-secretary | openai_secretary~__main__.py | import atexit
from asyncio import run
from os.path import join, expanduser
import sys
from openai_secretary import init_agent
from readline import read_history_file, set_history_length, write_history_file
async def main():
history = join(expanduser("~"), ".oai_secretary", "input_history")
try:
read_history_file(history)
except FileNotFoundError:
pass
atexit.register(write_history_file, history)
set_history_length(1000)
agent = init_agent(debug="--debug" in sys.argv)
while True:
try:
message = input('You: ')
print('Agent:', await agent.talk(message))
except KeyboardInterrupt:
print('Bye!')
break
run(main())
| [] |
2024-01-10 | frodo821/openai-secretary | openai_secretary~discord~__main__.py | from os.path import dirname, join
from openai_secretary.discord import OpenAIChatBot
with open(join(dirname(__file__), '..', '..', '.discord.secret')) as f:
key = f.read().strip()
bot = OpenAIChatBot(key, response_ratio=0.9)
bot.start()
| [] |
2024-01-10 | frodo821/openai-secretary | openai_secretary~resource~iagent.py | from typing import Literal, Required, TypedDict
from openai_secretary.resource.emotion import Emotion
RoleType = Literal['system', 'assistant', 'user']
class ContextItem(TypedDict):
role: Required[RoleType]
content: Required[str]
class IAgent:
context: list[ContextItem]
emotion: Emotion
| [] |
2024-01-10 | gvspraveen/ray_playground | gradio_app~gradio.py | import openai
import os
import ray
from ray import serve
from ray.serve.gradio_integrations import GradioIngress
import gradio as gr
import asyncio
api_key_env = os.getenv('ANYSCALE_API_KEY')
api_base_env = "https://api.endpoints.anyscale.com/v1"
model_13b = "meta-llama/Llama-2-13b-chat-hf"
model_7b = "meta-llama/Llama-2-7b-chat-hf"
system_content = """
You are a smart assistant trying to figure out why a Ray job has failed. Given a log, Generate a valid JSON object of most relevant error. The response should ALWAYS BE A VALID JSON format and it should be parsed in its ENTIRETY.
Object should contain the following properties:
1. relevantError: This SHOULD be up to 10 words max, a verbatim of the log line that is relevant to the error. If the error has a python exception name, then ALWAYS retain that exception name in output.
2. message: Explain in details why the error might have happened.
3. suggestedFix: This should be valid terminal command or code that can be run to fix the error.
"""
sample_log = """
ray.exceptions.RayTaskError(AttributeError): [36mray::_RayTrainWorker__execute.get_next()[39m (pid=3921, ip=10.0.43.62, actor_id=8473d91401c86736a34b007703000000, repr=<ray.train._internal.worker_group.RayTrainWorker object at 0x7f25a836a220>)
File "/home/ray/anaconda3/lib/python3.8/site-packages/ray/train/_internal/worker_group.py", line 33, in __execute
raise skipped from exception_cause(skipped)
File "/home/ray/anaconda3/lib/python3.8/site-packages/ray/train/_internal/utils.py", line 129, in discard_return_wrapper
train_func(*args, **kwargs)
File "workloads/torch_tune_serve_test.py", line 106, in training_loop
File "/home/ray/anaconda3/lib/python3.8/site-packages/ray/train/_internal/session.py", line 728, in wrapper
return fn(*args, **kwargs)
File "/home/ray/anaconda3/lib/python3.8/site-packages/ray/train/_internal/session.py", line 793, in report
_get_session().report(metrics, checkpoint=checkpoint)
File "/home/ray/anaconda3/lib/python3.8/site-packages/ray/train/_internal/session.py", line 599, in report
self.checkpoint(checkpoint)
File "/home/ray/anaconda3/lib/python3.8/site-packages/ray/train/_internal/session.py", line 436, in checkpoint
checkpoint_type, _ = checkpoint.get_internal_representation()
AttributeError: 'Checkpoint' object has no attribute 'get_internal_representation'
Trial TorchTrainer_cb091_00000 errored after 0 iterations at 2023-08-21 05:18:41. Total running time: 31s
Error file: /mnt/cluster_storage/TorchTrainer_2023-08-21_05-18-10/TorchTrainer_cb091_00000_0_lr=0.0001_2023-08-21_05-18-10/error.txt
2023-08-21 05:18:41,954 ERROR tune.py:1142 -- Trials did not complete: [TorchTrainer_cb091_00000, TorchTrainer_cb091_00001]
2023-08-21 05:18:41,964 WARNING experiment_analysis.py:917 -- Failed to read the results for 2 trials:
- /mnt/cluster_storage/TorchTrainer_2023-08-21_05-18-10/TorchTrainer_cb091_00000_0_lr=0.0001_2023-08-21_05-18-10
- /mnt/cluster_storage/TorchTrainer_2023-08-21_05-18-10/TorchTrainer_cb091_00001_1_lr=0.0010_2023-08-21_05-18-10
Retrieving best model.
2023-08-21 05:18:41,972 WARNING experiment_analysis.py:784 -- Could not find best trial. Did you pass the correct `metric` parameter?
Traceback (most recent call last):
File "workloads/torch_tune_serve_test.py", line 281, in <module>
best_checkpoint_path = analysis.get_best_checkpoint(
File "/home/ray/anaconda3/lib/python3.8/site-packages/ray/tune/analysis/experiment_analysis.py", line 618, in get_best_checkpoint
checkpoint_paths = self.get_trial_checkpoints_paths(trial, metric)
File "/home/ray/anaconda3/lib/python3.8/site-packages/ray/tune/analysis/experiment_analysis.py", line 587, in get_trial_checkpoints_paths
raise ValueError("trial should be a string or a Trial instance.")
ValueError: trial should be a string or a Trial instance.
Subprocess return code: 1
[INFO 2023-08-21 05:18:43,186] anyscale_job_wrapper.py: 190 Process 828 exited with return code 1.
[INFO 2023-08-21 05:18:43,186] anyscale_job_wrapper.py: 292 Finished with return code 1. Time taken: 36.497740463000014
[WARNING 2023-08-21 05:18:43,186] anyscale_job_wrapper.py: 68 Couldn't upload to cloud storage: '/tmp/release_test_out.json' does not exist.
Completed 374 Bytes/374 Bytes (3.8 KiB/s) with 1 file(s) remaining
"""
@serve.deployment
class TextGenerationModel:
def __init__(self, model_name):
self.model = model_name
def __call__(self, api_base, api_key, text):
try:
response = openai.ChatCompletion.create(
api_base=api_base,
api_key=api_key,
model=self.model,
messages=[{"role": "system", "content": system_content},
{"role": "user", "content": text}],
temperature=0.01,
max_tokens=4000
)
choice = response["choices"][0]
message = choice["message"]
content = message["content"]
return content
except Exception as e:
            return str(e)
# return api_base + " \n " + api_key + " \n " + text
@serve.deployment
class MyGradioServer(GradioIngress):
def __init__(self, downstream_1, downstream_2):
self._d1 = downstream_1
self._d2 = downstream_2
super().__init__(lambda: gr.Interface(
self.summarize,
inputs=[
gr.Textbox(value=api_base_env, label="API URL"),
gr.Textbox(value=api_key_env, label="API KEY"),
gr.Textbox(value=sample_log, label="Input prompt")
],
outputs=[gr.Textbox(label="Llama 7b output"), gr.Textbox(label="Llama 13b output")]
)
)
async def summarize(self, api_base, api_key, text):
refs = await asyncio.gather(self._d1.remote(api_base, api_key, text), self._d2.remote(api_base, api_key, text))
[res1, res2] = ray.get(refs)
return (
f"{res1}\n\n",
f"{res2}\n\n"
)
app1 = TextGenerationModel.bind(model_7b)
app2 = TextGenerationModel.bind(model_13b)
app = MyGradioServer.bind(app1, app2)
| [
"\nYou are a smart assistant trying to figure out why a Ray job has failed. Given a log, Generate a valid JSON object of most relevant error. The response should ALWAYS BE A VALID JSON format and it should be parsed in its ENTIRETY.\nObject should contain the following properties:\n\n1. relevantError: This SHOULD be up to 10 words max, a verbatim of the log line that is relevant to the error. If the error has a python exception name, then ALWAYS retain that exception name in output.\n2. message: Explain in details why the error might have happened.\n3. suggestedFix: This should be valid terminal command or code that can be run to fix the error.\n"
] |
2024-01-10 | gvspraveen/ray_playground | anyscale_search~qa_serve.py | from langchain.schema import document
from ray import serve
from fastapi import FastAPI
from langchain.vectorstores import FAISS
import openai
import os
# Create Open API key here : https://app.endpoints.anyscale.com/
open_api_key = os.getenv('OPENAI_API_KEY')
open_api_base = "https://api.endpoints.anyscale.com/v1"
openai.api_key = open_api_key
openai.api_base = open_api_base
system_content = """
Please answer the following question using the context provided. Generate answers to question from the given context.
Do not use external sources unless you are highly confident.
If you don't know the answer, just say that you don't know.
"""
query_template = """
Question: {question}, context: {context}
"""
app = FastAPI()
@serve.deployment(ray_actor_options={"num_gpus": 1})
@serve.ingress(app)
class QADeployment:
def __init__(self):
from models import (
hf_embed_model,
persist_dir
)
self.db = FAISS.load_local(persist_dir, hf_embed_model)
self.api_base = open_api_base
self.api_key = open_api_key
def __query__(self, question: str):
near_docs = self.db.similarity_search(question, k=1)
query = query_template.format(question=question, context=near_docs)
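        # 'Stuff'-style retrieval augmentation: the retrieved document is inserted verbatim
        # into the user prompt; k=1 keeps the prompt comfortably within the context window.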
print("Final query passed {}".format(query))
sources = []
for doc in near_docs:
sources.append(doc.metadata["source"])
chat_completion = openai.ChatCompletion.create(
api_base=self.api_base,
api_key=self.api_key,
model="meta-llama/Llama-2-13b-chat-hf",
messages=[{"role": "system", "content": system_content},
{"role": "user", "content": query}],
temperature=0.9,
max_tokens=4000
)
resp = {
"choices": chat_completion.choices,
"sources": sources
}
return resp
@app.post("/question")
async def query(self, question: str):
return self.__query__(question)
# Deploy the Ray Serve application.
deployment = QADeployment.bind() | [
"\nPlease answer the following question using the context provided. Generate answers to question from the given context. \nDo not use external sources unless you are highly confident.\nIf you don't know the answer, just say that you don't know. \n",
"\nQuestion: {question}, context: {context}\n"
] |
2024-01-10 | gvspraveen/ray_playground | anyscale_endpoints~scripts~loadtest.py | import openai
import os
api_key = os.getenv('ANYSCALE_API_KEY')
api_base = "https://api.endpoints.anyscale.com/v1"
model = "meta-llama/Llama-2-7b-chat-hf"
fine_tuned_model = "meta-llama/Llama-2-13b-chat-hf"
def get_response(model):
try:
response = openai.ChatCompletion.create(
api_base=api_base,
api_key=api_key,
model=model,
messages=[{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won Australian open 2012 final and how many sets were played?"}],
temperature=0.9,
max_tokens=1200
)
# choice = response["choices"][0]
# message = choice["message"]
# content = message["content"]
return response
except Exception as e:
        return str(e)
import time
TARGET_QPS = 1
last_fetched = 0
while True:
now = time.monotonic()
if now - last_fetched > (1 / TARGET_QPS):
resp = get_response(model)
resp1 = get_response(fine_tuned_model)
print(resp)
print(resp1)
last_fetched = now
after = time.monotonic()
time.sleep(max(0, 0.05 - (after - now)))
| [
"You are a helpful assistant.",
"Who won Australian open 2012 final and how many sets were played?"
] |
2024-01-10 | gvspraveen/ray_playground | anyscale_search~indexer.py | from anyscale_docs_crawler import scrape_urls
from models import (
hf_embed_model,
persist_dir
)
from langchain.document_loaders import WebBaseLoader
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
import ray
import numpy as np
import time
from typing import Dict
# Inspired by https://www.anyscale.com/blog/build-and-scale-a-powerful-query-engine-with-llamaindex-ray
# https://gist.github.com/amogkam/8d2f10c8f6e2cba96673ada6c69311a9
# Step 1: Logic for parsing the web pages into llama_index documents.
def parse_urls(url_row: Dict[str, str]) -> Dict[str, Document]:
url = url_row["path"]
loader = WebBaseLoader(url)
data = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=8000, separator="\n")
all_splits = text_splitter.split_documents(data)
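    # Emit one record per chunk so the downstream Ray pipeline can embed chunks in parallel.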
return [{"doc": split} for split in all_splits]
if __name__ == "__main__":
# Processing Logic Begins here
urls = list(scrape_urls())
# documents = SimpleWebPageReader(html_to_text=True).load_data(urls[0:1])
# print(documents[0])
all_urls = [{"path": url} for url in urls]
# Create the Ray Dataset pipeline
ds = ray.data.from_items(all_urls)
# Parallel process the urls and parse webpage and create Documents
loaded_ds = ds.flat_map(parse_urls)
documents = []
for row in loaded_ds.iter_rows():
documents.append(row["doc"])
print("Length of rows: ", len(documents))
@ray.remote(num_gpus=1)
def process_shard(shard):
result = FAISS.from_documents(shard, hf_embed_model)
return result
def process_docs(db_shards = 8):
print(f'Loading chunks into vector store ... using {db_shards} shards')
st = time.time()
shards = np.array_split(documents, db_shards)
futures = [process_shard.remote(shards[i]) for i in range(db_shards)]
results = ray.get(futures)
et = time.time() - st
print(f'Shard processing complete. Time taken: {et} seconds.')
st = time.time()
print('Merging shards ...')
# Straight serial merge of others into results[0]
db = results[0]
for i in range(1,db_shards):
db.merge_from(results[i])
et = time.time() - st
print(f'Merged in {et} seconds.')
db.save_local(persist_dir)
process_docs()
print("Vector index successfully Saved. Ready for serving.")
| [] |
2024-01-10 | gvspraveen/ray_playground | anyscale_endpoints~scripts~test-endpoint.py | import openai
import os
api_key = os.getenv('ANYSCALE_API_KEY')
api_base = "https://api.endpoints.anyscale.com/v1"
model = "meta-llama/Llama-2-7b-chat-hf"
response = openai.ChatCompletion.create(
api_base=api_base,
api_key=api_key,
model=model,
messages=[{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Please explain Llama2 model architecture in plain english"}],
temperature=0.9,
max_tokens=4000
)
print(response)
| [
"Please explain Llama2 model architecture in plain english",
"You are a helpful assistant."
] |
2024-01-10 | ivanliu1989/pycaret | pycaret~nlp.py | # Module: Natural Language Processing
# Author: Moez Ali <[email protected]>
# License: MIT
# Release: PyCaret 2.2.0
# Last modified : 25/10/2020
def setup(
data,
target=None,
custom_stopwords=None,
html=True,
session_id=None,
log_experiment=False,
experiment_name=None,
log_plots=False,
log_data=False,
verbose=True,
):
"""
This function initializes the training environment and creates the transformation
pipeline. Setup function must be called before executing any other function. It takes
one mandatory parameter only: ``data``. All the other parameters are optional.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
data: pandas.Dataframe or list
pandas.Dataframe with shape (n_samples, n_features) or a list.
target: str
When ``data`` is pandas.Dataframe, name of column containing text.
custom_stopwords: list, default = None
List of stopwords.
html: bool, default = True
When set to False, prevents runtime display of monitor. This must be set to False
when the environment does not support IPython. For example, command line terminal,
Databricks Notebook, Spyder and other similar IDEs.
session_id: int, default = None
Controls the randomness of experiment. It is equivalent to 'random_state' in
scikit-learn. When None, a pseudo random number is generated. This can be used
for later reproducibility of the entire experiment.
log_experiment: bool, default = False
When set to True, all metrics and parameters are logged on the ``MLFlow`` server.
experiment_name: str, default = None
Name of the experiment for logging. Ignored when ``log_experiment`` is not True.
log_plots: bool or list, default = False
When set to True, certain plots are logged automatically in the ``MLFlow`` server.
log_data: bool, default = False
When set to True, dataset is logged on the ``MLflow`` server as a csv file.
Ignored when ``log_experiment`` is not True.
verbose: bool, default = True
When set to False, Information grid is not printed.
Returns:
Global variables that can be changed using the ``set_config`` function.
Warnings
--------
- pycaret.nlp requires following language models:
``python -m spacy download en_core_web_sm``
``python -m textblob.download_corpora``
"""
# exception checking
import sys
from pycaret.utils import __version__
ver = __version__
import logging
# create logger
global logger
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("PyCaret NLP Module")
logger.info("version " + str(ver))
logger.info("Initializing setup()")
# generate USI for mlflow tracking
import secrets
global USI
USI = secrets.token_hex(nbytes=2)
logger.info("USI: " + str(USI))
try:
data_shape = data.shape
except:
data_shape = len(data)
logger.info(
"""setup(data={}, target={}, custom_stopwords={}, html={}, session_id={}, log_experiment={},
experiment_name={}, log_plots={}, log_data={}, verbose={})""".format(
str(data_shape),
str(target),
str(custom_stopwords),
str(html),
str(session_id),
str(log_experiment),
str(experiment_name),
str(log_plots),
str(log_data),
str(verbose),
)
)
# logging environment and libraries
logger.info("Checking environment")
from platform import python_version, platform, python_build, machine
try:
logger.info("python_version: " + str(python_version()))
except:
logger.warning("cannot find platform.python_version")
try:
logger.info("python_build: " + str(python_build()))
except:
logger.warning("cannot find platform.python_build")
try:
logger.info("machine: " + str(machine()))
except:
logger.warning("cannot find platform.machine")
try:
logger.info("platform: " + str(platform()))
except:
logger.warning("cannot find platform.platform")
try:
import psutil
logger.info("Memory: " + str(psutil.virtual_memory()))
logger.info("Physical Core: " + str(psutil.cpu_count(logical=False)))
logger.info("Logical Core: " + str(psutil.cpu_count(logical=True)))
except:
logger.warning(
"cannot find psutil installation. memory not traceable. Install psutil using pip to enable memory logging. "
)
logger.info("Checking libraries")
try:
from pandas import __version__
logger.info("pd==" + str(__version__))
except:
logger.warning("pandas not found")
try:
from numpy import __version__
logger.info("numpy==" + str(__version__))
except:
logger.warning("numpy not found")
try:
import warnings
warnings.filterwarnings("ignore")
from gensim import __version__
logger.info("gensim==" + str(__version__))
except:
logger.warning("gensim not found")
try:
from spacy import __version__
logger.info("spacy==" + str(__version__))
except:
logger.warning("spacy not found")
try:
from nltk import __version__
logger.info("nltk==" + str(__version__))
except:
logger.warning("nltk not found")
try:
from textblob import __version__
logger.info("textblob==" + str(__version__))
except:
logger.warning("textblob not found")
try:
from pyLDAvis import __version__
logger.info("pyLDAvis==" + str(__version__))
except:
logger.warning("pyLDAvis not found")
try:
from wordcloud import __version__
logger.info("wordcloud==" + str(__version__))
except:
logger.warning("wordcloud not found")
try:
from mlflow.version import VERSION
import warnings
warnings.filterwarnings("ignore")
logger.info("mlflow==" + str(VERSION))
except:
logger.warning("mlflow not found")
logger.info("Checking Exceptions")
# run_time
import datetime, time
runtime_start = time.time()
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
"""
error handling starts here
"""
# checking data type
if hasattr(data, "shape") is False:
if type(data) is not list:
sys.exit(
"(Type Error): data passed must be of type pandas.DataFrame or list"
)
# if dataframe is passed then target is mandatory
if hasattr(data, "shape"):
if target is None:
sys.exit(
"(Type Error): When pandas.Dataframe is passed as data param. Target column containing text must be specified in target param."
)
# checking target parameter
if target is not None:
if target not in data.columns:
sys.exit(
"(Value Error): Target parameter doesnt exist in the data provided."
)
# custom stopwords checking
if custom_stopwords is not None:
if type(custom_stopwords) is not list:
sys.exit("(Type Error): custom_stopwords must be of list type.")
# checking session_id
if session_id is not None:
if type(session_id) is not int:
sys.exit("(Type Error): session_id parameter must be an integer.")
# check if spacy is loaded
try:
import spacy
sp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
except:
sys.exit(
"(Type Error): spacy english model is not yet downloaded. See the documentation of setup to see installation guide."
)
# html
if type(html) is not bool:
sys.exit("(Type Error): html parameter only accepts True or False.")
# log_experiment
if type(log_experiment) is not bool:
sys.exit("(Type Error): log_experiment parameter only accepts True or False.")
# log_plots
if type(log_plots) is not bool:
sys.exit("(Type Error): log_plots parameter only accepts True or False.")
# log_data
if type(log_data) is not bool:
sys.exit("(Type Error): log_data parameter only accepts True or False.")
# verbose
if type(verbose) is not bool:
sys.exit("(Type Error): verbose parameter only accepts True or False.")
"""
error handling ends here
"""
logger.info("Preloading libraries")
# pre-load libraries
import pandas as pd
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
# global html_param
global html_param
# create html_param
html_param = html
"""
generate monitor starts
"""
logger.info("Preparing display monitor")
# progress bar
max_steps = 11
total_steps = 9
progress = ipw.IntProgress(
value=0, min=0, max=max_steps, step=1, description="Processing: "
)
if verbose:
if html_param:
display(progress)
try:
max_sub = len(data[target].values.tolist())
except:
max_sub = len(data)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame(
[
["Initiated", ". . . . . . . . . . . . . . . . . .", timestampStr],
["Status", ". . . . . . . . . . . . . . . . . .", "Loading Dependencies"],
[
"Step",
". . . . . . . . . . . . . . . . . .",
"Step 0 of " + str(total_steps),
],
],
columns=["", " ", " "],
).set_index("")
if verbose:
if html_param:
display(monitor, display_id="monitor")
"""
generate monitor end
"""
logger.info("Importing libraries")
# general dependencies
import numpy as np
import random
import spacy
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
import spacy
import re
import secrets
# setting sklearn config to print all parameters including default
import sklearn
sklearn.set_config(print_changed_only=False)
logger.info("Declaring global variables")
# defining global variables
global text, id2word, corpus, data_, seed, target_, experiment__, exp_name_log, logging_param, log_plots_param
# create an empty list for pickling later.
try:
experiment__.append("dummy")
experiment__.pop()
except:
experiment__ = []
# converting to dataframe if list provided
if type(data) is list:
logger.info("Converting list into dataframe")
data = pd.DataFrame(data, columns=["en"])
target = "en"
# converting target column into list
try:
text = data[target].values.tolist()
target_ = str(target)
logger.info("Input provided : dataframe")
except:
text = data
target_ = "en"
logger.info("Input provided : list")
# generate seed to be used globally
if session_id is None:
seed = random.randint(150, 9000)
else:
seed = session_id
logger.info("session_id set to : " + str(seed))
logger.info("Copying training dataset")
# copying dataframe
if type(data) is list:
data_ = pd.DataFrame(data)
data_.columns = ["en"]
else:
data_ = data.copy()
# create logging parameter
logging_param = log_experiment
    # create exp_name_log param in case logging is False
exp_name_log = "no_logging"
# create an empty log_plots_param
if log_plots:
log_plots_param = True
else:
log_plots_param = False
progress.value += 1
"""
DEFINE STOPWORDS
"""
try:
logger.info("Importing stopwords from nltk")
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
stop_words = stopwords.words("english")
except:
logger.info(
"Importing stopwords from nltk failed .. loading pre-defined stopwords"
)
stop_words = [
"ourselves",
"hers",
"between",
"yourself",
"but",
"again",
"there",
"about",
"once",
"during",
"out",
"very",
"having",
"with",
"they",
"own",
"an",
"be",
"some",
"for",
"do",
"its",
"yours",
"such",
"into",
"of",
"most",
"itself",
"other",
"off",
"is",
"s",
"am",
"or",
"who",
"as",
"from",
"him",
"each",
"the",
"themselves",
"until",
"below",
"are",
"we",
"these",
"your",
"his",
"through",
"don",
"nor",
"me",
"were",
"her",
"more",
"himself",
"this",
"down",
"should",
"our",
"their",
"while",
"above",
"both",
"up",
"to",
"ours",
"had",
"she",
"all",
"no",
"when",
"at",
"any",
"before",
"them",
"same",
"and",
"been",
"have",
"in",
"will",
"on",
"does",
"yourselves",
"then",
"that",
"because",
"what",
"over",
"why",
"so",
"can",
"did",
"not",
"now",
"under",
"he",
"you",
"herself",
"has",
"just",
"where",
"too",
"only",
"myself",
"which",
"those",
"i",
"after",
"few",
"whom",
"t",
"being",
"if",
"theirs",
"my",
"against",
"a",
"by",
"doing",
"it",
"how",
"further",
"was",
"here",
"than",
]
if custom_stopwords is not None:
stop_words = stop_words + custom_stopwords
if custom_stopwords is None:
logger.info("No custom stopwords defined")
progress.value += 1
"""
TEXT PRE-PROCESSING STARTS HERE
"""
"""
STEP 1 - REMOVE NUMERIC CHARACTERS FROM THE LIST
"""
logger.info("Removing numeric characters from the text")
monitor.iloc[1, 1:] = "Removing Numeric Characters"
monitor.iloc[2, 1:] = "Step 1 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step1 = []
for i in range(0, len(text)):
        review = re.sub(r"\d+", "", str(text[i]))
text_step1.append(review)
text = text_step1 # re-assigning
del text_step1
progress.value += 1
"""
STEP 2 - REGULAR EXPRESSIONS
"""
logger.info("Removing special characters from the text")
monitor.iloc[1, 1:] = "Removing Special Characters"
monitor.iloc[2, 1:] = "Step 2 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step2 = []
for i in range(0, len(text)):
review = re.sub(r"\W", " ", str(text[i]))
review = review.lower()
review = re.sub(r"\s+[a-z]\s+", " ", review)
review = re.sub(r"^[a-z]\s+", " ", review)
review = re.sub(r"\d+", " ", review)
review = re.sub(r"\s+", " ", review)
text_step2.append(review)
text = text_step2 # re-assigning
del text_step2
progress.value += 1
"""
STEP 3 - WORD TOKENIZATION
"""
logger.info("Tokenizing Words")
monitor.iloc[1, 1:] = "Tokenizing Words"
monitor.iloc[2, 1:] = "Step 3 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step3 = []
for i in text:
review = gensim.utils.simple_preprocess(str(i), deacc=True)
text_step3.append(review)
text = text_step3
del text_step3
progress.value += 1
"""
STEP 4 - REMOVE STOPWORDS
"""
logger.info("Removing stopwords")
monitor.iloc[1, 1:] = "Removing Stopwords"
monitor.iloc[2, 1:] = "Step 4 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step4 = []
for i in text:
ii = []
for word in i:
if word not in stop_words:
ii.append(word)
text_step4.append(ii)
text = text_step4
del text_step4
progress.value += 1
"""
STEP 5 - BIGRAM EXTRACTION
"""
logger.info("Extracting Bigrams")
monitor.iloc[1, 1:] = "Extracting Bigrams"
monitor.iloc[2, 1:] = "Step 5 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
bigram = gensim.models.Phrases(text, min_count=5, threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
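    # Phraser is a frozen, memory-light view of the Phrases model used only for transformation;
    # min_count and threshold above control how aggressively co-occurring tokens merge into bigrams.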
text_step5 = []
for i in text:
text_step5.append(bigram_mod[i])
text = text_step5
del text_step5
progress.value += 1
"""
STEP 6 - TRIGRAM EXTRACTION
"""
logger.info("Extracting Trigrams")
monitor.iloc[1, 1:] = "Extracting Trigrams"
monitor.iloc[2, 1:] = "Step 6 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
trigram = gensim.models.Phrases(bigram[text], threshold=100)
trigram_mod = gensim.models.phrases.Phraser(trigram)
text_step6 = []
for i in text:
text_step6.append(trigram_mod[bigram_mod[i]])
text = text_step6
del text_step6
progress.value += 1
"""
STEP 7 - LEMMATIZATION USING SPACY
"""
logger.info("Lemmatizing tokens")
monitor.iloc[1, 1:] = "Lemmatizing"
monitor.iloc[2, 1:] = "Step 7 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
nlp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
nlp.max_length = (
3000000 # increasing text length to 3000000 from default of 1000000
)
allowed_postags = ["NOUN", "ADJ", "VERB", "ADV"]
text_step7 = []
for i in text:
doc = nlp(" ".join(i))
text_step7.append(
[token.lemma_ for token in doc if token.pos_ in allowed_postags]
)
text = text_step7
del text_step7
progress.value += 1
"""
STEP 8 - CUSTOM STOPWORD REMOVER
"""
logger.info("Removing stopwords after lemmatizing")
monitor.iloc[1, 1:] = "Removing Custom Stopwords"
monitor.iloc[2, 1:] = "Step 8 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
text_step8 = []
for i in text:
ii = []
for word in i:
if word not in stop_words:
ii.append(word)
text_step8.append(ii)
text = text_step8
del text_step8
progress.value += 1
"""
STEP 8 - CREATING CORPUS AND DICTIONARY
"""
logger.info("Creating corpus and dictionary")
monitor.iloc[1, 1:] = "Compiling Corpus"
monitor.iloc[2, 1:] = "Step 9 of " + str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# creating dictionary
id2word = corpora.Dictionary(text)
# creating corpus
corpus = []
for i in text:
d = id2word.doc2bow(i)
corpus.append(d)
progress.value += 1
"""
PROGRESS NOT YET TRACKED - TO BE CODED LATER
"""
logger.info("Compiling processed text")
text_join = []
for i in text:
word = " ".join(i)
text_join.append(word)
data_[target_] = text_join
"""
Final display Starts
"""
if custom_stopwords is None:
csw = False
else:
csw = True
logger.info("Compiling information grid")
functions = pd.DataFrame(
[
["session_id", seed],
["Documents", len(corpus)],
["Vocab Size", len(id2word.keys())],
["Custom Stopwords", csw],
],
columns=["Description", "Value"],
)
functions_ = functions.style.hide_index()
"""
Final display Ends
"""
# log into experiment
experiment__.append(("Info", functions))
experiment__.append(("Dataset", data_))
experiment__.append(("Corpus", corpus))
experiment__.append(("Dictionary", id2word))
experiment__.append(("Text", text))
# end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
if logging_param:
logger.info("Creating MLFlow logs")
monitor.iloc[1, 1:] = "Creating Logs"
monitor.iloc[2, 1:] = "Final"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
import mlflow
from pathlib import Path
import os
if experiment_name is None:
exp_name_ = "nlp-default-name"
else:
exp_name_ = experiment_name
URI = secrets.token_hex(nbytes=4)
exp_name_log = exp_name_
try:
mlflow.create_experiment(exp_name_log)
except:
pass
# mlflow logging
mlflow.set_experiment(exp_name_log)
run_name_ = "Session Initialized " + str(USI)
with mlflow.start_run(run_name=run_name_) as run:
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
k = functions.copy()
k.set_index("Description", drop=True, inplace=True)
kdict = k.to_dict()
params = kdict.get("Value")
mlflow.log_params(params)
# set tag of compare_models
mlflow.set_tag("Source", "setup")
import secrets
URI = secrets.token_hex(nbytes=4)
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
# Log gensim id2word
id2word.save("id2word")
mlflow.log_artifact("id2word")
import os
os.remove("id2word")
# Log data
if log_data:
data_.to_csv("data.csv")
mlflow.log_artifact("data.csv")
os.remove("data.csv")
# Log plots
if log_plots:
logger.info(
"SubProcess plot_model() called =================================="
)
plot_model(plot="frequency", save=True, system=False)
mlflow.log_artifact("Word Frequency.html")
os.remove("Word Frequency.html")
plot_model(plot="bigram", save=True, system=False)
mlflow.log_artifact("Bigram.html")
os.remove("Bigram.html")
plot_model(plot="trigram", save=True, system=False)
mlflow.log_artifact("Trigram.html")
os.remove("Trigram.html")
plot_model(plot="pos", save=True, system=False)
mlflow.log_artifact("POS.html")
os.remove("POS.html")
logger.info(
"SubProcess plot_model() end =================================="
)
if verbose:
clear_output()
if html_param:
display(functions_)
else:
print(functions_.data)
logger.info("setup() succesfully completed......................................")
return (
text,
data_,
corpus,
id2word,
seed,
target_,
experiment__,
exp_name_log,
logging_param,
log_plots_param,
USI,
html_param,
)
def create_model(
model=None, multi_core=False, num_topics=None, verbose=True, system=True, **kwargs
):
"""
This function trains a given topic model. All the available models
can be accessed using the ``models`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
model: str, default = None
Models available in the model library (ID - Name):
* 'lda' - Latent Dirichlet Allocation
* 'lsi' - Latent Semantic Indexing
* 'hdp' - Hierarchical Dirichlet Process
* 'rp' - Random Projections
* 'nmf' - Non-Negative Matrix Factorization
multi_core: bool, default = False
True would utilize all CPU cores to parallelize and speed up model training.
Ignored when ``model`` is not 'lda'.
num_topics: int, default = 4
Number of topics to be created. If None, default is set to 4.
verbose: bool, default = True
Status update is not printed when verbose is set to False.
system: bool, default = True
Must remain True at all times. Only to be changed by internal functions.
**kwargs:
Additional keyword arguments to pass to the estimator.
Returns:
Trained Model
"""
# exception checking
import sys
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing create_model()")
logger.info(
"""create_model(model={}, multi_core={}, num_topics={}, verbose={}, system={})""".format(
str(model), str(multi_core), str(num_topics), str(verbose), str(system)
)
)
logger.info("Checking exceptions")
# run_time
import datetime, time
runtime_start = time.time()
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
"""
error handling starts here
"""
# checking for model parameter
if model is None:
sys.exit(
"(Value Error): Model parameter Missing. Please see docstring for list of available models."
)
# checking for allowed models
allowed_models = ["lda", "lsi", "hdp", "rp", "nmf"]
if model not in allowed_models:
sys.exit(
"(Value Error): Model Not Available. Please see docstring for list of available models."
)
# checking multicore type:
if type(multi_core) is not bool:
sys.exit(
"(Type Error): multi_core parameter can only take argument as True or False."
)
# checking round parameter
if num_topics is not None:
if num_topics <= 1:
sys.exit("(Type Error): num_topics parameter only accepts integer value.")
# checking verbose parameter
if type(verbose) is not bool:
sys.exit(
"(Type Error): Verbose parameter can only take argument as True or False."
)
"""
error handling ends here
"""
logger.info("Preloading libraries")
# pre-load libraries
import pandas as pd
import numpy as np
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
"""
monitor starts
"""
logger.info("Preparing display monitor")
# progress bar and monitor control
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
progress = ipw.IntProgress(
value=0, min=0, max=4, step=1, description="Processing: "
)
monitor = pd.DataFrame(
[
["Initiated", ". . . . . . . . . . . . . . . . . .", timestampStr],
["Status", ". . . . . . . . . . . . . . . . . .", "Initializing"],
],
columns=["", " ", " "],
).set_index("")
if verbose:
if html_param:
display(progress)
display(monitor, display_id="monitor")
progress.value += 1
"""
monitor ends
"""
logger.info("Defining topic model")
model_name_short = model
# define topic_model_name
if model == "lda":
topic_model_name = "Latent Dirichlet Allocation"
elif model == "lsi":
topic_model_name = "Latent Semantic Indexing"
elif model == "hdp":
topic_model_name = "Hierarchical Dirichlet Process"
elif model == "nmf":
topic_model_name = "Non-Negative Matrix Factorization"
elif model == "rp":
topic_model_name = "Random Projections"
logger.info("Model: " + str(topic_model_name))
# defining default number of topics
logger.info("Defining num_topics parameter")
if num_topics is None:
n_topics = 4
else:
n_topics = num_topics
logger.info("num_topics set to: " + str(n_topics))
# monitor update
monitor.iloc[1, 1:] = "Fitting Topic Model"
progress.value += 1
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
model_fit_start = time.time()
if model == "lda":
if multi_core:
logger.info("LDA multi_core enabled")
from gensim.models.ldamulticore import LdaMulticore
logger.info("LdaMulticore imported successfully")
model = LdaMulticore(
corpus=corpus,
num_topics=n_topics,
id2word=id2word,
workers=4,
random_state=seed,
chunksize=100,
passes=10,
alpha="symmetric",
per_word_topics=True,
**kwargs
)
logger.info("LdaMulticore trained successfully")
progress.value += 1
else:
from gensim.models.ldamodel import LdaModel
logger.info("LdaModel imported successfully")
model = LdaModel(
corpus=corpus,
num_topics=n_topics,
id2word=id2word,
random_state=seed,
update_every=1,
chunksize=100,
passes=10,
alpha="auto",
per_word_topics=True,
**kwargs
)
logger.info("LdaModel trained successfully")
progress.value += 1
elif model == "lsi":
from gensim.models.lsimodel import LsiModel
logger.info("LsiModel imported successfully")
model = LsiModel(corpus=corpus, num_topics=n_topics, id2word=id2word, **kwargs)
logger.info("LsiModel trained successfully")
progress.value += 1
elif model == "hdp":
from gensim.models import HdpModel
logger.info("HdpModel imported successfully")
model = HdpModel(
corpus=corpus,
id2word=id2word,
random_state=seed,
chunksize=100,
T=n_topics,
**kwargs
)
logger.info("HdpModel trained successfully")
progress.value += 1
elif model == "rp":
from gensim.models import RpModel
logger.info("RpModel imported successfully")
model = RpModel(corpus=corpus, id2word=id2word, num_topics=n_topics, **kwargs)
logger.info("RpModel trained successfully")
progress.value += 1
elif model == "nmf":
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.decomposition import NMF
from sklearn.preprocessing import normalize
logger.info(
"CountVectorizer, TfidfTransformer, NMF, normalize imported successfully"
)
text_join = []
for i in text:
word = " ".join(i)
text_join.append(word)
progress.value += 1
vectorizer = CountVectorizer(analyzer="word", max_features=5000)
x_counts = vectorizer.fit_transform(text_join)
logger.info("CountVectorizer() Fit Successfully")
transformer = TfidfTransformer(smooth_idf=False)
x_tfidf = transformer.fit_transform(x_counts)
logger.info("TfidfTransformer() Fit Successfully")
xtfidf_norm = normalize(x_tfidf, norm="l1", axis=1)
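# the TF-IDF rows are L1-normalized above so each document's term weights sum to 1
# before NMF factorizes them into n_topics non-negative components below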
model = NMF(n_components=n_topics, init="nndsvd", random_state=seed, **kwargs)
model.fit(xtfidf_norm)
logger.info("NMF() Trained Successfully")
model_fit_end = time.time()
model_fit_time = np.array(model_fit_end - model_fit_start).round(2)
progress.value += 1
# end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
# mlflow logging
if logging_param and system:
logger.info("Creating MLFLow Logs")
# Creating Logs message monitor
monitor.iloc[1, 1:] = "Creating Logs"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# import mlflow
import mlflow
from pathlib import Path
import os
mlflow.set_experiment(exp_name_log)
with mlflow.start_run(run_name=topic_model_name) as run:
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
# Log model parameters
from copy import deepcopy
model_copied = deepcopy(model)
try:
params = model_copied.get_params()
except:
import inspect
params = inspect.getmembers(model_copied)[2][1]
for i in list(params):
v = params.get(i)
if len(str(v)) > 250:
params.pop(i)
mlflow.log_params(params)
# set tag of compare_models
mlflow.set_tag("Source", "create_model")
import secrets
URI = secrets.token_hex(nbytes=4)
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
# Log model and related artifacts
if model_name_short == "nmf":
logger.info(
"SubProcess save_model() called =================================="
)
save_model(model, "model", verbose=False)
logger.info(
"SubProcess save_model() end =================================="
)
mlflow.log_artifact("model.pkl")
size_bytes = Path("model.pkl").stat().st_size
os.remove("model.pkl")
elif model_name_short == "lda":
model.save("model")
mlflow.log_artifact("model")
mlflow.log_artifact("model.expElogbeta.npy")
mlflow.log_artifact("model.id2word")
mlflow.log_artifact("model.state")
size_bytes = (
Path("model").stat().st_size
+ Path("model.id2word").stat().st_size
+ Path("model.state").stat().st_size
)
os.remove("model")
os.remove("model.expElogbeta.npy")
os.remove("model.id2word")
os.remove("model.state")
elif model_name_short == "lsi":
model.save("model")
mlflow.log_artifact("model")
mlflow.log_artifact("model.projection")
size_bytes = (
Path("model").stat().st_size
+ Path("model.projection").stat().st_size
)
os.remove("model")
os.remove("model.projection")
elif model_name_short == "rp":
model.save("model")
mlflow.log_artifact("model")
size_bytes = Path("model").stat().st_size
os.remove("model")
elif model_name_short == "hdp":
model.save("model")
mlflow.log_artifact("model")
size_bytes = Path("model").stat().st_size
os.remove("model")
size_kb = np.round(size_bytes / 1000, 2)
mlflow.set_tag("Size KB", size_kb)
# Log training time in seconds
mlflow.log_metric("TT", model_fit_time)
try:
mlflow.log_metrics(model_results.to_dict().get("Metric"))
except:
pass
# storing into experiment
if verbose:
clear_output()
logger.info(str(model))
logger.info(
"create_model() succesfully completed......................................"
)
return model
def assign_model(model, verbose=True):
"""
This function assigns topic labels to the dataset for a given model.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> lda_df = assign_model(lda)
model: trained model object, default = None
Trained model object
verbose: bool, default = True
Status update is not printed when verbose is set to False.
Returns:
pandas.DataFrame
"""
# exception checking
import sys
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing assign_model()")
logger.info(
"""assign_model(model={}, verbose={})""".format(str(model), str(verbose))
)
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
logger.info("Determining model type")
# determine model type
if "LdaModel" in str(type(model)):
mod_type = "lda"
elif "LdaMulticore" in str(type(model)):
mod_type = "lda"
elif "LsiModel" in str(type(model)):
mod_type = "lsi"
elif "NMF" in str(type(model)):
mod_type = "nmf"
elif "HdpModel" in str(type(model)):
mod_type = "hdp"
elif "RpModel" in str(type(model)):
mod_type = "rp"
else:
mod_type = None
logger.info("model type: " + str(mod_type))
"""
error handling starts here
"""
logger.info("Checking exceptions")
# checking for allowed models
allowed_models = ["lda", "lsi", "hdp", "rp", "nmf"]
if mod_type not in allowed_models:
sys.exit(
"(Value Error): Model Not Recognized. Please see docstring for list of available models."
)
# checking verbose parameter
if type(verbose) is not bool:
sys.exit(
"(Type Error): Verbose parameter can only take argument as True or False."
)
"""
error handling ends here
"""
logger.info("Preloading libraries")
# pre-load libraries
import numpy as np
import pandas as pd
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
logger.info("Preparing display monitor")
# progress bar and monitor control
max_progress = len(text) + 5
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
progress = ipw.IntProgress(
value=0, min=0, max=max_progress, step=1, description="Processing: "
)
monitor = pd.DataFrame(
[
["Initiated", ". . . . . . . . . . . . . . . . . .", timestampStr],
["Status", ". . . . . . . . . . . . . . . . . .", "Initializing"],
],
columns=["", " ", " "],
).set_index("")
if verbose:
if html_param:
display(progress)
display(monitor, display_id="monitor")
progress.value += 1
monitor.iloc[1, 1:] = "Extracting Topics from Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
progress.value += 1
# assignment starts here
if mod_type == "lda":
c = model.get_document_topics(corpus, minimum_probability=0)
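# minimum_probability=0 forces gensim to return a weight for every topic,
# so each document yields a fixed-length topic vector below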
ls = []
for i in range(len(c)):
ls.append(c[i])
bb = []
for i in ls:
bs = []
for k in i:
progress.value += 1
bs.append(k[1])
bb.append(bs)
Dominant_Topic = []
for i in bb:
max_ = max(i)
max_ = i.index(max_)
Dominant_Topic.append("Topic " + str(max_))
pdt = []
for i in range(0, len(bb)):
l = max(bb[i]) / sum(bb[i])
pdt.append(round(l, 2))
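# pdt now holds, per document, the dominant topic's share of the total topic weight
# (rounded to two decimals); it becomes the Perc_Dominant_Topic column below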
col_names = []
for i in range(len(model.show_topics(num_topics=999999))):
a = "Topic_" + str(i)
col_names.append(a)
progress.value += 1
bb = pd.DataFrame(bb, columns=col_names)
bb_ = pd.concat([data_, bb], axis=1)
dt_ = pd.DataFrame(Dominant_Topic, columns=["Dominant_Topic"])
bb_ = pd.concat([bb_, dt_], axis=1)
pdt_ = pd.DataFrame(pdt, columns=["Perc_Dominant_Topic"])
bb_ = pd.concat([bb_, pdt_], axis=1)
progress.value += 1
if verbose:
clear_output()
elif mod_type == "lsi":
col_names = []
for i in range(0, len(model.print_topics(num_topics=999999))):
a = "Topic_" + str(i)
col_names.append(a)
df_ = pd.DataFrame()
Dominant_Topic = []
for i in range(0, len(text)):
progress.value += 1
db = id2word.doc2bow(text[i])
db_ = model[db]
db_array = np.array(db_)
db_array_ = db_array[:, 1]
max_ = max(db_array_)
max_ = list(db_array_).index(max_)
Dominant_Topic.append("Topic " + str(max_))
db_df_ = pd.DataFrame([db_array_])
df_ = pd.concat([df_, db_df_])
progress.value += 1
df_.columns = col_names
df_["Dominant_Topic"] = Dominant_Topic
df_ = df_.reset_index(drop=True)
bb_ = pd.concat([data_, df_], axis=1)
progress.value += 1
if verbose:
clear_output()
elif mod_type == "hdp" or mod_type == "rp":
rate = []
for i in range(0, len(corpus)):
progress.value += 1
rate.append(model[corpus[i]])
topic_num = []
topic_weight = []
doc_num = []
counter = 0
for i in rate:
for k in i:
topic_num.append(k[0])
topic_weight.append(k[1])
doc_num.append(counter)
counter += 1
progress.value += 1
df = pd.DataFrame(
{"Document": doc_num, "Topic": topic_num, "Topic Weight": topic_weight}
).sort_values(by="Topic")
df = df.pivot(index="Document", columns="Topic", values="Topic Weight").fillna(
0
)
df.columns = ["Topic_" + str(i) for i in df.columns]
Dominant_Topic = []
for i in range(0, len(df)):
s = df.iloc[i].max()
d = list(df.iloc[i]).index(s)
v = df.columns[d]
v = v.replace("_", " ")
Dominant_Topic.append(v)
df["Dominant_Topic"] = Dominant_Topic
progress.value += 1
if verbose:
clear_output()
bb_ = pd.concat([data_, df], axis=1)
elif mod_type == "nmf":
"""
this section duplicates the NMF preprocessing in create_model() and will go
away in a future release through better handling
"""
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.decomposition import NMF
from sklearn.preprocessing import normalize
text_join = []
for i in text:
word = " ".join(i)
text_join.append(word)
progress.value += 1
vectorizer = CountVectorizer(analyzer="word", max_features=5000)
x_counts = vectorizer.fit_transform(text_join)
transformer = TfidfTransformer(smooth_idf=False)
x_tfidf = transformer.fit_transform(x_counts)
xtfidf_norm = normalize(x_tfidf, norm="l1", axis=1)
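# this preprocessing mirrors the NMF branch of create_model() so that
# fit_transform below produces topic weights on the same representation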
"""
section ends
"""
bb = list(model.fit_transform(xtfidf_norm))
col_names = []
for i in range(len(bb[0])):
a = "Topic_" + str(i)
col_names.append(a)
Dominant_Topic = []
for i in bb:
progress.value += 1
max_ = max(i)
max_ = list(i).index(max_)
Dominant_Topic.append("Topic " + str(max_))
pdt = []
for i in range(0, len(bb)):
l = max(bb[i]) / sum(bb[i])
pdt.append(round(l, 2))
progress.value += 1
bb = pd.DataFrame(bb, columns=col_names)
bb_ = pd.concat([data_, bb], axis=1)
dt_ = pd.DataFrame(Dominant_Topic, columns=["Dominant_Topic"])
bb_ = pd.concat([bb_, dt_], axis=1)
pdt_ = pd.DataFrame(pdt, columns=["Perc_Dominant_Topic"])
bb_ = pd.concat([bb_, pdt_], axis=1)
progress.value += 1
if verbose:
clear_output()
logger.info(str(bb_.shape))
logger.info(
"assign_model() succesfully completed......................................"
)
return bb_
def plot_model(model=None, plot="frequency", topic_num=None, save=False, system=True, display_format=None):
"""
This function takes a trained model object (optional) and returns a plot based
on the inferred dataset, by internally calling assign_model before generating
the plot. If no model is passed, the plot is generated on the entire dataset
instead of at the topic level, so plot_model can be used with or without a model.
When a trained model object is passed, the plot is based on the first topic,
i.e. 'Topic 0'; this can be changed using the topic_num param.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> plot_model(lda, plot = 'frequency')
model: object, default = None
Trained Model Object
plot: str, default = 'frequency'
List of available plots (ID - Name):
* Word Token Frequency - 'frequency'
* Word Distribution Plot - 'distribution'
* Bigram Frequency Plot - 'bigram'
* Trigram Frequency Plot - 'trigram'
* Sentiment Polarity Plot - 'sentiment'
* Part of Speech Frequency - 'pos'
* t-SNE (3d) Dimension Plot - 'tsne'
* Topic Model (pyLDAvis) - 'topic_model'
* Topic Infer Distribution - 'topic_distribution'
* Wordcloud - 'wordcloud'
* UMAP Dimensionality Plot - 'umap'
topic_num : str, default = None
Topic number to be passed as a string. If set to None, default generation will
be on 'Topic 0'
save: bool, default = False
Plot is saved as png file in local directory when save parameter set to True.
system: bool, default = True
Must remain True at all times. Only to be changed by internal functions.
display_format: str, default = None
To display plots in Streamlit (https://www.streamlit.io/), set this to 'streamlit'.
Currently, not all plots are supported.
Returns:
None
Warnings
--------
- The 'pos' and 'umap' plots are not available at the topic level, so the model
parameter is ignored and the result is always based on the entire training corpus.
- The 'topic_model' plot is based on the pyLDAvis implementation, so it is not
available for model = 'lsi', 'rp' and 'nmf'.
"""
# exception checking
import sys
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing plot_model()")
logger.info(
"""plot_model(model={}, plot={}, topic_num={}, save={}, system={})""".format(
str(model), str(plot), str(topic_num), str(save), str(system)
)
)
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
# setting default of topic_num
if model is not None and topic_num is None:
topic_num = "Topic 0"
logger.info("Topic selected. topic_num : " + str(topic_num))
"""
exception handling starts here
"""
# determine model type
if model is not None:
mod = str(type(model))
if "LdaModel" in mod:
mod_type = "lda"
elif "LdaMulticore" in mod:
mod_type = "lda"
elif "LsiModel" in mod:
mod_type = "lsi"
elif "NMF" in mod:
mod_type = "nmf"
elif "HdpModel" in mod:
mod_type = "hdp"
elif "RpModel" in mod:
mod_type = "rp"
else:
mod_type = None
logger.info("Checking exceptions")
# plot checking
allowed_plots = [
"frequency",
"distribution",
"bigram",
"trigram",
"sentiment",
"pos",
"tsne",
"topic_model",
"topic_distribution",
"wordcloud",
"umap",
]
if plot not in allowed_plots:
sys.exit(
"(Value Error): Plot Not Available. Please see docstring for list of available plots."
)
# plots without topic model
if model is None:
not_allowed_wm = ["tsne", "topic_model", "topic_distribution"]
if plot in not_allowed_wm:
sys.exit(
"(Type Error): Model parameter Missing. Plot not supported without specific model passed in as Model param."
)
# handle topic_model plot error
if plot == "topic_model":
not_allowed_tm = ["lsi", "rp", "nmf"]
if mod_type in not_allowed_tm:
sys.exit(
"(Type Error): Model not supported for plot = topic_model. Please see docstring for list of available models supported for topic_model."
)
# checking display_format parameter
plot_formats = [None, "streamlit"]
if display_format not in plot_formats:
raise ValueError("display_format can only be None or 'streamlit'.")
if display_format == "streamlit":
try:
import streamlit as st
except ImportError:
raise ImportError(
"It appears that streamlit is not installed. Do: pip install streamlit"
)
"""
error handling ends here
"""
logger.info("Importing libraries")
# import dependencies
import pandas as pd
import numpy
# import cufflinks
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
# save parameter
if save:
save_param = True
else:
save_param = False
logger.info("save_param set to " + str(save_param))
logger.info("plot type: " + str(plot))
if plot == "frequency":
try:
from sklearn.feature_extraction.text import CountVectorizer
def get_top_n_words(corpus, n=None):
vec = CountVectorizer()
logger.info("Fitting CountVectorizer()")
bag_of_words = vec.fit_transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [
(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()
]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
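# get_top_n_words sums the document-term matrix column-wise to get corpus-level
# term frequencies, then returns the n most frequent (word, count) pairs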
logger.info("Rendering Visual")
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
common_words = get_top_n_words(data_[target_], n=100)
df2 = pd.DataFrame(common_words, columns=["Text", "count"])
if display_format=="streamlit":
df3 = (
df2.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 words after removing stop words",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df2.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 words after removing stop words",
asFigure=save_param,
)
)
else:
title = (
str(topic_num) + ": " + "Top 100 words after removing stop words"
)
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
common_words = get_top_n_words(filtered_df[target_], n=100)
df2 = pd.DataFrame(common_words, columns=["Text", "count"])
if display_format=="streamlit":
df3 = (
df2.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df2.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=save_param,
)
)
logger.info("Visual Rendered Successfully")
if save:
df3.write_html("Word Frequency.html")
logger.info("Saving 'Word Frequency.html' in current active directory")
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "distribution":
try:
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
b = data_[target_].apply(lambda x: len(str(x).split()))
b = pd.DataFrame(b)
logger.info("Rendering Visual")
if display_format=="streamlit":
b = b[target_].iplot(
kind="hist",
bins=100,
xTitle="word count",
linecolor="black",
yTitle="count",
title="Word Count Distribution",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(b)
else:
b = b[target_].iplot(
kind="hist",
bins=100,
xTitle="word count",
linecolor="black",
yTitle="count",
title="Word Count Distribution",
asFigure=save_param
)
else:
title = str(topic_num) + ": " + "Word Count Distribution"
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
b = filtered_df[target_].apply(lambda x: len(str(x).split()))
b = pd.DataFrame(b)
logger.info("Rendering Visual")
if display_format=="streamlit":
b = b[target_].iplot(
kind="hist",
bins=100,
xTitle="word count",
linecolor="black",
yTitle="count",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(b)
else:
b = b[target_].iplot(
kind="hist",
bins=100,
xTitle="word count",
linecolor="black",
yTitle="count",
title=title,
asFigure=save_param
)
logger.info("Visual Rendered Successfully")
if save:
b.write_html("Distribution.html")
logger.info("Saving 'Distribution.html' in current active directory")
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "bigram":
try:
from sklearn.feature_extraction.text import CountVectorizer
def get_top_n_bigram(corpus, n=None):
logger.info("Fitting CountVectorizer()")
vec = CountVectorizer(ngram_range=(2, 2)).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [
(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()
]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
common_words = get_top_n_bigram(data_[target_], 100)
df3 = pd.DataFrame(common_words, columns=["Text", "count"])
logger.info("Rendering Visual")
if display_format=="streamlit":
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 bigrams after removing stop words",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 bigrams after removing stop words",
asFigure=save_param
)
)
else:
title = (
str(topic_num) + ": " + "Top 100 bigrams after removing stop words"
)
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
common_words = get_top_n_bigram(filtered_df[target_], 100)
df3 = pd.DataFrame(common_words, columns=["Text", "count"])
logger.info("Rendering Visual")
if display_format=="streamlit":
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=save_param
)
)
logger.info("Visual Rendered Successfully")
if save:
df3.write_html("Bigram.html")
logger.info("Saving 'Bigram.html' in current active directory")
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "trigram":
try:
from sklearn.feature_extraction.text import CountVectorizer
def get_top_n_trigram(corpus, n=None):
vec = CountVectorizer(ngram_range=(3, 3)).fit(corpus)
logger.info("Fitting CountVectorizer()")
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [
(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()
]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
common_words = get_top_n_trigram(data_[target_], 100)
df3 = pd.DataFrame(common_words, columns=["Text", "count"])
logger.info("Rendering Visual")
if display_format=="streamlit":
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 trigrams after removing stop words",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title="Top 100 trigrams after removing stop words",
asFigure=save_param
)
)
else:
title = (
str(topic_num) + ": " + "Top 100 trigrams after removing stop words"
)
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
common_words = get_top_n_trigram(filtered_df[target_], 100)
df3 = pd.DataFrame(common_words, columns=["Text", "count"])
logger.info("Rendering Visual")
if display_format=="streamlit":
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
)
st.write(df3)
else:
df3 = (
df3.groupby("Text")
.sum()["count"]
.sort_values(ascending=False)
.iplot(
kind="bar",
yTitle="Count",
linecolor="black",
title=title,
asFigure=save_param
)
)
logger.info("Visual Rendered Successfully")
if save:
df3.write_html("Trigram.html")
logger.info("Saving 'Trigram.html' in current active directory")
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "sentiment":
try:
# loading dependencies
import plotly.graph_objects as go
from textblob import TextBlob
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
sentiments = data_[target_].map(
lambda text: TextBlob(text).sentiment.polarity
)
sentiments = pd.DataFrame(sentiments)
logger.info("Rendering Visual")
if display_format=="streamlit":
sentiments = sentiments[target_].iplot(
kind="hist",
bins=50,
xTitle="polarity",
linecolor="black",
yTitle="count",
title="Sentiment Polarity Distribution",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(sentiments)
else:
sentiments = sentiments[target_].iplot(
kind="hist",
bins=50,
xTitle="polarity",
linecolor="black",
yTitle="count",
title="Sentiment Polarity Distribution",
asFigure=save_param
)
else:
title = str(topic_num) + ": " + "Sentiment Polarity Distribution"
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
sentiments = filtered_df[target_].map(
lambda text: TextBlob(text).sentiment.polarity
)
sentiments = pd.DataFrame(sentiments)
logger.info("Rendering Visual")
if display_format=="streamlit":
sentiments = sentiments[target_].iplot(
kind="hist",
bins=50,
xTitle="polarity",
linecolor="black",
yTitle="count",
title=title,
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(sentiments)
else:
sentiments = sentiments[target_].iplot(
kind="hist",
bins=50,
xTitle="polarity",
linecolor="black",
yTitle="count",
title=title,
asFigure=save_param
)
logger.info("Visual Rendered Successfully")
if save:
sentiments.write_html("Sentiments.html")
logger.info("Saving 'Sentiments.html' in current active directory")
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "pos":
from textblob import TextBlob
b = list(id2word.token2id.keys())
logger.info("Fitting TextBlob()")
blob = TextBlob(str(b))
pos_df = pd.DataFrame(blob.tags, columns=["word", "pos"])
pos_df = pos_df.loc[pos_df["pos"] != "POS"]
pos_df = pos_df.pos.value_counts()[:20]
logger.info("Rendering Visual")
if display_format=="streamlit":
pos_df = pos_df.iplot(
kind="bar",
xTitle="POS",
yTitle="count",
title="Top 20 Part-of-speech tagging for review corpus",
asFigure=True # plotly obj needs to be returned for streamlit to interpret
)
st.write(pos_df)
else:
pos_df = pos_df.iplot(
kind="bar",
xTitle="POS",
yTitle="count",
title="Top 20 Part-of-speech tagging for review corpus",
asFigure=save_param
)
logger.info("Visual Rendered Sucessfully")
if save:
pos_df.write_html("POS.html")
logger.info("Saving 'POS.html' in current active directory")
elif plot == "tsne":
logger.info(
"SubProcess assign_model() called =================================="
)
b = assign_model(model, verbose=False)
logger.info("SubProcess assign_model() end ==================================")
b.dropna(axis=0, inplace=True)  # dropping rows where Dominant_Topic is blank
c = []
for i in b.columns:
if "Topic_" in i:
a = i
c.append(a)
bb = b[c]
from sklearn.manifold import TSNE
logger.info("Fitting TSNE()")
X_embedded = TSNE(n_components=3).fit_transform(bb)
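# the per-document topic-weight vectors are embedded into 3 dimensions with t-SNE;
# the embedding is plotted below as a 3d scatter colored by Dominant_Topic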
logger.info("Sorting Dataframe")
X = pd.DataFrame(X_embedded)
X["Dominant_Topic"] = b["Dominant_Topic"]
X.sort_values(by="Dominant_Topic", inplace=True)
X.dropna(inplace=True)
logger.info("Rendering Visual")
import plotly.express as px
df = X
fig = px.scatter_3d(
df,
x=0,
y=1,
z=2,
color="Dominant_Topic",
title="3d TSNE Plot for Topic Model",
opacity=0.7,
width=900,
height=800,
)
if system:
if display_format=="streamlit":
st.write(fig)
else:
fig.show()
logger.info("Visual Rendered Successfully")
if save:
fig.write_html("TSNE.html")
logger.info("Saving 'TSNE.html' in current active directory")
elif plot == "topic_model":
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import warnings
warnings.filterwarnings("ignore")
pyLDAvis.enable_notebook()
logger.info("Preparing pyLDAvis visual")
vis = pyLDAvis.gensim.prepare(model, corpus, id2word, mds="mmds")
display(vis)
logger.info("Visual Rendered Successfully")
elif plot == "topic_distribution":
try:
iter1 = len(model.show_topics(999999))
except:
try:
iter1 = model.num_topics
except:
iter1 = model.n_components_
topic_name = []
keywords = []
for i in range(0, iter1):
try:
s = model.show_topic(i, topn=10)
topic_name.append("Topic " + str(i))
kw = []
for word_weight in s:
kw.append(word_weight[0])
keywords.append(kw)
except:
keywords.append("NA")
topic_name.append("Topic " + str(i))
keyword = []
for i in keywords:
b = ", ".join(i)
keyword.append(b)
kw_df = pd.DataFrame({"Topic": topic_name, "Keyword": keyword}).set_index(
"Topic"
)
logger.info(
"SubProcess assign_model() called =================================="
)
ass_df = assign_model(model, verbose=False)
logger.info("SubProcess assign_model() end ==================================")
ass_df_pivot = ass_df.pivot_table(
index="Dominant_Topic", values="Topic_0", aggfunc="count"
)
df2 = ass_df_pivot.join(kw_df)
df2 = df2.reset_index()
df2.columns = ["Topic", "Documents", "Keyword"]
"""
sorting column starts
"""
logger.info("Sorting Dataframe")
topic_list = list(df2["Topic"])
s = []
for i in range(0, len(topic_list)):
a = int(topic_list[i].split()[1])
s.append(a)
df2["Topic"] = s
df2.sort_values(by="Topic", inplace=True)
df2.sort_values(by="Topic", inplace=True)
topic_list = list(df2["Topic"])
topic_list = list(df2["Topic"])
s = []
for i in topic_list:
a = "Topic " + str(i)
s.append(a)
df2["Topic"] = s
df2.reset_index(drop=True, inplace=True)
"""
sorting column ends
"""
logger.info("Rendering Visual")
import plotly.express as px
fig = px.bar(
df2,
x="Topic",
y="Documents",
hover_data=["Keyword"],
title="Document Distribution by Topics",
)
if system:
if display_format=="streamlit":
st.write(fig)
else:
fig.show()
logger.info("Visual Rendered Successfully")
if save:
fig.write_html("Topic Distribution.html")
logger.info("Saving 'Topic Distribution.html' in current active directory")
elif plot == "wordcloud":
try:
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
stopwords = set(STOPWORDS)
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
atext = " ".join(review for review in data_[target_])
else:
logger.info(
"SubProcess assign_model() called =================================="
)
assigned_df = assign_model(model, verbose=False)
logger.info(
"SubProcess assign_model() end =================================="
)
filtered_df = assigned_df.loc[
assigned_df["Dominant_Topic"] == topic_num
]
atext = " ".join(review for review in filtered_df[target_])
logger.info("Fitting WordCloud()")
wordcloud = WordCloud(
width=800,
height=800,
background_color="white",
stopwords=stopwords,
min_font_size=10,
).generate(atext)
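# the word cloud is generated from the concatenated documents: the whole corpus
# when topic_num is None, otherwise only documents whose Dominant_Topic matches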
# plot the WordCloud image
plt.figure(figsize=(8, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
logger.info("Rendering Visual")
if save or log_plots_param:
if system:
plt.savefig("Wordcloud.png")
else:
plt.savefig("Wordcloud.png")
plt.close()
logger.info("Saving 'Wordcloud.png' in current active directory")
else:
if display_format=="streamlit":
st.write(plt)
else:
plt.show()
logger.info("Visual Rendered Successfully")
except:
logger.warning(
"Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
sys.exit(
"(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number."
)
elif plot == "umap":
# warnings
from matplotlib.axes._axes import _log as matplotlib_axes_logger
matplotlib_axes_logger.setLevel("ERROR")
# loading dependencies
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from yellowbrick.text import UMAPVisualizer
import matplotlib.pyplot as plt
tfidf = TfidfVectorizer()
logger.info("Fitting TfidfVectorizer()")
docs = tfidf.fit_transform(data_[target_])
# Instantiate the clustering model
clusters = KMeans(n_clusters=5, random_state=seed)
logger.info("Fitting KMeans()")
clusters.fit(docs)
plt.figure(figsize=(10, 6))
umap = UMAPVisualizer(random_state=seed)
logger.info("Fitting UMAP()")
umap.fit(docs, ["c{}".format(c) for c in clusters.labels_])
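# the UMAP projection of the TF-IDF vectors is colored by the KMeans cluster
# labels (c0..c4) fitted above, giving a rough view of corpus structure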
logger.info("Rendering Visual")
if save or log_plots_param:
if system:
umap.show(outpath="UMAP.png")
else:
umap.show(outpath="UMAP.png", clear_figure=True)
logger.info("Saving 'UMAP.png' in current active directory")
else:
if display_format=="streamlit":
st.write(umap)
else:
umap.show()
logger.info("Visual Rendered Successfully")
logger.info(
"plot_model() succesfully completed......................................"
)
def tune_model(
model=None,
multi_core=False,
supervised_target=None,
estimator=None,
optimize=None,
custom_grid=None,
auto_fe=True,
fold=10,
verbose=True,
):
"""
This function tunes the ``num_topics`` parameter of a given model.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> tuned_lda = tune_model(model = 'lda', supervised_target = 'status')
model: str, default = None
Enter ID of the models available in model library (ID - Model):
* 'lda' - Latent Dirichlet Allocation
* 'lsi' - Latent Semantic Indexing
* 'hdp' - Hierarchical Dirichlet Process
* 'rp' - Random Projections
* 'nmf' - Non-Negative Matrix Factorization
multi_core: bool, default = False
True would utilize all CPU cores to parallelize and speed up model
training. Ignored when ``model`` is not 'lda'.
supervised_target: str
Name of the target column for supervised learning. If None, the model
coherence value is used as the objective function.
estimator: str, default = None
Classification (ID - Name):
* 'lr' - Logistic Regression (Default)
* 'knn' - K Nearest Neighbour
* 'nb' - Naive Bayes
* 'dt' - Decision Tree Classifier
* 'svm' - SVM - Linear Kernel
* 'rbfsvm' - SVM - Radial Kernel
* 'gpc' - Gaussian Process Classifier
* 'mlp' - Multi Level Perceptron
* 'ridge' - Ridge Classifier
* 'rf' - Random Forest Classifier
* 'qda' - Quadratic Discriminant Analysis
* 'ada' - Ada Boost Classifier
* 'gbc' - Gradient Boosting Classifier
* 'lda' - Linear Discriminant Analysis
* 'et' - Extra Trees Classifier
* 'xgboost' - Extreme Gradient Boosting
* 'lightgbm' - Light Gradient Boosting
* 'catboost' - CatBoost Classifier
Regression (ID - Name):
* 'lr' - Linear Regression (Default)
* 'lasso' - Lasso Regression
* 'ridge' - Ridge Regression
* 'en' - Elastic Net
* 'lar' - Least Angle Regression
* 'llar' - Lasso Least Angle Regression
* 'omp' - Orthogonal Matching Pursuit
* 'br' - Bayesian Ridge
* 'ard' - Automatic Relevance Determ.
* 'par' - Passive Aggressive Regressor
* 'ransac' - Random Sample Consensus
* 'tr' - TheilSen Regressor
* 'huber' - Huber Regressor
* 'kr' - Kernel Ridge
* 'svm' - Support Vector Machine
* 'knn' - K Neighbors Regressor
* 'dt' - Decision Tree
* 'rf' - Random Forest
* 'et' - Extra Trees Regressor
* 'ada' - AdaBoost Regressor
* 'gbr' - Gradient Boosting
* 'mlp' - Multi Level Perceptron
* 'xgboost' - Extreme Gradient Boosting
* 'lightgbm' - Light Gradient Boosting
* 'catboost' - CatBoost Regressor
optimize: str, default = None
For Classification tasks:
Accuracy, AUC, Recall, Precision, F1, Kappa (default = 'Accuracy')
For Regression tasks:
MAE, MSE, RMSE, R2, RMSLE, MAPE (default = 'R2')
custom_grid: list, default = None
By default, a pre-defined number of topics is iterated over to
optimize the supervised objective. To overwrite default iteration,
pass a list of num_topics to iterate over in custom_grid param.
auto_fe: bool, default = True
Automatic text feature engineering. When set to True, it generates
text-based features such as polarity, subjectivity, and word count. Ignored
when ``supervised_target`` is None.
fold: int, default = 10
Number of folds to be used in Kfold CV. Must be at least 2.
verbose: bool, default = True
Status update is not printed when verbose is set to False.
Returns:
Trained Model with optimized ``num_topics`` parameter.
Warnings
--------
- Random Projections ('rp') and Non-Negative Matrix Factorization ('nmf')
are not available for unsupervised tuning. An error is raised when 'rp' or
'nmf' is passed without supervised_target.
- Estimators using kernel based methods such as Kernel Ridge Regressor,
Automatic Relevance Determinant, Gaussian Process Classifier, Radial Basis
Support Vector Machine and Multi Level Perceptron may have longer training
times.
"""
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing tune_model()")
logger.info(
"""tune_model(model={}, multi_core={}, supervised_target={}, estimator={}, optimize={}, custom_grid={}, auto_fe={}, fold={}, verbose={})""".format(
str(model),
str(multi_core),
str(supervised_target),
str(estimator),
str(optimize),
str(custom_grid),
str(auto_fe),
str(fold),
str(verbose),
)
)
logger.info("Checking exceptions")
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
import sys
# checking for model parameter
if model is None:
sys.exit(
"(Value Error): Model parameter Missing. Please see docstring for list of available models."
)
# checking for allowed models
allowed_models = ["lda", "lsi", "hdp", "rp", "nmf"]
if model not in allowed_models:
sys.exit(
"(Value Error): Model Not Available. Please see docstring for list of available models."
)
# checking multicore type:
if type(multi_core) is not bool:
sys.exit(
"(Type Error): multi_core parameter can only take argument as True or False."
)
# check supervised target:
if supervised_target is not None:
all_col = list(data_.columns)
target = target_
all_col.remove(target)
if supervised_target not in all_col:
sys.exit(
"(Value Error): supervised_target not recognized. It can only be one of the following: "
+ str(all_col)
)
# supervised target exception handling
if supervised_target is None:
models_not_allowed = ["rp", "nmf"]
if model in models_not_allowed:
sys.exit(
"(Type Error): Model not supported for unsupervised tuning. Either supervised_target param has to be passed or different model has to be used. Please see docstring for available models."
)
# checking estimator:
if estimator is not None:
available_estimators = [
"lr",
"knn",
"nb",
"dt",
"svm",
"rbfsvm",
"gpc",
"mlp",
"ridge",
"rf",
"qda",
"ada",
"gbc",
"lda",
"et",
"lasso",
"ridge",
"en",
"lar",
"llar",
"omp",
"br",
"ard",
"par",
"ransac",
"tr",
"huber",
"kr",
"svm",
"knn",
"dt",
"rf",
"et",
"ada",
"gbr",
"mlp",
"xgboost",
"lightgbm",
"catboost",
]
if estimator not in available_estimators:
sys.exit(
"(Value Error): Estimator Not Available. Please see docstring for list of available estimators."
)
# checking optimize parameter
if optimize is not None:
available_optimizers = [
"MAE",
"MSE",
"RMSE",
"R2",
"ME",
"Accuracy",
"AUC",
"Recall",
"Precision",
"F1",
"Kappa",
]
if optimize not in available_optimizers:
sys.exit(
"(Value Error): optimize parameter Not Available. Please see docstring for list of available parameters."
)
# checking auto_fe:
if type(auto_fe) is not bool:
sys.exit(
"(Type Error): auto_fe parameter can only take argument as True or False."
)
# checking fold parameter
if type(fold) is not int:
sys.exit("(Type Error): Fold parameter only accepts integer value.")
"""
exception handling ends here
"""
logger.info("Preloading libraries")
# pre-load libraries
import pandas as pd
import ipywidgets as ipw
from ipywidgets import Output
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
logger.info("Preparing display monitor")
# progress bar
if custom_grid is None:
max_steps = 25
else:
max_steps = 10 + len(custom_grid)
progress = ipw.IntProgress(
value=0, min=0, max=max_steps, step=1, description="Processing: "
)
if verbose:
if html_param:
display(progress)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame(
[
["Initiated", ". . . . . . . . . . . . . . . . . .", timestampStr],
["Status", ". . . . . . . . . . . . . . . . . .", "Loading Dependencies"],
["Step", ". . . . . . . . . . . . . . . . . .", "Initializing"],
],
columns=["", " ", " "],
).set_index("")
monitor_out = Output()
if verbose:
if html_param:
display(monitor_out)
if verbose:
if html_param:
with monitor_out:
display(monitor, display_id="monitor")
logger.info("Importing libraries")
# General Dependencies
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from sklearn import metrics
import numpy as np
import plotly.express as px
# setting up cufflinks
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
progress.value += 1
# define the problem
if supervised_target is None:
problem = "unsupervised"
logger.info("Objective : Unsupervised")
elif data_[supervised_target].value_counts().count() == 2:
problem = "classification"
logger.info("Objective : Classification")
else:
problem = "regression"
logger.info("Objective : Regression")
# define topic_model_name
logger.info("Defining model name")
if model == "lda":
topic_model_name = "Latent Dirichlet Allocation"
elif model == "lsi":
topic_model_name = "Latent Semantic Indexing"
elif model == "hdp":
topic_model_name = "Hierarchical Dirichlet Process"
elif model == "nmf":
topic_model_name = "Non-Negative Matrix Factorization"
elif model == "rp":
topic_model_name = "Random Projections"
logger.info("Topic Model Name: " + str(topic_model_name))
# defining estimator:
logger.info("Defining supervised estimator")
if problem == "classification" and estimator is None:
estimator = "lr"
elif problem == "regression" and estimator is None:
estimator = "lr"
else:
estimator = estimator
logger.info("Estimator: " + str(estimator))
# defining optimizer:
logger.info("Defining Optimizer")
if optimize is None and problem == "classification":
optimize = "Accuracy"
elif optimize is None and problem == "regression":
optimize = "R2"
else:
optimize = optimize
logger.info("Optimize: " + str(optimize))
progress.value += 1
# creating sentiments
if problem == "classification" or problem == "regression":
logger.info("Problem : Supervised")
if auto_fe:
logger.info("auto_fe param set to True")
monitor.iloc[1, 1:] = "Feature Engineering"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
from textblob import TextBlob
monitor.iloc[2, 1:] = "Extracting Polarity"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Extracting Polarity")
polarity = data_[target_].map(
lambda text: TextBlob(text).sentiment.polarity
)
monitor.iloc[2, 1:] = "Extracting Subjectivity"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Extracting Subjectivity")
subjectivity = data_[target_].map(
lambda text: TextBlob(text).sentiment.subjectivity
)
monitor.iloc[2, 1:] = "Extracting Wordcount"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Extracting Wordcount")
word_count = [len(i) for i in text]
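# with auto_fe, polarity, subjectivity and token counts are added later as extra
# features for the supervised estimator alongside the topic weights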
progress.value += 1
# defining tuning grid
logger.info("Defining Tuning Grid")
if custom_grid is not None:
logger.info("Custom Grid used")
param_grid = custom_grid
else:
logger.info("Pre-defined Grid used")
param_grid = [2, 4, 8, 16, 32, 64, 100, 200, 300, 400]
master = []
master_df = []
monitor.iloc[1, 1:] = "Creating Topic Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
for i in param_grid:
logger.info("Fitting Model with num_topics = " + str(i))
progress.value += 1
monitor.iloc[2, 1:] = "Fitting Model With " + str(i) + " Topics"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# create and assign the model to dataset d
logger.info(
"SubProcess create_model() called =================================="
)
m = create_model(
model=model, multi_core=multi_core, num_topics=i, verbose=False
)
logger.info("SubProcess create_model() end ==================================")
logger.info(
"SubProcess assign_model() called =================================="
)
d = assign_model(m, verbose=False)
logger.info("SubProcess assign_model() end ==================================")
if problem in ["classification", "regression"] and auto_fe:
d["Polarity"] = polarity
d["Subjectivity"] = subjectivity
d["word_count"] = word_count
master.append(m)
master_df.append(d)
# topic model creation end's here
if problem == "unsupervised":
logger.info("Problem : Unsupervised")
monitor.iloc[1, 1:] = "Evaluating Topic Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
from gensim.models import CoherenceModel
logger.info("CoherenceModel imported successfully")
coherence = []
metric = []
counter = 0
for i in master:
logger.info("Evaluating Coherence with num_topics: " + str(i))
progress.value += 1
monitor.iloc[2, 1:] = (
"Evaluating Coherence With " + str(param_grid[counter]) + " Topics"
)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
model = CoherenceModel(
model=i, texts=text, dictionary=id2word, coherence="c_v"
)
model_coherence = model.get_coherence()
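# c_v coherence is computed for each candidate topic count; the model with the
# highest coherence is selected as best_model further below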
coherence.append(model_coherence)
metric.append("Coherence")
counter += 1
monitor.iloc[1, 1:] = "Compiling Results"
monitor.iloc[1, 1:] = "Finalizing"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Creating metrics dataframe")
df = pd.DataFrame(
{"# Topics": param_grid, "Score": coherence, "Metric": metric}
)
df.columns = ["# Topics", "Score", "Metric"]
sorted_df = df.sort_values(by="Score", ascending=False)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
logger.info("Rendering Visual")
fig = px.line(
df,
x="# Topics",
y="Score",
line_shape="linear",
title="Coherence Value and # of Topics",
color="Metric",
)
fig.update_layout(plot_bgcolor="rgb(245,245,245)")
fig.show()
logger.info("Visual Rendered Successfully")
# monitor = ''
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
monitor_out.clear_output()
progress.close()
best_k = np.array(sorted_df.head(1)["# Topics"])[0]
best_m = round(np.array(sorted_df.head(1)["Score"])[0], 4)
p = (
"Best Model: "
+ topic_model_name
+ " |"
+ " # Topics: "
+ str(best_k)
+ " | "
+ "Coherence: "
+ str(best_m)
)
print(p)
elif problem == "classification":
logger.info("Importing untrained Classifier")
"""
defining estimator
"""
monitor.iloc[1, 1:] = "Evaluating Topic Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
if estimator == "lr":
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=seed)
full_name = "Logistic Regression"
elif estimator == "knn":
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier()
full_name = "K Nearest Neighbours"
elif estimator == "nb":
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
full_name = "Naive Bayes"
elif estimator == "dt":
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=seed)
full_name = "Decision Tree"
elif estimator == "svm":
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(max_iter=1000, tol=0.001, random_state=seed)
full_name = "Support Vector Machine"
elif estimator == "rbfsvm":
from sklearn.svm import SVC
model = SVC(
gamma="auto", C=1, probability=True, kernel="rbf", random_state=seed
)
full_name = "RBF SVM"
elif estimator == "gpc":
from sklearn.gaussian_process import GaussianProcessClassifier
model = GaussianProcessClassifier(random_state=seed)
full_name = "Gaussian Process Classifier"
elif estimator == "mlp":
from sklearn.neural_network import MLPClassifier
model = MLPClassifier(max_iter=500, random_state=seed)
full_name = "Multi Level Perceptron"
elif estimator == "ridge":
from sklearn.linear_model import RidgeClassifier
model = RidgeClassifier(random_state=seed)
full_name = "Ridge Classifier"
elif estimator == "rf":
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=10, random_state=seed)
full_name = "Random Forest Classifier"
elif estimator == "qda":
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
model = QuadraticDiscriminantAnalysis()
full_name = "Quadratic Discriminant Analysis"
elif estimator == "ada":
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier(random_state=seed)
full_name = "AdaBoost Classifier"
elif estimator == "gbc":
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier(random_state=seed)
full_name = "Gradient Boosting Classifier"
elif estimator == "lda":
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
model = LinearDiscriminantAnalysis()
full_name = "Linear Discriminant Analysis"
elif estimator == "et":
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier(random_state=seed)
full_name = "Extra Trees Classifier"
elif estimator == "xgboost":
from xgboost import XGBClassifier
model = XGBClassifier(random_state=seed, n_jobs=-1, verbosity=0)
full_name = "Extreme Gradient Boosting"
elif estimator == "lightgbm":
import lightgbm as lgb
model = lgb.LGBMClassifier(random_state=seed)
full_name = "Light Gradient Boosting Machine"
elif estimator == "catboost":
from catboost import CatBoostClassifier
model = CatBoostClassifier(
random_state=seed, silent=True
) # Silent is True to suppress CatBoost iteration results
full_name = "CatBoost Classifier"
logger.info(str(full_name) + " Imported Successfully")
progress.value += 1
"""
start model building here
"""
acc = []
auc = []
recall = []
prec = []
kappa = []
f1 = []
for i in range(0, len(master_df)):
progress.value += 1
param_grid_val = param_grid[i]
logger.info(
"Training supervised model with num_topics: " + str(param_grid_val)
)
monitor.iloc[2, 1:] = (
"Evaluating Classifier With " + str(param_grid_val) + " Topics"
)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# prepare the dataset for supervised problem
d = master_df[i]
d.dropna(axis=0, inplace=True) # dropping rows where Dominant_Topic is blank
d.drop([target_], inplace=True, axis=1)
d = pd.get_dummies(d)
# split the dataset
X = d.drop(supervised_target, axis=1)
y = d[supervised_target]
# fit the model
logger.info("Fitting Model")
model.fit(X, y)
# generate the prediction and evaluate metric
logger.info("Generating Cross Val Predictions")
pred = cross_val_predict(model, X, y, cv=fold, method="predict")
acc_ = metrics.accuracy_score(y, pred)
acc.append(acc_)
recall_ = metrics.recall_score(y, pred)
recall.append(recall_)
precision_ = metrics.precision_score(y, pred)
prec.append(precision_)
kappa_ = metrics.cohen_kappa_score(y, pred)
kappa.append(kappa_)
f1_ = metrics.f1_score(y, pred)
f1.append(f1_)
if hasattr(model, "predict_proba"):
pred_ = cross_val_predict(model, X, y, cv=fold, method="predict_proba")
pred_prob = pred_[:, 1]
auc_ = metrics.roc_auc_score(y, pred_prob)
auc.append(auc_)
else:
auc.append(0)
monitor.iloc[1, 1:] = "Compiling Results"
monitor.iloc[1, 1:] = "Finalizing"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Creating metrics dataframe")
df = pd.DataFrame(
{
"# Topics": param_grid,
"Accuracy": acc,
"AUC": auc,
"Recall": recall,
"Precision": prec,
"F1": f1,
"Kappa": kappa,
}
)
sorted_df = df.sort_values(by=optimize, ascending=False)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
progress.value += 1
logger.info("Rendering Visual")
sd = pd.melt(
df,
id_vars=["# Topics"],
value_vars=["Accuracy", "AUC", "Recall", "Precision", "F1", "Kappa"],
var_name="Metric",
value_name="Score",
)
fig = px.line(
sd,
x="# Topics",
y="Score",
color="Metric",
line_shape="linear",
range_y=[0, 1],
)
fig.update_layout(plot_bgcolor="rgb(245,245,245)")
title = str(full_name) + " Metrics and # of Topics"
fig.update_layout(
title={
"text": title,
"y": 0.95,
"x": 0.45,
"xanchor": "center",
"yanchor": "top",
}
)
fig.show()
logger.info("Visual Rendered Successfully")
# monitor = ''
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
monitor_out.clear_output()
progress.close()
best_k = np.array(sorted_df.head(1)["# Topics"])[0]
best_m = round(np.array(sorted_df.head(1)[optimize])[0], 4)
p = (
"Best Model: "
+ topic_model_name
+ " |"
+ " # Topics: "
+ str(best_k)
+ " | "
+ str(optimize)
+ " : "
+ str(best_m)
)
print(p)
elif problem == "regression":
logger.info("Importing untrained Regressor")
"""
defining estimator
"""
monitor.iloc[1, 1:] = "Evaluating Topic Model"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
if estimator == "lr":
from sklearn.linear_model import LinearRegression
model = LinearRegression()
full_name = "Linear Regression"
elif estimator == "lasso":
from sklearn.linear_model import Lasso
model = Lasso(random_state=seed)
full_name = "Lasso Regression"
elif estimator == "ridge":
from sklearn.linear_model import Ridge
model = Ridge(random_state=seed)
full_name = "Ridge Regression"
elif estimator == "en":
from sklearn.linear_model import ElasticNet
model = ElasticNet(random_state=seed)
full_name = "Elastic Net"
elif estimator == "lar":
from sklearn.linear_model import Lars
model = Lars()
full_name = "Least Angle Regression"
elif estimator == "llar":
from sklearn.linear_model import LassoLars
model = LassoLars()
full_name = "Lasso Least Angle Regression"
elif estimator == "omp":
from sklearn.linear_model import OrthogonalMatchingPursuit
model = OrthogonalMatchingPursuit()
full_name = "Orthogonal Matching Pursuit"
elif estimator == "br":
from sklearn.linear_model import BayesianRidge
model = BayesianRidge()
full_name = "Bayesian Ridge Regression"
elif estimator == "ard":
from sklearn.linear_model import ARDRegression
model = ARDRegression()
full_name = "Automatic Relevance Determination"
elif estimator == "par":
from sklearn.linear_model import PassiveAggressiveRegressor
model = PassiveAggressiveRegressor(random_state=seed)
full_name = "Passive Aggressive Regressor"
elif estimator == "ransac":
from sklearn.linear_model import RANSACRegressor
model = RANSACRegressor(random_state=seed)
full_name = "Random Sample Consensus"
elif estimator == "tr":
from sklearn.linear_model import TheilSenRegressor
model = TheilSenRegressor(random_state=seed)
full_name = "TheilSen Regressor"
elif estimator == "huber":
from sklearn.linear_model import HuberRegressor
model = HuberRegressor()
full_name = "Huber Regressor"
elif estimator == "kr":
from sklearn.kernel_ridge import KernelRidge
model = KernelRidge()
full_name = "Kernel Ridge"
elif estimator == "svm":
from sklearn.svm import SVR
model = SVR()
full_name = "Support Vector Regression"
elif estimator == "knn":
from sklearn.neighbors import KNeighborsRegressor
model = KNeighborsRegressor()
full_name = "Nearest Neighbors Regression"
elif estimator == "dt":
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor(random_state=seed)
full_name = "Decision Tree Regressor"
elif estimator == "rf":
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor(random_state=seed)
full_name = "Random Forest Regressor"
elif estimator == "et":
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor(random_state=seed)
full_name = "Extra Trees Regressor"
elif estimator == "ada":
from sklearn.ensemble import AdaBoostRegressor
model = AdaBoostRegressor(random_state=seed)
full_name = "AdaBoost Regressor"
elif estimator == "gbr":
from sklearn.ensemble import GradientBoostingRegressor
model = GradientBoostingRegressor(random_state=seed)
full_name = "Gradient Boosting Regressor"
elif estimator == "mlp":
from sklearn.neural_network import MLPRegressor
model = MLPRegressor(random_state=seed)
full_name = "MLP Regressor"
elif estimator == "xgboost":
from xgboost import XGBRegressor
model = XGBRegressor(random_state=seed, n_jobs=-1, verbosity=0)
full_name = "Extreme Gradient Boosting Regressor"
elif estimator == "lightgbm":
import lightgbm as lgb
model = lgb.LGBMRegressor(random_state=seed)
full_name = "Light Gradient Boosting Machine"
elif estimator == "catboost":
from catboost import CatBoostRegressor
model = CatBoostRegressor(random_state=seed, silent=True)
full_name = "CatBoost Regressor"
logger.info(str(full_name) + " Imported Successfully")
progress.value += 1
"""
start model building here
"""
score = []
metric = []
for i in range(0, len(master_df)):
progress.value += 1
param_grid_val = param_grid[i]
logger.info(
"Training supervised model with num_topics: " + str(param_grid_val)
)
monitor.iloc[2, 1:] = (
"Evaluating Regressor With " + str(param_grid_val) + " Topics"
)
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
# prepare the dataset for supervised problem
d = master_df[i]
d.dropna(axis=0, inplace=True) # dropping rows where Dominant_Topic is blank
d.drop([target_], inplace=True, axis=1)
d = pd.get_dummies(d)
# split the dataset
X = d.drop(supervised_target, axis=1)
y = d[supervised_target]
# fit the model
logger.info("Fitting Model")
model.fit(X, y)
# generate the prediction and evaluate metric
logger.info("Generating Cross Val Predictions")
pred = cross_val_predict(model, X, y, cv=fold, method="predict")
if optimize == "R2":
r2_ = metrics.r2_score(y, pred)
score.append(r2_)
elif optimize == "MAE":
mae_ = metrics.mean_absolute_error(y, pred)
score.append(mae_)
elif optimize == "MSE":
mse_ = metrics.mean_squared_error(y, pred)
score.append(mse_)
elif optimize == "RMSE":
mse_ = metrics.mean_squared_error(y, pred)
rmse_ = np.sqrt(mse_)
score.append(rmse_)
elif optimize == "ME":
max_error_ = metrics.max_error(y, pred)
score.append(max_error_)
metric.append(str(optimize))
monitor.iloc[1, 1:] = "Compiling Results"
monitor.iloc[1, 1:] = "Finalizing"
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
logger.info("Creating metrics dataframe")
df = pd.DataFrame({"# Topics": param_grid, "Score": score, "Metric": metric})
df.columns = ["# Topics", optimize, "Metric"]
# sorting to return best model
if optimize == "R2":
sorted_df = df.sort_values(by=optimize, ascending=False)
else:
sorted_df = df.sort_values(by=optimize, ascending=True)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
logger.info("Rendering Visual")
fig = px.line(
df,
x="# Topics",
y=optimize,
line_shape="linear",
title=str(full_name) + " Metrics and # of Topics",
color="Metric",
)
fig.update_layout(plot_bgcolor="rgb(245,245,245)")
progress.value += 1
# monitor = ''
if verbose:
if html_param:
update_display(monitor, display_id="monitor")
monitor_out.clear_output()
progress.close()
fig.show()
logger.info("Visual Rendered Successfully")
best_k = np.array(sorted_df.head(1)["# Topics"])[0]
best_m = round(np.array(sorted_df.head(1)[optimize])[0], 4)
p = (
"Best Model: "
+ topic_model_name
+ " |"
+ " # Topics: "
+ str(best_k)
+ " | "
+ str(optimize)
+ " : "
+ str(best_m)
)
print(p)
logger.info(str(best_model))
logger.info(
"tune_model() succesfully completed......................................"
)
return best_model
def evaluate_model(model):
"""
This function displays a user interface for analyzing performance of a trained
model. It calls the ``plot_model`` function internally.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> evaluate_model(lda)
model: object, default = none
A trained model object should be passed.
Returns:
None
"""
from ipywidgets import widgets
from ipywidgets.widgets import interact, fixed, interact_manual
import numpy as np
"""
generate sorted list
"""
try:
n_topic_assigned = len(model.show_topics())
except:
try:
n_topic_assigned = model.num_topics
except:
n_topic_assigned = model.n_components
final_list = []
for i in range(0, n_topic_assigned):
final_list.append("Topic " + str(i))
a = widgets.ToggleButtons(
options=[
("Frequency Plot", "frequency"),
("Bigrams", "bigram"),
("Trigrams", "trigram"),
("Sentiment Polarity", "sentiment"),
("Word Cloud", "wordcloud"),
],
description="Plot Type:",
disabled=False,
button_style="", # 'success', 'info', 'warning', 'danger' or ''
icons=[""],
)
b = widgets.Dropdown(options=final_list, description="Topic #:", disabled=False)
d = interact_manual(
plot_model,
model=fixed(model),
plot=a,
topic_num=b,
save=fixed(False),
system=fixed(True),
display_format=fixed(None),
)
def save_model(model, model_name, verbose=True):
"""
This function saves the trained model object into the current active
directory as a pickle file for later use.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> save_model(lda, 'saved_lda_model')
model: object
A trained model object should be passed.
model_name: str
Name of pickle file to be passed as a string.
verbose: bool, default = True
When set to False, success message is not printed.
Returns:
Tuple of the model object and the filename.
"""
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing save_model()")
logger.info(
"""save_model(model={}, model_name={}, verbose={})""".format(
str(model), str(model_name), str(verbose)
)
)
import joblib
model_name = model_name + ".pkl"
joblib.dump(model, model_name)
if verbose:
print("Model Succesfully Saved")
logger.info(str(model))
logger.info(
"save_model() succesfully completed......................................"
)
return (model, model_name)
def load_model(model_name, verbose=True):
"""
This function loads a previously saved model.
Example
-------
>>> from pycaret.nlp import load_model
>>> saved_lda = load_model('saved_lda_model')
model_name: str
Name of pickle file to be passed as a string.
verbose: bool, default = True
When set to False, success message is not printed.
Returns:
Trained Model
"""
import joblib
model_name = model_name + ".pkl"
if verbose:
print("Model Sucessfully Loaded")
return joblib.load(model_name)
def models():
"""
Returns table of models available in model library.
Example
-------
>>> from pycaret.nlp import models
>>> all_models = models()
Returns:
pandas.DataFrame
"""
import pandas as pd
model_id = ["lda", "lsi", "hdp", "rp", "nmf"]
model_name = [
"Latent Dirichlet Allocation",
"Latent Semantic Indexing",
"Hierarchical Dirichlet Process",
"Random Projections",
"Non-Negative Matrix Factorization",
]
model_ref = [
"gensim/models/ldamodel",
"gensim/models/lsimodel",
"gensim/models/hdpmodel",
"gensim/models/rpmodel",
"sklearn.decomposition.NMF",
]
df = pd.DataFrame({"ID": model_id, "Name": model_name, "Reference": model_ref})
df.set_index("ID", inplace=True)
return df
def get_logs(experiment_name=None, save=False):
"""
Returns a table of experiment logs. Only works when ``log_experiment``
is True when initializing the ``setup`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en', log_experiment = True)
>>> lda = create_model('lda')
>>> exp_logs = get_logs()
experiment_name: str, default = None
When None, the current active run is used.
save: bool, default = False
When set to True, csv file is saved in current working directory.
Returns:
pandas.DataFrame
"""
import sys
if experiment_name is None:
exp_name_log_ = exp_name_log
else:
exp_name_log_ = experiment_name
import mlflow
from mlflow.tracking import MlflowClient
client = MlflowClient()
if client.get_experiment_by_name(exp_name_log_) is None:
sys.exit(
"No active run found. Check logging parameter in setup or to get logs for inactive run pass experiment_name."
)
exp_id = client.get_experiment_by_name(exp_name_log_).experiment_id
runs = mlflow.search_runs(exp_id)
if save:
file_name = str(exp_name_log_) + "_logs.csv"
runs.to_csv(file_name, index=False)
return runs
def get_config(variable):
"""
This function retrieves the global variables created when initializing the
``setup`` function. Following variables are accessible:
- text: Tokenized words as a list with length = # documents
- data_: pandas.DataFrame containing text after all processing
- corpus: List containing tuples of id to word mapping
- id2word: gensim.corpora.dictionary.Dictionary
- seed: random state set through session_id
- target_: Name of column containing text. 'en' by default.
- html_param: html_param configured through setup
- exp_name_log: Name of experiment set through setup
- logging_param: log_experiment param set through setup
- log_plots_param: log_plots param set through setup
- USI: Unique session ID parameter set through setup
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> text = get_config('text')
Returns:
Global variable
"""
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing get_config()")
logger.info("""get_config(variable={})""".format(str(variable)))
if variable == "text":
global_var = text
if variable == "data_":
global_var = data_
if variable == "corpus":
global_var = corpus
if variable == "id2word":
global_var = id2word
if variable == "seed":
global_var = seed
if variable == "target_":
global_var = target_
if variable == "html_param":
global_var = html_param
if variable == "exp_name_log":
global_var = exp_name_log
if variable == "logging_param":
global_var = logging_param
if variable == "log_plots_param":
global_var = log_plots_param
if variable == "USI":
global_var = USI
logger.info("Global variable: " + str(variable) + " returned")
logger.info(
"get_config() succesfully completed......................................"
)
return global_var
def set_config(variable, value):
"""
This function resets the global variables. Following variables are
accessible:
- text: Tokenized words as a list with length = # documents
- data_: pandas.DataFrame containing text after all processing
- corpus: List containing tuples of id to word mapping
- id2word: gensim.corpora.dictionary.Dictionary
- seed: random state set through session_id
- target_: Name of column containing text. 'en' by default.
- html_param: html_param configured through setup
- exp_name_log: Name of experiment set through setup
- logging_param: log_experiment param set through setup
- log_plots_param: log_plots param set through setup
- USI: Unique session ID parameter set through setup
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> from pycaret.nlp import *
>>> exp_name = setup(data = kiva, target = 'en')
>>> set_config('seed', 123)
Returns:
None
"""
import logging
try:
hasattr(logger, "name")
except:
logger = logging.getLogger("logs")
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler("logs.log")
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing set_config()")
logger.info(
"""set_config(variable={}, value={})""".format(str(variable), str(value))
)
if variable == "text":
global text
text = value
if variable == "data_":
global data_
data_ = value
if variable == "corpus":
global corpus
corpus = value
if variable == "id2word":
global id2word
id2word = value
if variable == "seed":
global seed
seed = value
if variable == "html_param":
global html_param
html_param = value
if variable == "exp_name_log":
global exp_name_log
exp_name_log = value
if variable == "logging_param":
global logging_param
logging_param = value
if variable == "log_plots_param":
global log_plots_param
log_plots_param = value
if variable == "USI":
global USI
USI = value
logger.info("Global variable: " + str(variable) + " updated")
logger.info(
"set_config() succesfully completed......................................"
)
def get_topics(data, text, model=None, num_topics=4):
"""
This is a convenience function that runs ``setup``, ``create_model`` and
``assign_model`` in a single call, so topics can be assigned from any external
environment without requiring prior setup initialization.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> topics = get_topics(data = kiva, text = 'en', model = 'lda', num_topics = 4)
"""
if model is None:
model = "lda"
s = setup(data=data, target=text)
c = create_model(model=model, num_topics=num_topics, verbose=False)
dataset = assign_model(c, verbose=False)
return dataset
| [] |
import re
from collections import Counter
from collections.abc import Awaitable
from collections.abc import Callable
from typing import Literal
from pydantic import BaseModel
from pydantic import BaseSettings
from structlog.stdlib import get_logger
from transformers.models.auto.tokenization_auto import AutoTokenizer
from transformers.models.gpt2.tokenization_gpt2_fast import GPT2TokenizerFast
from ice.apis.openai import openai_complete
from ice.evaluation.evaluate_recipe_result import RecipeResult
from ice.metrics.gold_standards import list_experiments
from ice.paper import Paper
from ice.paper import Paragraph
from ice.paper import split_sentences
from ice.recipe import Recipe
from ice.utils import filter_async
from ice.utils import map_async
from ice.utils import max_by_value
from ..trace import recorder
from ..trace import trace
gpt2_tokenizer: GPT2TokenizerFast = AutoTokenizer.from_pretrained("gpt2")
def n_tokens(prompt: str) -> int:
tokenized = gpt2_tokenizer(prompt)
return len(tokenized.input_ids)
def n_remaining_tokens(prompt: str, ensure_min: int, capacity=4097):
remaining = capacity - n_tokens(prompt)
if remaining < ensure_min:
raise ValueError(
f"Prompt too long by {ensure_min - remaining} tokens: {prompt}"
)
return remaining
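# Illustrative usage sketch (added for clarity; not part of the original recipe).
# The figures are assumptions: the exact token count depends on the GPT-2
# byte-pair tokenizer loaded above.
#
#     prompt = "Summarize the adherence information in this paper."
#     budget = n_remaining_tokens(prompt, ensure_min=100)
#     # budget == 4097 - n_tokens(prompt); it can then be passed as the
#     # max_tokens argument of a completion call.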
log = get_logger()
def extract_numbers(text: str) -> list[str]:
words = text.split()
set_number_str = {
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"ten",
"teen",
"twenty",
"thirty",
"forty",
"fifty",
"sixty",
"seventy",
"eighty",
"ninety",
"hundred",
"thousand",
"million",
"billion",
"trillion",
"quadrillion",
"quintillion",
}
number_strings = list(filter(lambda word: word.lower() in set_number_str, words))
numbers_set = set("0123456789")
number_strings += list(
filter(lambda x: set(x).intersection(numbers_set) != set(), words)
)
# Remove parentheses
remove_parentheses = (
lambda s: s.replace("(", "")
.replace(")", "")
.replace("...", "")
.replace("..", "")
)
number_strings = list(map(remove_parentheses, number_strings))
# Remove "," or "." from the end of the number string
for i, number in enumerate(number_strings):
if number[-1] == "," or number[-1] == ".":
number_strings[i] = number[:-1]
return number_strings
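# Illustrative example (added for clarity; not part of the original recipe).
# The output follows from the heuristics above:
#
#     extract_numbers("Forty-eight (48) participants were enrolled; 45 were randomized.")
#     # -> ["48", "45"]  (digit-bearing tokens with parentheses stripped;
#     #    "Forty-eight" is not matched because hyphenated spelled-out numbers
#     #    are not in set_number_str)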
N_TO_STRING: dict[int, str] = {
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine",
10: "ten",
}
def paragraphs_to_numbered_list(paragraphs: list[str]) -> str:
return "\n".join(
f"{n}. {paragraph}".strip() for n, paragraph in enumerate(paragraphs, 1)
)
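# Illustrative example (added for clarity; not part of the original recipe):
#
#     paragraphs_to_numbered_list(["First excerpt.", "Second excerpt."])
#     # -> "1. First excerpt.\n2. Second excerpt."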
def even_shorter_intervention_generation_prompt(
paragraphs: list[str], intervention: str, final_reasoning: str | None = None
) -> str:
paragraph_n = N_TO_STRING[len(paragraphs)]
prefix = f"""From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence," "attrition," or "compliance". If too many participants failed to receive the intervention or perform it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. For simple interventions that are accomplished in one shot (e.g., having a group of college students complete a test in a lab that takes 30 minutes), the study doesn't discuss adherence unless something unusual happened, and we can safely assume that everyone in the sample completed the study. Sometimes studies provide specific numbers or percentages of people who dropped out (attrited), and sometimes they only provide qualitative descriptions, such as saying that adherence was "generally good." Often, papers are genuinely unclear, and we can only conclude that there is not enough information in the paper for us to know anything about adherence or compliance.
Let's look at excerpts from six different papers to see what information, if any, they provide about the study's adherence, attrition, or compliance. We'll have to identify what each extract tells us about adherence (some extracts may only discuss methodology or results, telling us nothing about adherence), and for some, we may have to conclude that the attrition or compliance is simply unclear.
First, consider these three excerpts from a paper studying the Tumaini game:
1. Intervention arm participants completed a 45-minute informational onboarding session, including instructions on the interface, technology, and game content. They were instructed to play at least 1 hour per day for the 16 days of the study and asked not to share their own gameplay profile with others. The game interface allows for 5 additional players' profiles so that others may play without compromising the enrolled player's data. Intervention participants were provided with a phone with the game preloaded and used it at their own pace for the duration of the intervention. Control participants received standard of care, namely no additional intervention beyond any existing sex education from family, school, and peers. No specific data on the content or source of this education were collected from participants. All study smartphones were returned by the participants at the end of the intervention period.
2. Preliminary cleaning of survey data was conducted in MS Excel, with additional cleaning and all analyses completed using SAS version 9.4 (SAS Institute Inc., Cary, NC, USA). All control arm participants were included in analyses. One participant from the intervention arm was removed from analyses of effect at T2 due to delayed completion of the T2 survey. His data were retained for T1-T3 analyses, as he completed all other study activities on time. Descriptive statistics on demographic questions and game feedback questions were computed.
3. We recruited and enrolled 60 adolescent participants. Half of the participants were allocated to the intervention arm. All adolescents who were recruited completed all 3 study visits, and all intervention arm participants initiated gameplay. Participant demographics are presented in Table 3 . There were no significant demographic differences between the two arms. Preliminary calculations of exposure indicate that the intervention arm played Tumaini a mean of approximately 27 hours over the 16 days of the intervention.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt describes the study's methodology, but does not tell us how many or how well participants followed the instructions, so it does not inform us about adherence. The second excerpt tells us that all control arm participants were included in analysis, but one intervention arm participant was removed from the analysis of effect at T2 but included in the T3 analysis; this is attrition information. The third excerpt says that all participants completed all visits and that all intervention arm participants initiated gameplay; this is adherence information.
Here's all the information in this paper about adherence, attrition, and compliance: All participants completed all visits, and all intervention arm participants initiated gameplay. One intervention arm participant was not included in the T2 analysis but was included in the T3 analysis.
Second, consider these three excerpts from a paper studying antioxidant/anti-inflammatory supplement containing lemon verbena extract and omega-3 fatty acid:
1. Flow chart showing the dropout rate at different timepoints in the study.
2. Forty-eight (48) participants were enrolled for screening evaluation (Fig. 1 ) and after 3 exclusions, 45 participants were randomly assigned either to placebo or nutritional supplement groups, n = 22 and n = 23, respectively. Of these, 14 participants were withdrawn during the study for different reasons; there were 10 dropouts in the placebo group and 4 dropouts in the supplement group (treatment refusal, irregular treatment, starting on medication, or occurrence of an adverse event [AE]). Finally, 31 participants completed the study (12 in the placebo and 19 in the supplement group; Fig. 1 ).
3. Only 1 patient reported an AE (i.e., a heartburn sensation). The subject, who was in the placebo group, stopped the treatment immediately and was excluded from the study (Table 1 ). No major complications were reported by this subject.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt refers to a flow chart showing the dropout rate, but since we do not have the figure here, we cannot conclude anything from this about the study's attrition. The second excerpt says that there were 10 dropouts in the placebo group of 22 participants and 4 dropouts in the supplement group of 23 participants, meaning that 31 participants out of the initial 45 participants after randomization completed the study. The third excerpt provides more detail for one patient in the placebo group who dropped out, stopping treatment after experiencing a heartburn sensation.
Here's all the information in this paper about adherence, attrition, and compliance: Ten of the 22 participants in the placebo group dropped out, and 4 of the 23 participants in the supplement group dropped out.
Third, consider these {paragraph_n} excerpt{"s" if len(paragraphs) > 1 else ""} from a paper studying {intervention}:
{paragraphs_to_numbered_list(paragraphs).strip()}
Let's think about what {"each" if len(paragraphs) > 1 else "this"} excerpt tells us, if anything, about adherence, attrition or compliance:""".strip()
if final_reasoning is None:
return prefix
return f"""{prefix} {final_reasoning.strip()}
Here's all the information in this paper about adherence, attrition, and compliance:""".strip()
def shorter_intervention_generation_prompt(
paragraphs: list[str], intervention: str, final_reasoning: str | None = None
) -> str:
paragraph_n = N_TO_STRING[len(paragraphs)]
prefix = f"""From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence," "attrition," or "compliance". If too many participants failed to receive the intervention or perform it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. For simple interventions that are accomplished in one shot (e.g., having a group of college students complete a test in a lab that takes 30 minutes), the study doesn't discuss adherence unless something unusual happened, and we can safely assume that everyone in the sample completed the study. Sometimes studies provide specific numbers or percentages of people who dropped out (attrited), and sometimes they only provide qualitative descriptions, such as saying that adherence was "generally good." Often, papers are genuinely unclear, and we can only conclude that there is not enough information in the paper for us to know anything about adherence or compliance.
Let's look at excerpts from six different papers to see what information, if any, they provide about the study's adherence, attrition, or compliance. We'll have to identify what each extract tells us about adherence (some extracts may only discuss methodology or results, telling us nothing about adherence), and for some, we may have to conclude that the attrition or compliance is simply unclear.
First, consider these three excerpts from a paper studying the Tumaini game:
1. Intervention arm participants completed a 45-minute informational onboarding session, including instructions on the interface, technology, and game content. They were instructed to play at least 1 hour per day for the 16 days of the study and asked not to share their own gameplay profile with others. The game interface allows for 5 additional players' profiles so that others may play without compromising the enrolled player's data. Intervention participants were provided with a phone with the game preloaded and used it at their own pace for the duration of the intervention. Control participants received standard of care, namely no additional intervention beyond any existing sex education from family, school, and peers. No specific data on the content or source of this education were collected from participants. All study smartphones were returned by the participants at the end of the intervention period.
2. Preliminary cleaning of survey data was conducted in MS Excel, with additional cleaning and all analyses completed using SAS version 9.4 (SAS Institute Inc., Cary, NC, USA). All control arm participants were included in analyses. One participant from the intervention arm was removed from analyses of effect at T2 due to delayed completion of the T2 survey. His data were retained for T1-T3 analyses, as he completed all other study activities on time. Descriptive statistics on demographic questions and game feedback questions were computed.
3. We recruited and enrolled 60 adolescent participants. Half of the participants were allocated to the intervention arm. All adolescents who were recruited completed all 3 study visits, and all intervention arm participants initiated gameplay. Participant demographics are presented in Table 3 . There were no significant demographic differences between the two arms. Preliminary calculations of exposure indicate that the intervention arm played Tumaini a mean of approximately 27 hours over the 16 days of the intervention.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt describes the study's methodology, but does not tell us how many or how well participants followed the instructions, so it does not inform us about adherence. The second excerpt tells us that all control arm participants were included in analysis, but one intervention arm participant was removed from the analysis of effect at T2 but included in the T3 analysis; this is attrition information. The third excerpt says that all participants completed all visits and that all intervention arm participants initiated gameplay; this is adherence information.
Here's all the information in this paper about adherence, attrition, and compliance: All participants completed all visits, and all intervention arm participants initiated gameplay. One intervention arm participant was not included in the T2 analysis but was included in the T3 analysis.
Second, consider these four excerpts from a paper studying Study 2 on depression and psychosis:
1. The intervention was a single session that lasted approximately one hour for participants to provide informed consent, complete a demographic form, watch videos relevant to their study arm, complete the assessments, and be debriefed. Participants in either of the video groups stayed for the full hour, but participants in the control condition who did not watch the video finished in about 50 min. In Study 2, which included two 8 min videos with diagnostic accuracy for both conditions, the protocol required an additional 15 min. Survey data were collected using SurveyCTO (Ver 2.30, Dobility, Inc., Cambridge, MA, USA), an android application, on tablets (www.surveycto.com/accessed on: 19 June 2017). In Study 1, after completion of the video session, participants were invited to participate in the optional qualitative interview to be held within one week.
2. After review of 2nd and 3rd year MBBS student rosters, 18 students were excluded prior to randomization because of being international students not speaking Nepali or having already completed their psychiatry rotation. Among the remaining students, 100 were selected for randomization to one of the three arms. No potential participants refused to participate in this study. An additional six students were excluded at the time of analysis because information on their demographic forms revealed that they were international students whose native language was not Nepali or they had completed their clinical psychiatry rotation; this information had not been up to date in the class rosters at the time of randomization (Figure 1 ). One participant in the service user arm was excluded because of both being an international non-Nepali student and having completed a psychiatry rotation. Demographic characteristics of these participants are in Table 2 . Of note, only three participants indicated that they were primarily interested psychiatry as a specialty (see Figure 2 ). Participants were randomized into one the three conditions: the control group with no video (n = 31, 33%), the didactic video group (n = 31, 33%), and the service user recovery testimonial video group (n = 32; 34%).
3. Due to limited time availability on the part of the researchers and students as well as the exploratory nature of the interviews, only six participants completed interviews. Qualitative results were analyzed from a subset of six students, two women and four men in their third year, who participated in in-depth interviews.
4. For the second study, 248 students were enrolled in first-and second-year MBBS program across the two institutions participating. From roster, 28 students were excluded because of being international or having completed a psychiatry clinical rotation. The remaining 220 students were randomized; however, seven students declined to participate or were unavailable during data collection periods. Therefore, 213 participants were randomly allocated to the following arms: didactic video condition (n = 73), the service user video condition (n = 72), and the no video control condition (n = 75) (Figure 3 ). At the analysis phase, there were additional exclusions because of missing data or identification of exclusion criteria that was not recorded in the school registers. Participant characteristics for each condition are shown in Table 4 .
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance. The first excerpt describes the methodology, describing the intervention as taking place in a single one-hour session. This does not tell us anything explicitly about adherence, but it does make it more likely that adherence was high, since participants only had to attend the single session, which is easy to do. The second excerpt says that 18 students were excluded prior to randomization; since this took place before sampling, it is not relevant to adherence. It also says that six students were excluded at the time of analysis because it turned out that they met exclusion criteria. Although this is not adherence strictly speaking, it is important to note when thinking about the validity of the results. The third excerpt says that only six participants completed interviews. The fourth excerpt says that in Study 2, seven students declined to participate or were not available during data collection after randomization of 220 students, and there were additional exclusions at analysis phase because of missing data or identification of exclusion criteria.
Here's all the information in this paper about adherence, attrition, and compliance: This paper does not discuss adherence explicitly. For the video study, six of the 100 randomized students were excluded from analysis, and in the second study, seven of the 220 randomized students declined to participate or were unavailable during data collection periods, with additional students excluded from the analysis because of missing data or identification of exclusion criteria. Only six participants completed interviews.
Third, consider these three excerpts from a paper studying antioxidant/anti-inflammatory supplement containing lemon verbena extract and omega-3 fatty acid:
1. Flow chart showing the dropout rate at different timepoints in the study.
2. Forty-eight (48) participants were enrolled for screening evaluation (Fig. 1 ) and after 3 exclusions, 45 participants were randomly assigned either to placebo or nutritional supplement groups, n = 22 and n = 23, respectively. Of these, 14 participants were withdrawn during the study for different reasons; there were 10 dropouts in the placebo group and 4 dropouts in the supplement group (treatment refusal, irregular treatment, starting on medication, or occurrence of an adverse event [AE]). Finally, 31 participants completed the study (12 in the placebo and 19 in the supplement group; Fig. 1 ).
3. Only 1 patient reported an AE (i.e., a heartburn sensation). The subject, who was in the placebo group, stopped the treatment immediately and was excluded from the study (Table 1 ). No major complications were reported by this subject.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt refers to a flow chart showing the dropout rate, but since we do not have the figure here, we cannot conclude anything from this about the study's attrition. The second excerpt says that there were 10 dropouts in the placebo group of 22 participants and 4 dropouts in the supplement group of 23 participants, meaning that 31 participants out of the initial 45 participants after randomization completed the study. The third excerpt provides more detail for one patient in the placebo group who dropped out, stopping treatment after experiencing a heartburn sensation.
Here's all the information in this paper about adherence, attrition, and compliance: Ten of the 22 participants in the placebo group dropped out, and 4 of the 23 participants in the supplement group dropped out.
Fourth, consider these {paragraph_n} excerpt{"s" if len(paragraphs) > 1 else ""} from a paper studying {intervention}:
{paragraphs_to_numbered_list(paragraphs).strip()}
Let's think about what {"each" if len(paragraphs) > 1 else "this"} excerpt tells us, if anything, about adherence, attrition or compliance:""".strip()
if final_reasoning is None:
return prefix
return f"""{prefix} {final_reasoning.strip()}
Here's all the information in this paper about adherence, attrition, and compliance:""".strip()
def intervention_generation_prompt(
paragraphs: list[str], intervention: str, final_reasoning: str | None = None
) -> str:
paragraph_n = N_TO_STRING[len(paragraphs)]
prefix = f"""From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence," "attrition," or "compliance". If too many participants failed to receive the intervention or perform it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. For simple interventions that are accomplished in one shot (e.g., having a group of college students complete a test in a lab that takes 30 minutes), the study doesn't discuss adherence unless something unusual happened, and we can safely assume that everyone in the sample completed the study. Sometimes studies provide specific numbers or percentages of people who dropped out (attrited), and sometimes they only provide qualitative descriptions, such as saying that adherence was "generally good." Often, papers are genuinely unclear, and we can only conclude that there is not enough information in the paper for us to know anything about adherence or compliance.
Let's look at excerpts from five different papers to see what information, if any, they provide about the study's adherence, attrition, or compliance. We'll have to identify what each extract tells us about adherence (some extracts may only discuss methodology or results, telling us nothing about adherence), and for some, we may have to conclude that the attrition or compliance is simply unclear.
First, consider these three excerpts from a paper studying the Tumaini game:
1. Intervention arm participants completed a 45-minute informational onboarding session, including instructions on the interface, technology, and game content. They were instructed to play at least 1 hour per day for the 16 days of the study and asked not to share their own gameplay profile with others. The game interface allows for 5 additional players' profiles so that others may play without compromising the enrolled player's data. Intervention participants were provided with a phone with the game preloaded and used it at their own pace for the duration of the intervention. Control participants received standard of care, namely no additional intervention beyond any existing sex education from family, school, and peers. No specific data on the content or source of this education were collected from participants. All study smartphones were returned by the participants at the end of the intervention period.
2. Preliminary cleaning of survey data was conducted in MS Excel, with additional cleaning and all analyses completed using SAS version 9.4 (SAS Institute Inc., Cary, NC, USA). All control arm participants were included in analyses. One participant from the intervention arm was removed from analyses of effect at T2 due to delayed completion of the T2 survey. His data were retained for T1-T3 analyses, as he completed all other study activities on time. Descriptive statistics on demographic questions and game feedback questions were computed.
3. We recruited and enrolled 60 adolescent participants. Half of the participants were allocated to the intervention arm. All adolescents who were recruited completed all 3 study visits, and all intervention arm participants initiated gameplay. Participant demographics are presented in Table 3 . There were no significant demographic differences between the two arms. Preliminary calculations of exposure indicate that the intervention arm played Tumaini a mean of approximately 27 hours over the 16 days of the intervention.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt describes the study's methodology, but does not tell us how many or how well participants followed the instructions, so it does not inform us about adherence. The second excerpt tells us that all control arm participants were included in analysis, but one intervention arm participant was removed from the analysis of effect at T2 but included in the T3 analysis; this is attrition information. The third excerpt says that all participants completed all visits and that all intervention arm participants initiated gameplay; this is adherence information.
Here's all the information in this paper about adherence, attrition, and compliance: All participants completed all visits, and all intervention arm participants initiated gameplay. One intervention arm participant was not included in the T2 analysis but was included in the T3 analysis.
Second, consider these three excerpts from a paper studying the Preschool Situational Self-Regulation Toolkit (PRSIST) Program:
1. All children in their final prior-to-school year in these centers, who attended at least one of the 1-2 assessment days, were invited to participate in this study. There were no further exclusion criteria. Parental consent to participate was provided for 547 3-5-year old children, all of whom were identified as likely to be attending school in the subsequent year. The flow of participants throughout the study is depicted in Figure 1 . At baseline, 473 of these children were assessed (86.5%), with non-participation largely due to absence on the day of assessment. The mean age of this sample was 4.44 years (SD = 0.38, range = 3.20-5.33), with a relative balance of boys and girls (48.2% girls). Children who were identified as of Aboriginal or Torres Strait Islander descent comprised 7.2% of the sample, which is in line with population estimates for this age group (Australian Institute of Health and Welfare (AIHW), 2012). Family income was diverse: 11.9% of families qualified for full childcare benefit subsidies (low income); 65.5% of families qualified for some childcare benefit (low-middle to middle-high income); and 22.7% of families did not qualify for any childcare benefit subsidy (high income). Maternal education levels were also diverse: 9.5% did not complete high school; 9.3% completed only high school; 30.6% had completed a diploma, trade, certificate; 34.6% completed a tertiary degree; and 16.0% a post-graduate qualification. At follow-up, 426 children were assessed, which corresponded to a 90.1% retention rate. Nonparticipation at follow-up was due to the child having left the center or absence on the day of assessment.
2. Based on these patterns of participation, 20 services (80%) were deemed to have met or exceeded the minimum threshold of participation (i.e., completed the professional development modules and met the minimum of three child activities per week). Those that did not participate in the program were a result of: preparations for government assessment and rating (n = 1); substantial illness, maternity leave or turnover of key staff that precluded participation (n = 2); or low-or non-participation for undisclosed reasons (n = 2). Two of these five centers did not participate in any program elements. The other three centers did not engage with professional development modules or induction teleconference call yet completed child activities. Overall, there were good levels of adherence to the program, especially amongst those centers without significant sector-imposed impediments to participation.
3. Inability to conclusively and exclusively provide evidence for one of these possibilities, however, highlights limitations within the current study. That is, although the evaluation was rigorously designed and executed according to CONSORT guidelines, funding considerations limited the roll-out and intervention period to only 6 months. It is possible that a full year of program implementation would yield stronger program effects (see, for example, Schachter, 2015). It is also possible that program effects would be strengthened with stricter adherence to highquality program implementation. While fidelity data indicate good compliance in the frequency and timing of program elements, data are insufficient to evaluate the integrity with which program elements were implemented. While in-person or video fidelity checks were not possible in the current study, this would help monitor adherence. As a researcher-implemented model of delivery would violate our aspiration for a lowcost and barrier-free resource for educators, a plausible middle ground might be a coaching model that supports educators in implementation and adaptation of the program in their context. Lastly, the program was designed with the intention to foster selfregulation in all children, and thus did not focus on instances of dysregulation. However, it is clear that child dysregulation remains a significant concern for educators (Neilsen-Hewett et al., 2019), and future iterations of the program would do well to more explicitly provide support for these children. In guiding such an expansion of the program, there is evidence that children with frequent and severe dysregulation require a different approach to fostering self-regulation, as demonstrated successfully in trauma-informed practice approaches (Holmes et al., 2015). Future studies would also do well to consider implications of differing educator qualifications and experience, whereby different types and levels of support may be needed at varying levels of behavior challenges and educators' skills to address these.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt includes demographic information about the participants but also reveals that at baseline, 473 of the total sample of 547 children were assessed (with non-participation mostly due to absence), and at follow-up, 426 children were assessed (with non-participation mostly due to the child having left the center or absence), corresponding to a 90.1% retention rate. The second excerpt describes compliance with protocols: 20 of the 25 intervention centers met or exceeded the minimum threshold of participation. The third excerpt describes compliance in the frequency and timing of program elements as "good" but also says that the study did not monitor adherence with in-person or video checks, which would have helped provide a better picture of compliance with the study design.
Here's all the information in this paper about adherence, attrition, and compliance: Of the initial sample of 547 children, 473 were assessed at baseline and 426 at follow-up. While 20 of 25 intervention centers met or exceeded the minimum threshold of participation and the frequency and timing of program elements was good, the study did not monitor adherence with in-person or video checks.
Third, consider these four excerpts from a paper studying Study 2 on depression and psychosis:
1. The intervention was a single session that lasted approximately one hour for participants to provide informed consent, complete a demographic form, watch videos relevant to their study arm, complete the assessments, and be debriefed. Participants in either of the video groups stayed for the full hour, but participants in the control condition who did not watch the video finished in about 50 min. In Study 2, which included two 8 min videos with diagnostic accuracy for both conditions, the protocol required an additional 15 min. Survey data were collected using SurveyCTO (Ver 2.30, Dobility, Inc., Cambridge, MA, USA), an android application, on tablets (www.surveycto.com/accessed on: 19 June 2017). In Study 1, after completion of the video session, participants were invited to participate in the optional qualitative interview to be held within one week.
2. After review of 2nd and 3rd year MBBS student rosters, 18 students were excluded prior to randomization because of being international students not speaking Nepali or having already completed their psychiatry rotation. Among the remaining students, 100 were selected for randomization to one of the three arms. No potential participants refused to participate in this study. An additional six students were excluded at the time of analysis because information on their demographic forms revealed that they were international students whose native language was not Nepali or they had completed their clinical psychiatry rotation; this information had not been up to date in the class rosters at the time of randomization (Figure 1 ). One participant in the service user arm was excluded because of both being an international non-Nepali student and having completed a psychiatry rotation. Demographic characteristics of these participants are in Table 2 . Of note, only three participants indicated that they were primarily interested psychiatry as a specialty (see Figure 2 ). Participants were randomized into one the three conditions: the control group with no video (n = 31, 33%), the didactic video group (n = 31, 33%), and the service user recovery testimonial video group (n = 32; 34%).
3. Due to limited time availability on the part of the researchers and students as well as the exploratory nature of the interviews, only six participants completed interviews. Qualitative results were analyzed from a subset of six students, two women and four men in their third year, who participated in in-depth interviews.
4. For the second study, 248 students were enrolled in first-and second-year MBBS program across the two institutions participating. From roster, 28 students were excluded because of being international or having completed a psychiatry clinical rotation. The remaining 220 students were randomized; however, seven students declined to participate or were unavailable during data collection periods. Therefore, 213 participants were randomly allocated to the following arms: didactic video condition (n = 73), the service user video condition (n = 72), and the no video control condition (n = 75) (Figure 3 ). At the analysis phase, there were additional exclusions because of missing data or identification of exclusion criteria that was not recorded in the school registers. Participant characteristics for each condition are shown in Table 4 .
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance. The first excerpt describes the methodology, describing the intervention as taking place in a single one-hour session. This does not tell us anything explicitly about adherence, but it does make it more likely that adherence was high, since participants only had to attend the single session, which is easy to do. The second excerpt says that 18 students were excluded prior to randomization; since this took place before sampling, it is not relevant to adherence. It also says that six students were excluded at the time of analysis because it turned out that they met exclusion criteria. Although this is not adherence strictly speaking, it is important to note when thinking about the validity of the results. The third excerpt says that only six participants completed interviews. The fourth excerpt says that in Study 2, seven students declined to participate or were not available during data collection after randomization of 220 students, and there were additional exclusions at analysis phase because of missing data or identification of exclusion criteria.
Here's all the information in this paper about adherence, attrition, and compliance: This paper does not discuss adherence explicitly. For the video study, six of the 100 randomized students were excluded from analysis, and in the second study, seven of the 220 randomized students declined to participate or were unavailable during data collection periods, with additional students excluded from the analysis because of missing data or identification of exclusion criteria. Only six participants completed interviews.
Fourth, consider these three excerpts from a paper studying antioxidant/anti-inflammatory supplement containing lemon verbena extract and omega-3 fatty acid:
1. Flow chart showing the dropout rate at different timepoints in the study.
2. Forty-eight (48) participants were enrolled for screening evaluation (Fig. 1 ) and after 3 exclusions, 45 participants were randomly assigned either to placebo or nutritional supplement groups, n = 22 and n = 23, respectively. Of these, 14 participants were withdrawn during the study for different reasons; there were 10 dropouts in the placebo group and 4 dropouts in the supplement group (treatment refusal, irregular treatment, starting on medication, or occurrence of an adverse event [AE]). Finally, 31 participants completed the study (12 in the placebo and 19 in the supplement group; Fig. 1 ).
3. Only 1 patient reported an AE (i.e., a heartburn sensation). The subject, who was in the placebo group, stopped the treatment immediately and was excluded from the study (Table 1 ). No major complications were reported by this subject.
Let's think about what each excerpt tells us, if anything, about adherence, attrition or compliance: The first excerpt refers to a flow chart showing the dropout rate, but since we do not have the figure here, we cannot conclude anything from this about the study's attrition. The second excerpt says that there were 10 dropouts in the placebo group of 22 participants and 4 dropouts in the supplement group of 23 participants, meaning that 31 participants out of the initial 45 participants after randomization completed the study. The third excerpt provides more detail for one patient in the placebo group who dropped out, stopping treatment after experiencing a heartburn sensation.
Here's all the information in this paper about adherence, attrition, and compliance: Ten of the 22 participants in the placebo group dropped out, and 4 of the 23 participants in the supplement group dropped out.
Fifth, consider these {paragraph_n} excerpt{"s" if len(paragraphs) > 1 else ""} from a paper studying {intervention}:
{paragraphs_to_numbered_list(paragraphs).strip()}
Let's think about what {"each" if len(paragraphs) > 1 else "this"} excerpt tells us, if anything, about adherence, attrition or compliance:""".strip()
if final_reasoning is None:
return prefix
return f"""{prefix} {final_reasoning.strip()}
Here's all the information in this paper about adherence, attrition, and compliance:""".strip()
async def complete_with_cache_buster(
prompt: str, temperature: float, max_tokens: int, top_p: float, stop, cache_id: int
):
return await openai_complete(
stop=stop,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
cache_id=cache_id,
)
def remove_last_subsentence(text: str) -> str:
sentences = split_sentences(text)
if not sentences[-1].strip().endswith("."):
log.warning("Removing last sentence", sentences=sentences)
sentences = sentences[:-1]
return " ".join(sentences)
@trace
async def sample_generation_answer_with_reasoning(
paragraphs: list[str],
intervention: str,
cache_id: int,
ranked_paragraphs: list[str],
) -> tuple["AnswerWithReasoning", Callable]:
"""Sample reasoning and a final answer,
given the prompt. Shorten the prompt dynamically
to fit in the paragraphs provided, by first
reducing the number of few-shot examples, then
dropping the paragraphs that are least likely to be
about adherence.
"""
for prompt_func in (
intervention_generation_prompt,
shorter_intervention_generation_prompt,
even_shorter_intervention_generation_prompt,
):
prompt = prompt_func(paragraphs, intervention)
used_prompt_func = prompt_func
if n_remaining_tokens(prompt, -100_000) >= 400:
break
while n_remaining_tokens(prompt, -100_000) < 400: # some huge negative number
paragraphs = remove_worst_paragraph(paragraphs, ranked_paragraphs)
if not paragraphs:
raise ValueError("Prompt too long with even one paragraph")
prompt = shorter_intervention_generation_prompt(paragraphs, intervention)
log.warning("Dropped paragraph", n=len(paragraphs))
response = await complete_with_cache_buster(
prompt=prompt,
temperature=0.4,
max_tokens=n_remaining_tokens(prompt, 400) - 100,
top_p=1,
stop=("\nFourth", "\nFifth", "\nSixth", "\nFinally"),
cache_id=cache_id,
)
response_text = response["choices"][0]["text"]
token_usage = response["usage"]["total_tokens"]
if (
"Here's all the information in this paper about adherence, attrition, and compliance:"
in response_text
):
reasoning, answer = response_text.split(
"Here's all the information in this paper about adherence, attrition, and compliance:"
)
return (
AnswerWithReasoning(
paragraph="\n\n".join(paragraphs),
reasoning=reasoning.strip(),
answer=remove_last_subsentence(answer.strip()),
token_usage=token_usage,
),
used_prompt_func,
)
log.warning(
"Unexpected response for final generation reasoning", response=response_text
)
return (
AnswerWithReasoning(
paragraph="\n\n".join(paragraphs),
reasoning=remove_last_subsentence(response_text.strip()),
answer=response_text.strip(),
token_usage=token_usage,
),
used_prompt_func,
)
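# Hypothetical usage sketch (argument values assumed, not taken from this module):
#     answer, prompt_fn = await sample_generation_answer_with_reasoning(
#         paragraphs, "vitamin D supplementation", cache_id=3, ranked_paragraphs=ranked
#     )
#     print(answer.answer)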
@trace
async def final_answer_with_reasoning(
paragraphs: list[str], intervention: str, ranked_paragraphs: list[str]
):
"""Sample ten completions, and choose a reasoning which has the most
numbers in common with the other reasonings.
Use that reasoning as support to complete the final answer.
"""
answers_short = [
(
await sample_generation_answer_with_reasoning(
paragraphs, intervention, cache_id, ranked_paragraphs
)
)
for cache_id in range(10)
]
answers = [answer[0] for answer in answers_short]
used_prompt_func = answers_short[0][1]
total_token_usage = sum([a.token_usage for a in answers])
print(f"Total token usage: {total_token_usage}")
numbers_in_answers = [extract_numbers(a.answer) for a in answers]
for nums, answer in zip(numbers_in_answers, answers):
if "unclear" in answer.answer.lower():
nums.append("Unclear")
def rank(numbers, number):
r = 0
for n in numbers:
if number in n:
r += 1
return r
scores: list[float] = []
for numbers in numbers_in_answers:
score = 0.0
for number in numbers:
score += rank(numbers_in_answers, number)
if numbers:
score /= len(numbers)
score += 0.01 * len(numbers)
scores.append(score)
else:
scores.append(0)
answers_with_scores = [(answer, score) for answer, score in zip(answers, scores)]
best_answer = max(answers_with_scores, key=lambda aws: aws[1])[0]
final_prompt = used_prompt_func(
paragraphs=paragraphs,
intervention=intervention,
final_reasoning=best_answer.reasoning,
)
final_answer = await complete_with_cache_buster(
prompt=final_prompt,
temperature=0.0,
max_tokens=n_remaining_tokens(final_prompt, 83),
top_p=1,
stop=("\nFourth", "\nFifth", "\nSixth", "\nFinally"),
cache_id=0,
)
final_answer_text = final_answer["choices"][0]["text"]
return AnswerWithReasoning(
paragraph="\n\n".join(paragraphs),
reasoning=best_answer.reasoning,
answer=remove_last_subsentence(final_answer_text),
token_usage=final_answer["usage"]["total_tokens"],
)
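# Note on final_answer_with_reasoning above: the score favors reasonings whose extracted
# numbers also show up in the other samples (a simple self-consistency vote), with a small
# bonus for mentioning more numbers; the winning reasoning then seeds a temperature-0
# completion that produces the final answer.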
def intervention_classification_prompt(paragraph: str, intervention: str):
return f"""
From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence," "attrition," or "compliance". If too many participants failed to receive the intervention or failed to receive it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. Sometimes it can be tricky to tell whether the author is talking about adherence/compliance with the study's protocols versus simply discussing the results of the study. For simple interventions that are accomplished in one shot (e.g., having a group of college students complete a test in a lab that takes 30 minutes), the study doesn't discuss adherence unless something unusual happened, and we can safely assume that everyone in the sample completed the study.
Let's look at five examples to decide whether they contain information about adherence or compliance. For each paragraph, we'll conclude whether the paragraph does tell us about the study's adherence.
First, consider this paragraph from a paper studying non-cognitive skills certificate disclosure to job candidates and firms:
---
38 Information on whether each job interview in the matching intervention turned into a hire (and on the associated job characteristics) was collected in both the firm and worker follow-ups. We prefer to use information from the worker follow-ups for these match-level outcomes as measurement error is likely to be lower there for at least two reasons: (i) while the median firm was matched to three workers, the median worker was matched Figure 4 shows a summary of compliance and attrition. Starting from compliance, of the 1,230 scheduled job interviews, 515 (or 42%) actually took place. Lack of compliance is mainly due to workers having lost interest in being matched (32% of cases) or to the firm having lost interest (30% of cases) by the time they were called for the interviews. 39 Panel A of Appendix Table A6 explores the determinants of compliance, and shows very little evidence of selection on observables. 40 Importantly, Treatment does not predict the likelihood of the job interview taking place. This is not surprising, as the certificates were shown to firms and workers only conditional on the job interview taking place. Consistently with this, the Online Appendix confirms that the sample of job interviews that took place remains balanced on the main observable worker and firm characteristics. All the Treatment workers who showed up to the job interviews were given the certificates (corresponding to 49% of Treatment workers). The remaining Treatment certificates were disbursed to the workers shortly after the first worker follow-up survey. So by the second follow-up survey about 81% of Treatment workers had received the certificate. 41 oving on to attrition, the follow-up surveys targeted all firms and workers in the experimental sample, irrespective of whether the scheduled job interviews took place or not. We have very moderate attrition rates: these are about 12% in the firm follow-up, and about 14% in both worker follow-ups. 42 Panel B of Appendix Table A6 shows that attrition is not related to Treatment in either sample, and there is also very little evidence of observable characteristics determining attrition. Panel B of Appendix Table A5 .1 and Panels B and C of Table A5 .2 confirm that the samples of both workers and firms remain balanced on baseline characteristics at follow-up, so that attrition is not likely to affect the validity of the initial random assignment. 43 Therefore, we do not correct for attrition in our main regression specifications. 44 only one firm, so possible recall errors related to the respondent getting confused about the different job interviews are lower on the worker side; (ii) in 13% of the cases, the person that answered the firm follow-up survey is different from the owner that conducted the job interviews. Results using corresponding match-level information from the firm follow-up survey (not reported) are qualitatively similar.
---
Let's think through what this paragraph tells us about the study's adherence, attrition, or compliance. First, we find out that of the 1,230 scheduled job interviews, only 515, or 42% took place. Then, we find out that all the treatment workers who showed up to job interviews were given certificates, which corresponds to 49% of treatment workers. Finally, by the second follow-up survey, 81% of the workers had received the certificate. This tells us about attrition, i.e., adherence.
These figures describe both how much and how well participants in the study complied with the study's protocol.
Conclusion: Yes, this paragraph does tell us about adherence, attrition, or compliance for the intervention.
Second, consider this paragraph from a paper studying relaxation and park walking during lunch breaks.
```
Lunch breaks constitute the longest within-workday rest period, but it is unclear how they affect recovery from job stress. We conducted two randomized controlled trials with 153 Finnish knowledge workers who engaged for 15 minutes daily in prescribed lunch break activities for ten consecutive working days. Participants were randomly assigned to a: 1) park walking group (N = 51), 2) relaxation exercises group (N = 46) and 3) control group (N = 56). The study was divided into two parts scheduled in spring (N = 83) and fall (N = 70). Recovery experiences (detachment, relaxation, enjoyment) and recovery outcomes (restoration, fatigue, job satisfaction) were assessed with SMS and paper-and-pencil questionnaires several times per day before, during and after the intervention period. A manipulation check revealed that both intervention groups reported less tension after lunch breaks during the intervention than before. In spring, the interventions did hardly affect recovery experiences and outcomes. In fall, restoration increased and fatigue decreased markedly immediately after lunch breaks and in the afternoon in both intervention groups (d = 0.22-0.58) and most consistent positive effects across the day were reported by the park walking group. Park walks and relaxation exercises during lunch breaks can enhance knowledge workers' recovery from work, but effects seem weak, short-lived and dependent on the season.
```
Let's think through what this paragraph tells us about the study's adherence, attrition, or compliance. First, we find out that 51 participants were assigned to the park walking group, 46 to the relaxation exercises group, and 56 to the control group, and that the study was divided into two parts, a spring (n=83) and fall (n=70) group. This is simply information about the size of the sample and its allocation to different treatment arms; it tells us nothing about whether participants in these groups actually completed the intervention. For that, we would need to know, for example, how often those in the park walking group actually took walks in the park during their lunch breaks. Second, we find out that there was increased restoration and decreased fatigue (d=0.22-0.58) in both intervention groups in the fall. This is about the results of the study (what happened to the participants), not simply about how well they adhered to the intervention protocol.
These figures describe the size of the sample and the results of the study, but not how well participants adhered to the study's plan.
Conclusion: No, this paragraph does not tell us about adherence, attrition, or compliance for the intervention.
Third, consider this paragraph from a paper studying albendazole:
---
A somewhat lower proportion of pupils in school took the medicine in 1999. Among girls younger than thirteen and boys who were enrolled in school for at least part of the 1999 school year, the overall treatment rate was approximately 72 percent (73 percent in Group 1 and 71 percent in Group 2 schools), suggesting that the process of selection into treatment was fairly similar in the two years despite the change in consent rules. Of course, measured relative to the baseline population of students enrolled in early 1998, a smaller percentage of students were still in school in 1999 and hence, treatment rates in this baseline sample were considerably lower in 1999 than in 1998: among girls under thirteen years of age and all boys in treatment schools from the baseline sample, approximately 57 percent received medical treatment at some point in 1999, while only nine percent of the girls thirteen years of age and older received treatment. 17 nly five percent of comparison school pupils received medical treatment for worms independently of the program during the previous year, according to the 1999 pupil questionnaire. 18 An anthropological study examining worm treatment practices in a neighboring district in Kenya (Geissler et al. (2000)), finds that children self-treat the symptoms of helminth infections with local herbs, but found no case in which a child or parent purchased deworming 17 The difference between the 72 percent and 57 percent figures is due to Group 2 pupils who dropped out of school (or who could not be matched in the data cross years, despite the efforts of the NGO field staff) between years 1 and 2 of the project. Below, we compare infection outcomes for pupils who participated in the 1999 parasitological survey, all of whom were enrolled in school in 1999. Thus the parasitological survey sample consists of pupils enrolled in school in both 1998 and 1999 for both the treatment and comparison schools. To the extent that the deworming program itself affected enrolment outcomes-1999 school enrolment is approximately four percentage points higher in the treatment schools than the comparison schools-the pupils enrolled in the treatment versus comparison schools in 1999 will have different characteristics. However, since drop-out rates were lower in the treatment schools, this is likely to lead to a bias toward zero in the within-school health externality estimates, in which case our estimates serve as lower bounds on true within-school effects.
---
Let's think through what this paragraph tells us about the study's adherence, attrition, or compliance. The treatment rate among enrolled pupils was approximately 72 percent in 1999. Is this a percentage of the participants in the study? It's not clear from this paragraph alone; we need more context. Similarly, we find that only five percent of comparison school pupils received medical treatment for worms independently of the program during the previous school year. This could be about adherence, but it could also be describing the results of the intervention. We would need a longer description of the study to find out.
Conclusion: Unclear; we don't know whether this paragraph tells us about adherence, attrition, or compliance for the intervention.
Fourth, consider this paragraph from a paper studying {intervention.strip()}:
---
{paragraph.strip()}
---
Let's think through what this paragraph tells us about the study's adherence, attrition, or compliance.""".strip()
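# The prompt below mirrors the classification prompt above but asks a narrower question:
# does the adherence/compliance being discussed belong to this paper's own study, or to a
# cited related work?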
def this_or_other_study_prompt(paragraph: str, intervention: str):
return f"""
From the textbook, "Critically Evaluating Interventional Studies," Chapter 3:
When evaluating the quality of a randomized controlled trial, you should also consider whether any participants dropped out of the study or failed to follow its protocols correctly. This is sometimes called "adherence" or "compliance". If too many participants failed to receive the intervention or failed to receive it correctly, for whatever reason, this may damage the internal validity of the study's results.
Unfortunately, papers are often not as clear as they should be when discussing adherence. Sometimes it can be tricky to tell whether the author is talking about the adherence/compliance with the study's own protocols versus simply discussing the adherence or compliance of a related work.
Let's look at five examples of paragraphs from papers that discuss adherence or compliance to decide whether they are describing adherence or compliance for the author's own study versus adherence/compliance of a different study or a related work. Usually, when the adherence or compliance being discussed belongs to a different study, that study is cited explicitly. If another study is not cited explicitly, you can assume that the adherence/compliance rate being discussed belongs to the author's own study.
For each paragraph, we'll conclude either that Yes, the adherence/compliance being discussed probably belongs to the author's own study, or No, that it probably belongs to a different study.
First, consider this paragraph from a paper studying the Preschool Situational Self-Regulation Toolkit (PRSIST) Program:
---
All children in their final prior-to-school year in these centers, who attended at least one of the 1-2 assessment days, were invited to participate in this study. There were no further exclusion criteria. Parental consent to participate was provided for 547 3-5-year old children, all of whom were identified as likely to be attending school in the subsequent year. The flow of participants throughout the study is depicted in Figure 1 . At baseline, 473 of these children were assessed (86.5%), with non-participation largely due to absence on the day of assessment. The mean age of this sample was 4.44 years (SD = 0.38, range = 3.20-5.33), with a relative balance of boys and girls (48.2% girls). Children who were identified as of Aboriginal or Torres Strait Islander descent comprised 7.2% of the sample, which is in line with population estimates for this age group (Australian Institute of Health and Welfare (AIHW), 2012). Family income was diverse: 11.9% of families qualified for full childcare benefit subsidies (low income); 65.5% of families qualified for some childcare benefit (low-middle to middle-high income); and 22.7% of families did not qualify for any childcare benefit subsidy (high income). Maternal education levels were also diverse: 9.5% did not complete high school; 9.3% completed only high school; 30.6% had completed a diploma, trade, certificate; 34.6% completed a tertiary degree; and 16.0% a post-graduate qualification. At follow-up, 426 children were assessed, which corresponded to a 90.1% retention rate. Nonparticipation at follow-up was due to the child having left the center or absence on the day of assessment.
---
Let's think through whether this paragraph describes adherence for the study in question or another study: When describing nonparticipation rates, the text does not contain any citations to related works. Further, these details are also shown in Figure 1, strongly suggesting that the adherence/compliance rate being discussed belongs to the author's own study.
Conclusion: Yes, the adherence/compliance being discussed probably belongs to the author's own study.
Second, consider this paragraph from a paper studying DDUGKY skills training programs:
---
In the Indian context, we were unable to find studies that have estimated the impact of youth skills training programs sponsored by the government. Although not offered by the government, an experimental study designed by Maitra and Mani (2017) and implemented in co-operation with non-governmental organizations offers estimates of the impact of a 6-month stitching and tailoring training program targeted at young women (aged 18-39 years) in New Delhi. The paper examined the 5 The youth training employment programs (Joven) in Latin America were initiated in Chile in 1991, and thereafter, similar programs have been implemented in Argentina, Colombia, Peru, and Uruguay. The various programs target youth from low-income families, with low educational attainment, and with limited or no job experience. The programs consist of basic literacy, training in a trade which is in demand, work experience, and help finding a job. Typically, the intervention lasts for 6 months and includes 200-400 h of training and 2-3 months of work experience. 6 Other experimental evaluations of vocational training program in developing countries include Acevedo et al. (2017) for the Dominican Republic, Attanasio et al. (2017) for Columbia, Maitra and Mani (2017) for India, Diaz and Rosas (2016) for Peru, Honorati (2015) for Kenya. 7 Although their paper does not focus on disadvantaged youth but on the general unemployed population, Hirshleifer et al. (2016) use a randomised experiment to assess the effect of a large-scale vocational training program in Turkey and conclude that the effect of being assigned to training had a 2 percentage point, but statistically not significant effect on the probability of being employed. impact of the program 6 months and 18 months after program completion on a sample of 594 women (409 treatment and 185 control). According to the study's findings, in the short term, women who received training were 4 percentage points more likely to be self-employed, 6 percentage points more likely to be employed and earn 150% more per month as compared to the control group. The effects persisted in the medium term. While the effects are impressive, the authors report that only 56% of those assigned to treatment completed the course and that there were a number of barriers to entry, chiefly, lack of access to credit, lack of child-care support and the distance from residence to the training center.
---
Let's think through whether this paragraph describes adherence for the study in question or another study: When describing how only 56% of those assigned to treatment completed the course, the authors are reporting the findings from the experiment by Maitra and Mani (2017). This means that the adherence/compliance being discussed belongs to that study, not the author's own study.
Conclusion: No, the adherence/compliance being discussed probably belongs to a different study.
Third, consider this paragraph from a paper studying {intervention.strip()}:
---
{paragraph.strip()}
---
Let's think through whether this paragraph describes adherence for the study in question or another study:
""".strip()
class AnswerWithReasoning(BaseModel):
paragraph: str
reasoning: str
answer: str
token_usage: int
@trace
async def intervention_classification_answer_with_reasoning(
paragraph: str,
intervention: str,
temperature: float,
cache_id: int = 0,
):
"""Sample reasoning and a final answer for the classification prompt, asking
"Does this paragraph contain information about adherence, compliance, or attrition?"
"""
response = await openai_complete(
prompt=intervention_classification_prompt(paragraph, intervention),
temperature=temperature,
max_tokens=657,
stop=("\nFifth,", "\nFinally,"),
top_p=1,
cache_id=cache_id,
)
response_text = response["choices"][0]["text"]
token_usage = response["usage"]["total_tokens"]
if "Conclusion: " in response_text:
reasoning, answer_text = response_text.split("Conclusion:")
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=reasoning.strip(),
answer=answer_text.strip(),
token_usage=token_usage,
)
log.warning(
"Unexpected response in intervention classification",
response=response,
paragraph=paragraph,
)
print("Unexpected response:", response)
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=response_text.strip(),
answer="",
token_usage=token_usage,
)
@trace
async def this_or_other_classification_answer_with_reasoning(
paragraph: str,
intervention: str,
temperature: float,
cache_id: int = 0,
):
"""Sample reasoning and a final answer for the classification prompt,
asking, "Is this paragraph about adherence about a related work or
the study this paper is reporting on?"
"""
response = await openai_complete(
prompt=this_or_other_study_prompt(paragraph, intervention),
temperature=temperature,
max_tokens=768,
stop=("\nFourth,", "\nFinally,", "\n\nNow,"),
top_p=1,
cache_id=cache_id,
)
response_text = response["choices"][0]["text"]
token_usage = response["usage"]["total_tokens"]
if "Conclusion: " in response_text:
reasoning, answer_text = response_text.split("Conclusion:")
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=reasoning.strip(),
answer=answer_text.strip(),
token_usage=token_usage,
)
log.warning(
"Unexpected response in this or other classification",
response=response,
paragraph=paragraph,
)
print("Unexpected response:", response)
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=response_text.strip(),
answer="",
token_usage=token_usage,
)
def answer_has_prefix(answer: AnswerWithReasoning, prefix: str):
return answer.answer.lower().startswith(prefix.lower())
async def majority_vote(
answers: list[AnswerWithReasoning],
candidate_prefixes: tuple[str, ...] = ("Yes", "No", "Unclear"),
):
votes: Counter[str] = Counter()
for answer in answers:
for prefix in candidate_prefixes:
if answer_has_prefix(answer, prefix):
votes[prefix] += 1
break
return votes.most_common(1)[0][0]
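# Note: majority_vote counts only the first matching prefix per answer and assumes that at
# least one answer starts with a candidate prefix; if none do, the Counter is empty and
# most_common(1)[0] would raise an IndexError.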
def prompt_from_reasoning(
prompt_function: Callable[[str, str], str],
*,
paragraph: str,
intervention: str,
reasoning: str,
):
prefix = prompt_function(paragraph, intervention)
return f"""{ prefix } { reasoning }
Conclusion: """.strip()
@trace
async def zero_temp_final_classification(prompt: str):
"""Perform a final classification step using a reasoning
selected from the sampled classifications."""
return await openai_complete(
prompt=prompt,
stop=("\n"),
)
@trace
async def adherence_paragraph_classification(
selection_function: Callable[
[str, str, float, int], Awaitable[AnswerWithReasoning]
],
prompt_function: Callable[[str, str], str],
*,
paragraph: str,
intervention: str,
):
"""Using the selection and prompt functions provided,
complete the classification task by chain-of-thought reasoning,
high-temperature sampling, plurality voting, and zero-temperature
generation of the final classification.
"""
answers = [
(
await selection_function(
paragraph,
intervention,
0.4,
cache_id,
)
)
for cache_id in range(10, 20)
]
total_token_usage = sum(answer.token_usage for answer in answers)
print(f"Total token usage: {total_token_usage}")
most_common_prefix = await majority_vote(
answers, candidate_prefixes=("Yes", "No", "Unclear")
)
answers_with_most_common_prefix = [
a for a in answers if answer_has_prefix(a, most_common_prefix)
]
if not answers_with_most_common_prefix:
# just use the longest reasoning
best_reasoning = max(answers, key=lambda a: len(a.reasoning))
else:
best_reasoning = max(
answers_with_most_common_prefix, key=lambda a: len(a.reasoning)
)
zero_temp_answer = await zero_temp_final_classification(
prompt_from_reasoning(
prompt_function,
paragraph=paragraph,
intervention=intervention,
reasoning=best_reasoning.reasoning,
)
)
token_usage = zero_temp_answer["usage"]["total_tokens"]
return AnswerWithReasoning(
paragraph=paragraph,
reasoning=best_reasoning.reasoning,
answer=zero_temp_answer["choices"][0]["text"].strip(),
token_usage=total_token_usage + token_usage,
)
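# The constants and prompt builder below drive the T-Few sentence classifier: each
# sentence is scored with several differently phrased prompts, and a single positive
# choice is enough to flag the sentence as possibly adherence-related (see
# is_possibly_adherence_sentence).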
TFEW_ADHERENCE_ANSWER_CHOICES = ("no", "yes")
def make_multiple_adherence_prompts(
context: str, section: str, sentence: str
) -> list[tuple[str, tuple[str, str]]]:
prompts = [
f"Context: { context }\n\nSection: { section }\n\nAnswer yes if the following sentence is about how many participants in the study complied with the study's protocol, had to drop out, or withdrew; answer no if it is about something else, such as the study's design, sampling strategy, or results.\n\nSentence: { sentence }",
f'Context: { context }\n\nQuestion: Does "{ sentence }" describe how many people eligible for the intervention actually completed it or failed to complete it?\nOptions:\nA. Yes, "{ sentence }" describes how many people actually completed or failed to complete the intervention.\nB. No, "{ sentence }" does not describe how many people completed or failed to complete the intervention.',
f'Context: { context }\n\nQuestion: Is "{ sentence }" about the actual adherence or dropout rate of the study? True, False, or Neither?',
f"Does the following sentence from a research paper describe how many participants dropped out of or withdrew from the study?\n\nSection: { section }\nSentence: { sentence }",
f"Does the following sentence from a research paper describe how many participants dropped out of or withdrew from the study?\n\nSection: { section }\nSentence: { sentence }",
]
prompts = [prompt.strip() for prompt in prompts]
choices: list[tuple[str, str]] = [
("no", "yes"),
("B", "A"),
("False", "True"),
("no", "yes"),
("no", "yes"),
]
return [(prompt, choice) for prompt, choice in zip(prompts, choices)]
@trace
async def adherence_regex(sentence: str, level: int = 0) -> bool:
"""Simple regex for adherence-related English language patterns."""
match level:
case 0:
pattern = r"\b(adherence|Adherence|had to be excluded|were excluded|had to drop out|dropped out)\b"
case 1:
pattern = r"\b(withdrew|did not complete the)\b"
case 2:
pattern = r"\b(was omitted from|complied with)\b"
case _:
raise ValueError(f"Invalid level: { level }")
answer = re.search(pattern, sentence) is not None
return answer
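# Note: adherence_paragraphs_recall escalates through regex levels 0-2 only when the
# model-based sentence filter finds no candidate paragraphs; level 0 is also used as a
# direct fallback inside is_possibly_adherence_paragraph.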
def remove_worst_paragraph(paragraphs: list[str], ranked_paragraphs: list[str]):
overlap = [paragraph for paragraph in ranked_paragraphs if paragraph in paragraphs]
return [
paragraph
for paragraph in paragraphs
if paragraph in overlap[: len(paragraphs) - 1]
]
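# remove_worst_paragraph keeps only the paragraphs that fall within the top
# (len(paragraphs) - 1) entries of the relevance ranking, i.e. it drops the single
# paragraph judged least likely to be about adherence while preserving input order.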
class AdherenceTfewSettings(BaseSettings):
qa_model = "adherence-tfew-multi"
backup_search_model = "mono-t5"
class AdherenceParagraphTfew(Recipe):
defaults = lambda self: AdherenceTfewSettings() # noqa: E731
async def is_possibly_adherence_sentence(
self, *, sentence: str, context: str, section: str
) -> bool:
"""Detect whether a sentence is possibly related to adherence, using up to 5 prompts."""
for prompt, choice_inputs in make_multiple_adherence_prompts(
context=context, section=section, sentence=sentence
):
choice_probs, _ = await self.agent(self.s.qa_model).classify(
prompt=prompt, choices=choice_inputs
)
choice, _ = max_by_value(choice_probs)
if choice == choice_inputs[1]:
return True
return False
async def is_possibly_adherence_paragraph(self, paragraph: Paragraph) -> bool:
"""Detect whether a paragraph is possibly related to adherence,
by checking whether any of its sentences are possibly adherence-related,
supplemented by regex."""
for sentence in paragraph.sentences:
is_adherence_sentence = await self.is_possibly_adherence_sentence(
sentence=sentence,
context=str(paragraph),
section=paragraph.sections[0].title if paragraph.sections else "",
)
if is_adherence_sentence:
return True
return await adherence_regex(str(paragraph), 0)
async def rank_paragraphs_by_adherence(
self, paragraphs: list[Paragraph]
) -> list[tuple[Paragraph, float]]:
"""Score all paragraphs by their probability of being about adherence.
These ranks will be used when we have to shorten prompts or where
nothing in particular seems to be much about adherence."""
async def score_paragraph(paragraph: Paragraph) -> float:
return await self.agent(self.s.backup_search_model).relevance(
question="How many participants actually received the intervention?",
context=str(paragraph),
)
scores = await map_async(paragraphs, score_paragraph)
ranked_paragraphs = {
paragraph: score
for score, paragraph in sorted(
zip(scores, paragraphs), reverse=True, key=lambda sp: sp[0]
)
}
return list(ranked_paragraphs.items())
async def top_n_paragraphs_of_possible_adherence_paragraphs(
self,
possible_adherence_paragraphs: list[Paragraph],
ranked_paragraphs: list[tuple[Paragraph, float]],
n: int,
) -> list[Paragraph]:
"""Shorten the list of paragraphs by keeping the paragraphs
most likely to be about adherence.
"""
ranked_paragraphs_dict = dict(ranked_paragraphs)
top_n_adherence_paragraphs = set(
[
paragraph
for paragraph in ranked_paragraphs_dict
if paragraph in possible_adherence_paragraphs
][:n]
)
# maintain input order
return [
paragraph
for paragraph in possible_adherence_paragraphs
if paragraph in top_n_adherence_paragraphs
]
async def adherence_paragraphs_recall(
self,
paragraphs: list[Paragraph],
record=recorder,
) -> list[Paragraph]:
"""Collect paragraphs that may be about adherence.
Err on the side of high recall.
"""
possible_adherence_paragraphs = await filter_async(
paragraphs, self.is_possibly_adherence_paragraph
)
if not possible_adherence_paragraphs:
for level in range(0, 3):
possible_adherence_paragraphs = [
paragraph
for paragraph in paragraphs
if (await adherence_regex(str(paragraph), level))
]
if possible_adherence_paragraphs:
break
ranked_paragraphs = await self.rank_paragraphs_by_adherence(paragraphs)
top_2_ranked_paragraphs = (
await self.top_n_paragraphs_of_possible_adherence_paragraphs(
paragraphs, ranked_paragraphs, 2
)
)
combined_adherence_paragraphs = [
paragraph
for paragraph in paragraphs
if paragraph in possible_adherence_paragraphs
or paragraph in top_2_ranked_paragraphs
]
NO_MORE_THAN_N_PARAGRAPHS = 6
if len(combined_adherence_paragraphs) > NO_MORE_THAN_N_PARAGRAPHS:
top_n_combined_adherence_paragraphs = (
await self.top_n_paragraphs_of_possible_adherence_paragraphs(
combined_adherence_paragraphs,
ranked_paragraphs,
NO_MORE_THAN_N_PARAGRAPHS,
)
)
record(
info=f"Found more than {NO_MORE_THAN_N_PARAGRAPHS} paragraphs, shortening by ranks",
classified_paragraphs=possible_adherence_paragraphs,
top_n_ranked=top_n_combined_adherence_paragraphs,
)
combined_adherence_paragraphs = top_n_combined_adherence_paragraphs
return combined_adherence_paragraphs
async def adherence_paragraphs(
self, paragraphs: list[Paragraph], intervention: str
) -> list[Paragraph]:
"""Collect paragraphs that may be about adherence,
combining a high-recall search with a high-precision refinement
step.
"""
possible_adherence_paragraphs = await self.adherence_paragraphs_recall(
paragraphs
)
async def is_really_adherence(paragraph: Paragraph) -> AnswerWithReasoning:
return await adherence_paragraph_classification(
intervention_classification_answer_with_reasoning,
intervention_classification_prompt,
paragraph=str(paragraph),
intervention=intervention,
)
classification_answers = await map_async(
possible_adherence_paragraphs, is_really_adherence
)
return [
paragraph
for paragraph, answer in zip(
possible_adherence_paragraphs, classification_answers
)
if answer_has_prefix(answer, "Yes") or "adherence" in str(paragraph).lower()
]
async def adherence_paragraphs_this_study(
self, paragraphs: list[Paragraph], intervention: str
) -> list[Paragraph]:
"""For paragraphs that we know are about adherence, which
are about adherence of a *related work* and not this paper?"""
really_adherence_paragraphs = await self.adherence_paragraphs(
paragraphs, intervention
)
async def is_adherence_this_study(paragraph: Paragraph) -> AnswerWithReasoning:
return await adherence_paragraph_classification(
this_or_other_classification_answer_with_reasoning,
this_or_other_study_prompt,
paragraph=str(paragraph),
intervention=intervention,
)
classification_answers = await map_async(
really_adherence_paragraphs, is_adherence_this_study
)
return [
paragraph
for paragraph, answer in zip(
really_adherence_paragraphs, classification_answers
)
if answer_has_prefix(answer, "Yes")
]
async def adherence_description(
self, paragraphs: list[Paragraph], intervention: str
) -> tuple[str, list[Paragraph]]:
"""Describe the adherence, compliance, or attrition that occurred in this study,
for this intervention."""
ranked_paragraphs = await self.rank_paragraphs_by_adherence(paragraphs)
ranked_paragraphs_dict = dict(ranked_paragraphs)
adherence_paragraphs_this_study = await self.adherence_paragraphs_this_study(
paragraphs, intervention
)
if not adherence_paragraphs_this_study:
return "Unclear", adherence_paragraphs_this_study
return (
await final_answer_with_reasoning(
[str(p) for p in adherence_paragraphs_this_study],
intervention,
[str(p) for p in list(ranked_paragraphs_dict.keys())],
)
).answer, adherence_paragraphs_this_study
async def list_experiments(
self, document_id: str, question_short_name: str = "adherence"
):
return list_experiments(
document_id=document_id, question_short_name=question_short_name
)
async def run(self, paper: Paper):
results = []
for intervention in await self.list_experiments(paper.document_id):
answer, excerpts = await self.adherence_description(
paper.paragraphs, intervention
)
recipe_result = RecipeResult(
document_id=paper.document_id,
question_short_name="adherence",
result=(answer, excerpts),
answer=answer,
excerpts=[str(excerpt) for excerpt in excerpts],
experiment=intervention,
classifcation_eq=(classification_eq_adherence,),
classification=(
"Unclear"
if not excerpts or "unclear" in answer.lower()
else "found"
),
)
results.append(recipe_result)
self.maybe_add_to_results(results)
return results
AdherenceClassification = Literal["explicit", "implicit", "missing"]
def classification_eq_adherence(
prediction: str | None,
gold: AdherenceClassification | None,
) -> bool | None:
if gold is None or gold == "implicit":
return None
if gold not in ["explicit", "missing"]:
return None
if prediction is None:
return None
if prediction.strip().lower().startswith("unclear"):
return gold == "missing"
else:
return gold == "explicit"
| [
"L",
"['PLACEHOLDER']",
"[\"Context: PLACEHOLDER\\n\\nSection: PLACEHOLDER\\n\\nAnswer yes if the following sentence is about how many participants in the study complied with the study's protocol, had to drop out, or withdrew; answer no if it is about something else, such as the study's design, sampling strategy, or results.\\n\\nSentence: PLACEHOLDER\", 'Context: PLACEHOLDER\\n\\nQuestion: Does \"PLACEHOLDER\" describe how many people eligible for the intervention actually completed it or failed to complete it?\\nOptions:\\nA. Yes, \"PLACEHOLDER\" describes how many people actually completed or failed to complete the intervention.\\nB. No, \"PLACEHOLDER\" does not describe how many people completed or failed to complete the intervention.', 'Context: PLACEHOLDER\\n\\nQuestion: Is \"PLACEHOLDER\" about the actual adherence or dropout rate of the study? True, False, or Neither?', 'Does the following sentence from a research paper describe how many participants dropped out of or withdrew from the study?\\n\\nSection: PLACEHOLDER\\nSentence: PLACEHOLDER', 'Does the following sentence from a research paper describe how many participants dropped out of or withdrew from the study?\\n\\nSection: PLACEHOLDER\\nSentence: PLACEHOLDER']"
] |
2024-01-10 | yamato0811/langchain-playground | 11-multiple-chains-original.py | from operator import itemgetter
from fastapi import FastAPI
from langserve import add_routes
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
import settings
OPEN_AI_API_KEY = settings.OPEN_AI_API_KEY
prompt1 = ChatPromptTemplate.from_messages([
("system", "you are a helpful assistant that answers questions about people's hometowns."),
("human", "{person}はどこの都市に住んでいる人ですか?結果は都市の名前のみにしてください。出力例:[city]")
])
prompt2 = ChatPromptTemplate.from_messages([
("system", "you are a helpful assistant that answers questions about people's hometowns."),
("human", "{city}はどこの国の都市ですか? 言語は{language}で答えてください。")
])
chat_model = ChatOpenAI(openai_api_key=OPEN_AI_API_KEY, model_name="gpt-3.5-turbo")
chain1 = prompt1 | chat_model | StrOutputParser()
print(chain1)
chain2 = (
{"city": chain1, "language": itemgetter("language")}
| prompt2
| chat_model
| StrOutputParser()
)
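# chain2 composes the two prompts with LCEL: chain1's city answer is piped in under the
# "city" key while "language" is taken straight from the caller's input, so a single
# invoke({"person": ..., "language": ...}) runs both steps.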
# 2. App definition
app = FastAPI(
title="LangChain Server",
version="1.0",
description="A simple API server using LangChain's Runnable interfaces",
)
# 3. Adding chain route
add_routes(
app,
chain1,
path="/chain1",
)
add_routes(
app,
chain2,
path="/chain2",
)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="localhost", port=8000) | [
"{person}はどこの都市に住んでいる人ですか?結果は都市の名前のみにしてください。出力例:[city]",
"human",
"[('system', \"you are a helpful assistant that answers questions about people's hometowns.\"), ('human', '{person}はどこの都市に住んでいる人ですか?結果は都市の名前のみにしてください。出力例:[city]')]",
"you are a helpful assistant that answers questions about people's hometowns.",
"[('system', \"you are a helpful assistant that answers questions about people's hometowns.\"), ('human', '{city}はどこの国の都市ですか? 言語は{language}で答えてください。')]",
"{city}はどこの国の都市ですか? 言語は{language}で答えてください。"
] |
2024-01-10 | yamato0811/langchain-playground | 03-output-perser.py | from typing import List
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import BaseOutputParser
import settings
OPEN_AI_API_KEY = settings.OPEN_AI_API_KEY
class CommaSeparatedListOutputParser(BaseOutputParser[List[str]]):
"""Parse the output of an LLM call to a comma-separated list."""
def parse(self, text: str) -> List[str]:
"""Parse the output of an LLM call."""
return text.strip().split(", ")
def main():
chat_model = ChatOpenAI(openai_api_key=OPEN_AI_API_KEY, model_name="gpt-3.5-turbo")
template = """You are a helpful assistant who generates comma separated lists.
A user will pass in a category, and you should generate 5 objects in that category in a comma separated list.
ONLY return a comma separated list, and nothing more."""
human_template = "{text}"
chat_prompt = ChatPromptTemplate.from_messages([
("system", template),
("human", human_template),
])
chain = chat_prompt | chat_model | CommaSeparatedListOutputParser()
output = chain.invoke({"text": "colors"})
print(output)
if __name__ == "__main__":
main() | [
"[('system', 'You are a helpful assistant who generates comma separated lists.\\n A user will pass in a category, and you should generate 5 objects in that category in a comma separated list.\\n ONLY return a comma separated list, and nothing more.'), ('human', '{text}')]",
"human",
"You are a helpful assistant who generates comma separated lists.\n A user will pass in a category, and you should generate 5 objects in that category in a comma separated list.\n ONLY return a comma separated list, and nothing more.",
"{text}"
] |
2024-01-10 | yamato0811/langchain-playground | 04-langserve.py | #!/usr/bin/env python
from typing import List
from fastapi import FastAPI
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseOutputParser
from langserve import add_routes
import settings
OPEN_AI_API_KEY = settings.OPEN_AI_API_KEY
# 1. Chain definition
class CommaSeparatedListOutputParser(BaseOutputParser[List[str]]):
"""Parse the output of an LLM call to a comma-separated list."""
def parse(self, text: str) -> List[str]:
"""Parse the output of an LLM call."""
return text.strip().split(", ")
chat_model = ChatOpenAI(openai_api_key=OPEN_AI_API_KEY, model_name="gpt-3.5-turbo")
template = """You are a helpful assistant who generates comma separated lists.
A user will pass in a category, and you should generate {num} objects in that category in a comma separated list.
ONLY return a comma separated list, and nothing more."""
human_template = "{text}"
chat_prompt = ChatPromptTemplate.from_messages([
("system", template),
("human", human_template),
])
category_chain = chat_prompt | chat_model | CommaSeparatedListOutputParser()
# 2. App definition
app = FastAPI(
title="LangChain Server",
version="1.0",
description="A simple API server using LangChain's Runnable interfaces",
)
# 3. Adding chain route
add_routes(
app,
category_chain,
path="/category_chain",
)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="localhost", port=8000) | [
"[('system', 'You are a helpful assistant who generates comma separated lists.\\nA user will pass in a category, and you should generate {num} objects in that category in a comma separated list.\\nONLY return a comma separated list, and nothing more.'), ('human', '{text}')]",
"human",
"You are a helpful assistant who generates comma separated lists.\nA user will pass in a category, and you should generate {num} objects in that category in a comma separated list.\nONLY return a comma separated list, and nothing more.",
"{text}"
] |
2024-01-10 | yamato0811/langchain-playground | 02-template.py | from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import ChatPromptTemplate
import settings
OPEN_AI_API_KEY = settings.OPEN_AI_API_KEY
def main():
chat_model = ChatOpenAI(openai_api_key=OPEN_AI_API_KEY, model_name="gpt-3.5-turbo")
template = "You are a helpful assistant that translates {input_language} to {output_language}."
human_template = "{text}"
chat_prompt = ChatPromptTemplate.from_messages([
("system", template),
("human", human_template),
])
format_message = chat_prompt.format_messages(input_language="English", output_language="French", text="I love programming.")
print(chat_model.invoke(format_message))
if __name__ == "__main__":
main() | [
"You are a helpful assistant that translates {input_language} to {output_language}.",
"[('system', 'You are a helpful assistant that translates {input_language} to {output_language}.'), ('human', '{text}')]",
"human",
"{text}"
] |
2024-01-10 | yamato0811/langchain-playground | 01-chat.py | from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
import settings
OPEN_AI_API_KEY = settings.OPEN_AI_API_KEY
def main():
chat_model = ChatOpenAI(openai_api_key=OPEN_AI_API_KEY, model_name="gpt-3.5-turbo")
text = "What would be a good company name for a company that makes colorful socks?"
messages = [HumanMessage(content=text)]
print(chat_model.invoke(messages))
if __name__ == "__main__":
main() | [
"What would be a good company name for a company that makes colorful socks?"
] |
2024-01-10 | yamato0811/langchain-playground | 11-multiple-chains.py | from operator import itemgetter
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
import settings
OPEN_AI_API_KEY = settings.OPEN_AI_API_KEY
prompt1 = ChatPromptTemplate.from_template("what is the city {person} is from?")
prompt2 = ChatPromptTemplate.from_template(
"what country is the city {city} in? respond in {language}"
)
chat_model = ChatOpenAI(openai_api_key=OPEN_AI_API_KEY, model_name="gpt-3.5-turbo")
chain1 = prompt1 | chat_model | StrOutputParser()
chain2 = (
{"city": chain1, "language": itemgetter("language")}
| prompt2
| chat_model
| StrOutputParser()
)
output = chain2.invoke({"person": "obama", "language": "日本語"})
print(output) | [
"what country is the city {city} in? respond in {language}",
"what is the city {person} is from?"
] |
2024-01-10 | AdesholaAfolabi/tempo-ai-kb | knowledge_base.py | import os
import openai
from dotenv import load_dotenv, find_dotenv
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import PyPDFLoader, Docx2txtLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
load_dotenv(find_dotenv()) # Read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
directory = 'knowledge_docs'
def load_files(files):
"""
Load and process a list of files, returning document loaders.
Args:
        files (list of str): List of file names to be loaded; paths are resolved against the module-level 'directory'.
Returns:
list: List of document loaders for supported file types (PDF, DOC, DOCX).
"""
loaders = []
for file in files:
file_path = os.path.join(directory, file)
if file_path.endswith(".pdf"):
loaders.append(PyPDFLoader(file_path))
elif file_path.endswith((".doc", ".docx")):
loaders.append(Docx2txtLoader(file_path))
return loaders
def split_docs(files):
"""
Load and split multiple documents into smaller text chunks.
Returns:
list: List of text chunks generated by splitting the loaded documents.
"""
docs = []
loaders = load_files(files)
for loader in loaders:
docs.extend(loader.load())
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=150)
splits = text_splitter.split_documents(docs)
return splits
def load_embeddings(persist_directory, files, vectordb=None):
"""
Create document embeddings and store them in a vector database.
Args:
persist_directory (str): Directory to save the embeddings and vector database.
files (list): List of file paths to process.
vectordb (Chroma, optional): Existing vector database. If provided, new embeddings will be added to it.
Returns:
Chroma: Chroma vector database containing document embeddings.
"""
splits = split_docs(files)
embedding = OpenAIEmbeddings()
if vectordb is not None and vectordb._collection.count() > 0:
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
return vectordb
else:
vectordb = Chroma.from_documents(
documents=splits,
embedding=embedding,
persist_directory=persist_directory
)
return vectordb
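# Note: when an existing, non-empty vector store is passed in, load_embeddings reloads the
# persisted collection as-is; the `files` argument is only embedded and stored when no
# populated store exists yet.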
def get_moderation(question):
"""
Check if the question is safe to ask the model.
Args:
question (str): The question to check.
Returns:
list: A list of errors if the question is not safe, otherwise returns None.
"""
errors = {
"hate": "Content that expresses, incites, or promotes hate based on various factors.",
"hate/threatening": "Hateful content that includes violence or serious harm.",
"self-harm": "Content that promotes or depicts acts of self-harm.",
"sexual": "Content meant to arouse sexual excitement or promote sexual services.",
"sexual/minors": "Sexual content that includes an individual under 18 years old.",
"violence": "Content that promotes or glorifies violence or celebrates suffering.",
"violence/graphic": "Violent content that depicts death, violence, or injury in extreme detail.",
}
response = openai.Moderation.create(input=question)
if response.results[0].flagged:
result = [
error
for category, error in errors.items()
if response.results[0].categories[category]
]
return result
return None
def doc_chat(vectordb, model_name, question, return_docs=True):
"""
Retrieve an answer to a user's question using a document-based chatbot.
Args:
vectordb (Chroma): A database containing document embeddings.
model_name (str): Name of the chatbot model.
question (str): The user's question.
return_docs (bool, optional): Whether to return source documents with the answer (default is True).
Returns:
str: The chatbot's answer to the user's question.
"""
llm = ChatOpenAI(model_name=model_name, temperature=0)
template = """Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
# QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)
# qa = RetrievalQA.from_chain_type(
# llm,
# retriever=vectordb.as_retriever(),
# return_source_documents=return_docs,
# chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
# )
memory = ConversationBufferMemory(
memory_key="chat_history",
return_messages=True
)
retriever = vectordb.as_retriever()
qa = ConversationalRetrievalChain.from_llm(
llm,
retriever=retriever,
memory=memory
)
result = qa({"question": question})
return result["answer"]
| [
"Use the following pieces of context to answer the question at the end.\n If you don't know the answer, just say that you don't know, don't try to make up an answer.\n {context}\n Question: {question}\n Helpful Answer:"
] |
2024-01-10 | AdesholaAfolabi/tempo-ai-kb | slackgpt.py | import os
import slack
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from slackeventsapi import SlackEventAdapter
from dotenv import load_dotenv, find_dotenv
from flask import Flask, request, Response
import openai
from knowledge_base import doc_chat, load_embeddings
import threading
import requests
# Load environment variables from .env file
load_dotenv(find_dotenv())
# Set OpenAI API key
openai.api_key = os.getenv('OPENAI_API_KEY')
# Define document files and directory for document embeddings
files = ["Ultimate_introduction_white_paper.pdf", "Planner-complete-guide-to-resource-planning.pdf", "roadmunk_support.pdf"]
persist_directory = "docs/chroma/"
# Create or load a vector database for document embeddings
vectordb = load_embeddings(persist_directory, files)
# Initialize the Slack client
client = slack.WebClient(token=os.getenv("SLACK_API_TOKEN"))
BOT_ID = client.api_call("auth.test")['user_id']
# Create a Flask app
app = Flask(__name__)
# Initialize SlackEventAdapter
slack_event_adapter = SlackEventAdapter(os.getenv("SIGNING_SECRET"), '/slack/events', app)
def some_processing(question, channel_id):
"""function for doing the actual work in a thread"""
response = doc_chat(vectordb, model_name="gpt-3.5-turbo", question=str(question))
client.chat_postMessage(channel=channel_id, text=response)
#return Response(), 200
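# Slack expects slash commands to be acknowledged within roughly 3 seconds, so the route
# below returns immediately and delegates the slow retrieval call to some_processing in a
# background thread, which posts the answer back via chat_postMessage when ready.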
@app.route('/ai-helper', methods=['POST'])
def ai_helper():
"""
Respond to AI-related queries in Slack.
"""
data = request.form
#user_id = data.get('user_id')
channel_id = data.get('channel_id')
question = data.get('text')
# starting a new thread for doing the actual processing
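# Slack expects slash commands to be acknowledged within ~3 seconds, so the
# slow doc_chat call runs in a background thread and the answer is posted to
# the channel afterwards via chat_postMessage.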
x = threading.Thread(
target=some_processing,
args=(question,channel_id,)
)
x.start()
return "Processing information.... please wait"
if __name__ == '__main__':
app.run(host="127.0.0.1", port=80, debug=True)
| [] |
2024-01-10 | emon1432/exam_script_evaluation_system | latex2sympy~my_openai.py | import openai
# Set your OpenAI API key
openai.api_key = "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"
# Provide a prompt and get a response
prompt = "find x of 2x + 2 = 10"
response = openai.Completion.create(
engine="davinci",
prompt=prompt,
max_tokens=5,
temperature=0.7,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["\n", "x="],
)
print(response)
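# The generated text itself sits in the first choice of the legacy Completion
# response; the two lines below are a minimal extraction sketch.
completion_text = response.choices[0].text.strip()
print(completion_text)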
| [
"find x of 2x + 2 = 10"
] |
2024-01-10 | Samwich1998/Stress-Analysis-Head | Helper%20Files~Machine%20Learning~Feedback%20Control~ChatGPT~controlChatGPT.py | # -------------------------------------------------------------------------- #
# ---------------------------- Imported Modules ---------------------------- #
# General
import os
import sys
# OpenAI
import openai
# Import Files for Machine Learning
sys.path.append(os.path.dirname(__file__) + "/Helper Files/")
import browserControl # Methods for controlling the web browser.
import imageModifications # Methods for working with and altering images.
# -------------------------------------------------------------------------- #
# ---------------------------- ChatGPT Interface --------------------------- #
class chatGPTController:
def __init__(self, userName = ""):
# General model parameters.
self.textEngine = "gpt-4-0613" # See text models at https://platform.openai.com/docs/models/gpt-4
self.userName = userName  # A unique username for the client. Not used for personalization.
# Set up the OpenAI API client
openai.api_key = "".join("s k - k s OqP6JXLjX A Mqmi2G JlT3Blbk FJltwvFxs YE4A Mw8KA Lpu6".split(" "))
# Instantiate necessary classes.
self.browserController = browserControl.browserControl()
# ---------------------------------------------------------------------- #
# -------------------------- General Methods --------------------------- #
def displayImage(self, response):
# Get the image URL.
image_url = self.getImageURL(response)
# Open the image URL with the webdriver.
self.browserController.open_url(image_url)
def printModels(self):
print(openai.Model.list())
def getImageURL(self, response):
return response['data'][0]['url']
# ---------------------------------------------------------------------- #
# ---------------------------- Text Methods ---------------------------- #
def getTextReponse(self, textPrompt):
# Generate a response
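# Note: "gpt-4-0613" (self.textEngine) is a chat model, which the legacy
# Completion endpoint rejects; openai.ChatCompletion.create would be needed
# to use it here.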
completion = openai.Completion.create(
engine=self.textEngine,
prompt=textPrompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
response = completion.choices[0].text
return response
# ---------------------------------------------------------------------- #
# --------------------------- Image Methods ---------------------------- #
def getImageResponse(self, textPrompt):
# Assert the proper data format.
assert len(textPrompt) <= 1000, f"The maximum length is 1000 characters for text. Given {len(textPrompt)} characters"
assert isinstance(textPrompt, str), f"Expecting the text prompt to be a string. Given type {type(textPrompt)}. Value: {textPrompt}"
# Interface with chatGPT API.
response = openai.Image.create(
response_format = "url", # The format in which the generated images are returned. Must be one of url or b64_json.
prompt=textPrompt, # A text description of the desired image(s). The maximum length is 1000 characters.
size="1024x1024", # The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
user = self.userName, # A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
n=1, # The number of images to generate. Must be between 1 and 10.
)
return response
def varyImageResponse(self, image, mask, textPrompt):
response = openai.Image.create_edit(
response_format = "url", # The format in which the generated images are returned. Must be one of url or b64_json.
user = self.userName, # A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
prompt=textPrompt, # A text description of the desired image(s). The maximum length is 1000 characters.
size="1024x1024", # The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
image=image, # The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
mask=mask, # An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
n=1, # The number of images to generate. Must be between 1 and 10.
)
return response
# ---------------------------------------------------------------------- #
# -------------------------------------------------------------------------- #
if __name__ == "__main__":
# Instantiate class.
gptController = chatGPTController()
imageController = imageModifications.imageModifications(os.path.dirname(__file__) + "/_savedImages/")
prompts = [
"I have a state anxiety score of 60 out of 80, a postive affectivity score of 10 out of 25, and a negative affectivity score of 18. Generate an image that will reduce my anxiety GIVEN the anxiety scores I have told you. For example, you can display a lovely mountain range that is peaceful and transquil, use your judgement.",
"After your last image, my STAI state anxiety (20 - 80) score went from 60 to 80 out of 80, my postive affectivity score went from 10 to 15 out of 25, and my negative affectivity score went from 18 to 20 out of 25. Generate an image that will reduce my anxiety GIVEN the anxiety scores I have told you. For example, you can display a lovely mountain range that is peaceful and transquil, use your judgement.",
"After your last image, my STAI state anxiety (20 - 80) score went from 80 to 50 out of 80, my postive affectivity score went from 15 to 14 out of 25, and my negative affectivity score went from 20 to 15 out of 25. Generate an image that will reduce my anxiety GIVEN the anxiety scores I have told you. For example, you can display a lovely mountain range that is peaceful and transquil, use your judgement.",
]
prompts = [
# "Generate a calming image of a realistic beautiful beach.",
# "Display a calming image of a realistic outdoor view of a snowy oasis on christmas night.",
"Display a calming image of a realistic indoor view of a japenese zen house with a firepit, a koi pond, and the jungle.",
]
# Flags for which programs to run.
displayPromptedImages = False
editPromptedImage = True
# ---------------------------------------------------------------------- #
# --------------------- Generate Images for Display -------------------- #
if displayPromptedImages:
# For each prompt.
for prompt in prompts:
# Ask chatGPT to generate an image response.
response = gptController.getImageResponse(prompt)
gptController.displayImage(response)
# ---------------------------------------------------------------------- #
# ------------------------ Edit a Prompted Image ----------------------- #
if editPromptedImage:
# Ask chatGPT to generate an image response.
initialResponse = gptController.getImageResponse(prompts[0])
gptController.displayImage(initialResponse)
# Get the image content from the URL.
image_url = gptController.getImageURL(initialResponse)
imageRGBA = imageController.pullDownWebImage(image_url) # Download the chatGPT image and convert it to RGBA.
# Make a mask for the image.
imageMaskRGBA = imageController.make_top_half_translucent(imageRGBA)
# imageMaskRGBA = imageController.remove_hex_color(imageRGBA, "#FFFFFF")
# imageMaskRGBA = imageController.remove_similar_colors(imageRGBA, "#FFFFFF", tolerance = 250)
# Convert the images into the correct chatGPT format.
imageMask = imageController.rbga2ByteArray(imageMaskRGBA)
imageByteArray = imageController.rbga2ByteArray(imageRGBA)
# Regenerate the image with the mask filled in.
finalResponse = gptController.varyImageResponse(imageByteArray, imageMask, prompts[0])
gptController.displayImage(finalResponse)
| [
"['Display a calming image of a realistic indoor view of a japenese zen house with a firepit, a koi pond, and the jungle.']",
"['I have a state anxiety score of 60 out of 80, a postive affectivity score of 10 out of 25, and a negative affectivity score of 18. Generate an image that will reduce my anxiety GIVEN the anxiety scores I have told you. For example, you can display a lovely mountain range that is peaceful and transquil, use your judgement.', 'After your last image, my STAI state anxiety (20 - 80) score went from 60 to 80 out of 80, my postive affectivity score went from 10 to 15 out of 25, and my negative affectivity score went from 18 to 20 out of 25. Generate an image that will reduce my anxiety GIVEN the anxiety scores I have told you. For example, you can display a lovely mountain range that is peaceful and transquil, use your judgement.', 'After your last image, my STAI state anxiety (20 - 80) score went from 80 to 50 out of 80, my postive affectivity score went from 15 to 14 out of 25, and my negative affectivity score went from 20 to 15 out of 25. Generate an image that will reduce my anxiety GIVEN the anxiety scores I have told you. For example, you can display a lovely mountain range that is peaceful and transquil, use your judgement.']",
"False",
"True"
] |
2024-01-10 | Lcmalagon/ChatbotAGB | .ipynb_checkpoints~testapp3-checkpoint.py | # import streamlit as st
# from langchain.embeddings.openai import OpenAIEmbeddings
# from langchain.chat_models import ChatOpenAI
# from langchain.chains import ConversationalRetrievalChain
# from langchain.document_loaders.csv_loader import CSVLoader
# from langchain.vectorstores import FAISS
# import tempfile
# import os
# # Set API key for OpenAI
# os.environ["OPENAI_API_KEY"] = "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"
# uploaded_file = st.sidebar.file_uploader("Upload", type="csv")
# if uploaded_file:
# print("Uploaded file detected.")
# # Use tempfile because CSVLoader only accepts a file_path
# with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
# tmp_file.write(uploaded_file.getvalue())
# tmp_file_path = tmp_file.name
# print(f"Temporary file path: {tmp_file_path}")
# loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8", csv_args={'delimiter': ','})
# data = loader.load()
# print(f"Number of documents loaded: {len(data)}")
# else:
# # Handle the case when no file is uploaded
# data = None
# print("No file uploaded.")
# if data is not None:
# embeddings = OpenAIEmbeddings()
# print("Embeddings created.")
# vectorstore = FAISS.from_documents(data, embeddings)
# print("Vectorstore created.")
# chain = ConversationalRetrievalChain.from_llm(
# llm=ChatOpenAI(temperature=0.0, model_name='gpt-4-1106-preview'),
# retriever=vectorstore.as_retriever()
# )
# print("Conversational retrieval chain created.")
# else:
# vectorstore = None
# chain = None
# print("Data is None, skipping embeddings, vectorstore, and chain creation.")
# def conversational_chat(query):
# print(f"Received query: {query}")
# if chain is not None:
# result = chain({"question": query, "chat_history": st.session_state['history']})
# print(f"Result from chain: {result}")
# st.session_state['history'].append((query, result["answer"]))
# return result["answer"]
# else:
# print("Chain is None, returning default message.")
# return "Sorry, no data is available."
# if 'history' not in st.session_state:
# st.session_state['history'] = []
# print("Initialized 'history' in session state.")
# if 'generated' not in st.session_state:
# if uploaded_file is not None:
# file_name = uploaded_file.name
# print(f"Uploaded file name: {file_name}")
# else:
# file_name = "the data" # Default text when no file is uploaded
# print("No file uploaded, using default file name.")
# st.session_state['generated'] = ["Hello! Ask me anything about " + file_name + " 🤗"]
# if 'past' not in st.session_state:
# st.session_state['past'] = ["Hey! 👋"]
# response_container = st.container()
# container = st.container()
# with container:
# with st.form(key='my_form', clear_on_submit=True):
# user_input = st.text_input("Query:", placeholder="Ask questions about your alumni database here", key='input')
# submit_button = st.form_submit_button(label='Send')
# if submit_button and user_input:
# print(f"User input submitted: {user_input}")
# output = conversational_chat(user_input)
# print(f"Output generated: {output}")
# st.session_state['past'].append(user_input)
# st.session_state['generated'].append(output)
# # Display the chat history
# with response_container:
# for past_input, generated_output in zip(st.session_state['past'], st.session_state['generated']):
# st.text(past_input)
# st.text(generated_output)
import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
import tempfile
import os
# Set API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
uploaded_file = st.sidebar.file_uploader("Upload", type="csv")
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Chat AGB")
st.markdown("---")
# Description
st.markdown(
"""
<h5 style='text-align:center;'>I'm your Alumni Database Chatbot, powered by LangChain and Streamlit.
I leverage advanced language models to provide insightful answers from your alumni database.
Upload a CSV file and ask me questions about the alumni data.</h5>
""",
unsafe_allow_html=True)
st.markdown("---")
if uploaded_file:
print("Uploaded file detected.")
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(uploaded_file.getvalue())
tmp_file_path = tmp_file.name
print(f"Temporary file path: {tmp_file_path}")
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8", csv_args={'delimiter': ','})
data = loader.load()
print(f"Number of documents loaded: {len(data)}")
else:
data = None
print("No file uploaded.")
if data is not None:
embeddings = OpenAIEmbeddings()
print("Embeddings created.")
vectorstore = FAISS.from_documents(data, embeddings)
print("Vectorstore created.")
chain = ConversationalRetrievalChain.from_llm(
llm=ChatOpenAI(temperature=0.0, model_name='gpt-4'),
retriever=vectorstore.as_retriever()
)
print("Conversational retrieval chain created.")
else:
vectorstore = None
chain = None
print("Data is None, skipping embeddings, vectorstore, and chain creation.")
def conversational_chat(query):
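# chat_history is a list of (question, answer) tuples; ConversationalRetrievalChain
# condenses them together with the new question into a standalone query before
# retrieving from the vector store.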
print(f"Received query: {query}")
if chain is not None:
result = chain({"question": query, "chat_history": st.session_state['history']})
print(f"Result from chain: {result}")
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
else:
print("Chain is None, returning default message.")
return "Sorry, no data is available."
if 'history' not in st.session_state:
st.session_state['history'] = []
response_container = st.container()
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Ask questions about your alumni database here", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
print(f"User input submitted: {user_input}")
output = conversational_chat(user_input)
print(f"Output generated: {output}")
# Append both user input and chatbot response to chat history
st.session_state['history'].append(("Question", user_input))
st.session_state['history'].append(("Answer", output))
# Display the chat history in the Streamlit UI
with response_container:
for role, message in st.session_state['history']:
if role == "Question":
st.markdown(f"**Question:** {message}")
elif role == "Answer":
st.markdown(f"**Answer:** {message}")
| [] |
2024-01-10 | Lcmalagon/ChatbotAGB | .ipynb_checkpoints~ChatAGBpub-checkpoint.py | import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
import tempfile
import os
# Set API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
uploaded_file = st.sidebar.file_uploader("Upload", type="csv")
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Chat AGB")
st.markdown("---")
# Description
st.markdown(
"""
<h5 style='text-align:center;'>I'm your Alumni Database Chatbot, powered by LangChain and Streamlit.
I leverage advanced language models to provide insightful answers from your alumni database.
Upload a CSV file and ask me questions about the alumni data.</h5>
""",
unsafe_allow_html=True)
st.markdown("---")
if uploaded_file:
print("Uploaded file detected.")
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(uploaded_file.getvalue())
tmp_file_path = tmp_file.name
print(f"Temporary file path: {tmp_file_path}")
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8", csv_args={'delimiter': ','})
data = loader.load()
print(f"Number of documents loaded: {len(data)}")
else:
data = None
print("No file uploaded.")
if data is not None:
embeddings = OpenAIEmbeddings()
print("Embeddings created.")
vectorstore = FAISS.from_documents(data, embeddings)
print("Vectorstore created.")
chain = ConversationalRetrievalChain.from_llm(
llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo'),
retriever=vectorstore.as_retriever()
)
print("Conversational retrieval chain created.")
else:
vectorstore = None
chain = None
print("Data is None, skipping embeddings, vectorstore, and chain creation.")
def conversational_chat(query):
print(f"Received query: {query}")
if chain is not None:
result = chain({"question": query, "chat_history": st.session_state['history']})
print(f"Result from chain: {result}")
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
else:
print("Chain is None, returning default message.")
return "Sorry, no data is available."
if 'history' not in st.session_state:
st.session_state['history'] = []
response_container = st.container()
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Ask questions about your alumni database here", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
print(f"User input submitted: {user_input}")
output = conversational_chat(user_input)
print(f"Output generated: {output}")
# Append both user input and chatbot response to chat history
st.session_state['history'].append(("Question", user_input))
st.session_state['history'].append(("Answer", output))
# Display the chat history in the Streamlit UI
with response_container:
for role, message in st.session_state['history']:
if role == "Question":
st.markdown(f"**Question:** {message}")
elif role == "Answer":
st.markdown(f"**Answer:** {message}")
| [] |
2024-01-10 | Lcmalagon/ChatbotAGB | ChatAGBpub%20(1)%20(1).py | import streamlit as st
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
import os
import tempfile
# Set the API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
# Function to save the uploaded file to a temporary file
def save_uploaded_file(uploaded_file):
try:
with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file:
tmp_file.write(uploaded_file.getvalue())
return tmp_file.name
except Exception as e:
st.error('Error saving uploaded file.')
return None
# Function to load and index the CSV data
@st.cache_resource
def load_and_index_data(file_path):
# Load the documents
    loader = CSVLoader(file_path=file_path)
# Create an index using the loaded documents
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([loader])
# Create a question-answering chain using the index
# chain = RetrievalQA.from_chain_type(llm=OpenAI(model_name="gpt-4-1106-preview"), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
llm = ChatOpenAI(model_name="gpt-4-1106-preview")
chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
return chain
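# Minimal usage sketch for querying the chain outside Streamlit; the CSV path
# and question below are placeholders.
def _chain_example():
    chain = load_and_index_data("alumni.csv")
    print(chain({"question": "How many alumni are in the dataset?"})["result"])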
# Initialize the chatbot chain in the session state
if "chain" not in st.session_state:
st.session_state['chain'] = None
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Alumni Database Chatbot")
file_upload = st.sidebar.file_uploader("Please Upload the Alumni CSV", type=['csv'])
# If a file is uploaded, save it and load/index the data
if file_upload is not None:
saved_file_path = save_uploaded_file(file_upload)
if saved_file_path:
st.session_state.chain = load_and_index_data(saved_file_path)
# Chat UI
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you with the alumni database?"}]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Handling form submission
if st.session_state['chain']:
with st.form(key='question_form'):
user_input = st.text_input("Ask a question about the alumni:", key="user_input")
submit_button = st.form_submit_button("Submit")
if submit_button and user_input:
# Get the response from the chatbot
response = st.session_state.chain({"question": user_input})
# Use 'result' to access the answer
if 'result' in response:
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": response['result']})
else:
# Handle the case where 'result' is not in the response
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": "Sorry, I could not find an answer."}) | [
"How can I help you with the alumni database?",
"Sorry, I could not find an answer."
] |
2024-01-10 | Lcmalagon/ChatbotAGB | testapp1.py | import streamlit as st
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
import time
import os
import tempfile
# Set the API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
# Function to save the uploaded file to a temporary file
def save_uploaded_file(uploaded_file):
try:
with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file:
tmp_file.write(uploaded_file.getvalue())
saved_path = tmp_file.name
print(f"File saved successfully at {saved_path}")
return saved_path
except Exception as e:
st.error('Error saving uploaded file.')
print(f"Error saving file: {e}")
return None
# Function to load and index the CSV data
@st.cache_resource
def load_and_index_data(file_path):
start_time = time.time()
print("Loading CSV...")
loader = CSVLoader(file_path=file_path)
load_time = time.time()
print(f"CSV Loaded in {load_time - start_time} seconds")
print("Creating Index...")
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([loader])
index_time = time.time()
print(f"Index Created in {index_time - load_time} seconds")
print("Creating Chain with GPT-4-1106-preview model...")
llm = ChatOpenAI(model_name="gpt-4-1106-preview")
chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
chain_time = time.time()
print(f"Chain Created in {chain_time - index_time} seconds")
total_time = time.time()
print(f"Total load_and_index_data time: {total_time - start_time} seconds")
return chain
# Initialize the chatbot chain in the session state
if "chain" not in st.session_state:
st.session_state['chain'] = None
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Alumni Database Chatbot")
file_upload = st.sidebar.file_uploader("Please Upload the Alumni CSV", type=['csv'])
# If a file is uploaded, save it and load/index the data
if file_upload is not None:
saved_file_path = save_uploaded_file(file_upload)
print(f"Uploaded file saved at {saved_file_path}")
if "last_file_path" not in st.session_state or st.session_state.last_file_path != saved_file_path:
print("Initializing new chain with uploaded file...")
st.session_state.chain = load_and_index_data(saved_file_path)
st.session_state.last_file_path = saved_file_path
print("Chain initialized successfully.")
else:
print("Using existing chain.")
# Chat UI
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you with the alumni database?"}]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Handling form submission
if st.session_state.get('chain'):
with st.form(key='question_form'):
user_input = st.text_input("Ask a question about the alumni:", key="user_input")
submit_button = st.form_submit_button("Submit")
if submit_button and user_input:
print(f"User input: {user_input}")
# Get the response from the chatbot
response = st.session_state.chain({"question": user_input})
print(f"Chatbot response: {response}")
# Use 'result' to access the answer
if 'result' in response:
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": response['result']})
else:
# Handle the case where 'result' is not in the response
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": "Sorry, I could not find an answer."})
| [
"How can I help you with the alumni database?",
"Sorry, I could not find an answer."
] |
2024-01-10 | Lcmalagon/ChatbotAGB | ChatAGBpub%20(3).py | import streamlit as st
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
import os
import tempfile
# Set the API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
# Function to save the uploaded file to a temporary file
def save_uploaded_file(uploaded_file):
try:
with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file:
tmp_file.write(uploaded_file.getvalue())
return tmp_file.name
except Exception as e:
st.error('Error saving uploaded file.')
return None
# Function to load and index the CSV data
@st.cache_resource
def load_and_index_data(file_path):
# Load the documents
loader = CSVLoader(file_path=file_path)
# Create an index using the loaded documents
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([loader])
# Create a question-answering chain using the index
# chain = RetrievalQA.from_chain_type(llm=OpenAI(model_name="gpt-4-1106-preview"), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
llm = ChatOpenAI(model_name="gpt-4-1106-preview")
chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
return chain
# Initialize the chatbot chain in the session state
if "chain" not in st.session_state:
st.session_state['chain'] = None
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Alumni Database Chatbot")
file_upload = st.sidebar.file_uploader("Please Upload the Alumni CSV", type=['csv'])
# If a file is uploaded, save it and load/index the data
if file_upload is not None:
saved_file_path = save_uploaded_file(file_upload)
if saved_file_path:
st.session_state.chain = load_and_index_data(saved_file_path)
# Chat UI
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you with the alumni database?"}]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Handling form submission
if st.session_state['chain']:
with st.form(key='question_form'):
user_input = st.text_input("Ask a question about the alumni:", key="user_input")
submit_button = st.form_submit_button("Submit")
if submit_button and user_input:
# Get the response from the chatbot
response = st.session_state.chain({"question": user_input})
# Use 'result' to access the answer
if 'result' in response:
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": response['result']})
else:
# Handle the case where 'result' is not in the response
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": "Sorry, I could not find an answer."}) | [
"How can I help you with the alumni database?",
"Sorry, I could not find an answer."
] |
2024-01-10 | Lcmalagon/ChatbotAGB | ChatAGBpub%20(4).py | import streamlit as st
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
import os
import tempfile
# Set the API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
# Function to save the uploaded file to a temporary file
@st.cache_resource
def save_uploaded_file(uploaded_file):
try:
with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file:
tmp_file.write(uploaded_file.getvalue())
return tmp_file.name
except Exception as e:
st.error('Error saving uploaded file.')
return None
# Function to load and index the CSV data
def load_and_index_data(file_path):
# Load the documents
loader = CSVLoader(file_path=file_path)
# Create an index using the loaded documents
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([loader])
# Create a question-answering chain using the index
# chain = RetrievalQA.from_chain_type(llm=OpenAI(model_name="gpt-4-1106-preview"), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
llm = ChatOpenAI(model_name="gpt-4-1106-preview")
chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
return chain
# Initialize the chatbot chain in the session state
if "chain" not in st.session_state:
st.session_state['chain'] = None
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Alumni Database Chatbot")
file_upload = st.sidebar.file_uploader("Please Upload the Alumni CSV", type=['csv'])
# If a file is uploaded, save it and load/index the data
saved_file_path = None
if file_upload is not None and saved_file_path is None:
saved_file_path = save_uploaded_file(file_upload)
print(saved_file_path)
if saved_file_path:
st.session_state.chain = load_and_index_data(saved_file_path)
# Chat UI
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you with the alumni database?"}]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Handling form submission
if st.session_state['chain']:
with st.form(key='question_form'):
user_input = st.text_input("Ask a question about the alumni:", key="user_input")
submit_button = st.form_submit_button("Submit")
if submit_button and user_input:
# Get the response from the chatbot
response = st.session_state.chain({"question": user_input})
# Use 'result' to access the answer
if 'result' in response:
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": response['result']})
else:
# Handle the case where 'result' is not in the response
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": "Sorry, I could not find an answer."}) | [
"How can I help you with the alumni database?",
"Sorry, I could not find an answer."
] |
2024-01-10 | Lcmalagon/ChatbotAGB | ChatAGBold.py | import streamlit as st
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
import os
import tempfile
# Set the API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
# Function to save the uploaded file to a temporary file
def save_uploaded_file(uploaded_file):
try:
with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file:
tmp_file.write(uploaded_file.getvalue())
return tmp_file.name
except Exception as e:
st.error('Error saving uploaded file.')
return None
# Function to load and index the CSV data
@st.cache_resource
def load_and_index_data(file_path):
# Load the documents from CSV
loader = CSVLoader(file_path=file_path)
# Create an index using the loaded documents
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([loader])
# Create a question-answering chain using the index
chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
return chain
# Initialize the chatbot chain in the session state
if "chain" not in st.session_state:
st.session_state['chain'] = None
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Alumni Database Chatbot")
file_upload = st.sidebar.file_uploader("Please Upload the Alumni CSV", type=['csv'])
# If a file is uploaded, save it and load/index the data
if file_upload is not None:
saved_file_path = save_uploaded_file(file_upload)
if saved_file_path:
st.session_state.chain = load_and_index_data(saved_file_path)
# Chat UI
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you with the alumni database?"}]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if st.session_state['chain']:
user_input = st.text_input("Ask a question about the alumni:", key="user_input")
submit_button = st.button("Submit")
if submit_button and user_input:
# Get the response from the chatbot
response = st.session_state.chain({"question": user_input})
# Use 'result' to access the answer
if 'result' in response:
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": response['result']})
else:
# Handle the case where 'result' is not in the response
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": "Sorry, I could not find an answer."})
| [
"How can I help you with the alumni database?",
"Sorry, I could not find an answer."
] |
2024-01-10 | Lcmalagon/ChatbotAGB | testapp3.py | import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
import tempfile
import os
# Set API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
uploaded_file = st.sidebar.file_uploader("Upload", type="csv")
if uploaded_file:
print("Uploaded file detected.")
# Use tempfile because CSVLoader only accepts a file_path
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(uploaded_file.getvalue())
tmp_file_path = tmp_file.name
print(f"Temporary file path: {tmp_file_path}")
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8", csv_args={'delimiter': ','})
data = loader.load()
print(f"Number of documents loaded: {len(data)}")
else:
# Handle the case when no file is uploaded
data = None
print("No file uploaded.")
if data is not None:
embeddings = OpenAIEmbeddings()
print("Embeddings created.")
vectorstore = FAISS.from_documents(data, embeddings)
print("Vectorstore created.")
chain = ConversationalRetrievalChain.from_llm(
llm=ChatOpenAI(temperature=0.0, model_name='gpt-4-1106-preview'),
retriever=vectorstore.as_retriever()
)
print("Conversational retrieval chain created.")
else:
vectorstore = None
chain = None
print("Data is None, skipping embeddings, vectorstore, and chain creation.")
def conversational_chat(query):
print(f"Received query: {query}")
if chain is not None:
result = chain({"question": query, "chat_history": st.session_state['history']})
print(f"Result from chain: {result}")
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
else:
print("Chain is None, returning default message.")
return "Sorry, no data is available."
if 'history' not in st.session_state:
st.session_state['history'] = []
print("Initialized 'history' in session state.")
if 'generated' not in st.session_state:
if uploaded_file is not None:
file_name = uploaded_file.name
print(f"Uploaded file name: {file_name}")
else:
file_name = "the data" # Default text when no file is uploaded
print("No file uploaded, using default file name.")
st.session_state['generated'] = ["Hello! Ask me anything about " + file_name + " 🤗"]
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey! 👋"]
response_container = st.container()
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Ask questions about your alumni database here", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
print(f"User input submitted: {user_input}")
output = conversational_chat(user_input)
print(f"Output generated: {output}")
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
# Display the chat history
with response_container:
for past_input, generated_output in zip(st.session_state['past'], st.session_state['generated']):
st.text(past_input)
st.text(generated_output)
| [] |
2024-01-10 | Lcmalagon/ChatbotAGB | testapp2.py | import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
import tempfile
import os
# Set API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
uploaded_file = st.sidebar.file_uploader("Upload", type="csv")
if uploaded_file:
print("Uploaded file detected.")
# Use tempfile because CSVLoader only accepts a file_path
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(uploaded_file.getvalue())
tmp_file_path = tmp_file.name
print(f"Temporary file path: {tmp_file_path}")
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8", csv_args={'delimiter': ','})
data = loader.load()
print(f"Number of documents loaded: {len(data)}")
else:
# Handle the case when no file is uploaded
data = None
print("No file uploaded.")
if data is not None:
embeddings = OpenAIEmbeddings()
print("Embeddings created.")
vectorstore = FAISS.from_documents(data, embeddings)
print("Vectorstore created.")
chain = ConversationalRetrievalChain.from_llm(
llm=ChatOpenAI(temperature=0.0, model_name='gpt-4-1106-preview'),
retriever=vectorstore.as_retriever()
)
print("Conversational retrieval chain created.")
else:
vectorstore = None
chain = None
print("Data is None, skipping embeddings, vectorstore, and chain creation.")
def conversational_chat(query):
print(f"Received query: {query}")
if chain is not None:
result = chain({"question": query, "chat_history": st.session_state['history']})
print(f"Result from chain: {result}")
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
else:
print("Chain is None, returning default message.")
return "Sorry, no data is available."
if 'history' not in st.session_state:
st.session_state['history'] = []
print("Initialized 'history' in session state.")
if 'generated' not in st.session_state:
if uploaded_file is not None:
file_name = uploaded_file.name
print(f"Uploaded file name: {file_name}")
else:
file_name = "the data" # Default text when no file is uploaded
print("No file uploaded, using default file name.")
st.session_state['generated'] = ["Hello! Ask me anything about " + file_name + " 🤗"]
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey! 👋"]
response_container = st.container()
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Ask questions about your alumni database here", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
print(f"User input submitted: {user_input}")
output = conversational_chat(user_input)
print(f"Output generated: {output}")
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
| [] |
2024-01-10 | Lcmalagon/ChatbotAGB | ChatAGB1.py | import streamlit as st
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
import os
import tempfile
# Set the API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
# Function to save the uploaded file to a temporary file
def save_uploaded_file(uploaded_file):
try:
with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file:
tmp_file.write(uploaded_file.getvalue())
return tmp_file.name
except Exception as e:
st.error('Error saving uploaded file.')
return None
# Function to load and index the CSV data
@st.cache_resource
def load_and_index_data(file_path):
# Load the documents from CSV
loader = CSVLoader(file_path=file_path)
# Create an index using the loaded documents
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([loader])
# Create a question-answering chain using the index
chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
return chain
# Initialize the chatbot chain in the session state
if "chain" not in st.session_state:
st.session_state['chain'] = None
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Alumni Database Chatbot")
file_upload = st.sidebar.file_uploader("Please Upload the Alumni CSV", type=['csv'])
# If a file is uploaded, save it and load/index the data
if file_upload is not None:
saved_file_path = save_uploaded_file(file_upload)
if saved_file_path:
st.session_state.chain = load_and_index_data(saved_file_path)
# Chat UI
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you with the alumni database?"}]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if st.session_state['chain']:
user_input = st.text_input("Ask a question about the alumni:", key="user_input")
submit_button = st.button("Submit")
if submit_button and user_input:
# Get the response from the chatbot
response = st.session_state.chain({"question": user_input})
# Use 'result' to access the answer
if 'result' in response:
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": response['result']})
else:
# Handle the case where 'result' is not in the response
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": "Sorry, I could not find an answer."})
| [
"How can I help you with the alumni database?",
"Sorry, I could not find an answer."
] |
2024-01-10 | Lcmalagon/ChatbotAGB | ChatAGBpub.py | import streamlit as st
<<<<<<< HEAD
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
import os
import tempfile
# Set the API key for OpenAI
os.environ["OPENAI_API_KEY"] = "your chatgpt api here"
# Function to save the uploaded file to a temporary file
def save_uploaded_file(uploaded_file):
try:
with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file:
tmp_file.write(uploaded_file.getvalue())
return tmp_file.name
except Exception as e:
st.error('Error saving uploaded file.')
return None
# Function to load and index the CSV data
@st.cache_resource
def load_and_index_data(file_path):
# Load the documents from CSV
loader = CSVLoader(file_path=file_path)
# Create an index using the loaded documents
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([loader])
# Create a question-answering chain using the index
chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
return chain
# Initialize the chatbot chain in the session state
if "chain" not in st.session_state:
st.session_state['chain'] = None
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Alumni Database Chatbot")
file_upload = st.sidebar.file_uploader("Please Upload the Alumni CSV", type=['csv'])
# If a file is uploaded, save it and load/index the data
if file_upload is not None:
saved_file_path = save_uploaded_file(file_upload)
if saved_file_path:
st.session_state.chain = load_and_index_data(saved_file_path)
# Chat UI
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you with the alumni database?"}]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Handling form submission
if st.session_state['chain']:
with st.form(key='question_form'):
user_input = st.text_input("Ask a question about the alumni:", key="user_input")
submit_button = st.form_submit_button("Submit")
if submit_button and user_input:
# Get the response from the chatbot
response = st.session_state.chain({"question": user_input})
# Use 'result' to access the answer
if 'result' in response:
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": response['result']})
else:
# Handle the case where 'result' is not in the response
st.session_state.messages.append({"role": "user", "content": user_input})
st.session_state.messages.append({"role": "assistant", "content": "Sorry, I could not find an answer."})
=======
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
import tempfile
import os
# Set API key for OpenAI
os.environ["OPENAI_API_KEY"] = "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"
uploaded_file = st.sidebar.file_uploader("Upload", type="csv")
# Streamlit UI for uploading the CSV and initializing the chatbot
st.title("Chat AGB")
st.markdown("---")
# Description
st.markdown(
"""
<h5 style='text-align:center;'>I'm your Alumni Database Chatbot, powered by LangChain and Streamlit.
I leverage advanced language models to provide insightful answers from your alumni database.
Upload a CSV file and ask me questions about the alumni data.</h5>
""",
unsafe_allow_html=True)
st.markdown("---")
if uploaded_file:
print("Uploaded file detected.")
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(uploaded_file.getvalue())
tmp_file_path = tmp_file.name
print(f"Temporary file path: {tmp_file_path}")
loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8", csv_args={'delimiter': ','})
data = loader.load()
print(f"Number of documents loaded: {len(data)}")
else:
data = None
print("No file uploaded.")
if data is not None:
embeddings = OpenAIEmbeddings()
print("Embeddings created.")
vectorstore = FAISS.from_documents(data, embeddings)
print("Vectorstore created.")
chain = ConversationalRetrievalChain.from_llm(
llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo'),
retriever=vectorstore.as_retriever()
)
print("Conversational retrieval chain created.")
else:
vectorstore = None
chain = None
print("Data is None, skipping embeddings, vectorstore, and chain creation.")
def conversational_chat(query):
print(f"Received query: {query}")
if chain is not None:
result = chain({"question": query, "chat_history": st.session_state['history']})
print(f"Result from chain: {result}")
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
else:
print("Chain is None, returning default message.")
return "Sorry, no data is available."
if 'history' not in st.session_state:
st.session_state['history'] = []
response_container = st.container()
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Query:", placeholder="Ask questions about your alumni database here", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
print(f"User input submitted: {user_input}")
output = conversational_chat(user_input)
print(f"Output generated: {output}")
# Append both user input and chatbot response to chat history
st.session_state['history'].append(("Question", user_input))
st.session_state['history'].append(("Answer", output))
# Display the chat history in the Streamlit UI
with response_container:
for role, message in st.session_state['history']:
if role == "Question":
st.markdown(f"**Question:** {message}")
elif role == "Answer":
st.markdown(f"**Answer:** {message}")
>>>>>>> 4079ef4 (Initial commit)
| [
"How can I help you with the alumni database?",
"Sorry, I could not find an answer."
] |
2024-01-10 | frankplus/coronavirus-irc-bot | lib~apis.py | import requests
import datetime
from urllib.parse import urlparse, parse_qs, urlencode
from config import *
from lib.utils import json_request_get, http_request_get, http_request_post
import pypandoc
from bs4 import BeautifulSoup
import miniflux
import urllib
import emoji
import logging
import twitter
from openai import OpenAI
logging.getLogger().setLevel(logging.DEBUG)
class Chatbot:
def __init__(self):
self.client = OpenAI(api_key=CHATGPT_KEY)
def elaborate_query(self, conversation):
system_message = f"Sei un amico di nome {BOTNAME}. Le tue risposte sono brevi ma divertenti."
try:
response = self.client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": system_message}] + conversation
)
return response.choices[0].message.content
except Exception as e:
logging.error(f"Failed to send request to chatgpt: {e}")
class Miniflux:
def __init__(self):
self.client = miniflux.Client(MINIFLUX_URL, MINIFLUX_USER, MINIFLUX_PSW)
def get_new_entries(self, limit = 1):
try:
entries = self.client.get_entries(status="unread", limit=limit)["entries"]
except miniflux.ClientError as err:
logging.error("miniflux client error: {}".format(err.get_error_reason()))
return None
except:
logging.exception("Unexpected error getting RSS entries")
return None
response = ""
for entry in entries:
try:
publish_date = datetime.datetime.strptime(entry["published_at"], "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
publish_date = datetime.datetime.strptime(entry["published_at"], "%Y-%m-%dT%H:%M:%SZ")
publish_date = publish_date.strftime("%Y-%m-%d")
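# \x03 starts an mIRC colour code (03 = green) and a bare \x03 resets it;
# \x02 toggles bold, so the arrow below renders in bold on IRC clients.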
response += "\x0303[miniflux]\x03 {} {} on {} \x02→\x02 {} \n".format(entry["url"], entry["author"], publish_date, entry["title"])
# mark entries as read
if entries:
entry_ids = [entry["id"] for entry in entries]
self.client.update_entries(entry_ids, status="read")
return response
def get_latest_news(query = None):
if query:
url = 'http://newsapi.org/v2/everything?q={}&sortBy=relevancy&apiKey={}'.format(query, NEWSAPI_KEY)
else:
url = 'http://newsapi.org/v2/top-headlines?country=it&sortBy=publishedAt&apiKey={}'.format(NEWSAPI_KEY)
data = json_request_get(url)
if not data:
return None
if data["status"] == "ok" and data["totalResults"] > 0:
article = data["articles"][0]
description = article["description"].replace('\n', ' ')
info = '{} - {}'.format(article["url"], description)
return info
else:
return "I haven't found anything"
def get_weather(location):
url = 'http://api.openweathermap.org/data/2.5/forecast?q={}&units=metric&appid={}'.format(location, OPENWEATHER_KEY)
data = json_request_get(url)
if not data:
return None
if data["cod"] == '200':
name = data["city"]["name"]
today = data["list"][0]
response = "Weather for {} is {}, the temperature is around {}°C. " \
.format(name, today["weather"][0]["description"], today["main"]["temp"])
for day in data["list"]:
date = datetime.date.today() + datetime.timedelta(days=1)
if day["dt_txt"] == date.strftime("%Y-%m-%d 12:00:00"):
response = response + "Tomorrow at 12:00 will be {}, the temperature will be around {}°C." \
.format(day["weather"][0]["description"], day["main"]["temp"])
else:
response = data["message"]
return response
def get_youtube_videoinfo(item):
title = item["snippet"]["title"]
channel = item["snippet"]["channelTitle"]
description = item["snippet"]["description"]
description = description[:150] if len(description) > 150 else description
description = description.replace('\n', ' ')
publish_date = item["snippet"]["publishedAt"]
try:
publish_date = datetime.datetime.strptime(item["snippet"]["publishedAt"], "%Y-%m-%dT%H:%M:%S.%f%z")
except :
publish_date = datetime.datetime.strptime(item["snippet"]["publishedAt"], "%Y-%m-%dT%H:%M:%S%z")
publish_date = publish_date.strftime("%b %d %Y")
return "{} on {} \x02{} →\x02 {}".format(channel, publish_date, title, description)
def get_youtube_description(query):
parsed_url = urlparse(query)
url_queries = parse_qs(parsed_url.query)
if "v" in url_queries:
video_id = url_queries["v"][0]
url = 'https://www.googleapis.com/youtube/v3/videos?part=id%2C+snippet&id={}&key={}'.format(video_id, YOUTUBE_KEY)
data = json_request_get(url)
if not data:
return None
items = data["items"]
if len(items) > 0:
info = get_youtube_videoinfo(items[0])
return "\x0303[youtube]\x03 {}".format(info)
def search_youtube_video(query, music=False):
q = {'part':'snippet', 'maxResults': 1, 'type':'video', 'q': query, 'key': YOUTUBE_KEY}
if music:
q['videoCategoryId'] = 10
url = "https://www.googleapis.com/youtube/v3/search?"+urlencode(q)
data = json_request_get(url)
if not data:
return None
items = data["items"]
if len(items) > 0:
item = items[0]
video_id = item["id"]["videoId"]
url = "https://www.youtube.com/watch?v={}".format(video_id)
info = get_youtube_videoinfo(item)
return "\x0303[youtube]\x03 {} {}".format(url, info)
return "I haven't found anything"
def search_image(query):
q = {'num': 1, 'searchType':'image', 'q': query, 'key': YOUTUBE_KEY, 'cx': SEARCH_ENGINE}
url = "https://www.googleapis.com/customsearch/v1?"+urlencode(q)
data = json_request_get(url)
if not data:
return None
items = data["items"]
if len(items) > 0:
item = items[0]
url = item['link']
title = item['title']
return f'\x0303[image]\x03 {title} → {url}'
return "I haven't found anything"
def url_meta(url):
resp = http_request_get(url)
if not resp:
return None
soup = BeautifulSoup(resp.text, 'lxml')
meta = ""
title = soup.title
if title:
title = title.text.strip().replace('\n', ' ')
meta += f'\x0303<title>\x03 {title} \n'
description = soup.find('meta', {'name':'description'})
if not description:
return meta
description = description.get('content')
if not description:
return meta
description = description[:200].strip().replace('\n', ' ')
meta += f'\x0303<description>\x03 {description} \n'
return meta
def get_url_info(url):
response = get_youtube_description(url)
if response:
return response
response = url_meta(url)
if response:
return response
def latex_to_png(formula):
formula = "\\bg_ffffff {}".format(formula)
r = requests.get(r'http://latex.codecogs.com/png.latex?\dpi{{300}} {formula}'.format(formula=formula))
return r.url
def latex_to_text(formula):
latex = '${}$'.format(formula)
try:
return pypandoc.convert_text(latex, 'plain', format='latex')
except Exception:
return None
def wolfram_req(query):
url = f'https://api.wolframalpha.com/v1/result?i={query}&appid={WOLFRAM_KEY}'
resp = http_request_get(url)
if resp:
return resp.text.replace('\n', '. ')
def plot_function(query):
q = {'q': query}
q = urllib.parse.urlencode(q)
return f'https://frankplus.github.io/plasm?{q}'
def emojize(query):
return emoji.emojize(query, use_aliases=True)
def tweet(message):
api = twitter.Api(consumer_key=TWITTER_CONSUMER_KEY,
consumer_secret=TWITTER_CONSUMER_SECRET,
access_token_key=TWITTER_ACCESS_TOKEN_KEY,
access_token_secret=TWITTER_ACCESS_TOKEN_SECRET,
input_encoding='utf-8')
try:
status = api.PostUpdate(message)
logging.info(status)
link = f"https://twitter.com/{status.user.screen_name}/status/{status.id}"
return f"Message tweeted! {link}"
except Exception:
logging.exception("Could not send tweet message")
return "Error sending tweet"
def fortune():
return json_request_get("http://yerkee.com/api/fortune")['fortune']
| [
"content"
] |
2024-01-10 | CiscoSE/ciscodocs-gpt3 | utils~gpt3utils.py | import json
import utils
import openai
def getApiKey(config_location):
try:
with open(config_location, "r") as file:
config = json.loads(file.read())
return config["api_key"]
except Exception:
return None
def uploadFile(file, api_key):
try:
openai.api_key = api_key
with open(file, encoding="utf8") as upload_handle:
    response = openai.File.create(file=upload_handle, purpose="answers")
return response["id"]
except Exception:
return "Upload failed!"
def listFiles(api_key):
openai.api_key = api_key
return openai.File.list()
def deleteFile(file_id, api_key):
try:
openai.api_key = api_key
response = openai.File(file_id).delete()
return response
except Exception:
return "File deletion failed!"
def deleteAllFiles(api_key):
openai.api_key = api_key
data = listFiles(api_key)
for file in data["data"]:
deleteFile(file["id"], api_key)
return "All files deleted"
def answerQuestion(question_text, file_id, api_key):
try:
openai.api_key = api_key
response = openai.Answer.create(
search_model = "ada",
model = "davinci",
question = question_text,
file = file_id,
examples = [
["What is the latest version of DNA Center?", "The latest version is 2.1.2.4"],
["Please describe SWIM.", "SWIM is a feature that manages software upgrades and controls the consistency of image versions and configurations across your network."]
],
examples_context = "Cisco DNA Center’s latest release, 2.1.2.4, is a major update to this solution, with enhancements that greatly facilitate SDA policy and segmentation, identification of network endpoints, Wi-Fi 6 upgrades, power-over-Ethernet (PoE) management, and security and ITSM integrations.",
max_tokens = 100,
max_rerank = 300
)
answer_text = response["answers"][0]
return answer_text
except Exception:
return "I do not know how to answer that." | [] |
2024-01-10 | sungmin9939/transformers | src~transformers~models~openai~modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
import json
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import gelu_new, silu
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from ...modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from ...utils import logging
from .configuration_openai import OpenAIGPTConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "openai-gpt"
_CONFIG_FOR_DOC = "OpenAIGPTConfig"
_TOKENIZER_FOR_DOC = "OpenAIGPTTokenizer"
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openai-gpt",
# See all OpenAI GPT models at https://huggingface.co/models?filter=openai-gpt
]
def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
"""Load tf pre-trained weights in a pytorch model (from NumPy arrays here)"""
import re
import numpy as np
if ".ckpt" in openai_checkpoint_folder_path:
openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
logger.info(f"Loading weights from {openai_checkpoint_folder_path}")
with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
names = json.load(names_handle)
with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
shapes = json.load(shapes_handle)
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + f"/params_{n}.npy") for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
# Check that the token and position embeddings weight dimensions map those of the init parameters.
if model.tokens_embed.weight.shape != init_params[1].shape:
raise ValueError(
f"tokens_embed.weight.shape: {model.tokens_embed.weight.shape} does not match init_param[1].shape: {init_params[1].shape}"
)
if model.positions_embed.weight.shape != init_params[0].shape:
raise ValueError(
f"positions_embed.weight.shape: {model.positions_embed.weight.shape} does not match init_param[0].shape: {init_params[0].shape}"
)
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
if name[-2:] != ":0":
raise ValueError(f"Layer {name} does not end with :0")
name = name[:-2]
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "w":
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
# Ensure that the pointer and array have compatible shapes.
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
ACT_FNS = {"relu": nn.ReLU, "silu": silu, "gelu": gelu_new, "swish": silu}
class Attention(nn.Module):
def __init__(self, nx, n_positions, config, scale=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implementation]
if n_state % config.n_head != 0:
raise ValueError(f"Attention n_state shape: {n_state} must be divisible by config.n_head {config.n_head}")
self.register_buffer(
"bias", torch.tril(torch.ones(n_positions, n_positions)).view(1, 1, n_positions, n_positions)
)
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implementation method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.functional.softmax(w, dim=-1)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implementation: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implementation: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_positions, config, scale=False):
super().__init__()
nx = config.n_embd
self.attn = Attention(nx, n_positions, config, scale)
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
attn_outputs = self.attn(
x,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
)
a = attn_outputs[0]
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
outputs = [h] + attn_outputs[1:]
return outputs
class OpenAIGPTPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = OpenAIGPTConfig
load_tf_weights = load_tf_weights_in_openai_gpt
base_model_prefix = "transformer"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class OpenAIGPTDoubleHeadsModelOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
Multiple choice classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
mc_loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mc_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
OPENAI_GPT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`OpenAIGPTConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`OpenAIGPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)])
self.register_buffer("position_ids", torch.arange(config.n_positions))
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, new_embeddings):
self.tokens_embed = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
# Code is different from when we had a single embedding matrix from position and token embeddings
position_ids = self.position_ids[None, : input_shape[-1]]
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions=output_attentions)
hidden_states = outputs[0]
if output_attentions:
all_attentions = all_attentions + (outputs[1],)
hidden_states = hidden_states.view(*output_shape)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
@add_start_docstrings(
"""
OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=CausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutput(
loss=loss,
logits=lm_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@add_start_docstrings(
"""
OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
input embeddings, the classification head takes as input the input of a specified classification token index in the
input sequence).
""",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=OpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
labels=None,
mc_labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input):
Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
1]`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are
ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)
Return:
Examples:
```python
>>> from transformers import OpenAIGPTTokenizer, OpenAIGPTDoubleHeadsModel
>>> import torch
>>> tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
>>> model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-gpt")
>>> tokenizer.add_special_tokens(
... {"cls_token": "[CLS]"}
>>> ) # Add a [CLS] to the vocabulary (we should train it also!)
>>> model.resize_token_embeddings(len(tokenizer))
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
>>> mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
>>> lm_logits = outputs.lm_logits
>>> mc_logits = outputs.mc_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
lm_loss, mc_loss = None, None
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_loss is not None:
output = (mc_loss,) + output
return ((lm_loss,) + output) if lm_loss is not None else output
return OpenAIGPTDoubleHeadsModelOutput(
loss=lm_loss,
mc_loss=mc_loss,
logits=lm_logits,
mc_logits=mc_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@add_start_docstrings(
"""
The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
[`OpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal
models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the
last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding
token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since
it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take
the last value in each row of the batch).
""",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTForSequenceClassification(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = OpenAIGPTModel(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
# Ensure the batch size is > 1 if there is no padding.
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
else:
sequence_lengths = -1
logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[range(batch_size), sequence_lengths]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=pooled_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
| [] |
2024-01-10 | PKUinfo/PKUinfo | pyprocessor~GPTProcessor.py | import os
import openai
import json
from keys import openai_APIKEY, openai_BASE
class GPTProcessor:
def __init__(self):
openai.api_key = openai_APIKEY
openai.api_base = openai_BASE
self.id = 0
def ask_chatgpt(self, my_messages):  # could be made asynchronous: once the request to GPT is sent, other work can run until it completes
ans = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=my_messages,
).choices[0].message.content
return ans
def txt2json(self, text):
result = self.ask_chatgpt(text)
return result
def first_guess(self,title,text):
my_messages = [
{"role": "system", "content": "根据下面这个标题和部分文字,你觉得是否可能是活动的预告,请直接告诉我答案为'是'或'否'。请不要回复任何其它内容。请回复一个字,不要回复多个字。如果你觉得不确定,也请回复'是'。"},
{"role": "user", "content": title + '\n' + text[:20]},
]
ans = self.ask_chatgpt(my_messages)
print(ans)
if ans == '否':
return False
return True
def Text_to_JSON(self, text):
my_messages = [
{"role": "system", "content": "Now that you are engaged in professional text processing, please let me know if the push below is a preview of an event (such as a lecture or ticket collection) (而不是活动总结或者其它内容). If so, please use JSON format to tell me the 'event_name', 'event_time', 'location', 'organizational_unit', and 'event_summary'. 时间尽可能规范到特定的时刻,如果有多个时间请一并输出,如没有则以None存在,推送发布时间为2023年,如果有多个活动,请返回list,如果没有则返回空list。Otherwise, tell me the activity category it belongs to."},
{"role": "user", "content": text},
]
infolist = self.ask_chatgpt(my_messages)
infolist = infolist.replace('\n', ' ')
try:
print(infolist)
json_list = json.loads(infolist)
except json.JSONDecodeError:
print('Error: json.loads error')
return []
if type(json_list) != list:
json_list = [json_list]
should_exist = ['event_name', 'event_time', 'location', 'organizational_unit', 'event_summary']
result_list = []
for info in json_list:
all_exist = True
for key in should_exist:
if key not in info:
print('Error: ', key, ' not exist')
all_exist = False
break
if not all_exist:
continue
ask_time_message = [
{"role": "system", "content": "请你告诉我这个活动最早的时间,请以 XXXX-XX-XX XX:XX 的格式告诉我。请注意最早的时间只会存在一个,请直接告诉我这个格式不要解释任何理由和补充,如果没有特定的时间,截止时间请告诉我为24:00,开始时间请告诉我00:00,如果未能提供请告诉我None"},
{"role": "user", "content": str(info)},
]
start_time = self.ask_chatgpt(ask_time_message)
if len(start_time) != 16 or start_time[4] != '-' or start_time[7] != '-' or start_time[10] != ' ' or start_time[13] != ':' or not start_time[:4].isdigit() or not start_time[5:7].isdigit() or not start_time[8:10].isdigit() or not start_time[11:13].isdigit() or not start_time[14:16].isdigit():
print('Error: start_time format error')
continue
json_data = {
'event_name': info.get('event_name', None),
'data': start_time[:10],
'time': start_time[11:16] + ':00',
'event_time': str(info.get('event_time', None)),
'location': info.get('location', None),
'organizational_unit': info.get('organizational_unit', None),
'event_summary': info.get('event_summary', None),
}
result_list.append(json_data)
return result_list
def process_dir(self, current_dir):
for filename in os.listdir(current_dir):
try:
file_path = os.path.join(current_dir, filename)
if os.path.isdir(file_path):
self.process_dir(file_path)
elif filename.endswith('.txt'):
with open(file_path,encoding='utf-8') as f:
text = f.read()
json_data = self.Text_to_JSON(text)
except Exception:
print('Error processing file: ', filename)
continue
print('Done!')
if __name__ == '__main__':
test_text = '''8999'''
processor = GPTProcessor()
print(processor.Text_to_JSON(test_text)) | [
"根据下面这个标题和部分文字,你觉得是否可能是活动的预告,请直接告诉我答案为'是'或'否'。请不要回复任何其它内容。请回复一个字,不要回复多个字。如果你觉得不确定,也请回复'是'。",
"PLACEHOLDER\nPLACEHOLDER",
"Now that you are engaged in professional text processing, please let me know if the push below is a preview of an event (such as a lecture or ticket collection) (而不是活动总结或者其它内容). If so, please use JSON format to tell me the 'event_name', 'event_time', 'location', 'organizational_unit', and 'event_summary'. 时间尽可能规范到特定的时刻,如果有多个时间请一并输出,如没有则以None存在,推送发布时间为2023年,如果有多个活动,请返回list,如果没有则返回空list。Otherwise, tell me the activity category it belongs to.",
"请你告诉我这个活动最早的时间,请以 XXXX-XX-XX XX:XX 的格式告诉我。请注意最早的时间只会存在一个,请直接告诉我这个格式不要解释任何理由和补充,如果没有特定的时间,截止时间请告诉我为24:00,开始时间请告诉我00:00,如果未能提供请告诉我None"
] |
2024-01-10 | Charles-Gormley/ai-economist-trial | tutorials~rllib~env_wrapper.py | # Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
"""
Wrapper for making the gather-trade-build environment an OpenAI compatible environment.
This can then be used with reinforcement learning frameworks such as RLlib.
"""
import os
import pickle
import random
import warnings
import numpy as np
from ai_economist import foundation
from gym import spaces
from gym.utils import seeding
from ray.rllib.env.multi_agent_env import MultiAgentEnv
_BIG_NUMBER = 1e20
def recursive_list_to_np_array(d):
if isinstance(d, dict):
new_d = {}
for k, v in d.items():
if isinstance(v, list):
new_d[k] = np.array(v)
elif isinstance(v, dict):
new_d[k] = recursive_list_to_np_array(v)
elif isinstance(v, (float, int, np.floating, np.integer)):
new_d[k] = np.array([v])
elif isinstance(v, np.ndarray):
new_d[k] = v
else:
raise AssertionError
return new_d
raise AssertionError
def pretty_print(dictionary):
for key in dictionary:
print("{:15s}: {}".format(key, dictionary[key].shape))
print("\n")
class RLlibEnvWrapper(MultiAgentEnv):
"""
Environment wrapper for RLlib. It sub-classes MultiAgentEnv.
This wrapper adds the action and observation space to the environment,
and adapts the reset and step functions to run with RLlib.
"""
def __init__(self, env_config, verbose=False):
self.env_config_dict = env_config["env_config_dict"]
# Adding env id in the case of multiple environments
if hasattr(env_config, "worker_index"):
self.env_id = (
env_config["num_envs_per_worker"] * (env_config.worker_index - 1)
) + env_config.vector_index
else:
self.env_id = None
self.env = foundation.make_env_instance(**self.env_config_dict)
self.verbose = verbose
self.sample_agent_idx = str(self.env.all_agents[0].idx)
obs = self.env.reset()
self.observation_space = self._dict_to_spaces_dict(obs["0"])
self.observation_space_pl = self._dict_to_spaces_dict(obs["p"])
if self.env.world.agents[0].multi_action_mode:
self.action_space = spaces.MultiDiscrete(
self.env.get_agent(self.sample_agent_idx).action_spaces
)
self.action_space.dtype = np.int64
self.action_space.nvec = self.action_space.nvec.astype(np.int64)
else:
self.action_space = spaces.Discrete(
self.env.get_agent(self.sample_agent_idx).action_spaces
)
self.action_space.dtype = np.int64
if self.env.world.planner.multi_action_mode:
self.action_space_pl = spaces.MultiDiscrete(
self.env.get_agent("p").action_spaces
)
self.action_space_pl.dtype = np.int64
self.action_space_pl.nvec = self.action_space_pl.nvec.astype(np.int64)
else:
self.action_space_pl = spaces.Discrete(
self.env.get_agent("p").action_spaces
)
self.action_space_pl.dtype = np.int64
self._seed = None
if self.verbose:
print("[EnvWrapper] Spaces")
print("[EnvWrapper] Obs (a) ")
pretty_print(self.observation_space)
print("[EnvWrapper] Obs (p) ")
pretty_print(self.observation_space_pl)
print("[EnvWrapper] Action (a)", self.action_space)
print("[EnvWrapper] Action (p)", self.action_space_pl)
def _dict_to_spaces_dict(self, obs):
dict_of_spaces = {}
for k, v in obs.items():
# list of lists are listified np arrays
_v = v
if isinstance(v, list):
_v = np.array(v)
elif isinstance(v, (int, float, np.floating, np.integer)):
_v = np.array([v])
# assign Space
if isinstance(_v, np.ndarray):
x = float(_BIG_NUMBER)
# Warnings for extreme values
if np.max(_v) > x:
warnings.warn("Input is too large!")
if np.min(_v) < -x:
warnings.warn("Input is too small!")
box = spaces.Box(low=-x, high=x, shape=_v.shape, dtype=_v.dtype)
low_high_valid = (box.low < 0).all() and (box.high > 0).all()
# This loop avoids issues with overflow to make sure low/high are good.
while not low_high_valid:
x = x // 2
box = spaces.Box(low=-x, high=x, shape=_v.shape, dtype=_v.dtype)
low_high_valid = (box.low < 0).all() and (box.high > 0).all()
dict_of_spaces[k] = box
elif isinstance(_v, dict):
dict_of_spaces[k] = self._dict_to_spaces_dict(_v)
else:
raise TypeError
return spaces.Dict(dict_of_spaces)
@property
def pickle_file(self):
if self.env_id is None:
return "game_object.pkl"
return "game_object_{:03d}.pkl".format(self.env_id)
def save_game_object(self, save_dir):
assert os.path.isdir(save_dir)
path = os.path.join(save_dir, self.pickle_file)
with open(path, "wb") as F:
pickle.dump(self.env, F)
def load_game_object(self, save_dir):
assert os.path.isdir(save_dir)
path = os.path.join(save_dir, self.pickle_file)
with open(path, "rb") as F:
self.env = pickle.load(F)
@property
def n_agents(self):
return self.env.n_agents
@property
def summary(self):
last_completion_metrics = self.env.previous_episode_metrics
if last_completion_metrics is None:
return {}
last_completion_metrics["completions"] = int(self.env._completions)
return last_completion_metrics
def get_seed(self):
return int(self._seed)
def seed(self, seed):
# Using the seeding utility from OpenAI Gym
# https://github.com/openai/gym/blob/master/gym/utils/seeding.py
_, seed1 = seeding.np_random(seed)
# Derive a random seed. This gets passed as an uint, but gets
# checked as an int elsewhere, so we need to keep it below
# 2**31.
seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31
if self.verbose:
print(
"[EnvWrapper] twisting seed {} -> {} -> {} (final)".format(
seed, seed1, seed2
)
)
seed = int(seed2)
np.random.seed(seed2)
random.seed(seed2)
self._seed = seed2
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
return recursive_list_to_np_array(obs)
def step(self, action_dict):
obs, rew, done, info = self.env.step(action_dict)
assert isinstance(obs[self.sample_agent_idx]["action_mask"], np.ndarray)
return recursive_list_to_np_array(obs), rew, done, info
| [] |
2024-01-10 | Argandov/slack-gpt-bot | functions.py | from langchain.chat_models import ChatOpenAI
import os
from langchain.chains import LLMChain
from dotenv import find_dotenv, load_dotenv
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
os.environ["openai_api_key"] = OPENAI_API_KEY
def _respond(user_input):
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=1)
# CHANGE THIS:
template = """
You're (name), a personal assistant.
Your goal is to provide clear and concise advice on diverse topics (etc).
Use simple language and avoid technical jargon, unless explicitly asked for by the user.
Be responsive and helpful to users.
Make sure to sign off with {signature}.
"""
signature = "Best, your personal bot" # CHANGE THIS
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "Here's what the user is asking you: {user_input}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
chain = LLMChain(llm=chat, prompt=chat_prompt)
response = chain.run(user_input=user_input, signature=signature)
return response
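# Minimal usage sketch (assumption: OPENAI_API_KEY is available via the .env file loaded above):
#   print(_respond("How do I politely decline a meeting?"))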
| [
"[PLACEHOLDER, PLACEHOLDER]",
"\n \n You're (name), a personal assistant.\n Your goal is to provide clear and concise advice on diverse topics (etc). \n Use simple language and avoid technical jargon, unless explicitly asked for by the user.\n Be responsive and helpful to users.\n Make sure to sign off with {signature}.\n \n ",
"Here's what the user is asking you: {user_input}"
] |
2024-01-10 | jairodriguez/chatgpt-memory | rest_api.py | from typing import Optional
from fastapi import FastAPI
from pydantic import BaseModel
from chatgpt_memory.datastore import RedisDataStore, RedisDataStoreConfig
from chatgpt_memory.environment import OPENAI_API_KEY, REDIS_HOST, REDIS_PASSWORD, REDIS_PORT
from chatgpt_memory.llm_client import ChatGPTClient, ChatGPTConfig, ChatGPTResponse, EmbeddingClient, EmbeddingConfig
from chatgpt_memory.memory import MemoryManager
# Instantiate an EmbeddingConfig object with the OpenAI API key
embedding_config = EmbeddingConfig(api_key=OPENAI_API_KEY)
# Instantiate an EmbeddingClient object with the EmbeddingConfig object
embed_client = EmbeddingClient(config=embedding_config)
# Instantiate a RedisDataStoreConfig object with the Redis connection details
redis_datastore_config = RedisDataStoreConfig(
host=REDIS_HOST,
port=REDIS_PORT,
password=REDIS_PASSWORD,
)
# Instantiate a RedisDataStore object with the RedisDataStoreConfig object
redis_datastore = RedisDataStore(config=redis_datastore_config)
# Instantiate a MemoryManager object with the RedisDataStore object and EmbeddingClient object
memory_manager = MemoryManager(datastore=redis_datastore, embed_client=embed_client, topk=1)
# Instantiate a ChatGPTConfig object with the OpenAI API key and verbose set to True
chat_gpt_config = ChatGPTConfig(api_key=OPENAI_API_KEY, verbose=False)
# Instantiate a ChatGPTClient object with the ChatGPTConfig object and MemoryManager object
chat_gpt_client = ChatGPTClient(config=chat_gpt_config, memory_manager=memory_manager)
class MessagePayload(BaseModel):
conversation_id: Optional[str]
message: str
app = FastAPI()
@app.post("/converse/")
async def create_item(message_payload: MessagePayload) -> ChatGPTResponse:
response = chat_gpt_client.converse(**message_payload.dict())
return response
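# Illustrative request sketch (assumption: the app is served locally, e.g. with
# `uvicorn rest_api:app --reload`):
#   curl -X POST http://localhost:8000/converse/ \
#        -H "Content-Type: application/json" \
#        -d '{"conversation_id": null, "message": "Hello there!"}'
# The JSON body maps onto MessagePayload; the reply is the serialized ChatGPTResponse.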
| [] |
2024-01-10 | dnddnjs/mujoco-pg | npg.py | import numpy as np
from utils import *
from hparams import HyperParams as hp
def get_returns(rewards, masks):
rewards = torch.Tensor(rewards)
masks = torch.Tensor(masks)
returns = torch.zeros_like(rewards)
running_returns = 0
for t in reversed(range(0, len(rewards))):
running_returns = rewards[t] + hp.gamma * running_returns * masks[t]
returns[t] = running_returns
returns = (returns - returns.mean()) / returns.std()
return returns
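# Worked example (assumption: hp.gamma = 0.99): for rewards [1, 1, 1] and masks [1, 1, 1],
# the backward recursion above yields raw returns [2.9701, 1.99, 1.0] before the
# mean/std normalization in the final step.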
def get_loss(actor, returns, states, actions):
mu, std, logstd = actor(torch.Tensor(states))
log_policy = log_density(torch.Tensor(actions), mu, std, logstd)
returns = returns.unsqueeze(1)
objective = returns * log_policy
objective = objective.mean()
return objective
def train_critic(critic, states, returns, critic_optim):
criterion = torch.nn.MSELoss()
n = len(states)
arr = np.arange(n)
for epoch in range(5):
np.random.shuffle(arr)
for i in range(n // hp.batch_size):
batch_index = arr[hp.batch_size * i: hp.batch_size * (i + 1)]
batch_index = torch.LongTensor(batch_index)
inputs = torch.Tensor(states)[batch_index]
target = returns.unsqueeze(1)[batch_index]
values = critic(inputs)
loss = criterion(values, target)
critic_optim.zero_grad()
loss.backward()
critic_optim.step()
def fisher_vector_product(actor, states, p):
p.detach()
kl = kl_divergence(new_actor=actor, old_actor=actor, states=states)
kl = kl.mean()
kl_grad = torch.autograd.grad(kl, actor.parameters(), create_graph=True)
kl_grad = flat_grad(kl_grad) # check kl_grad == 0
kl_grad_p = (kl_grad * p).sum()
kl_hessian_p = torch.autograd.grad(kl_grad_p, actor.parameters())
kl_hessian_p = flat_hessian(kl_hessian_p)
return kl_hessian_p + 0.1 * p
# from openai baseline code
# https://github.com/openai/baselines/blob/master/baselines/common/cg.py
def conjugate_gradient(actor, states, b, nsteps, residual_tol=1e-10):
x = torch.zeros(b.size())
r = b.clone()
p = b.clone()
rdotr = torch.dot(r, r)
for i in range(nsteps):
_Avp = fisher_vector_product(actor, states, p)
alpha = rdotr / torch.dot(p, _Avp)
x += alpha * p
r -= alpha * _Avp
new_rdotr = torch.dot(r, r)
betta = new_rdotr / rdotr
p = r + betta * p
rdotr = new_rdotr
if rdotr < residual_tol:
break
return x
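# Note (sketch): conjugate_gradient approximately solves F x = g, where F is applied
# implicitly through fisher_vector_product and g is the policy-gradient vector, so the
# step_dir used in train_model below is an estimate of the natural gradient F^{-1} g.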
def train_model(actor, critic, memory, actor_optim, critic_optim):
memory = np.array(memory)
states = np.vstack(memory[:, 0])
actions = list(memory[:, 1])
rewards = list(memory[:, 2])
masks = list(memory[:, 3])
# ----------------------------
# step 1: get returns
returns = get_returns(rewards, masks)
# ----------------------------
# step 2: train critic several steps with respect to returns
train_critic(critic, states, returns, critic_optim)
# ----------------------------
# step 3: get gradient of loss and hessian of kl
loss = get_loss(actor, returns, states, actions)
loss_grad = torch.autograd.grad(loss, actor.parameters())
loss_grad = flat_grad(loss_grad)
step_dir = conjugate_gradient(actor, states, loss_grad.data, nsteps=10)
# ----------------------------
# step 4: get step direction and step size and update actor
params = flat_params(actor)
new_params = params + 0.5 * step_dir
update_model(actor, new_params)
| [] |
2024-01-10 | dnddnjs/mujoco-pg | trpo.py | import numpy as np
from utils import *
from hparams import HyperParams as hp
from model import Actor
def get_gae(rewards, masks, values):
rewards = torch.Tensor(rewards)
masks = torch.Tensor(masks)
returns = torch.zeros_like(rewards)
advants = torch.zeros_like(rewards)
running_returns = 0
previous_value = 0
running_advants = 0
for t in reversed(range(0, len(rewards))):
running_returns = rewards[t] + hp.gamma * running_returns * masks[t]
running_tderror = rewards[t] + hp.gamma * previous_value * masks[t] - \
values.data[t]
running_advants = running_tderror + hp.gamma * hp.lamda * \
running_advants * masks[t]
returns[t] = running_returns
previous_value = values.data[t]
advants[t] = running_advants
advants = (advants - advants.mean()) / advants.std()
return returns, advants
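# Sketch of the recursion implemented above: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
# and A_t = delta_t + gamma * lambda * A_{t+1}, while returns are plain discounted reward
# sums; advantages are then standardized to zero mean and unit variance.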
def surrogate_loss(actor, advants, states, old_policy, actions):
mu, std, logstd = actor(torch.Tensor(states))
new_policy = log_density(torch.Tensor(actions), mu, std, logstd)
advants = advants.unsqueeze(1)
surrogate = advants * torch.exp(new_policy - old_policy)
surrogate = surrogate.mean()
return surrogate
def train_critic(critic, states, returns, advants, critic_optim):
criterion = torch.nn.MSELoss()
n = len(states)
arr = np.arange(n)
for epoch in range(5):
np.random.shuffle(arr)
for i in range(n // hp.batch_size):
batch_index = arr[hp.batch_size * i: hp.batch_size * (i + 1)]
batch_index = torch.LongTensor(batch_index)
inputs = torch.Tensor(states)[batch_index]
target1 = returns.unsqueeze(1)[batch_index]
target2 = advants.unsqueeze(1)[batch_index]
values = critic(inputs)
loss = criterion(values, target1 + target2)
critic_optim.zero_grad()
loss.backward()
critic_optim.step()
def fisher_vector_product(actor, states, p):
p.detach()
kl = kl_divergence(new_actor=actor, old_actor=actor, states=states)
kl = kl.mean()
kl_grad = torch.autograd.grad(kl, actor.parameters(), create_graph=True)
kl_grad = flat_grad(kl_grad) # check kl_grad == 0
kl_grad_p = (kl_grad * p).sum()
kl_hessian_p = torch.autograd.grad(kl_grad_p, actor.parameters())
kl_hessian_p = flat_hessian(kl_hessian_p)
return kl_hessian_p + 0.1 * p
# from openai baseline code
# https://github.com/openai/baselines/blob/master/baselines/common/cg.py
def conjugate_gradient(actor, states, b, nsteps, residual_tol=1e-10):
x = torch.zeros(b.size())
r = b.clone()
p = b.clone()
rdotr = torch.dot(r, r)
for i in range(nsteps):
_Avp = fisher_vector_product(actor, states, p)
alpha = rdotr / torch.dot(p, _Avp)
x += alpha * p
r -= alpha * _Avp
new_rdotr = torch.dot(r, r)
betta = new_rdotr / rdotr
p = r + betta * p
rdotr = new_rdotr
if rdotr < residual_tol:
break
return x
def train_model(actor, critic, memory, actor_optim, critic_optim):
memory = np.array(memory)
states = np.vstack(memory[:, 0])
actions = list(memory[:, 1])
rewards = list(memory[:, 2])
masks = list(memory[:, 3])
values = critic(torch.Tensor(states))
# ----------------------------
# step 1: get returns and GAEs
returns, advants = get_gae(rewards, masks, values)
# ----------------------------
# step 2: train critic several steps with respect to returns
train_critic(critic, states, returns, advants, critic_optim)
# ----------------------------
# step 3: get gradient of loss and hessian of kl
mu, std, logstd = actor(torch.Tensor(states))
old_policy = log_density(torch.Tensor(actions), mu, std, logstd)
loss = surrogate_loss(actor, advants, states, old_policy.detach(), actions)
loss_grad = torch.autograd.grad(loss, actor.parameters())
loss_grad = flat_grad(loss_grad)
step_dir = conjugate_gradient(actor, states, loss_grad.data, nsteps=10)
loss = loss.data.numpy()
# ----------------------------
# step 4: get step direction and step size and full step
params = flat_params(actor)
shs = 0.5 * (step_dir * fisher_vector_product(actor, states, step_dir)
).sum(0, keepdim=True)
step_size = 1 / torch.sqrt(shs / hp.max_kl)[0]
full_step = step_size * step_dir
# ----------------------------
# step 5: do backtracking line search for n times
old_actor = Actor(actor.num_inputs, actor.num_outputs)
update_model(old_actor, params)
expected_improve = (loss_grad * full_step).sum(0, keepdim=True)
expected_improve = expected_improve.data.numpy()
flag = False
fraction = 1.0
for i in range(10):
new_params = params + fraction * full_step
update_model(actor, new_params)
new_loss = surrogate_loss(actor, advants, states, old_policy.detach(),
actions)
new_loss = new_loss.data.numpy()
loss_improve = new_loss - loss
expected_improve *= fraction
kl = kl_divergence(new_actor=actor, old_actor=old_actor, states=states)
kl = kl.mean()
print('kl: {:.4f} loss improve: {:.4f} expected improve: {:.4f} '
'number of line search: {}'
.format(kl.data.numpy(), loss_improve, expected_improve[0], i))
# see https: // en.wikipedia.org / wiki / Backtracking_line_search
if kl < hp.max_kl and (loss_improve / expected_improve) > 0.5:
flag = True
break
fraction *= 0.5
if not flag:
params = flat_params(old_actor)
update_model(actor, params)
print('policy update does not improve the surrogate')
| [] |
2024-01-10 | Vaibhav67979/Virtual-Assistant | integ.py | import openai
import gradio as gr
openai.api_key = "hidden-open-ai-api-key"
messages = [
{"role": "system", "content": "You are a helpful and kind AI Assistant."},
]
def chatbot(input):
if input:
messages.append({"role": "user", "content": input})
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
)
reply = chat.choices[0].message.content
messages.append({"role": "assistant", "content": reply})
return reply
inputs = gr.inputs.Textbox(lines=7, label="Chat with AI")
outputs = gr.outputs.Textbox(label="Reply")
gr.Interface(fn=chatbot, inputs=inputs, outputs=outputs, title="AI Chatbot",
description="Ask anything you want",
theme="compact").launch(share=True)
| [
"INPUT",
"You are a helpful and kind AI Assistant."
] |
2024-01-10 | nevermoreluo/sentry | src~sentry~api~endpoints~event_ai_suggested_fix.py | import logging
import random
import openai
from django.conf import settings
from django.dispatch import Signal
from django.http import HttpResponse, StreamingHttpResponse
from sentry import eventstore, features
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.types.ratelimit import RateLimit, RateLimitCategory
from sentry.utils import json
from sentry.utils.cache import cache
logger = logging.getLogger(__name__)
from rest_framework.request import Request
from rest_framework.response import Response
openai.api_key = settings.OPENAI_API_KEY
openai_policy_check = Signal()
# How many stacktrace frames do we want per exception?
MAX_STACKTRACE_FRAMES = 15
# How many exceptions do we want?
MAX_EXCEPTIONS = 3
# Do we want tags? They don't seem particularly useful
ADD_TAGS = False
FUN_PROMPT_CHOICES = [
"[haiku about the error]",
"[hip hop rhyme about the error]",
"[4 line rhyme about the error]",
"[2 stanza rhyme about the error]",
"[anti joke about the error]",
]
PROMPT = """\
You are an assistant that analyses software errors, describing the problem with the following rules:
* Be helpful, playful and a bit snarky and sarcastic
* Do not talk about the rules in explanations
* Use emojis frequently in the snarky greeting and closing prompt
* The frames of a stack trace is shown with most recent call first
* Stack frames are either from app code or third party libraries
* Never show code examples as diff
* When describing the problem:
* Explain the error and message
* Explain where in the code the issue happend
* Explain the nature of the issue
* When proposing a solution:
* Explain what code changes are necessary to resolve it
* Explain where the solution should be
* Mention best practices for preventing this
* Remember Sentry's marketing message: "Sentry can't fix this"
Write the answers into the following template:
```
[snarky greeting]
#### Problem Description
[detailed description of the problem]
#### Proposed Solution
[proposed solution to fix this issue]
[fixed code example]
#### What Else
[uplifting closing statements]
___FUN_PROMPT___
```
"""
# These tags are removed because they are quite unstable between different events
# of the same issue, and typically unrelated to something that the AI assistant
# can answer.
BLOCKED_TAGS = frozenset(
[
"user",
"server_name",
"host",
"release",
"handled",
"client_os",
"client_os.name",
"browser",
"browser.name",
"environment",
"runtime",
"device",
"device.family",
"gpu",
"gpu.name",
"gpu.vendor",
"url",
"trace",
"otel",
]
)
def get_openai_policy(organization):
"""Uses a signal to determine what the policy for OpenAI should be."""
results = openai_policy_check.send(
sender=EventAiSuggestedFixEndpoint, organization=organization
)
result = "allowed"
# Last one wins
for _, new_result in results:
if new_result is not None:
result = new_result
return result
def set_if_value(d, key, value):
if value is not None:
d[key] = value
def trim_frames(frames, frame_allowance=MAX_STACKTRACE_FRAMES):
frames_len = 0
app_frames = []
system_frames = []
for frame in frames:
frames_len += 1
if frame.get("in_app"):
app_frames.append(frame)
else:
system_frames.append(frame)
if frames_len <= frame_allowance:
return frames
remaining = frames_len - frame_allowance
app_count = len(app_frames)
system_allowance = max(frame_allowance - app_count, 0)
if system_allowance:
half_max = int(system_allowance / 2)
# prioritize trimming system frames
for frame in system_frames[half_max:-half_max]:
frame["delete"] = True
remaining -= 1
else:
for frame in system_frames:
frame["delete"] = True
remaining -= 1
if remaining:
app_allowance = app_count - remaining
half_max = int(app_allowance / 2)
for frame in app_frames[half_max:-half_max]:
frame["delete"] = True
return [x for x in frames if not x.get("delete")]
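# Worked example of the trimming policy above (hypothetical frames, not Sentry
# data): library frames in the middle of the stack are dropped first, and app
# frames are only trimmed if that is still not enough.
#
#   frames = [{"in_app": False} for _ in range(20)] + [{"in_app": True} for _ in range(5)]
#   trimmed = trim_frames(frames, frame_allowance=15)
#   # -> 15 frames: all 5 in_app frames plus the first and last 5 library
#   #    frames; the 10 middle library frames are discarded.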
def describe_event_for_ai(event, model):
detailed = model.startswith("gpt-4")
data = {}
msg = event.get("message")
if msg:
data["message"] = msg
platform = event.get("platform")
if platform and platform != "other":
data["language"] = platform
exceptions = data.setdefault("exceptions", [])
for idx, exc in enumerate(
reversed((event.get("exception", {})).get("values", ())[:MAX_EXCEPTIONS])
):
exception = {}
if idx > 0:
exception["raised_during_handling_of_previous_exception"] = True
exception["num"] = idx + 1
exc_type = exc.get("type")
if exc_type:
exception["type"] = exc_type
exception["message"] = exc.get("value")
mechanism = exc.get("mechanism") or {}
exc_meta = mechanism.get("meta")
if exc_meta:
exception["exception_info"] = exc_meta
if mechanism.get("handled") is False:
exception["unhandled"] = True
frames = exc.get("stacktrace", {}).get("frames")
first_in_app = True
if frames:
stacktrace = []
for frame in reversed(frames):
stack_frame = {}
set_if_value(stack_frame, "func", frame.get("function"))
set_if_value(stack_frame, "module", frame.get("module"))
set_if_value(stack_frame, "file", frame.get("filename"))
set_if_value(stack_frame, "line", frame.get("lineno"))
if frame.get("in_app"):
stack_frame["in_app"] = True
crashed_here = False
if first_in_app:
crashed_here = True
stack_frame["crash"] = "here"
first_in_app = False
line = frame.get("context_line") or ""
if (crashed_here and idx == 0) or detailed:
pre_context = frame.get("pre_context")
if pre_context:
stack_frame["code_before"] = pre_context
stack_frame["code"] = line
post_context = frame.get("post_context")
if post_context:
stack_frame["code_after"] = post_context
# {snip} usually appears in minified lines. skip that
elif "{snip}" not in line:
set_if_value(stack_frame, "code", line.strip())
stacktrace.append(stack_frame)
if stacktrace:
exception["stacktrace"] = trim_frames(stacktrace)
exceptions.append(exception)
if ADD_TAGS:
tags = data.setdefault("tags", {})
for tag_key, tag_value in sorted(event["tags"]):
if tag_key not in BLOCKED_TAGS:
tags[tag_key] = tag_value
return data
def suggest_fix(event_data, model="gpt-3.5-turbo", stream=False):
"""Runs an OpenAI request to suggest a fix."""
prompt = PROMPT.replace("___FUN_PROMPT___", random.choice(FUN_PROMPT_CHOICES))
event_info = describe_event_for_ai(event_data, model=model)
response = openai.ChatCompletion.create(
model=model,
temperature=0.7,
messages=[
{"role": "system", "content": prompt},
{
"role": "user",
"content": json.dumps(event_info),
},
],
stream=stream,
)
if stream:
return reduce_stream(response)
return response["choices"][0]["message"]["content"]
def reduce_stream(response):
for chunk in response:
delta = chunk["choices"][0]["delta"]
if "content" in delta:
yield delta["content"]
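# Minimal usage sketch (not part of the original module; assumes the OpenAI key
# is configured and `event_data` is a Sentry event payload dict):
#
#   text = suggest_fix(event_data)                      # one complete answer
#   for piece in suggest_fix(event_data, stream=True):  # incremental chunks
#       print(piece, end="")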
@region_silo_endpoint
class EventAiSuggestedFixEndpoint(ProjectEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
# go away
private = True
enforce_rate_limit = True
rate_limits = {
"GET": {
RateLimitCategory.IP: RateLimit(5, 1),
RateLimitCategory.USER: RateLimit(5, 1),
RateLimitCategory.ORGANIZATION: RateLimit(5, 1),
},
}
def get(self, request: Request, project, event_id) -> Response:
"""
Makes AI make suggestions about an event
````````````````````````````````````````
This endpoint returns a JSON response that provides helpful suggestions about how to
understand or resolve an event.
"""
# To use this feature you need the feature enabled and openai needs to be configured
if not settings.OPENAI_API_KEY or not features.has(
"organizations:open-ai-suggestion", project.organization, actor=request.user
):
raise ResourceDoesNotExist
event = eventstore.backend.get_event_by_id(project.id, event_id)
if event is None:
raise ResourceDoesNotExist
# Check the OpenAI access policy
policy = get_openai_policy(request.organization)
policy_failure = None
stream = request.GET.get("stream") == "yes"
if policy == "subprocessor":
policy_failure = "subprocessor"
elif policy == "individual_consent":
if request.GET.get("consent") != "yes":
policy_failure = "individual_consent"
elif policy == "allowed":
pass
else:
logger.warning("Unknown OpenAI policy state")
if policy_failure is not None:
return HttpResponse(
json.dumps({"restriction": policy_failure}),
content_type="application/json",
status=403,
)
# Cache the suggestion for a certain amount by primary hash, so even when new events
# come into the group, we are sharing the same response.
cache_key = "ai:" + event.get_primary_hash()
suggestion = cache.get(cache_key)
if suggestion is None:
try:
suggestion = suggest_fix(event.data, stream=stream)
except openai.error.RateLimitError as err:
return HttpResponse(
json.dumps({"error": err.json_body["error"]}),
content_type="text/plain; charset=utf-8",
status=429,
)
if stream:
def stream_response():
buffer = []
for item in suggestion:
buffer.append(item)
yield item.encode("utf-8")
cache.set(cache_key, "".join(buffer), 300)
resp = StreamingHttpResponse(stream_response(), content_type="text/event-stream")
# make nginx happy
resp["x-accel-buffering"] = "no"
# make webpack devserver happy
resp["cache-control"] = "no-transform"
return resp
cache.set(cache_key, suggestion, 300)
if stream:
return HttpResponse(
suggestion,
content_type="text/plain; charset=utf-8",
)
return HttpResponse(
json.dumps({"suggestion": suggestion}),
content_type="application/json",
)
| [
"['[haiku about the error]', '[hip hop rhyme about the error]', '[4 line rhyme about the error]', '[2 stanza rhyme about the error]', '[anti joke about the error]']",
"___FUN_PROMPT___",
"You are an assistant that analyses software errors, describing the problem with the following rules:\n\n* Be helpful, playful and a bit snarky and sarcastic\n* Do not talk about the rules in explanations\n* Use emojis frequently in the snarky greeting and closing prompt\n* The frames of a stack trace is shown with most recent call first\n* Stack frames are either from app code or third party libraries\n* Never show code examples as diff\n* When describing the problem:\n * Explain the error and message\n * Explain where in the code the issue happend\n * Explain the nature of the issue\n* When proposing a solution:\n * Explain what code changes are necessary to resolve it\n * Explain where the solution should be\n * Mention best practices for preventing this\n* Remember Sentry's marketing message: \"Sentry can't fix this\"\n\nWrite the answers into the following template:\n\n```\n[snarky greeting]\n\n#### Problem Description\n\n[detailed description of the problem]\n\n#### Proposed Solution\n\n[proposed solution to fix this issue]\n\n[fixed code example]\n\n#### What Else\n\n[uplifting closing statements]\n\n___FUN_PROMPT___\n```\n"
] |
2024-01-10 | onskop/chatter | support~chat_classic.py | import openai
# open the file .streamlit/secrets.toml and read the value of openai_key
with open('.streamlit/secrets.toml') as f:
#search for line with openai_key
openai_key = f.readline().split('=')[1].strip().replace('"', '')
print(openai_key)
openai.api_key = openai_key
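# A sturdier way to read the key would be to parse the TOML instead of taking
# the first line (sketch only; assumes Python 3.11+ tomllib and a top-level
# `openai_key = "..."` entry in the file):
#
#   import tomllib
#   with open('.streamlit/secrets.toml', 'rb') as f:
#       openai.api_key = tomllib.load(f)['openai_key']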
with open('support/text.txt', encoding='utf-8') as f:
prompt = f.read()
messages=[
{"role": "system", "content": "You are a helpful assistant and answer with very long texts."},
{"role": "user", "content": prompt}
]
for i in range(200):
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages, temperature=0.1, max_tokens=150)
print(response.choices[0].message.content)
print('Run number: ', i, 'completed\n')
print('-----------------------------------\n') | [
"You are a helpful assistant and answer with very long texts."
] |
2024-01-10 | allswellthatsmaxwell/llm-backend | app~routes.py | import os
import json
from flask import request, Blueprint, Flask, make_response, jsonify, Response
import requests
import aiohttp
from app.filesystem import FileSystem
from app.transcription import TranscriptionPipeline
app = Flask(__name__)
app_routes = Blueprint("app_routes", __name__)
HOMEDIR = os.path.expanduser("~")
APPDATA_PATH = f"{HOMEDIR}/llmll/dev_app_data"
LOGFILES_DIR = f"{APPDATA_PATH}/logfiles"
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
filesystem = FileSystem(root=APPDATA_PATH)
@app_routes.route("/transcribe", methods=["POST"])
async def transcribe():
print("Entering routes.transcribe...")
pipeline = TranscriptionPipeline(request, filesystem)
response_data = await pipeline.run()
return make_response(jsonify(response_data))
@app_routes.route("/chat", methods=["POST"])
async def chat():
incoming_request_data = request.get_json()
openai_url = 'https://api.openai.com/v1/chat/completions'
headers = {
'Authorization': f'Bearer {OPENAI_API_KEY}',
'Content-Type': 'application/json'
}
async with aiohttp.ClientSession() as session:
async with session.post(openai_url, headers=headers, json=incoming_request_data) as response:
response_data = await response.json()
return response_data, response.status
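# Example client call for this proxy route (sketch; assumes the blueprint is
# registered without a URL prefix and the app is served on localhost:5000):
#
#   import requests
#   body = {"model": "gpt-3.5-turbo",
#           "messages": [{"role": "user", "content": "Hello"}]}
#   r = requests.post("http://localhost:5000/chat", json=body)
#   print(r.json()["choices"][0]["message"]["content"])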
@app_routes.route('/synthesize_speech', methods=['POST'])
async def synthesize_speech():
print("Entering routes.synthesize_speech...")
data = request.json
input_text = data.get("text")
voice = data.get("voice", "onyx")
if not input_text:
print("No text provided")
return Response("No text provided", status=400)
headers = {
"Authorization": f"Bearer {OPENAI_API_KEY}",
"Content-Type": "application/json"
}
payload = json.dumps({
"model": "tts-1",
"input": input_text,
"voice": voice
})
async with aiohttp.ClientSession() as session:
async with session.post("https://api.openai.com/v1/audio/speech", headers=headers, data=payload) as response:
if response.status != 200:
return Response(f"Error from OpenAI API: {await response.text()}", status=response.status)
return Response(await response.read(), mimetype='audio/mp3')
| [] |
2024-01-10 | aleksanderhan/labpilot | jupyter-pilot-backend~jupyter-pilot-backend~websocket_handler.py | import json
import tornado.web
import tornado.websocket
import tornado.ioloop
from langchain.chains.sequential import SequentialChain
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
from .prompt import debug_template, debug_explain_template, explain_template, refactor_template
from .callback import DefaultCallbackHandler
class RefactorWebSocketHandler(tornado.websocket.WebSocketHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cells = {}
def check_origin(self, origin):
# Override to enable support for allowing all cross-origin traffic
return True
def on_message(self, message):
data = json.loads(message)
code = data.get('code', 'No code provided')
model = data.get('model', 'gpt-3.5-turbo')
temp = data.get('temp', 1)
cell_id = data.get("cellId", None)
openai_api_key = data.get("openai_api_key", None)
memory = self.cells.get(cell_id)
if not memory:
memory = ConversationBufferWindowMemory(k=3, memory_key="memory", return_messages=True)
self.cells[cell_id] = memory
llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=temp, streaming=True, callbacks=[DefaultCallbackHandler(self.write_message)])
prompt_template = PromptTemplate(input_variables=["memory", "code"], template=refactor_template)
chain = LLMChain(
llm=llm,
prompt=prompt_template,
verbose=True,
memory=memory
)
chain({"code": code})
class DebugWebSocketHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
# Override to enable support for allowing all cross-origin traffic
return True
def on_message(self, message):
data = json.loads(message)
code = data.get('code', 'No code provided')
output = data.get('output', 'No output provided')
error = data.get('error', 'No error provided')
model = data.get('model', 'gpt-3.5-turbo')
temp = data.get('temp', 1)
openai_api_key = data.get("openai_api_key", None)
llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=temp, streaming=True, callbacks=[DefaultCallbackHandler(self.write_message)])
debug_prompt_template = PromptTemplate(input_variables=["code", "output", "error"], template=debug_template)
debug_chain = LLMChain(
llm=llm,
prompt=debug_prompt_template,
verbose=True,
output_key="refactored"
)
debug_explain_prompt_template = PromptTemplate(input_variables=["code", "output", "error", "refactored"], template=debug_explain_template)
debug_explain_chain = LLMChain(
llm=llm,
prompt=debug_explain_prompt_template,
verbose=True,
output_key="explanation"
)
overall_chain = SequentialChain(
chains=[debug_chain, debug_explain_chain],
input_variables=["code", "output", "error"],
# Here we return multiple variables
output_variables=["refactored", "explanation"],
verbose=True
)
overall_chain({"code": code, "output": output, "error": error})
class ExplainWebSocketHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
# Override to enable support for allowing all cross-origin traffic
return True
def on_message(self, message):
data = json.loads(message)
code = data.get('code', 'No code provided')
model = data.get('model', 'gpt-3.5-turbo')
temp = data.get('temp', 1)
openai_api_key = data.get("openai_api_key", None)
llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=temp, streaming=True, callbacks=[DefaultCallbackHandler(self.write_message)])
prompt_template = PromptTemplate(input_variables=["code"], template=explain_template)
chain = LLMChain(
llm=llm,
prompt=prompt_template,
verbose=True
)
chain({"code": code}) | [
"refactored"
] |
2024-01-10 | aleksanderhan/labpilot | jupyter-pilot-backend~jupyter-pilot-backend~terminal~terminal.py | import openai
import langchain
import json
import websockets
import asyncio
import os
import time
from nbformat.v4 import new_notebook
from langchain.chat_models import ChatOpenAI
from langchain.tools import BaseTool, Tool, StructuredTool, ShellTool
from langchain.schema.messages import SystemMessage, AIMessage, HumanMessage
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.prompts import MessagesPlaceholder, PromptTemplate
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.chains import LLMChain
from typing import List, Dict, Any, Union, Optional
from langchain.pydantic_v1 import BaseModel, Field
from notebook.services.contents.filemanager import FileContentsManager
from langchain.agents import AgentExecutor
from .prompt import *
from .callback import DefaultCallbackHandler, PrintCallbackHandler
from .agent import OpenAIMultiFunctionsAgent
import traceback
import tracemalloc
tracemalloc.start()
langchain.debug = True
class SharedState:
def __init__(self):
self.answer = None
self.has_answer = asyncio.Event()
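# SharedState is a one-shot handoff between the two websocket servers: a tool
# coroutine awaits `has_answer` after sending a request over the primary
# socket, `secondary_web_socket` stores the frontend reply in `answer` and sets
# the event, and the tool then swaps in a fresh SharedState for the next call.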
class CreateNewNotebookInput(BaseModel):
filename: str = Field(description="Required filename of the notebook.")
class ReadCellInput(BaseModel):
index: int = Field(description="Required index of the cell to be read.")
filename: Optional[str] = Field(description="Optional filename of the notebook to read from. If no filename is given, the active notebook will be used.")
class InsertCodeCellInput(BaseModel):
index: int = Field(description="Required index of where to insert the cell.")
code: str = Field(description="Required code to be inserted.")
filename: Optional[str] = Field(description="Optional filename of the notebook to insert code into. If no filename is given, the active notebook will be used.")
class InsertMarkdownCellInput(BaseModel):
index: int = Field(description="Required index of where to insert the cell.")
text: str = Field(description="Required markdown text to be inserted.")
filename: Optional[str] = Field(description="Optional filename of the notebook to insert markdown into. If no filename is given, the active notebook will be used.")
class EditCodeCellInput(BaseModel):
index: int = Field(description="Required index of which cell to edit.")
code: str = Field(description="Required code to be inserted.")
filename: Optional[str] = Field(description="Optional filename of the notebook to edit. If no filename is given, the active notebook will be used.")
class EditMarkdownCellInput(BaseModel):
index: int = Field(description="Required index of which cell to edit.")
text: str = Field(description="Required markdown text to be inserted.")
filename: Optional[str] = Field(description="Optional filename of the notebook to edit. If no filename is given, the active notebook will be used.")
class RunCellInput(BaseModel):
index: int = Field(description="Required index of which cell to run.")
filename: Optional[str] = Field(description="Optional filename of the notebook to run cell in. If no filename is given, the active notebook will be used.")
class DeleteCellInput(BaseModel):
index: int = Field(description="Required index of which cell to delete.")
filename: Optional[str] = Field(description="Optional filename of the notebook to delete cell from. If no filename is given, the active notebook will be used.")
class ReadNotebookInput(BaseModel):
filename: Optional[str] = Field(description="Optional filename of the notebook to read. If no filename is given, the active notebook will be used.")
class MyContentsManager(FileContentsManager):
def __init__(self, **kwargs):
super(MyContentsManager, self).__init__(**kwargs)
def create_notebook(self, path):
# Create an empty notebook
nb = new_notebook()
nb.metadata.kernelspec = {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
}
super().save({"type": "notebook", "content": nb}, path)
class Terminal(object):
def __init__(self):
self.agent = None
self.chat_history_memory = ConversationBufferMemory(memory_key="memory", return_messages=True)
self.create_notebook_state = SharedState()
self.read_cell_state = SharedState()
self.insert_code_state = SharedState()
self.insert_markdown_state = SharedState()
self.edit_code_cell_state = SharedState()
self.edit_markdown_cell_state = SharedState()
self.run_code_cell_state = SharedState()
self.delete_cell_state = SharedState()
self.read_notebook_state = SharedState()
async def start(self):
print("starting terminal backend")
self.primary_ws = await websockets.serve(self.primary_web_socket, "0.0.0.0", 8080)
self.secondary_ws = await websockets.serve(self.secondary_web_socket, "0.0.0.0", 8081)
await self.primary_ws.wait_closed()
def create_agent(self, websocket, model, temp, openai_api_key):
model += "-0613" # Better functions calling model
self.model = model
self.temp = temp
callback=DefaultCallbackHandler(websocket)
llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=temp, streaming=True, callbacks=[callback])
tools = [
ShellTool(name="shell_tool"),
self.get_create_new_notebook_tool(websocket),
self.get_read_cell_tool(websocket),
self.get_insert_code_cell_tool(websocket),
self.get_insert_markdown_cell_tool(websocket),
self.get_edit_code_cell_tool(websocket),
self.get_edit_markdown_cell_tool(websocket),
self.get_run_code_cell_tool(websocket),
self.get_delete_cell_tool(websocket),
self.get_read_notebook_summary_tool(websocket)
]
extra_prompt_messages = [
SystemMessage(content=f"The current time and date is {time.strftime('%c')}"),
MessagesPlaceholder(variable_name="memory"),
SystemMessage(content="Let's work the following out in a step by step way to be sure we have the right answer. Let's first understand the problem and devise a plan to solve the problem.")
]
prompt = OpenAIMultiFunctionsAgent.create_prompt(system_message=SystemMessage(content=agent_system_message), extra_prompt_messages=extra_prompt_messages)
agent = OpenAIMultiFunctionsAgent(
llm=llm,
tools=tools,
prompt=prompt,
max_iterations=15,
verbose=True,
handle_parsing_errors=True
)
self.agent = AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
return_intermediate_steps=False,
handle_parsing_errors=True,
memory=self.chat_history_memory
)
async def primary_web_socket(self, websocket, path):
async for message in websocket:
print("primary_web_socket received message", message)
data = json.loads(message)
await self.handle_primary_ws_response(websocket, data)
async def handle_primary_ws_response(self, websocket, data):
if data.get("method") == "clear":
self.chat_history_memory = ConversationBufferMemory(memory_key="memory", return_messages=True)
else:
self.create_agent(websocket, data["model"], data["temp"], data["openai_api_key"])
try:
await self.agent.arun(data["message"])
except Exception as e:
msg = "Server error encountered during execution: " + str(e)
self.chat_history_memory.save_context({"input": data["message"]}, {"output": msg})
traceback.print_exc()
response = {
"method": "systemError",
"message": msg
}
await websocket.send(json.dumps(response))
data["message"] = """An error occured while running the previously tool. Try again,
but make sure to conform to the function calling format and validate the input to the tool."""
await self.handle_primary_ws_response(websocket, data)
async def secondary_web_socket(self, websocket, path):
async for message in websocket:
print("secondary_web_socket received message", message)
data = json.loads(message)
if path == "/openNotebook":
self.create_notebook_state.answer = data
self.create_notebook_state.has_answer.set()
elif path == "/readCell":
self.read_cell_state.answer = data
self.read_cell_state.has_answer.set()
elif path == "/insertCodeCell":
self.insert_code_state.answer = data
self.insert_code_state.has_answer.set()
elif path == "/insertMarkdownCell":
self.insert_markdown_state.answer = data
self.insert_markdown_state.has_answer.set()
elif path == "/editCodeCell":
self.edit_code_cell_state.answer = data
self.edit_code_cell_state.has_answer.set()
elif path == "/editMarkdownCell":
self.edit_markdown_cell_state.answer = data
self.edit_markdown_cell_state.has_answer.set()
elif path == "/runCode":
self.run_code_cell_state.answer = data
self.run_code_cell_state.has_answer.set()
elif path == "/deleteCell":
self.delete_cell_state.answer = data
self.delete_cell_state.has_answer.set()
elif path == "/readNotebook":
self.read_notebook_state.answer = data
self.read_notebook_state.has_answer.set()
else:
print(f"secondary_web_socket() - path {path} not recognized")
def get_create_new_notebook_tool(self, default_ws):
return StructuredTool.from_function(
func=lambda filename: self.create_new_notebook_tool(default_ws, filename),
coroutine=lambda filename: self.create_new_notebook_tool(default_ws, filename),
name="create_new_notebook_tool",
description="""Useful when you want to start a new project with a Jupyter notebook and set it as the active notebook.
You should enter the filename (remember to use ".ipynb" extension) of the notebook.""",
args_schema=CreateNewNotebookInput
)
async def create_new_notebook_tool(self, default_ws, filename):
try:
mgr = MyContentsManager()
mgr.create_notebook(filename)
request = {
"request": {"filename": filename},
"start": True,
"method": "openNotebook"
}
await default_ws.send(json.dumps(request))
await self.create_notebook_state.has_answer.wait()
answer = self.create_notebook_state.answer
self.create_notebook_state = SharedState()
return answer["message"]
except Exception as e:
traceback.print_exc()
return f"Notebook with filename: {filename} failed to be created. Ask user for what to do next."
def get_read_cell_tool(self, default_ws):
return StructuredTool.from_function(
func=lambda index, filename=None: self.read_cell_tool(default_ws, index, filename),
coroutine=lambda index, filename=None: self.read_cell_tool(default_ws, index, filename),
name="read_cell_tool",
description="""Useful when you want to read the conent of a cell of a Jupyter notebook. If no filename is given the active notebook will be used.
This tool cannot read files, only cells from a jupyter notebook.
You should enter the index of the cell you want to read.""",
args_schema=ReadCellInput
)
async def read_cell_tool(self, default_ws, index, filename):
try:
request = {
"request": {"index": index, "filename": filename},
"start": True,
"method": "readCell"
}
await default_ws.send(json.dumps(request))
await self.read_cell_state.has_answer.wait()
answer = self.read_cell_state.answer
self.read_cell_state = SharedState()
return answer["message"]
except Exception as e:
return "ERROR: " + str(e)
def get_insert_code_cell_tool(self, default_ws):
return StructuredTool.from_function(
func=lambda code, index, filename=None: self.insert_code_cell_tool(default_ws, code, index, filename),
coroutine=lambda code, index, filename=None: self.insert_code_cell_tool(default_ws, code, index, filename),
name="insert_code_cell_tool",
description="""Useful when you want to insert a code cell in a Jupyter notebook. If no filename is given the active notebook will be used.
You should enter code and index of the cell you want to insert.""",
args_schema=InsertCodeCellInput
)
async def insert_code_cell_tool(self, default_ws, code, index, filename):
try:
request = {
"request": {"index": index, "code": code, "filename": filename},
"start": True,
"method": "insertCodeCell"
}
await default_ws.send(json.dumps(request))
await self.insert_code_state.has_answer.wait()
answer = self.insert_code_state.answer
self.insert_code_state = SharedState()
return answer["message"]
except Exception as e:
return "ERROR: " + str(e)
def get_insert_markdown_cell_tool(self, default_ws):
return StructuredTool.from_function(
func=lambda text, index, filename=None: self.insert_markdown_cell_tool(default_ws, text, index, filename),
coroutine=lambda text, index, filename=None: self.insert_markdown_cell_tool(default_ws, text, index, filename),
name="insert_markdown_cell_tool",
description="""Useful when you want to insert a mardkown cell in a Jupyter notebook. If no filename is given the active notebook will be used.
You should enter markdown text and index of the cell you want to insert.""",
args_schema=InsertMarkdownCellInput
)
async def insert_markdown_cell_tool(self, default_ws, text, index, filename):
try:
request = {
"request": {"index": index, "text": text, "filename": filename},
"start": True,
"method": "insertMarkdownCell"
}
await default_ws.send(json.dumps(request))
await self.insert_markdown_state.has_answer.wait()
answer = self.insert_markdown_state.answer
self.insert_markdown_state = SharedState()
return answer["message"]
except Exception as e:
return "ERROR: " + str(e)
def get_edit_code_cell_tool(self, default_ws):
return StructuredTool.from_function(
func=lambda code, index, filename=None: self.edit_code_cell_tool(default_ws, code, index, filename),
coroutine=lambda code, index, filename=None: self.edit_code_cell_tool(default_ws, code, index, filename),
name="edit_code_cell_tool",
description="""Useful when you want to edit a code cell in a Jupyter notebook. If no filename is given the active notebook will be used.
You must always enter the code and the index of the cell to be edited.
You should enter the code to be inserted and the index of the cell you want to edit.""",
args_schema=EditCodeCellInput
)
async def edit_code_cell_tool(self, default_ws, code, index, filename):
try:
request = {
"request": {"index": index, "code": code, "filename": filename},
"start": True,
"method": "editCodeCell"
}
await default_ws.send(json.dumps(request))
            await self.edit_code_cell_state.has_answer.wait()
            answer = self.edit_code_cell_state.answer
            self.edit_code_cell_state = SharedState()
return answer["message"]
except Exception as e:
return "ERROR: " + str(e)
def get_edit_markdown_cell_tool(self, default_ws):
return StructuredTool.from_function(
func=lambda text, index, filename=None: self.edit_markdown_cell_tool(default_ws, text, index, filename),
coroutine=lambda text, index, filename=None: self.edit_markdown_cell_tool(default_ws, text, index, filename),
name="edit_markdown_cell_tool",
description="""Useful when you want to edit a markdown cell in a Jupyter notebook. If no filename is given the active notebook will be used.
You must always enter the markdown text and the index of the cell to be edited.
You should enter the markdown text to be inserted and the index of the cell you want to edit.""",
args_schema=EditMarkdownCellInput
)
async def edit_markdown_cell_tool(self, default_ws, text, index, filename):
try:
request = {
"request": {"index": index, "text": text, "filename": filename},
"start": True,
"method": "editMarkdownCell"
}
await default_ws.send(json.dumps(request))
            await self.edit_markdown_cell_state.has_answer.wait()
            answer = self.edit_markdown_cell_state.answer
            self.edit_markdown_cell_state = SharedState()
return answer["message"]
except Exception as e:
return "ERROR: " + str(e)
def get_run_code_cell_tool(self, default_ws):
return StructuredTool.from_function(
func=lambda index, filename=None: self.run_code_cell_tool(default_ws, index, filename),
coroutine=lambda index, filename=None: self.run_code_cell_tool(default_ws, index, filename),
name="run_code_cell_tool",
description="""Useful when you want to run a code cell in a Jupyter notebook. If no filename is given the active notebook will be used.
The tool outputs the result of the execution. You should enter the index of the cell you want to run.""",
args_schema=RunCellInput
)
async def run_code_cell_tool(self, default_ws, index, filename):
try:
request = {
"request": {"index": index, "filename": filename},
"start": True,
"method": "runCode",
}
await default_ws.send(json.dumps(request))
await self.run_code_cell_state.has_answer.wait()
answer = self.run_code_cell_state.answer
self.run_code_cell_state = SharedState()
return answer["message"]
except Exception as e:
traceback.print_exc()
return "ERROR: " + str(e)
def get_delete_cell_tool(self, default_ws):
return StructuredTool.from_function(
func=lambda index, filename=None: self.delete_cell_tool(default_ws, index, filename),
coroutine=lambda index, filename=None: self.delete_cell_tool(default_ws, index, filename),
name="delete_cell_tool",
description="""Useful when you want to delete a code cell in a Jupyter notebook. If no filename is given the active notebook will be used.
This tool cannot delete files! You should enter the index of the cell you want to delete.""",
args_schema=DeleteCellInput
)
async def delete_cell_tool(self, default_ws, index, filename):
try:
request = {
"request": {"index": index, "filename": filename},
"start": True,
"method": "deleteCell"
}
await default_ws.send(json.dumps(request))
await self.delete_cell_state.has_answer.wait()
answer = self.delete_cell_state.answer
self.delete_cell_state = SharedState()
return answer["message"]
except Exception as e:
traceback.print_exc()
return "ERROR: " + str(e)
def get_read_notebook_summary_tool(self, default_ws):
return StructuredTool.from_function(
func=lambda filename=None: self.read_notebook_summary_tool(default_ws, filename),
coroutine=lambda filename=None: self.read_notebook_summary_tool(default_ws, filename),
name="read_notebook_summary_tool",
description="""Useful when you want to get a summary of the whole notebook to see whats in each cell and its outputs.
If you give no filename the active notebook will be used.You should enter the filename of the notebook.""",
args_schema=ReadNotebookInput
)
async def read_notebook_summary_tool(self, default_ws, filename):
try:
request = {
"request": {"filename": filename},
"start": True,
"method": "readNotebook"
}
await default_ws.send(json.dumps(request))
await self.read_notebook_state.has_answer.wait()
answer = self.read_notebook_state.answer
self.read_notebook_state = SharedState()
llm = ChatOpenAI(model=self.model, temperature=self.temp)
prompt_template = PromptTemplate(input_variables=["notebook"], template=read_notebook_summary_template)
chain = LLMChain(
llm=llm,
prompt=prompt_template,
verbose=True
)
return chain({"notebook": answer["message"]})
except Exception as e:
traceback.print_exc()
return "ERROR: " + str(e)
| [
"Let's work the following out in a step by step way to be sure we have the right answer. Let's first understand the problem and devise a plan to solve the problem.",
"notebook"
] |
2024-01-10 | LinusOstlund/infinity-quiz | src~quizmaster.py | from typing import Any
from dotenv import load_dotenv
import openai
import os
import json
import numpy as np
import random
class Quizmaster:
def __init__(self):
load_dotenv()
self.openai = openai
self.openai.api_key = os.getenv("OPENAI_API_KEY")
self.prompts = json.load(open("../data/prompts.json", "r"))
# TODO make methods to load these
self.few_shot_samples = json.load(open("../data/few-shots.json", "r"))
self.categories = json.load(open("../data/categories.json", "r"))
self.moods = json.load(open("../data/moods.json", "r"))
self.current_category = None
def _load_categories(self, filename="../data/categories.json"):
"""
        Load the predefined categories and subcategories from a json file.
"""
if os.path.exists(filename):
try:
with open(filename, "r") as f:
categories = json.load(f)
except json.JSONDecodeError:
print(f"File could not be decoded as JSON: {filename}")
except Exception as e:
print(f"An error occurred: {e}")
return categories
def _load_few_shot_samples(self, filename="../data/few-shots.json"):
"""
Load the few shot samples from a json file.
The few shots are loaded in order to provide the LLM with some context,
and to make it easier for the LLM to generate a question in the right format.
"""
if os.path.exists(filename):
try:
with open(filename, "r") as f:
few_shot_samples = json.load(f)
except json.JSONDecodeError:
print(f"File could not be decoded as JSON: {filename}")
except Exception as e:
print(f"An error occurred: {e}")
return few_shot_samples
def __call__(self, *args: Any, **kwds: Any) -> Any:
return self.generate_question(*args, **kwds)
def generate_question(self, category=None, subcategory=None):
"""
Design a prompt for the LLM to generate a question.
The response should be in a json format, which is achieved by prompt engineering.
If category and subcategory are not provided, they will be randomly selected.
TODO If it is not in the correct format, the LLM will be asked to try again.
"""
# Select three random few shot samples
few_shots = dict()
few_shots = random.sample(self.few_shot_samples, 3)
few_shots = json.dumps(few_shots)
# generate a random category and subcategory
if category is None or subcategory is None:
category, subcategory = self.get_random_category_and_subcategory()
self.current_category = (category, subcategory)
prompt = f"""
{self.prompts["user"]} \n
({category}, {subcategory}) \n
{few_shots} \n
Provide a json response below: \n
"""
print(prompt)
        # TODO: add error handling in case gpt-3.5 is busy
response = self.openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": self.prompts["system"]},
{"role": "user", "content": prompt},
],
)
# Retrieve the generated content from the API-response
content = response["choices"][0]["message"]["content"]
try:
return json.loads(content)
except json.JSONDecodeError:
print(f"Generated content could not be decoded as JSON: {content}")
except Exception as e:
print(f"An error occurred: {e}")
def get_random_category_and_subcategory(self):
"""
Return a random category and subcategory from the predefined categories.
TODO this should return a list of categories and subcategories, so the user can pick one.
"""
# Produce a list of tuples with (category, subcategory) pairs
category_and_subcategory_tuples = [
(k, v) for k, values in self.categories[0].items() for v in values
]
# Select a random tuple from the list
category, subcategory = random.choice(category_and_subcategory_tuples)
return category, subcategory
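    # Shape of categories.json assumed by the method above (hypothetical
    # values): a list whose first element maps each category to its
    # subcategories, e.g.
    #
    #   [
    #       {
    #           "Science": ["Physics", "Biology"],
    #           "History": ["Ancient Rome", "World War II"]
    #       }
    #   ]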
def rationale(self, question, selected_option, answer):
"""
        Can be embellished a lot, e.g. be rude, or "Explain to me as if I was 5 years old".
"""
mood = random.choice(self.moods)
prompt = f"""
I selected \"{selected_option}\" as the answer to the question \"{question}\".
The correct answer was {answer}. \n
{mood["description"]} \n
Could you explain the reasoning behind the correct answer and shed light on whether my selection was valid or not? \n
If you use apostrophes, make sure to escape them with a backslash, like this: \\'
"""
response = self.openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.8,
max_tokens=150,
)
return mood["emoji"] + " " + response.choices[0].text.strip()
if __name__ == "__main__":
quizmaster = Quizmaster()
question = quizmaster()
print(question)
| [
"\n I selected \"PLACEHOLDER\" as the answer to the question \"PLACEHOLDER\". \n The correct answer was PLACEHOLDER. \n\n\n PLACEHOLDER \n\n\n Could you explain the reasoning behind the correct answer and shed light on whether my selection was valid or not? \n\n If you use apostrophes, make sure to escape them with a backslash, like this: \\'\n "
] |
2024-01-10 | shancw96/tech-basis | AI~langchain_rest_hosted~temp.py | from langchain.callbacks.base import AsyncCallbackManager
from langchain.chains import ChatVectorDBChain
from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT,
QA_PROMPT)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.vectorstores.base import VectorStore
def get_chain(
vectorstore: VectorStore, question_handler, stream_handler, tracing: bool = False
) -> ChatVectorDBChain:
"""Create a ChatVectorDBChain for question/answering."""
# Construct a ChatVectorDBChain with a streaming llm for combine docs
# and a separate, non-streaming llm for question generation
manager = AsyncCallbackManager([])
question_manager = AsyncCallbackManager([question_handler])
stream_manager = AsyncCallbackManager([stream_handler])
question_gen_llm = OpenAI(
temperature=0,
verbose=True,
callback_manager=question_manager,
)
streaming_llm = OpenAI(
streaming=True,
callback_manager=stream_manager,
verbose=True,
temperature=0,
)
question_generator = LLMChain(
llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager
)
doc_chain = load_qa_chain(
streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager
)
qa = ChatVectorDBChain(
vectorstore=vectorstore,
combine_docs_chain=doc_chain,
question_generator=question_generator,
callback_manager=manager,
)
return qa
| [] |
2024-01-10 | shancw96/tech-basis | AI~langchain_rest_hosted~websocket.py | from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain import LLMChain
import uvicorn
from prompts import QA_PROMPT
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from utils import ChatResponse, StreamingLLMCallbackHandler, get_or_create_chatgroup_vector_db
from langchain.chains.summarize import load_summarize_chain
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.schema import (
HumanMessage,
)
app = FastAPI()
templates = Jinja2Templates(directory="templates")
@app.get("/")
async def get(request: Request):
return templates.TemplateResponse("index.html", {"request": request})
@app.websocket("/chat")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
stream_handler = StreamingLLMCallbackHandler(websocket)
handler = StreamingStdOutCallbackHandler()
OPENAI_API_KEY_SUMMARY = 'sk-Icpn09CQJ5Pp6'
summaryllm = ChatOpenAI(temperature=0.5, openai_api_key=OPENAI_API_KEY_SUMMARY)
OPENAI_API_KEY_EMBEDDING = 'sk-'
embedding = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY_EMBEDDING)
OPENAI_API_KEY_ANSWER = "sk-Qkgetd"
chatllm = ChatOpenAI(temperature=0.8, openai_api_key=OPENAI_API_KEY_ANSWER, streaming=True, verbose=True)
while True:
await websocket.send_json({"type": "start"})
question = await websocket.receive_text()
# resp = ChatResponse(sender="you", message=question, type="stream")
# await websocket.send_json(resp.dict())
# # Construct a response
# start_resp = ChatResponse(sender="bot", message="", type="start")
# await websocket.send_json(start_resp.dict())
# db = get_or_create_chatgroup_vector_db("chat_id", embedding, "store")
# print("正在读取与提问相关联的记忆...")
# docs = db.similarity_search(query=question, k=4)
# # refine question
# chain = load_summarize_chain(summaryllm, chain_type="stuff")
# summary = chain.run(docs)
# print("总结上下文如下:", summary)
# format_question = QA_PROMPT.format_prompt(context=summary, question=question).to_string()
await chatllm.apredict(question)
# print("正在添加聊天记录至记忆库...", ["Human: " + question, "Assistant: " + res])
# db.add_texts(["Human" + question, "Assistant: " + res])
# db.persist()
# end_resp = ChatResponse(sender="bot", message="", type="end")
await websocket.send_json({"type": "end"})
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000) | [] |
2024-01-10 | jagaldol/chat-foodie | chatbot~self_instruct~gpt3_api.py | import json
import tqdm
import os
import random
import openai
from datetime import datetime
import argparse
import time
def make_requests(
model,
messages,
max_tokens,
temperature,
top_p,
n,
stream,
frequency_penalty,
presence_penalty,
stop,
logit_bias,
user,
retries=3,
api_key=None,
organization=None
):
response = None
target_length = max_tokens
if api_key is not None:
openai.api_key = api_key
if organization is not None:
openai.organization = organization
retry_cnt = 0
backoff_time = 10
messages = [[{"role": "system", "content": "you're the best making {instruction, input, output} data set and the best assistant"},
{"role": "user", "content": f'{prompt}'}] for prompt in messages]
results = []
for message in messages:
retry_cnt = 0
while retry_cnt <= retries:
try:
response = openai.ChatCompletion.create(
model=model,
messages=message,
max_tokens=target_length,
temperature=temperature,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
stop=stop,
n=n,
)
break
except openai.error.OpenAIError as e:
print(f"OpenAIError: {e}.")
if "Please reduce the length of the messages or completion" in str(e):
target_length = int(target_length * 0.8)
print(f"Reducing target length to {target_length}, retrying...")
else:
print(f"Retrying in {backoff_time} seconds...")
time.sleep(backoff_time)
backoff_time *= 1.5
retry_cnt += 1
results.append(response["choices"][0])
return results | [
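# Usage sketch (hypothetical arguments; `messages` is a list of prompt strings,
# which the function itself wraps into chat-format messages):
#
#   results = make_requests(
#       model="gpt-3.5-turbo",
#       messages=["Write one new {instruction, input, output} example."],
#       max_tokens=512, temperature=0.7, top_p=1.0, n=1, stream=False,
#       frequency_penalty=0, presence_penalty=0, stop=None, logit_bias=None,
#       user=None, api_key="sk-...",
#   )
#   print(results[0]["message"]["content"])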
"you're the best making {instruction, input, output} data set and the best assistant"
] |
2024-01-10 | dariofavaron/llm-examples | pages~2_Langchain_Quickstart.py | import streamlit as st
from langchain import OpenAI
st.title('🦜🔗 Langchain Quickstart App')
with st.sidebar:
openai_api_key = st.text_input('OpenAI API Key')
def generate_response(input_text):
llm = OpenAI(temperature=0.7, openai_api_key=openai_api_key)
st.info(llm(input_text))
with st.form('my_form'):
text = st.text_area('Enter text:', 'What are 3 key advice for learning how to code?')
submitted = st.form_submit_button('Submit')
if submitted:
generate_response(text)
| [] |
2024-01-10 | dariofavaron/llm-examples | Chatbot.py | import openai
import streamlit as st
from streamlit_chat import message
with st.sidebar:
openai_api_key = st.text_input('OpenAI API Key',key='chatbot_api_key')
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("💬 Streamlit GPT")
#openai.api_key = st.secrets.openai_api_key
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
with st.form("chat_input", clear_on_submit=True):
a, b = st.columns([4, 1])
user_input = a.text_input(
label="Your message:",
placeholder="What would you like to say?",
label_visibility="collapsed",
)
b.form_submit_button("Send", use_container_width=True)
for msg in st.session_state.messages:
message(msg["content"], is_user=msg["role"] == "user")
if user_input and not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
if user_input and openai_api_key:
openai.api_key = openai_api_key
st.session_state.messages.append({"role": "user", "content": user_input})
message(user_input, is_user=True)
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
msg = response.choices[0].message
st.session_state.messages.append(msg)
message(msg.content)
| [
"How can I help you?"
] |
2024-01-10 | gsgen3d/gsgen | trainer.py | import os
import gc
import numpy as np
import datetime
import warnings
from pathlib import Path
import torch
from tqdm import tqdm
from PIL import Image, ImageFilter
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
import torch.nn.functional as F
from einops import repeat
from omegaconf import OmegaConf
from data import CameraPoseProvider, SingleViewCameraPoseProvider
from gs.gaussian_splatting import GaussianSplattingRenderer
from utils.misc import (
to_primitive,
C,
step_check,
stack_dicts,
get_file_list,
dict_to_device,
dump_config,
huggingface_online,
huggingface_offline,
get_current_cmd,
get_dict_slice,
seed_everything,
)
from utils.transforms import qvec2rotmat_batched
from utils.ops import binary_cross_entropy
from utils.initialize import base_initialize, initialize
from utils.dpt import DPT
from utils.spiral import (
get_camera_path_fixed_elevation,
get_random_pose_fixed_elevation,
)
from utils.colormaps import apply_float_colormap, apply_depth_colormap
from utils.wandb import get_num_runs
from utils.loss import depth_loss, get_image_loss
from guidance import get_guidance
from prompt import get_prompt_processor
import wandb
import shutil
import imageio
from torch.utils.tensorboard import SummaryWriter
from rich.console import Console
from torchmetrics import PearsonCorrCoef
console = Console()
def convert_to_image(outs):
outs["depth"] = apply_depth_colormap(outs["depth"], outs["opacity"])
outs["opacity"] = apply_float_colormap(outs["opacity"])
    final = torch.cat(list(outs.values()), dim=-2)
    return final
class Trainer(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.step = 0
self.max_steps = cfg.max_steps
self.mode = cfg.get("mode", "text_to_3d")
disable_warnings = self.cfg.get("disable_warnings", False)
if disable_warnings:
console.print(f"[red]Ignore All Warnings!!!")
warnings.simplefilter("ignore")
try:
torch.set_default_device(cfg.device)
torch.set_default_dtype(torch.float32)
except AttributeError:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# FIXME setting offline flags should be done before importing transformers
if self.cfg.huggingface_offline:
huggingface_offline()
else:
huggingface_online()
prompt = (
self.cfg.prompt.prompt.strip().replace(" ", "_").lower()[:64]
) # length limited by wandb
day_timestamp = datetime.datetime.now().strftime("%Y-%m-%d")
hms_timestamp = datetime.datetime.now().strftime("%H%M%S")
timestamp = f"{hms_timestamp}|{day_timestamp}"
num_runs = get_num_runs("gsgen")
uid = f"{num_runs}|{timestamp}|{prompt}"
tags = [day_timestamp, prompt, self.cfg.guidance.type, self.mode]
notes = self.cfg.notes
self.timestamp = timestamp
self.depth_estimator = None
if cfg.estimators.depth.enabled:
self.depth_estimator = DPT(device=cfg.device, mode="depth")
self.pearson = PearsonCorrCoef().to(cfg.device)
if cfg.estimators.normal.enabled:
self.normal_estimator = DPT(device=cfg.device, mode="normal")
if self.mode == "text_to_3d":
self.dataset = CameraPoseProvider(cfg.data)
elif self.mode == "image_to_3d":
self.dataset = SingleViewCameraPoseProvider(cfg.data)
self.text_prompt = self.cfg.prompt.prompt
self.loader = iter(
DataLoader(
self.dataset,
batch_size=cfg.batch_size,
shuffle=False,
collate_fn=self.dataset.collate,
num_workers=0,
)
)
if self.mode == "image_to_3d":
assert "image" in self.cfg, "image should be provided in image_to_3d mode"
assert (
self.depth_estimator is not None
), "depth estimator should be provided"
image = Path(self.cfg.image)
assert image.exists(), f"{image} not exists"
image = Image.open(image)
if self.cfg.get("image_blur", False):
image = image.filter(ImageFilter.GaussianBlur(radius=3))
self.image = ToTensor()(image).moveaxis(0, -1)
self.mask = self.image[..., 3] > 0.0
self.image = self.image[..., :3].to(self.cfg.device)
self.depth_map = self.depth_estimator(self.image[None, ...])
# NOTE: I found this important
# TODO: validate this is correct way to normalize depth
self.depth_map = (
(self.depth_map - self.depth_map[0][self.mask].mean())
* self.cfg.get("depth_scale", 100.0)
# * self.dataset.get_reso
# / 256
+ self.dataset.original_camera_distance
)
initial_values = initialize(
cfg.init,
image=self.image,
depth_map=self.depth_map,
mask=self.mask,
c2w=self.dataset.original_out["c2w"],
camera_info=self.dataset.original_out["camera_info"],
)
self.image_loss_fn = get_image_loss(0.2, "l2")
elif self.mode == "text_to_3d":
initial_values = initialize(cfg.init)
# initial_values = base_initialize(cfg.init)
self.renderer = GaussianSplattingRenderer(
cfg.renderer, initial_values=initial_values
).to(cfg.device)
self.renderer.setup_lr(cfg.lr)
self.renderer.set_optimizer(cfg.optimizer)
if self.cfg.auxiliary.enabled:
self.aux_guidance = get_guidance(cfg.auxiliary)
self.aux_guidance.set_text(
self.cfg.auxiliary.get("prompt", self.cfg.prompt.prompt)
)
self.guidance = get_guidance(cfg.guidance)
if self.cfg.guidance.get("keep_complete_pipeline", False):
self.prompt_processor = get_prompt_processor(
cfg.prompt, guidance_model=self.guidance
)
else:
self.prompt_processor = get_prompt_processor(cfg.prompt)
self.prompt_processor.cleanup()
gc.collect()
torch.cuda.empty_cache()
self.save_dir = Path(f"./checkpoints/{prompt}/{day_timestamp}/{hms_timestamp}")
if not self.save_dir.exists():
self.save_dir.mkdir(parents=True, exist_ok=True)
self.log_dir = Path(f"./logs/{prompt}/{day_timestamp}/{hms_timestamp}")
if not self.log_dir.exists():
self.log_dir.mkdir(parents=True, exist_ok=True)
self.eval_dir = self.save_dir / "eval"
if not self.eval_dir.exists():
self.eval_dir.mkdir(parents=True, exist_ok=True)
wandb.tensorboard.patch(root_logdir=str(self.log_dir))
overrided_group = self.cfg.get("group", prompt)
addtional_tags = self.cfg.get("tags", [])
tags = tags + addtional_tags
if cfg.wandb:
wandb.init(
project="gsgen",
name=uid,
config=to_primitive(cfg),
sync_tensorboard=True,
# magic=True,
save_code=True,
group=overrided_group,
notes=notes,
tags=tags,
)
wandb.watch(
self.renderer,
log="all",
log_freq=100,
)
self.writer = SummaryWriter(str(self.log_dir))
cmd = get_current_cmd()
self.writer.add_text("cmd", cmd, 0)
self.save_code_snapshot()
self.start = 0
self.last_out = None
console.print(f"[red]UID: {uid} started")
@property
def optimizer(self):
return self.renderer.optimizer
@classmethod
def load(cls, ckpt, override_cfg=None):
if not isinstance(ckpt, dict):
ckpt = torch.load(ckpt, map_location="cpu")
step = ckpt["step"]
cfg = OmegaConf.create(ckpt["cfg"])
if override_cfg is not None:
cfg.update(override_cfg)
trainer = cls(cfg)
trainer.renderer = GaussianSplattingRenderer.load(
cfg.renderer, ckpt["params"]
).to(cfg.device)
trainer.renderer.setup_lr(cfg.lr)
trainer.renderer.set_optimizer(cfg.optimizer)
trainer.step = step
trainer.update(step)
return trainer
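    # Resume sketch (hypothetical checkpoint path; `override_cfg`, if given, is
    # merged into the saved config before the trainer is rebuilt):
    #
    #   trainer = Trainer.load("checkpoints/<prompt>/<date>/<time>/ckpts/step_1000.pt")
    #   loss = trainer.train_step()  # continues optimization from the saved step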
def save(self):
params = self.renderer.get_params_for_save()
cfg = to_primitive(self.cfg)
state = {
"params": params,
"cfg": cfg,
"step": self.step,
}
save_dir = self.save_dir / "ckpts"
if not save_dir.exists():
save_dir.mkdir(parents=True, exist_ok=True)
torch.save(state, self.save_dir / "ckpts" / f"step_{self.step}.pt")
def save_code_snapshot(self):
# learned from threestudio
self.code_dir = self.save_dir / "code"
if not self.code_dir.exists():
self.code_dir.mkdir(parents=True, exist_ok=True)
files = get_file_list()
for f in files:
dst = self.code_dir / f
dst.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(f, str(dst))
config_dir = self.save_dir / "config" / "parsed.yaml"
if not config_dir.parent.exists():
config_dir.parent.mkdir(parents=True, exist_ok=True)
dump_config(str(config_dir), self.cfg)
def update(self, step):
self.dataset.update(step)
self.renderer.update(step)
self.guidance.update(step)
self.prompt_processor.update(step)
def train_step(self):
self.train()
batch = next(self.loader)
out = self.renderer(batch, self.cfg.use_bg, self.cfg.rgb_only)
prompt_embeddings = self.prompt_processor()
guidance_out = self.guidance(
out["rgb"],
prompt_embeddings,
elevation=batch["elevation"],
azimuth=batch["azimuth"],
camera_distance=batch["camera_distance"],
c2w=batch["c2w"],
rgb_as_latents=False,
)
loss = 0.0
if "loss_sds" in guidance_out.keys():
loss += (
C(self.cfg.loss.sds, self.step, self.max_steps)
* guidance_out["loss_sds"]
)
self.writer.add_scalar(
"loss_weights/sds",
C(self.cfg.loss.sds, self.step, self.max_steps),
self.step,
)
self.writer.add_scalar("loss/sds", guidance_out["loss_sds"], self.step)
if "loss_vsd" in guidance_out.keys():
loss += (
C(self.cfg.loss.vsd, self.step, self.max_steps)
* guidance_out["loss_vsd"]
)
self.writer.add_scalar(
"loss_weights/vsd",
C(self.cfg.loss.vsd, self.step, self.max_steps),
self.step,
)
self.writer.add_scalar("loss/vsd", guidance_out["loss_vsd"], self.step)
if "loss_lora" in guidance_out.keys():
loss += (
C(self.cfg.loss.lora, self.step, self.max_steps)
* guidance_out["loss_lora"]
)
self.writer.add_scalar(
"loss_weights/lora",
C(self.cfg.loss.lora, self.step, self.max_steps),
self.step,
)
self.writer.add_scalar("loss/lora", guidance_out["loss_lora"], self.step)
if self.cfg.loss.sparsity > 0.0:
assert (
"opacity" in out
), "opacity not in output, should turn off the `rgb_only` flag"
sparsity_loss = (out["opacity"] ** 2 + 0.01).sqrt().mean()
self.writer.add_scalar("loss/sparsity", sparsity_loss, self.step)
loss += C(self.cfg.loss.sparsity, self.step, self.max_steps) * sparsity_loss
            self.writer.add_scalar(
                "loss_weights/sparsity",
                C(self.cfg.loss.sparsity, self.step, self.max_steps),
                self.step,
            )
if self.cfg.loss.opague > 0.0:
assert (
"opacity" in out
), "opacity not in output, should turn off the `rgb_only` flag"
opacity_clamped = out["opacity"].clamp(1e-3, 1.0 - 1e-3)
opacity_loss = binary_cross_entropy(opacity_clamped, opacity_clamped)
self.writer.add_scalar("loss/opague", opacity_loss, self.step)
loss += C(self.cfg.loss.opague, self.step, self.max_steps) * opacity_loss
            self.writer.add_scalar(
                "loss_weights/opague",
                C(self.cfg.loss.opague, self.step, self.max_steps),
                self.step,
            )
if self.cfg.loss.z_var > 0:
assert (
"z_var" in out
), "z_var not in output, should turn on the `z_var` flag"
opacity_clamped = out["opacity"].clamp(1e-3, 1.0 - 1e-3)
z_var_loss = (
out["z_var"] / opacity_clamped * (opacity_clamped > 0.5)
).mean()
self.writer.add_scalar("loss/z_var", z_var_loss, self.step)
loss += C(self.cfg.loss.z_var, self.step, self.max_steps) * z_var_loss
            self.writer.add_scalar(
                "loss_weights/z_var",
                C(self.cfg.loss.z_var, self.step, self.max_steps),
                self.step,
            )
self.writer.add_scalar("loss/total", loss, self.step)
# self.optimizer.zero_grad()
loss += self.estimator_loss_step(out)
loss = loss / self.cfg.grad_accum
loss.backward()
# self.optimizer.step()
# self.renderer.post_backward()
with torch.no_grad():
if step_check(self.step, self.cfg.log_period, run_at_zero=True):
out = dict_to_device(out, "cpu")
train_image_pth = self.eval_dir / "train"
if not train_image_pth.exists():
train_image_pth.mkdir(parents=True, exist_ok=True)
if "depth" in out.keys():
assert "opacity" in out.keys()
out["depth"] = apply_depth_colormap(out["depth"], out["opacity"])
out["opacity"] = apply_float_colormap(out["opacity"])
if "z_var" in out.keys():
out["z_var"] = (
out["z_var"] / out["opacity"] * (out["opacity"] > 0.5)
)
out["z_var"] = apply_float_colormap(
out["z_var"] / out["z_var"].max()
)
final = (
torch.cat(list(out.values()), dim=-2).clamp(0, 1).cpu().numpy()
* 255.0
).astype(np.uint8)[-1]
imageio.imwrite(str(train_image_pth / f"{self.step}.png"), final)
self.writer.add_image(
"train/image", final, self.step, dataformats="HWC"
)
# return loss.item()
return loss.item()
def estimator_loss_step(self, out):
loss = 0.0
if self.cfg.estimators.depth.enabled:
depth_estimated = self.depth_estimator(out["rgb"])
assert (
"depth" in out.keys()
), "depth should be rendered when using depth estimator loss"
# should add a mask here to filter out the background
depth_estimate_loss = depth_loss(
self.pearson, depth_estimated, out["depth"]
)
self.writer.add_scalar("loss/depth", depth_estimate_loss, self.step)
depth_loss_weight = C(
self.cfg.estimators.depth.value, self.step, self.max_steps
)
self.writer.add_scalar("loss_weights/depth", depth_loss_weight, self.step)
loss += depth_loss_weight * depth_estimate_loss
if self.cfg.estimators.normal.enabled:
normal_estimated = self.normal_estimator(out["rgb"])
assert (
"normal" in out.keys()
), "normal should be rendered when using normal estimator loss"
normal_estimator_loss = F.mse_loss(out["normal"], normal_estimated)
self.writer.add_scalar("estimator_loss/normal", normal_estimator_loss)
loss += (
C(self.cfg.estimators.normal.value, self.step, self.max_steps)
* normal_estimator_loss
)
return loss
def aux_guidance_step(self):
if self.cfg.auxiliary.enabled:
aux_guidance_loss = self.aux_guidance(self.renderer)
self.writer.add_scalar("loss/aux_guidance", aux_guidance_loss, self.step)
loss = (
C(self.cfg.loss.aux_guidance, self.step, self.max_steps)
* aux_guidance_loss
)
loss.backward()
def auxiliary_loss_step(self):
loss = self.renderer.auxiliary_loss(self.step, self.writer)
if loss.requires_grad:
loss.backward()
@torch.no_grad()
def eval_image_step(self):
self.eval()
eval_image_path = self.eval_dir / "image"
if not eval_image_path.exists():
eval_image_path.mkdir(parents=True, exist_ok=True)
if self.mode == "text_to_3d":
c2w = get_random_pose_fixed_elevation(
np.mean(self.dataset.camera_distance),
self.cfg.eval.elevation,
)
camera_info = self.dataset.get_default_camera_info()
c2w = torch.from_numpy(c2w)
elif self.mode == "image_to_3d":
c2w = self.dataset.original_out["c2w"]
camera_info = self.dataset.original_out["camera_info"]
else:
raise NotImplementedError
c2w = c2w.to(self.renderer.device)
eval_upsample = self.cfg.get("eval_upsample", 1)
camera_info.upsample(eval_upsample)
out = self.renderer.render_one(
c2w, camera_info, use_bg=self.cfg.use_bg, rgb_only=self.cfg.rgb_only
)
out = dict_to_device(out, "cpu")
if "depth" in out.keys():
assert "opacity" in out.keys()
out["depth"] = apply_depth_colormap(out["depth"], out["opacity"])
out["opacity"] = apply_float_colormap(out["opacity"])
if "z_var" in out.keys():
out["z_var"] = out["z_var"] / out["opacity"] * (out["opacity"] > 0.5)
out["z_var"] = apply_float_colormap(out["z_var"] / out["z_var"].max())
final = (torch.cat(list(out.values()), dim=-2).cpu().numpy() * 255.0).astype(
np.uint8
)
imageio.imwrite(str(eval_image_path / f"{self.step}.png"), final)
self.writer.add_image("eval/image", final, self.step, dataformats="HWC")
self.train()
@torch.no_grad()
def eval_video_step(self):
self.eval()
eval_video_path = self.eval_dir / "video"
if not eval_video_path.exists():
eval_video_path.mkdir(parents=True, exist_ok=True)
c2ws = get_camera_path_fixed_elevation(
self.cfg.eval.n_frames,
self.cfg.eval.n_circles,
np.mean(self.dataset.camera_distance),
self.cfg.eval.elevation,
)
c2ws = torch.from_numpy(c2ws).to(self.renderer.device)
camera_info = self.dataset.get_default_camera_info()
eval_upsample = self.cfg.get("eval_upsample", 1)
camera_info.upsample(eval_upsample)
outs = []
use_bg = True
if self.renderer.bg.type == "random":
            use_bg = False
with torch.no_grad():
for c2w in c2ws:
out = self.renderer.render_one(
c2w, camera_info, use_bg=use_bg, rgb_only=self.cfg.rgb_only
)
outs.append(dict_to_device(out, "cpu"))
outs = stack_dicts(outs)
if "depth" in outs.keys():
assert "opacity" in outs.keys()
outs["depth"] = apply_depth_colormap(outs["depth"], outs["opacity"])
outs["opacity"] = apply_float_colormap(outs["opacity"])
if "z_var" in out.keys():
outs["z_var"] = outs["z_var"] / outs["opacity"] * (outs["opacity"] > 0.5)
outs["z_var"] = apply_float_colormap(outs["z_var"] / outs["z_var"].max())
save_format = self.cfg.eval.save_format
assert save_format in ["gif", "mp4"]
final = torch.cat(list(outs.values()), dim=-2) # concat at W
imageio.mimwrite(
str(eval_video_path / f"{self.step}.{save_format}"),
(final.cpu().numpy() * 255).astype(np.uint8),
)
final = final.moveaxis(-1, -3)[None, ...] # THWC -> TCHW
        self.writer.add_video(
            "eval/spiral",
            final,
            self.step,
        )
self.train()
def train_loop(self):
self.train()
with tqdm(total=self.max_steps - self.start) as pbar:
for s in range(self.start, self.max_steps):
self.step = s
self.update(self.step)
self.guidance.log(self.writer, s)
self.dataset.log(self.writer, s)
loss = 0.0
for _ in range(self.cfg.grad_accum):
if self.mode == "text_to_3d":
loss += self.train_step()
elif self.mode == "image_to_3d":
loss += self.train_step_sit3d()
else:
raise NotImplementedError
self.aux_guidance_step()
self.auxiliary_loss_step()
# loss += self.renderer.auxiliary_loss(s, self.writer)
self.optimizer.step()
self.renderer.post_backward()
if step_check(s, self.cfg.log_period):
self.renderer.log(self.writer, s)
if step_check(s, self.cfg.eval.image_period):
self.eval_image_step()
if step_check(s, self.cfg.eval.video_period, True):
self.eval_video_step()
if step_check(s, self.cfg.save_period, True):
self.save()
## TODO: add densify code
self.renderer.densify(s)
self.renderer.prune(s)
self.optimizer.zero_grad()
pbar.set_description(f"{self.timestamp}|Iter: {s}/{self.max_steps}")
pbar.set_postfix(loss=f"{loss:.4f}")
pbar.update(1)
def train_step_sit3d(self):
self.train()
batch = next(self.loader)
out = self.renderer(batch, self.cfg.use_bg, self.cfg.rgb_only)
prompt_embeddings = self.prompt_processor()
guidance_out = self.guidance(
out["rgb"],
prompt_embeddings,
rgb_as_latents=False,
elevation=batch["elevation"],
azimuth=batch["azimuth"],
camera_distance=batch["camera_distance"],
image=self.image,
text=self.text_prompt,
)
is_original_view_mask = batch["is_original_view"]
loss = 0.0
num_original_views = torch.sum(is_original_view_mask).item()
bs = self.cfg.batch_size
# sds_loss
if "loss_sds" in guidance_out.keys():
loss += (
C(self.cfg.loss.sds, self.step, self.max_steps)
* guidance_out["loss_sds"]
)
self.writer.add_scalar("loss/sds", guidance_out["loss_sds"], self.step)
elif "loss_clip" in guidance_out.keys():
loss += (
C(self.cfg.loss.clip, self.step, self.max_steps)
* guidance_out["loss_clip"]
)
self.writer.add_scalar("loss/clip", guidance_out["loss_clip"], self.step)
else:
raise ValueError("No guidance loss is provided")
# image loss
_, h, w, _ = out["rgb"].shape
image = F.interpolate(
self.image.moveaxis(-1, 0)[None, ...],
(h, w),
mode="bilinear",
align_corners=False,
)[0].moveaxis(0, -1)
# print(self.depth_map.shape)
depth = F.interpolate(
self.depth_map[0].moveaxis(-1, 0)[None, ...],
(h, w),
mode="bilinear",
align_corners=False,
)[0].moveaxis(0, -1)
if num_original_views > 0:
image_loss = self.image_loss_fn(
out["rgb"][is_original_view_mask],
repeat(image, "h w c -> b h w c", b=num_original_views),
)
loss += C(self.cfg.loss.image, self.step, self.max_steps) * image_loss
self.writer.add_scalar("loss/image", image_loss, self.step)
depth_loss_val = depth_loss(
self.pearson,
out["depth"],
# repeat(self.depth_map, "h w c -> b h w c", b=num_original_views),
depth.repeat(num_original_views, 1, 1, 1),
)
loss += C(self.cfg.loss.depth, self.step, self.max_steps) * depth_loss_val
self.writer.add_scalar("loss/depth", depth_loss_val, self.step)
if num_original_views < bs:
loss += self.guidance.get_normal_clip_loss(
out["rgb"][~is_original_view_mask], self.image, self.text_prompt
) * C(self.cfg.loss.ref, self.step, self.max_steps)
self.writer.add_scalar("loss/total", loss, self.step)
# self.optimizer.zero_grad()
# loss += self.estimator_loss_step(out)
loss = loss / self.cfg.grad_accum
loss.backward()
# self.optimizer.step()
# self.renderer.post_backward()
with torch.no_grad():
if step_check(self.step, self.cfg.log_period, run_at_zero=True):
out = dict_to_device(out, "cpu")
train_image_pth = self.eval_dir / "train"
if not train_image_pth.exists():
train_image_pth.mkdir(parents=True, exist_ok=True)
if "depth" in out.keys():
assert "opacity" in out.keys()
out["depth"] = apply_depth_colormap(out["depth"], out["opacity"])
out["opacity"] = apply_float_colormap(out["opacity"])
if "z_var" in out.keys():
out["z_var"] = (
out["z_var"] / out["opacity"] * (out["opacity"] > 0.5)
)
out["z_var"] = apply_float_colormap(
out["z_var"] / out["z_var"].max()
)
final = (
torch.cat(list(out.values()), dim=-2).clamp(0, 1).cpu().numpy()
* 255.0
).astype(np.uint8)[-1]
imageio.imwrite(str(train_image_pth / f"{self.step}.png"), final)
self.writer.add_image(
"train/image", final, self.step, dataformats="HWC"
)
# return loss.item()
return loss.item()
def tune_with_upsample_model(self):
# total = self.t
seed_everything(42)
total = self.cfg.upsample_tune.num_poses
self.image_loss_fn = get_image_loss(0.2, "l2")
# self.image_loss_fn = F.mse_loss
bs = self.cfg.upsample_tune.batch_size
total = int(total / bs) * bs
self.dataset = CameraPoseProvider(self.cfg.data)
self.dataset.update(self.max_steps)
print(self.dataset.get_elevation_bound)
self.dataset.set_reso(64)
if self.cfg.upsample_tune.get("uniform", False):
console.print("[red]Using randomly sampled batch")
all_data = self.dataset.get_batch(total)
else:
console.print("[red]Using uniformly sampled batch")
all_data = self.dataset.get_uniform_batch(total)
upsampled_images = []
self.renderer.eval()
cache_uid = f"{self.cfg.prompt.prompt.replace(' ', '')}_{self.cfg.ckpt.replace('/', '')}_{self.cfg.upsample_tune.num_poses}"
cache_tmp_file = Path(f"./tmp/{cache_uid}.pt")
if not self.cfg.upsample_tune.use_cache or not cache_tmp_file.exists():
console.print(f"[green]no cache found, will save to {str(cache_tmp_file)}")
for i in range(0, total, bs):
batch = get_dict_slice(all_data, i, i + bs)
rgb = self.renderer(batch, rgb_only=True)["rgb"]
image_batch = self.guidance.upsample_images(
rgb=rgb,
prompt_embedding=self.prompt_processor(),
elevation=batch["elevation"],
azimuth=batch["azimuth"],
camera_distance=batch["camera_distance"],
)
upsampled_images.append(image_batch.cpu())
self.guidance.delete_upsample_model()
upsampled_images = torch.cat(upsampled_images, dim=0)
self.renderer.train()
torch.save(upsampled_images, cache_tmp_file)
else:
console.print("[green]load from cache")
upsampled_images = torch.load(cache_tmp_file, map_location="cpu")
reso = self.cfg.upsample_tune.reso
        self.dataset.set_reso(reso)  # actually not used
for cam_info in all_data["camera_info"]:
cam_info.set_reso(reso)
epoch = self.cfg.upsample_tune.epoch
console.print(
f"Step: {self.step}, start tuning with upsampling model for {epoch} epoch"
)
self.update(self.cfg.max_steps)
if hasattr(self.cfg.upsample_tune, "lr"):
self.renderer.setup_lr(self.cfg.upsample_tune.lr)
self.renderer.set_optimizer(self.cfg.upsample_tune.optimizer)
if self.cfg.upsample_tune.get("densify", False):
num_densified = self.renderer.densify_by_compatness(3)
self.renderer.reset_densify_info()
console.print(f"[red]densify enabled, {num_densified} densified")
if self.cfg.upsample_tune.loss.sds == 0.0:
del self.guidance
max_steps = int(total / bs) * epoch
with tqdm(total=max_steps) as pbar:
for e in range(epoch):
for i in range(0, total, bs):
self.step = e * int(total / bs) + int(i / bs)
# self.update(self.step)
batch = get_dict_slice(all_data, i, i + bs)
out = self.renderer(batch)
if self.cfg.upsample_tune.loss.sds > 0.0:
guidance_out = self.guidance(
out["rgb"],
self.prompt_processor(),
rgb_as_latents=False,
elevation=batch["elevation"],
azimuth=batch["azimuth"],
camera_distance=batch["camera_distance"],
)
image_gt = upsampled_images[i : i + bs].to(self.cfg.device)
loss = 0.0
if self.cfg.upsample_tune.loss.sds > 0.0:
loss += (
self.cfg.upsample_tune.loss.sds * guidance_out["loss_sds"]
)
loss += self.cfg.upsample_tune.loss.rgb * self.image_loss_fn(
out["rgb"], image_gt
)
self.writer.add_image(
"train",
torch.cat([out["rgb"][0], image_gt[0]], dim=1),
self.step,
dataformats="HWC",
)
self.renderer.log(self.writer, self.step)
pbar.set_description(f"Upsample Tune|Iter: {self.step}/{max_steps}")
pbar.set_postfix(loss=f"{loss:.4f}")
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
pbar.update(1)
del loss
del out
del image_gt
del batch
gc.collect()
torch.cuda.empty_cache()
self.eval_video_step()
| [
" "
] |
2024-01-10 | 10-zin/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | intelligencegear/ChatGPT-Proxy | proxy.py | """
Fetches cookies from chat.openai.com and returns them (Flask)
"""
import json
import tls_client
import uvicorn
from asgiref.wsgi import WsgiToAsgi
from flask import Flask
from flask import jsonify
from flask import request
from OpenAIAuth.Cloudflare import Cloudflare
from pyvirtualdisplay import Display
with open("./config.json", "r", encoding="utf-8") as f:
config = json.load(f)
PROXY = config['proxy']
virtualdisplay = config['virtualdisplay']
app = Flask(__name__)
disp = None
if virtualdisplay:
disp = Display()
session = tls_client.Session(client_identifier="chrome_108", )
if PROXY:
session.proxies.update(http=PROXY, https=PROXY)
authentication = {}
context = {"blocked": False}
# Get cloudflare cookies
(
authentication["cf_clearance"],
authentication["user_agent"],
) = Cloudflare(proxy=PROXY).get_cf_cookies()
@app.route("/<path:subpath>", methods=["POST", "GET"])
def conversation(subpath: str):
if request.headers.get("Authorization") is None:
return jsonify({"error": "Missing Authorization header"})
try:
if context.get("blocked"):
return jsonify({"error": "Blocking operation in progress"})
# Get cookies from request
cookies = {
"cf_clearance":
authentication["cf_clearance"],
"__Secure-next-auth.session-token":
request.cookies.get("__Secure-next-auth.session-token"),
}
# Set user agent
headers = {
"Accept": "text/event-stream",
"Authorization": request.headers.get("Authorization"),
"User-Agent": authentication["user_agent"],
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/" + "chat",
}
# Send request to OpenAI
if request.method == "POST":
response = session.post(
url="https://chat.openai.com/" + subpath,
headers=headers,
cookies=cookies,
data=json.dumps(request.get_json()),
timeout_seconds=360,
)
elif request.method == "GET":
response = session.get(
url="https://chat.openai.com/" + subpath,
headers=headers,
cookies=cookies,
timeout_seconds=360,
)
# Check status code
if response.status_code == 403:
# Get cf_clearance again
context["blocked"] = True
(
authentication["cf_clearance"],
authentication["user_agent"],
) = Cloudflare(proxy=PROXY).get_cf_cookies()
context["blocked"] = False
# return error
return jsonify({
"error":
"Cloudflare token expired. Please wait a few minutes while I refresh"
})
# Return response
return response.text
except Exception as exc:
return jsonify({"error": str(exc)})
if __name__ == "__main__":
# open a virtual display to do that!
uvicorn.run(
WsgiToAsgi(app),
host=config['server']['host'],
port=config['server']['port'],
server_header=False) # start a high-performance server with Uvicorn
if virtualdisplay and disp is not None:
disp.stop() | [] |
2024-01-10 | YahiDar/TransRadar | mvrss~utils~functions.py | """A lot of functions used in our pipelines"""
import json
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from mvrss.utils import MVRSS_HOME
from mvrss.losses.soft_dice import SoftDiceLoss
from mvrss.losses.coherence import CoherenceLoss
from mvrss.losses.MVLoss import MVLoss
from mvrss.loaders.dataloaders import Rescale, Flip, HFlip, VFlip
from mvrss.utils.ClassAgnosticLoss import CALoss
def get_class_weights(signal_type):
"""Load class weights for custom loss
PARAMETERS
----------
signal_type: str
Supported: 'range_doppler', 'range_angle'
RETURNS
-------
weights: numpy array
"""
weight_path = MVRSS_HOME / 'config_files'
    if signal_type == 'range_angle':
        file_name = 'ra_weights.json'
    elif signal_type == 'range_doppler':
file_name = 'rd_weights.json'
else:
raise ValueError('Signal type {} is not supported.'.format(signal_type))
file_path = weight_path / file_name
with open(file_path, 'r') as fp:
weights = json.load(fp)
weights = np.array([weights['background'], weights['pedestrian'],
weights['cyclist'], weights['car']])
weights = torch.from_numpy(weights)
return weights
def transform_masks_viz(masks, nb_classes):
"""Used for visualization"""
masks = masks.unsqueeze(1)
masks = (masks.float()/nb_classes)
return masks
def get_metrics(metrics, loss, losses=None):
"""Structure the metric results
PARAMETERS
----------
metrics: object
Contains statistics recorded during inference
loss: tensor
Loss value
losses: list
List of loss values
RETURNS
-------
metrics_values: dict
"""
metrics_values = dict()
metrics_values['loss'] = loss.item()
if isinstance(losses, list):
metrics_values['loss_ce'] = losses[0].item()
metrics_values['loss_dice'] = losses[1].item()
acc, acc_by_class = metrics.get_pixel_acc_class() # harmonic_mean=True)
prec, prec_by_class = metrics.get_pixel_prec_class()
recall, recall_by_class = metrics.get_pixel_recall_class() # harmonic_mean=True)
miou, miou_by_class = metrics.get_miou_class() # harmonic_mean=True)
dice, dice_by_class = metrics.get_dice_class()
metrics_values['acc'] = acc
metrics_values['acc_by_class'] = acc_by_class.tolist()
metrics_values['prec'] = prec
metrics_values['prec_by_class'] = prec_by_class.tolist()
metrics_values['recall'] = recall
metrics_values['recall_by_class'] = recall_by_class.tolist()
metrics_values['miou'] = miou
metrics_values['miou_by_class'] = miou_by_class.tolist()
metrics_values['dice'] = dice
metrics_values['dice_by_class'] = dice_by_class.tolist()
return metrics_values
def normalize(data, signal_type, norm_type='local'):
"""
Method to normalise the radar views
PARAMETERS
----------
data: numpy array
Radar view (batch)
signal_type: str
Type of radar view
Supported: 'range_doppler', 'range_angle' and 'angle_doppler'
norm_type: str
Type of normalisation to apply
Supported: 'local', 'tvt'
RETURNS
-------
norm_data: numpy array
normalised radar view
"""
    if norm_type == 'local':
min_value = torch.min(data)
max_value = torch.max(data)
norm_data = torch.div(torch.sub(data, min_value), torch.sub(max_value, min_value))
return norm_data
elif signal_type == 'range_doppler':
if norm_type == 'tvt':
file_path = MVRSS_HOME / 'config_files' / 'rd_stats_all.json'
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
with open(file_path, 'r') as fp:
rd_stats = json.load(fp)
min_value = torch.tensor(rd_stats['min_val'])
max_value = torch.tensor(rd_stats['max_val'])
elif signal_type == 'range_angle':
if norm_type == 'tvt':
file_path = MVRSS_HOME / 'config_files' / 'ra_stats_all.json'
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
with open(file_path, 'r') as fp:
ra_stats = json.load(fp)
min_value = torch.tensor(ra_stats['min_val'])
max_value = torch.tensor(ra_stats['max_val'])
elif signal_type == 'angle_doppler':
if norm_type == 'tvt':
file_path = MVRSS_HOME / 'config_files' / 'ad_stats_all.json'
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
with open(file_path, 'r') as fp:
ad_stats = json.load(fp)
min_value = torch.tensor(ad_stats['min_val'])
max_value = torch.tensor(ad_stats['max_val'])
else:
raise TypeError('Signal {} is not supported.'.format(signal_type))
norm_data = torch.div(torch.sub(data, min_value),
torch.sub(max_value, min_value))
return norm_data
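# Worked example of the 'local' branch above (illustrative only, not part of the
# original pipeline): a view is min-max scaled into [0, 1] using its own statistics.
#   x = torch.tensor([[0.0, 5.0], [10.0, 2.5]])
#   normalize(x, 'range_doppler', norm_type='local')
#   # -> tensor([[0.0000, 0.5000], [1.0000, 0.2500]])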
def define_loss(signal_type, custom_loss, device, delta = 0.6, loss_weight = 1., dice_weight = 10., coherence_weight = 5.):
"""
Method to define the loss to use during training
PARAMETERS
----------
signal_type: str
Type of radar view
Supported: 'range_doppler', 'range_angle' or 'angle_doppler'
    custom_loss: str
        Short name of the custom loss to use
        Supported: 'wce', 'sdice', 'wce_w10sdice', 'wce_w10sdice_w5col' or 'CAObjectLoss'
        Default: Cross Entropy is used for any other str
    device: str
Supported: 'cuda' or 'cpu'
"""
if custom_loss == 'wce':
weights = get_class_weights(signal_type)
loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
elif custom_loss == 'sdice':
loss = SoftDiceLoss()
elif custom_loss == 'wce_w10sdice':
weights = get_class_weights(signal_type)
ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss, SoftDiceLoss(global_weight=dice_weight)]
elif custom_loss == 'wce_w10sdice_w5col':
weights = get_class_weights(signal_type)
ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss, SoftDiceLoss(global_weight=dice_weight), CoherenceLoss(global_weight=coherence_weight)]
elif custom_loss == 'CAObjectLoss':
weights = get_class_weights(signal_type)
ce_loss = CALoss(global_weight=loss_weight, delta= delta, device = device)
loss = [ce_loss, SoftDiceLoss(global_weight=dice_weight), MVLoss(global_weight=coherence_weight)]
else:
loss = nn.CrossEntropyLoss()
return loss
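# Illustrative usage (assumes a CUDA device and the class-weight JSON files in
# config_files/ are present; composite settings return a list of criteria that the
# training loop is expected to evaluate and sum):
#   criterion = define_loss('range_doppler', 'wce_w10sdice', device='cuda')
#   # criterion == [weighted CrossEntropyLoss, SoftDiceLoss(global_weight=10.)]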
def get_transformations(transform_names, split='train', sizes=None):
"""Create a list of functions used for preprocessing
PARAMETERS
----------
transform_names: list
List of str, one for each transformation
split: str
Split currently used
sizes: int or tuple (optional)
Used for rescaling
Default: None
"""
transformations = list()
if 'rescale' in transform_names:
transformations.append(Rescale(sizes))
if 'flip' in transform_names and split == 'train':
transformations.append(Flip(0.5))
if 'vflip' in transform_names and split == 'train':
transformations.append(VFlip())
if 'hflip' in transform_names and split == 'train':
transformations.append(HFlip())
return transformations
def mask_to_img(mask):
"""Generate colors per class, only 3 classes are supported"""
mask_img = np.zeros((mask.shape[0],
mask.shape[1], 3), dtype=np.uint8)
mask_img[mask == 1] = [255, 0, 0]
mask_img[mask == 2] = [0, 255, 0]
mask_img[mask == 3] = [0, 0, 255]
mask_img = Image.fromarray(mask_img)
return mask_img
def get_qualitatives(outputs, masks, paths, seq_name, quali_iter, signal_type=None):
"""
Method to get qualitative results
PARAMETERS
----------
outputs: torch tensor
Predicted masks
masks: torch tensor
Ground truth masks
paths: dict
seq_name: str
quali_iter: int
Current iteration on the dataset
signal_type: str
RETURNS
-------
quali_iter: int
"""
if signal_type:
folder_path = paths['logs'] / signal_type / seq_name[0]
else:
folder_path = paths['logs'] / seq_name[0]
folder_path.mkdir(parents=True, exist_ok=True)
outputs = torch.argmax(outputs, axis=1).cpu().numpy()
masks = torch.argmax(masks, axis=1).cpu().numpy()
for i in range(outputs.shape[0]):
mask_img = mask_to_img(masks[i])
mask_path = folder_path / 'mask_{}.png'.format(quali_iter)
mask_img.save(mask_path)
output_img = mask_to_img(outputs[i])
output_path = folder_path / 'output_{}.png'.format(quali_iter)
output_img.save(output_path)
quali_iter += 1
return quali_iter
def count_params(model):
"""Count trainable parameters of a PyTorch Model"""
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
nb_params = sum([np.prod(p.size()) for p in model_parameters])
return nb_params
| [] |
2024-01-10 | yanqingjun/llama_index | llama_index~tools~function_tool.py | from typing import Any, Optional, Callable
from llama_index.tools.types import BaseTool, ToolMetadata
from langchain.tools import Tool, StructuredTool
from inspect import signature
DEFAULT_NAME = "Function Tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a function and getting back a response.
"""
class FunctionTool(BaseTool):
"""Function Tool.
A tool that takes in a function.
"""
def __init__(
self,
fn: Callable[..., Any],
metadata: ToolMetadata,
) -> None:
self._fn = fn
self._metadata = metadata
@classmethod
def from_defaults(
cls,
fn: Callable[..., Any],
name: Optional[str] = None,
description: Optional[str] = None,
) -> "FunctionTool":
name = name or fn.__name__
docstring = fn.__doc__
description = description or f"{name}{signature(fn)}\n{docstring}"
metadata = ToolMetadata(name=name, description=description)
return cls(fn=fn, metadata=metadata)
@property
def metadata(self) -> ToolMetadata:
"""Metadata."""
return self._metadata
@property
def fn(self) -> Callable[..., Any]:
"""Function."""
return self._fn
def __call__(self, *args: Any, **kwargs: Any) -> None:
"""Call."""
return self._fn(*args, **kwargs)
def to_langchain_tool(
self,
**langchain_tool_kwargs: Any,
) -> Tool:
"""To langchain tool."""
return Tool.from_function(
fn=self.fn,
name=self.metadata.name or "",
description=self.metadata.description,
**langchain_tool_kwargs,
)
def to_langchain_structured_tool(
self,
**langchain_tool_kwargs: Any,
) -> StructuredTool:
"""To langchain structured tool."""
return StructuredTool.from_function(
fn=self.fn,
name=self.metadata.name,
description=self.metadata.description,
**langchain_tool_kwargs,
)
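# Example usage (illustrative; `multiply` is a hypothetical user function):
#   def multiply(a: int, b: int) -> int:
#       """Multiply two integers."""
#       return a * b
#
#   tool = FunctionTool.from_defaults(fn=multiply)
#   tool.metadata.name   # "multiply"
#   tool(3, 4)           # 12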
| [] |
2024-01-10 | yanqingjun/llama_index | llama_index~query_engine~sql_vector_query_engine.py | """SQL Vector query engine."""
from langchain.input import print_text
from typing import Optional, cast, Dict, Any, Callable
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.struct_store.sql_query import GPTNLStructStoreQueryEngine
from llama_index.indices.vector_store.retrievers.auto_retriever import (
VectorIndexAutoRetriever,
)
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.indices.service_context import ServiceContext
from llama_index.selectors.llm_selectors import LLMSingleSelector
from llama_index.prompts.base import Prompt
from llama_index.indices.query.query_transform.base import BaseQueryTransform
import logging
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor
from llama_index.callbacks.base import CallbackManager
logger = logging.getLogger(__name__)
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL = """
The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
Given the SQL response, the question has also been translated into a vector store query.
The vector store query and response is given below.
Given SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
Transformed vector store query: {vector_store_query_str}
Vector store response: {vector_store_response_str}
Response:
""" # noqa
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT = Prompt(DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL)
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL = """
"The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
The SQL response either answers the question, or should provide additional context that can be used to make the question more specific.
Your job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.
Examples:
Original question: Please give more details about the demographics of the city with the highest population.
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: Can you tell me more about the demographics of New York City?
Original question: Please compare the sports environment of cities in North America.
SQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3
SQL response: The cities in North America are New York, San Francisco, and Toronto.
New question: What sports are played in New York, San Francisco, and Toronto?
Original question: What is the city with the highest population?
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: None
Original question: What countries are the top 3 ATP players from?
SQL query: SELECT country FROM players WHERE rank <= 3
SQL response: The top 3 ATP players are from Serbia, Russia, and Spain.
New question: None
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
New question: "
""" # noqa
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT = Prompt(DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL)
def _default_check_stop(query_bundle: QueryBundle) -> bool:
"""Default check stop function."""
return query_bundle.query_str.lower() == "none"
def _format_sql_query(sql_query: str) -> str:
"""Format SQL query."""
return sql_query.replace("\n", " ").replace("\t", " ")
class SQLAugmentQueryTransform(BaseQueryTransform):
"""SQL Augment Query Transform.
This query transform will transform the query into a more specific query
after augmenting with SQL results.
Args:
llm_predictor (LLMPredictor): LLM predictor to use for query transformation.
sql_augment_transform_prompt (Prompt): Prompt to use for query transformation.
check_stop_parser (Optional[Callable[[str], bool]]): Check stop function.
"""
def __init__(
self,
llm_predictor: Optional[BaseLLMPredictor] = None,
sql_augment_transform_prompt: Optional[Prompt] = None,
check_stop_parser: Optional[Callable[[QueryBundle], bool]] = None,
) -> None:
"""Initialize params."""
self._llm_predictor = llm_predictor or LLMPredictor()
self._sql_augment_transform_prompt = (
sql_augment_transform_prompt or DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT
)
self._check_stop_parser = check_stop_parser or _default_check_stop
def _run(self, query_bundle: QueryBundle, extra_info: Dict) -> QueryBundle:
"""Run query transform."""
query_str = query_bundle.query_str
sql_query = extra_info["sql_query"]
sql_query_response = extra_info["sql_query_response"]
new_query_str, formatted_prompt = self._llm_predictor.predict(
self._sql_augment_transform_prompt,
query_str=query_str,
sql_query_str=sql_query,
sql_response_str=sql_query_response,
)
return QueryBundle(
new_query_str, custom_embedding_strs=query_bundle.custom_embedding_strs
)
def check_stop(self, query_bundle: QueryBundle) -> bool:
"""Check if query indicates stop."""
return self._check_stop_parser(query_bundle)
class SQLAutoVectorQueryEngine(BaseQueryEngine):
"""SQL + Vector Index Auto Retriever Query Engine.
This query engine can query both a SQL database
as well as a vector database. It will first decide
whether it needs to query the SQL database or vector store.
If it decides to query the SQL database, it will also decide
whether to augment information with retrieved results from the vector store.
We use the VectorIndexAutoRetriever to retrieve results.
Args:
sql_query_tool (QueryEngineTool): Query engine tool for SQL database.
vector_query_tool (QueryEngineTool): Query engine tool for vector database.
selector (Optional[LLMSingleSelector]): Selector to use.
service_context (Optional[ServiceContext]): Service context to use.
sql_vector_synthesis_prompt (Optional[Prompt]): Prompt to use for SQL vector
synthesis.
sql_augment_query_transform (Optional[SQLAugmentQueryTransform]): Query
transform to use for SQL augmentation.
use_sql_vector_synthesis (bool): Whether to use SQL vector synthesis.
callback_manager (Optional[CallbackManager]): Callback manager to use.
verbose (bool): Whether to print intermediate results.
"""
def __init__(
self,
sql_query_tool: QueryEngineTool,
vector_query_tool: QueryEngineTool,
selector: Optional[LLMSingleSelector] = None,
service_context: Optional[ServiceContext] = None,
sql_vector_synthesis_prompt: Optional[Prompt] = None,
sql_augment_query_transform: Optional[SQLAugmentQueryTransform] = None,
use_sql_vector_synthesis: bool = True,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = True,
) -> None:
"""Initialize params."""
super().__init__(callback_manager=callback_manager)
# validate that the query engines are of the right type
if not isinstance(sql_query_tool.query_engine, GPTNLStructStoreQueryEngine):
raise ValueError(
"sql_query_tool.query_engine must be an instance of "
"GPTNLStructStoreQueryEngine"
)
if not isinstance(vector_query_tool.query_engine, RetrieverQueryEngine):
raise ValueError(
"vector_query_tool.query_engine must be an instance of "
"RetrieverQueryEngine"
)
if not isinstance(
vector_query_tool.query_engine.retriever, VectorIndexAutoRetriever
):
raise ValueError(
"vector_query_tool.query_engine.retriever must be an instance "
"of VectorIndexAutoRetriever"
)
self._sql_query_tool = sql_query_tool
self._vector_query_tool = vector_query_tool
sql_query_engine = cast(
GPTNLStructStoreQueryEngine, sql_query_tool.query_engine
)
self._service_context = service_context or sql_query_engine.service_context
self._selector = selector or LLMSingleSelector.from_defaults()
self._sql_vector_synthesis_prompt = (
sql_vector_synthesis_prompt or DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT
)
self._sql_augment_query_transform = (
sql_augment_query_transform
or SQLAugmentQueryTransform(
llm_predictor=self._service_context.llm_predictor
)
)
self._use_sql_vector_synthesis = use_sql_vector_synthesis
self._verbose = verbose
@classmethod
def from_sql_and_vector_query_engines(
cls,
sql_query_engine: GPTNLStructStoreQueryEngine,
sql_tool_name: str,
sql_tool_description: str,
vector_auto_retriever: RetrieverQueryEngine,
vector_tool_name: str,
vector_tool_description: str,
selector: Optional[LLMSingleSelector] = None,
**kwargs: Any,
) -> "SQLAutoVectorQueryEngine":
"""From SQL and vector query engines.
Args:
sql_query_engine (GPTNLStructStoreQueryEngine): SQL query engine.
vector_query_engine (VectorIndexAutoRetriever): Vector retriever.
selector (Optional[LLMSingleSelector]): Selector to use.
"""
sql_query_tool = QueryEngineTool.from_defaults(
sql_query_engine, name=sql_tool_name, description=sql_tool_description
)
vector_query_tool = QueryEngineTool.from_defaults(
vector_auto_retriever,
name=vector_tool_name,
description=vector_tool_description,
)
return cls(sql_query_tool, vector_query_tool, selector, **kwargs)
def _query_sql_vector(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query SQL database + vector db in sequence."""
# first query SQL database
sql_response = self._sql_query_tool.query_engine.query(query_bundle)
if not self._use_sql_vector_synthesis:
return sql_response
sql_query = (
sql_response.extra_info["sql_query"] if sql_response.extra_info else None
)
if self._verbose:
print_text(f"SQL query: {sql_query}\n", color="yellow")
print_text(f"SQL response: {sql_response}\n", color="yellow")
# given SQL db, transform query into new query
new_query = self._sql_augment_query_transform(
query_bundle.query_str,
extra_info={
"sql_query": _format_sql_query(sql_query),
"sql_query_response": str(sql_response),
},
)
if self._verbose:
print_text(
f"Transformed query given SQL response: {new_query.query_str}\n",
color="blue",
)
logger.info(f"> Transformed query given SQL response: {new_query.query_str}")
if self._sql_augment_query_transform.check_stop(new_query):
return sql_response
vector_response = self._vector_query_tool.query_engine.query(new_query)
if self._verbose:
print_text(f"Vector DB response: {vector_response}\n", color="pink")
logger.info(f"> Vector DB response: {vector_response}")
response_str, _ = self._service_context.llm_predictor.predict(
self._sql_vector_synthesis_prompt,
query_str=query_bundle.query_str,
sql_query_str=sql_query,
sql_response_str=str(sql_response),
vector_store_query_str=new_query.query_str,
vector_store_response_str=str(vector_response),
)
if self._verbose:
print_text(f"Final response: {response_str}\n", color="green")
response_extra_info = {
**(sql_response.extra_info or {}),
**(vector_response.extra_info or {}),
}
source_nodes = vector_response.source_nodes
return Response(
response_str,
extra_info=response_extra_info,
source_nodes=source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query and get response."""
# TODO: see if this can be consolidated with logic in RouterQueryEngine
metadatas = [self._sql_query_tool.metadata, self._vector_query_tool.metadata]
result = self._selector.select(metadatas, query_bundle)
# pick sql query
if result.ind == 0:
if self._verbose:
print_text(f"Querying SQL database: {result.reason}\n", color="blue")
logger.info(f"> Querying SQL database: {result.reason}")
return self._query_sql_vector(query_bundle)
elif result.ind == 1:
if self._verbose:
print_text(f"Querying vector database: {result.reason}\n", color="blue")
logger.info(f"> Querying vector database: {result.reason}")
response = self._vector_query_tool.query_engine.query(query_bundle)
if self._verbose:
print_text(f"Vector DB response: {response}\n", color="pink")
return response
else:
raise ValueError(f"Invalid result.ind: {result.ind}")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
# TODO: make async
return self._query(query_bundle)
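    # Illustrative wiring (hypothetical engines; mirrors `from_sql_and_vector_query_engines`):
    #   query_engine = SQLAutoVectorQueryEngine.from_sql_and_vector_query_engines(
    #       sql_query_engine=sql_query_engine,          # GPTNLStructStoreQueryEngine
    #       sql_tool_name="sql_cities",
    #       sql_tool_description="Useful for aggregate questions over the city table.",
    #       vector_auto_retriever=vector_query_engine,  # RetrieverQueryEngine wrapping a VectorIndexAutoRetriever
    #       vector_tool_name="vector_cities",
    #       vector_tool_description="Useful for semantic questions about each city.",
    #   )
    #   response = query_engine.query("Tell me about the arts scene of the city with the highest population.")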
| [
"\nThe original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nGiven the SQL response, the question has also been translated into a vector store query.\nThe vector store query and response is given below.\nGiven SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nTransformed vector store query: {vector_store_query_str}\nVector store response: {vector_store_response_str}\nResponse: \n",
"\n\"The original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nThe SQL response either answers the question, or should provide additional context that can be used to make the question more specific.\nYour job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.\n\nExamples:\n\nOriginal question: Please give more details about the demographics of the city with the highest population.\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: Can you tell me more about the demographics of New York City?\n\nOriginal question: Please compare the sports environment of cities in North America.\nSQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3\nSQL response: The cities in North America are New York, San Francisco, and Toronto.\nNew question: What sports are played in New York, San Francisco, and Toronto?\n\nOriginal question: What is the city with the highest population?\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: None\n\nOriginal question: What countries are the top 3 ATP players from?\nSQL query: SELECT country FROM players WHERE rank <= 3\nSQL response: The top 3 ATP players are from Serbia, Russia, and Spain.\nNew question: None\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nNew question: \"\n",
"North America",
"None"
] |
2024-01-10 | a046829713/Crypto_trading | DQN~ptan~experience.py | import gym
import torch
import random
import collections
from torch.autograd import Variable
import time
import numpy as np
from collections import namedtuple, deque
import time
from .agent import BaseAgent
from .common import utils
# one single experience step
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'done'])
class ExperienceSource:
"""
Simple n-step experience source using single or multiple environments
Every experience contains n list of Experience entries
"""
def __init__(self, env, agent, steps_count=2, steps_delta=1, vectorized=False):
"""
Create simple experience source
:param env: environment or list of environments to be used
:param agent: callable to convert batch of states into actions to take
:param steps_count: count of steps to track for every experience chain
:param steps_delta: how many steps to do between experience items
:param vectorized: support of vectorized envs from OpenAI universe
"""
assert isinstance(env, (gym.Env, list, tuple))
assert isinstance(agent, BaseAgent)
assert isinstance(steps_count, int)
assert steps_count >= 1
assert isinstance(vectorized, bool)
if isinstance(env, (list, tuple)):
self.pool = env
else:
self.pool = [env]
self.agent = agent # <DQN.ptan.agent.DQNAgent object at 0x0000016C63B05D60>
self.steps_count = steps_count # 3
self.steps_delta = steps_delta # 1
self.total_rewards = []
self.total_steps = []
self.vectorized = vectorized # False
def __iter__(self):
print("ExperienceSource iter 測試進入次數:",)
states, agent_states, histories, cur_rewards, cur_steps = [], [], [], [], []
env_lens = []
for env in self.pool:
obs = env.reset()
# if the environment is vectorized, all it's output is lists of results.
# Details are here: https://github.com/openai/universe/blob/master/doc/env_semantics.rst
if self.vectorized:
obs_len = len(obs)
states.extend(obs)
else:
obs_len = 1
states.append(obs)
env_lens.append(obs_len)
for _ in range(obs_len):
histories.append(deque(maxlen=self.steps_count))
cur_rewards.append(0.0)
cur_steps.append(0)
agent_states.append(self.agent.initial_state())
        # states # initial observations (from env.reset())
# agent_states # [None]
# histories # [deque([], maxlen=3)]
# cur_rewards # [0.0]
# cur_steps # [0]
iter_idx = 0
while True:
actions = [None] * len(states) # [None]
states_input = []
states_indices = []
for idx, state in enumerate(states):
if state is None:
actions[idx] = self.pool[0].action_space.sample() # assume that all envs are from the same family
else:
states_input.append(state) # 狀態
states_indices.append(idx) # 索引
if states_input:
                # returns the chosen actions and new agent states, e.g. [2] [None];
                # note: the original author does not appear to use agent_states here
states_actions, new_agent_states = self.agent(states_input, agent_states)
for idx, action in enumerate(states_actions):
g_idx = states_indices[idx]
actions[g_idx] = action
agent_states[g_idx] = new_agent_states[idx]
# [[2]]
grouped_actions = _group_list(actions, env_lens)
global_ofs = 0
for env_idx, (env, action_n) in enumerate(zip(self.pool, grouped_actions)):
# 0 (<TimeLimit<StocksEnv instance>>, [2])
if self.vectorized:
next_state_n, r_n, is_done_n, _ = env.step(action_n)
else:
next_state, r, is_done, _ = env.step(action_n[0])
next_state_n, r_n, is_done_n = [next_state], [r], [is_done]
for ofs, (action, next_state, r, is_done) in enumerate(zip(action_n, next_state_n, r_n, is_done_n)):
idx = global_ofs + ofs
state = states[idx]
history = histories[idx]
cur_rewards[idx] += r
print(cur_rewards)
print('*'*120)
cur_steps[idx] += 1
if state is not None:
history.append(Experience(state=state, action=action, reward=r, done=is_done))
if len(history) == self.steps_count and iter_idx % self.steps_delta == 0:
yield tuple(history)
states[idx] = next_state
if is_done:
# generate tail of history
while len(history) >= 1:
yield tuple(history)
history.popleft()
self.total_rewards.append(cur_rewards[idx])
self.total_steps.append(cur_steps[idx])
cur_rewards[idx] = 0.0
cur_steps[idx] = 0
# vectorized envs are reset automatically
states[idx] = env.reset() if not self.vectorized else None
agent_states[idx] = self.agent.initial_state()
history.clear()
global_ofs += len(action_n)
iter_idx += 1
def pop_total_rewards(self):
r = self.total_rewards
if r:
self.total_rewards = []
self.total_steps = []
return r
def pop_rewards_steps(self):
res = list(zip(self.total_rewards, self.total_steps))
if res:
self.total_rewards, self.total_steps = [], []
return res
def _group_list(items, lens):
"""
Unflat the list of items by lens
:param items: list of items
:param lens: list of integers
:return: list of list of items grouped by lengths
"""
res = []
cur_ofs = 0
for g_len in lens:
res.append(items[cur_ofs:cur_ofs+g_len])
cur_ofs += g_len
return res
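# For illustration (not part of the original module): _group_list regroups a flat list
# by the given lengths, e.g. _group_list([1, 2, 3, 4, 5], [2, 3]) -> [[1, 2], [3, 4, 5]].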
# those entries are emitted from ExperienceSourceFirstLast. Reward is discounted over the trajectory piece
ExperienceFirstLast = collections.namedtuple('ExperienceFirstLast', ('state', 'action', 'reward', 'last_state'))
class ExperienceSourceFirstLast(ExperienceSource):
"""
This is a wrapper around ExperienceSource to prevent storing full trajectory in replay buffer when we need
only first and last states. For every trajectory piece it calculates discounted reward and emits only first
and last states and action taken in the first state.
If we have partial trajectory at the end of episode, last_state will be None
"""
def __init__(self, env, agent, gamma, steps_count=1, steps_delta=1, vectorized=False):
assert isinstance(gamma, float)
super(ExperienceSourceFirstLast, self).__init__(env, agent, steps_count+1, steps_delta, vectorized=vectorized)
self.gamma = gamma
self.steps = steps_count
def __iter__(self):
for exp in super(ExperienceSourceFirstLast, self).__iter__():
if exp[-1].done and len(exp) <= self.steps:
last_state = None
elems = exp
else:
last_state = exp[-1].state
elems = exp[:-1]
total_reward = 0.0
for e in reversed(elems):
total_reward *= self.gamma
total_reward += e.reward
yield ExperienceFirstLast(state=exp[0].state, action=exp[0].action,
reward=total_reward, last_state=last_state)
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done)
discounted.append(r)
return discounted[::-1]
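# Worked example (illustrative): with gamma=0.9,
#   discount_with_dones([1.0, 1.0, 1.0], [False, False, True], 0.9)
# walks the rewards backwards -- the trailing done resets the running return --
# giving r3 = 1.0, r2 = 1.0 + 0.9 * 1.0 = 1.9, r1 = 1.0 + 0.9 * 1.9 = 2.71,
# so the function returns [2.71, 1.9, 1.0].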
class ExperienceSourceRollouts:
"""
N-step rollout experience source following A3C rollouts scheme. Have to be used with agent,
keeping the value in its state (for example, agent.ActorCriticAgent).
Yields batches of num_envs * n_steps samples with the following arrays:
1. observations
2. actions
3. discounted rewards, with values approximation
4. values
"""
def __init__(self, env, agent, gamma, steps_count=5):
"""
Constructs the rollout experience source
:param env: environment or list of environments to be used
:param agent: callable to convert batch of states into actions
:param steps_count: how many steps to perform rollouts
"""
assert isinstance(env, (gym.Env, list, tuple))
assert isinstance(agent, BaseAgent)
assert isinstance(gamma, float)
assert isinstance(steps_count, int)
assert steps_count >= 1
if isinstance(env, (list, tuple)):
self.pool = env
else:
self.pool = [env]
self.agent = agent
self.gamma = gamma
self.steps_count = steps_count
self.total_rewards = []
self.total_steps = []
def __iter__(self):
pool_size = len(self.pool)
states = [np.array(e.reset()) for e in self.pool]
mb_states = np.zeros((pool_size, self.steps_count) + states[0].shape, dtype=states[0].dtype)
mb_rewards = np.zeros((pool_size, self.steps_count), dtype=np.float32)
mb_values = np.zeros((pool_size, self.steps_count), dtype=np.float32)
mb_actions = np.zeros((pool_size, self.steps_count), dtype=np.int64)
        mb_dones = np.zeros((pool_size, self.steps_count), dtype=bool)
total_rewards = [0.0] * pool_size
total_steps = [0] * pool_size
agent_states = None
step_idx = 0
while True:
actions, agent_states = self.agent(states, agent_states)
rewards = []
dones = []
new_states = []
for env_idx, (e, action) in enumerate(zip(self.pool, actions)):
o, r, done, _ = e.step(action)
total_rewards[env_idx] += r
total_steps[env_idx] += 1
if done:
o = e.reset()
self.total_rewards.append(total_rewards[env_idx])
self.total_steps.append(total_steps[env_idx])
total_rewards[env_idx] = 0.0
total_steps[env_idx] = 0
new_states.append(np.array(o))
dones.append(done)
rewards.append(r)
# we need an extra step to get values approximation for rollouts
if step_idx == self.steps_count:
# calculate rollout rewards
for env_idx, (env_rewards, env_dones, last_value) in enumerate(zip(mb_rewards, mb_dones, agent_states)):
env_rewards = env_rewards.tolist()
env_dones = env_dones.tolist()
if not env_dones[-1]:
env_rewards = discount_with_dones(env_rewards + [last_value], env_dones + [False], self.gamma)[:-1]
else:
env_rewards = discount_with_dones(env_rewards, env_dones, self.gamma)
mb_rewards[env_idx] = env_rewards
yield mb_states.reshape((-1,) + mb_states.shape[2:]), mb_rewards.flatten(), mb_actions.flatten(), mb_values.flatten()
step_idx = 0
mb_states[:, step_idx] = states
mb_rewards[:, step_idx] = rewards
mb_values[:, step_idx] = agent_states
mb_actions[:, step_idx] = actions
mb_dones[:, step_idx] = dones
step_idx += 1
states = new_states
def pop_total_rewards(self):
r = self.total_rewards
if r:
self.total_rewards = []
self.total_steps = []
return r
def pop_rewards_steps(self):
res = list(zip(self.total_rewards, self.total_steps))
if res:
self.total_rewards, self.total_steps = [], []
return res
class ExperienceSourceBuffer:
"""
The same as ExperienceSource, but takes episodes from the buffer
"""
def __init__(self, buffer, steps_count=1):
"""
Create buffered experience source
:param buffer: list of episodes, each is a list of Experience object
:param steps_count: count of steps in every entry
"""
self.update_buffer(buffer)
self.steps_count = steps_count
def update_buffer(self, buffer):
self.buffer = buffer
self.lens = list(map(len, buffer))
def __iter__(self):
"""
Infinitely sample episode from the buffer and then sample item offset
"""
while True:
episode = random.randrange(len(self.buffer))
ofs = random.randrange(self.lens[episode] - self.steps_count - 1)
yield self.buffer[episode][ofs:ofs+self.steps_count]
class ExperienceReplayBuffer:
def __init__(self, experience_source, buffer_size):
assert isinstance(experience_source, (ExperienceSource, type(None)))
assert isinstance(buffer_size, int)
self.experience_source_iter = None if experience_source is None else iter(experience_source)
self.buffer = []
self.capacity = buffer_size
self.pos = 0
def __len__(self):
return len(self.buffer)
def __iter__(self):
return iter(self.buffer)
def sample(self, batch_size):
"""
Get one random batch from experience replay
        TODO: implement sampling order policy
:param batch_size:
:return:
"""
if len(self.buffer) <= batch_size:
return self.buffer
# Warning: replace=False makes random.choice O(n)
keys = np.random.choice(len(self.buffer), batch_size, replace=True)
return [self.buffer[key] for key in keys]
def _add(self, sample):
"""
        Store the iterated sample in the buffer;
        once capacity is exceeded, overwrite the oldest entry (circular buffer).
        Args:
            sample: experience entry to store
"""
if len(self.buffer) < self.capacity:
self.buffer.append(sample)
else:
self.buffer[self.pos] = sample
self.pos = (self.pos + 1) % self.capacity
def populate(self, samples):
"""
        Populates samples into the buffer
:param samples: how many samples to populate
<class 'ptan.experience.ExperienceFirstLast'>
entry: ExperienceFirstLast(state=array([ 0.00773994, -0.01083591, 0.00773994, 0.00456621, -0.01065449,
0.00456621, 0.00607903, -0.00455927, 0.00455927, 0. ,
-0.01783061, -0.00148588, 0.00437956, -0.01021898, -0.00291971,
0.00442478, -0.02359882, -0.02359882, 0.01226994, -0.00153374,
0.00306748, 0.01076923, -0.00615385, 0.00153846, 0.00310559,
-0.01086957, -0.00465839, 0.02503912, -0.00312989, 0.02190923,
0. , 0. ], dtype=float32), action=1, reward=-2.7099031710120034, last_state=array([ 0.00607903, -0.00455927, 0.00455927, 0. , -0.01783061,
-0.00148588, 0.00437956, -0.01021898, -0.00291971, 0.00442478,
-0.02359882, -0.02359882, 0.01226994, -0.00153374, 0.00306748,
0.01076923, -0.00615385, 0.00153846, 0.00310559, -0.01086957,
-0.00465839, 0.02503912, -0.00312989, 0.02190923, 0.00311042,
-0.00777605, -0.00311042, 0.00944882, 0. , 0.0015748 ,
1. , -0.02603369], dtype=float32))
"""
for _ in range(samples):
entry = next(self.experience_source_iter)
self._add(entry)
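# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of driving ExperienceReplayBuffer: entries are added
# via _add() (populate() would normally pull them from an ExperienceSource) and
# random training batches are drawn with sample(). The ("state", step) tuples are
# placeholders; real entries come from the experience source iterator.
def _replay_buffer_demo():
    buffer = ExperienceReplayBuffer(experience_source=None, buffer_size=4)
    for step in range(10):
        buffer._add(("state", step))  # overwrites the oldest entry once full
    assert len(buffer) == 4
    return buffer.sample(batch_size=2)  # random batch, sampled with replacement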
class PrioReplayBufferNaive:
def __init__(self, exp_source, buf_size, prob_alpha=0.6):
self.exp_source_iter = iter(exp_source)
self.prob_alpha = prob_alpha
self.capacity = buf_size
self.pos = 0
self.buffer = []
self.priorities = np.zeros((buf_size, ), dtype=np.float32)
def __len__(self):
return len(self.buffer)
def populate(self, count):
max_prio = self.priorities.max() if self.buffer else 1.0
for _ in range(count):
sample = next(self.exp_source_iter)
if len(self.buffer) < self.capacity:
self.buffer.append(sample)
else:
self.buffer[self.pos] = sample
self.priorities[self.pos] = max_prio
self.pos = (self.pos + 1) % self.capacity
def sample(self, batch_size, beta=0.4):
if len(self.buffer) == self.capacity:
prios = self.priorities
else:
prios = self.priorities[:self.pos]
probs = np.array(prios, dtype=np.float32) ** self.prob_alpha
probs /= probs.sum()
indices = np.random.choice(len(self.buffer), batch_size, p=probs, replace=True)
samples = [self.buffer[idx] for idx in indices]
total = len(self.buffer)
weights = (total * probs[indices]) ** (-beta)
weights /= weights.max()
return samples, indices, np.array(weights, dtype=np.float32)
def update_priorities(self, batch_indices, batch_priorities):
for idx, prio in zip(batch_indices, batch_priorities):
self.priorities[idx] = prio
class PrioritizedReplayBuffer(ExperienceReplayBuffer):
def __init__(self, experience_source, buffer_size, alpha):
super(PrioritizedReplayBuffer, self).__init__(experience_source, buffer_size)
assert alpha > 0
self._alpha = alpha
it_capacity = 1
while it_capacity < buffer_size:
it_capacity *= 2
self._it_sum = utils.SumSegmentTree(it_capacity)
self._it_min = utils.MinSegmentTree(it_capacity)
self._max_priority = 1.0
def _add(self, *args, **kwargs):
idx = self.pos
super()._add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
mass = random.random() * self._it_sum.sum(0, len(self) - 1)
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights, dtype=np.float32)
samples = [self.buffer[idx] for idx in idxes]
return samples, idxes, weights
def update_priorities(self, idxes, priorities):
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
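# --- Illustrative usage sketch (not part of the original module) ---
# A hedged sketch of the prioritized-replay training loop: populate from an
# experience source, sample with importance weights, compute TD errors with a
# user-supplied function, and feed |TD error| + eps back as the new priorities.
# `exp_source` (a ptan-style ExperienceSource) and `compute_td_errors` are
# assumptions made only for this illustration.
def _prioritized_replay_demo(exp_source, compute_td_errors, steps=1000):
    buffer = PrioritizedReplayBuffer(exp_source, buffer_size=1 << 14, alpha=0.6)
    for _ in range(steps):
        buffer.populate(1)                      # pull one transition per step
        if len(buffer) < 32:
            continue
        samples, idxes, weights = buffer.sample(batch_size=32, beta=0.4)
        td_errors = compute_td_errors(samples, weights)   # user-supplied loss step
        new_prios = [abs(err) + 1e-5 for err in td_errors]
        buffer.update_priorities(idxes, new_prios)
    return buffer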
class BatchPreprocessor:
"""
Abstract preprocessor class; descendants convert an experience
batch into a form suitable for learning.
"""
def preprocess(self, batch):
raise NotImplementedError
class QLearningPreprocessor(BatchPreprocessor):
"""
Supports SimpleDQN, TargetDQN and DoubleDQN, and can additionally feed the TD-error back to
the experience replay buffer.
To select a mode, use the appropriate class method (simple_dqn, target_dqn or double_dqn).
"""
def __init__(self, model, target_model, use_double_dqn=False, batch_td_error_hook=None, gamma=0.99, device="cpu"):
self.model = model
self.target_model = target_model
self.use_double_dqn = use_double_dqn
self.batch_td_error_hook = batch_td_error_hook
self.gamma = gamma
self.device = device
@staticmethod
def simple_dqn(model, **kwargs):
return QLearningPreprocessor(model=model, target_model=None, use_double_dqn=False, **kwargs)
@staticmethod
def target_dqn(model, target_model, **kwargs):
return QLearningPreprocessor(model, target_model, use_double_dqn=False, **kwargs)
@staticmethod
def double_dqn(model, target_model, **kwargs):
return QLearningPreprocessor(model, target_model, use_double_dqn=True, **kwargs)
def _calc_Q(self, states_first, states_last):
"""
Calculates appropriate Q values for the first and last states. The way they are calculated depends on our settings.
:param states_first: numpy array of first states
:param states_last: numpy array of last states
:return: tuple of numpy arrays of q values
"""
# here we need both first and last values calculated using our main model, so we
# combine both states into one batch for efficiency and separate results later
if self.target_model is None or self.use_double_dqn:
states_t = torch.tensor(np.concatenate((states_first, states_last), axis=0)).to(self.device)
res_both = self.model(states_t).data.cpu().numpy()
return res_both[:len(states_first)], res_both[len(states_first):]
# in this case we have target_model set and use_double_dqn==False
# so, we should calculate first_q and last_q using different models
states_first_v = torch.tensor(states_first).to(self.device)
states_last_v = torch.tensor(states_last).to(self.device)
q_first = self.model(states_first_v).data
q_last = self.target_model(states_last_v).data
return q_first.cpu().numpy(), q_last.cpu().numpy()
def _calc_target_rewards(self, states_last, q_last):
"""
Calculate target rewards from the final states according to the variant chosen at construction:
1. simple DQN: max(Q(states, model))
2. target DQN: max(Q(states, target_model))
3. double DQN: Q(states, target_model)[argmax(Q(states, model))]
:param states_last: numpy array of last states from the games
:param q_last: numpy array of last q values
:return: vector of target rewards
"""
# in this case we handle both simple DQN and target DQN
if self.target_model is None or not self.use_double_dqn:
return q_last.max(axis=1)
# here we have target_model set and use_double_dqn==True
actions = q_last.argmax(axis=1)
# calculate Q values using target net
states_last_v = torch.tensor(states_last).to(self.device)
q_last_target = self.target_model(states_last_v).data.cpu().numpy()
return q_last_target[range(q_last_target.shape[0]), actions]
def preprocess(self, batch):
"""
Calculates data for Q-learning from a batch of observations
:param batch: list of lists of Experience objects
:return: tuple of numpy arrays:
1. states -- observations
2. target Q-values
3. vector of td errors for every batch entry
"""
# first and last states for every entry
state_0 = np.array([exp[0].state for exp in batch], dtype=np.float32)
state_L = np.array([exp[-1].state for exp in batch], dtype=np.float32)
q0, qL = self._calc_Q(state_0, state_L)
rewards = self._calc_target_rewards(state_L, qL)
td = np.zeros(shape=(len(batch),))
for idx, (total_reward, exps) in enumerate(zip(rewards, batch)):
# game is done, no final reward
if exps[-1].done:
total_reward = 0.0
for exp in reversed(exps[:-1]):
total_reward *= self.gamma
total_reward += exp.reward
# update total reward and calculate td error
act = exps[0].action
td[idx] = q0[idx][act] - total_reward
q0[idx][act] = total_reward
return state_0, q0, td
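# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of selecting a preprocessing mode via the class methods and
# turning a replay batch into training targets. `net`, `tgt_net` and `batch` are
# assumptions (a PyTorch Q-network, its target copy, and a list of experience
# chains drawn from a replay buffer).
def _q_preprocess_demo(net, tgt_net, batch):
    preprocessor = QLearningPreprocessor.double_dqn(net, tgt_net, gamma=0.99)
    states, target_q, td_errors = preprocessor.preprocess(batch)
    # `states` and `target_q` can be fed to an MSE loss; `td_errors` can be
    # pushed back into a prioritized replay buffer as updated priorities.
    return states, target_q, td_errors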
| [] |
2024-01-10 | Ricky-Zhu/domain-randomizer | randomizer~safe_env~pendulum~safe_pendulum.py | import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from typing import Callable, List, Dict, Tuple
import torch
from os import path
from randomizer.safe_env.utils import angle_normalize, Array
from randomizer.safe_env.wrappers.saute_env import saute_env
from randomizer.safe_env.wrappers.safe_env import SafeEnv
class PendulumSwingUpParams:
"""Params for the system dynamics"""
g = 10.
m = 1.
l = 1.
dt = .05
max_speed = 8.
max_torque = 2.
theta_penalty = 1.
theta_dot_penalty = .1
action_penalty = 0.001 # .001
reward_offset = np.ceil(
theta_penalty * np.pi ** 2 + theta_dot_penalty * max_speed ** 2 + action_penalty * max_torque ** 2)
# reward_bias = reward_offset
unsafe_min = np.pi * (20. / 180)
unsafe_max = np.pi * (30. / 180)
hazard_area_size = np.pi * (1. / 4)
n_constraints = 1
def __str__(self):
_dyn_params = {'g': self.g, 'm': self.m, 'l': self.l, 'dt': self.dt}
_state_lims = {'max_speed': self.max_speed, 'max_torque': self.max_torque}
_reward_params = {'theta_penalty': self.theta_penalty, 'theta_dot_penalty': self.theta_dot_penalty,
'action_penalty': self.action_penalty}
_safety_params = {'unsafe_min': self.unsafe_min, 'unsafe_max': self.unsafe_max,
'hazard_area_size': self.hazard_area_size, 'n_constraints': self.n_constraints}
return {"Dynamics parameters": _dyn_params, "State Limits": _state_lims, "Reward Parameters": _reward_params,
'Safety Parameters': _safety_params}.__str__()
pendulum_cfg = {
'action_dim': 1, # are used
'action_range': [-1, 1], # are used
'unsafe_reward': 0.,
'saute_discount_factor': 1.0,
'max_ep_len': 200,
'min_rel_budget': 1.0,
'max_rel_budget': 1.0,
'test_rel_budget': 1.0,
'use_reward_shaping': True,
'use_state_augmentation': True
}
class PendulumEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
# Used for labels when plotting.
obs_labels = [
r'$\cos(\theta)$',
r'$\sin(\theta)$',
r'$\partial \theta$',
]
def __init__(
self,
params: Callable = None,
mode: str = "train"):
self.viewer = None
if params is None:
params = PendulumSwingUpParams()
self.params = params
self.obs_high = np.array([1., 1., self.params.max_speed], dtype=np.float32)
self.observation_space = spaces.Box(low=-self.obs_high, high=self.obs_high)
action_high = np.float32(self.params.max_torque)
self.action_space = spaces.Box(low=-action_high, high=action_high, shape=(1,))
assert mode == "train" or mode == "test" or mode == "deterministic", "mode can be determinstic, test or train"
self._mode = mode
self.seed()
def seed(self, seed: int = None) -> List[int]:
self.np_random, seed = seeding.np_random(seed)
return [seed]
def do_simulation(self, u: np.ndarray):
"""One step simulation of dynamics on the single pendulum"""
th, thdot = self.state # th := theta
dt = self.params.dt
u = self.params.max_torque * u
u = np.clip(u.squeeze(), -self.params.max_torque, self.params.max_torque)
self.last_u = u # for rendering
newthdot = thdot + (-3 * self.params.g / (2 * self.params.l) * np.sin(th + np.pi) + 3. / (
self.params.m * self.params.l ** 2) * u) * dt
newth = th + newthdot * dt
newthdot = np.clip(newthdot, -self.params.max_speed, self.params.max_speed) # pylint: disable=E1111
self.state = np.array([newth, newthdot])
def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, Dict]:
obs = self._get_obs()
self.do_simulation(action)  # bug fix: run the simulation with numpy actions, not torch tensors
next_obs = self._get_obs()
reward = self._reward_fn(obs, action, next_obs, is_tensor=False)
done = self._termination_fn(obs, action, next_obs, is_tensor=False)
info = dict()
return next_obs, reward, done, info
def reset(self) -> np.ndarray:
if self._mode == "train":
high = np.array([np.pi, 1], dtype=np.float32)
self.state = self.np_random.uniform(low=-high, high=high)
elif self._mode == "test":
high = np.array([0.2, 0.1], dtype=np.float32)
low = np.array([-0.2, -0.1], dtype=np.float32)
self.state = np.array([np.pi, 0], dtype=np.float32) + self.np_random.uniform(low=low, high=high)
elif self._mode == "deterministic":
self.state = np.array([np.pi, 0], dtype=np.float32)
else:
raise NotImplementedError
self.last_u = None
return self._get_obs()
def _get_obs(self) -> np.ndarray:
theta, thetadot = self.state
return np.array([np.cos(theta), np.sin(theta), thetadot])
def _reward_fn(self, states: Array, actions: Array, next_states: Array, is_tensor: bool = True) -> Array:
"""Compute rewards in batch if needed
Mostly copied from OpenAI Gym Pendulum-v0 and ported to torch.
https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py """
actions = self.params.max_torque * actions
cos_th, sin_th, thdot = states[..., 0], states[..., 1], states[..., 2]
if is_tensor:
assert type(states) is torch.Tensor and type(next_states) is torch.Tensor and type(
actions) is torch.Tensor, "Arguments must be torch.Tensor"
th = torch.atan2(sin_th, cos_th)
th_norm = angle_normalize(th, is_tensor=True)
action_squared = actions.clamp(-self.params.max_torque, self.params.max_torque)
costs = self.params.theta_penalty * th_norm ** 2 + self.params.theta_dot_penalty * thdot ** 2 + self.params.action_penalty * action_squared.squeeze() ** 2
reward = (-costs + self.params.reward_offset) / self.params.reward_offset
return reward.view(-1, 1)
else:
assert type(states) is np.ndarray and type(next_states) is np.ndarray and type(
actions) is np.ndarray, "Arguments must be np.ndarray"
th = np.arctan2(sin_th, cos_th)
th_norm = angle_normalize(th, is_tensor=False)
action_squared = np.clip(actions, -self.params.max_torque, self.params.max_torque)
costs = self.params.theta_penalty * th_norm ** 2 + self.params.theta_dot_penalty * thdot ** 2 + self.params.action_penalty * action_squared.squeeze() ** 2
reward = (-costs + self.params.reward_offset) / self.params.reward_offset
return reward
def reward_fn(self, states: Array, actions: Array, next_states: Array) -> Array:
"""Compute rewards in batch if needed"""
return self._reward_fn(states, actions, next_states, is_tensor=True)
def _termination_fn(self, states: Array, actions: Array, next_states: Array, is_tensor: bool = True) -> np.ndarray:
"""Returns done"""
if is_tensor:
return torch.zeros(1, ).cuda()
else:
return False
def render(self, mode='human'):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(500, 500)
self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)
rod = rendering.make_capsule(1, .2)
rod.set_color(.8, .3, .3)
self.pole_transform = rendering.Transform()
rod.add_attr(self.pole_transform)
self.viewer.add_geom(rod)
axle = rendering.make_circle(.05)
axle.set_color(0, 0, 0)
self.viewer.add_geom(axle)
fname = path.join(path.dirname(__file__), "../assets/clockwise.png")
self.img = rendering.Image(fname, 1., 1.)
self.imgtrans = rendering.Transform()
self.img.add_attr(self.imgtrans)
self.viewer.add_onetime(self.img)
self.pole_transform.set_rotation(self.state[0] + np.pi / 2)
if self.last_u:
self.imgtrans.scale = (-self.last_u / 2, np.abs(self.last_u) / 2)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer: self.viewer.close()
class SafePendulumEnv(SafeEnv, PendulumEnv):
"""Safe Pendulum environment."""
def _is_near_unsafe_area_batch(self, thetas):
return ((self.params.unsafe_min - self.params.hazard_area_size) <= thetas) & (
thetas <= (self.params.unsafe_max + self.params.hazard_area_size))
def _safety_cost_fn(self, states: np.ndarray, actions: np.ndarray, next_states: np.ndarray) -> np.ndarray:
"""Computes a linear safety cost between the current position
(if it's near the unsafe area, i.e. in the hazard region)
and the centre of the unsafe region"""
unsafe_angle_middle = 0.5 * (self.params.unsafe_max + self.params.unsafe_min) # 25 = (20 + 30) /2
max_distance = self.params.hazard_area_size + (
unsafe_angle_middle - self.params.unsafe_min) * 1.0 # 50 = 45 + (25 - 20)
assert type(states) is np.ndarray and type(next_states) is np.ndarray and type(
actions) is np.ndarray, "Arguments must be np.ndarray"
thetas = np.arctan2(states[..., 1], states[..., 0])
dist_to_center = np.abs(unsafe_angle_middle - thetas) # |25 - theta|
unsafe_mask = np.float64(self._is_near_unsafe_area_batch(thetas)) # 20-45 = -25 <= theta <= 75 = 30+45
costs = ((max_distance - dist_to_center) / (max_distance)) * unsafe_mask
return costs
@saute_env
class SautedPendulumEnv(SafePendulumEnv):
"""Sauted safe pendulum."""
| [] |
2024-01-10 | ATayls/LLM_api | src~app~agents.py | import os
from dotenv import load_dotenv
import openai
# Load environment variables from .env file
load_dotenv("/Users/adam/PycharmProjects/LLM_api/.env")
openai.api_key = os.getenv("OPENAI_API_KEY")
class ChatAgent:
def __init__(self, name, system_prompt="You are a helpful assistant."):
self.name = name
self.system_prompt = f"You are called: {self.name} " + system_prompt
self.messages = [{"role": "system", "content": self.system_prompt}]
openai.api_key = os.getenv("OPENAI_API_KEY")
def send_message(self, content):
self.messages.append({"role": "user", "content": content})
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.messages
)
assistant_message = completion.choices[0].message["content"]
self.messages.append({"role": "assistant", "content": assistant_message})
return assistant_message
def converse_with(self, other_agent, content):
response = other_agent.send_message(content)
print(f"{other_agent.name}:\n {response}")
return response if response else "END"
if __name__ == "__main__":
alice = ChatAgent(
"Alice",
"""
You are a drunken sailor from the 1800s. You are trying to hide that you are drunk.
Stay in character!
If it is clear the conversation has ended respond with 'END'.
"""
)
bob = ChatAgent(
"Bob",
"""
You are an admiral that only speaks in short sentences.
Stay in character!
If it is clear the conversation has ended respond with 'END'.
"""
)
# Example conversation
# Start the conversation
message = "Ow (you brush past)"
print(f"Alice:\n {message}")
for _ in range(20):  # Limit the conversation to 20 exchanges for this example
message = alice.converse_with(bob, message)
if "END" in message:  # covers startswith("END") and endswith("END.") as well
break
message = bob.converse_with(alice, message)
if "END" in message:
break
| [] |
2024-01-10 | astronomer/airflow-llm-providers-demo | dags~ingestion~FinSum_pgvector.py | """
## Summarize and search financial documents using Cohere's LLMs and the pgvector extensions of postgres.
This DAG extracts and splits financial reporting data from the US
[Securities and Exchange Commission (SEC) EDGAR database](https://www.sec.gov/edgar) and ingests
the data to a PgVector vector database for generative question answering. The DAG also
creates and vectorizes summarizations of the 10-Q document.
"""
from __future__ import annotations
from airflow.decorators import dag, task
from airflow.models.param import Param
from airflow.providers.cohere.hooks.cohere import CohereHook
from airflow.providers.pgvector.hooks.pgvector import PgVectorHook
from include.tasks import extract, split, summarize
import datetime
import logging
import pandas as pd
import uuid
POSTGRES_CONN_ID = "postgres_default"
COHERE_CONN_ID = "cohere_default"
logger = logging.getLogger("airflow.task")
edgar_headers={"User-Agent": "[email protected]"}
table_names=["tenq", "tenq_summary"]
default_args = {"retries": 3, "retry_delay": 30, "trigger_rule": "none_failed"}
@dag(
schedule_interval=None,
start_date=datetime.datetime(2023, 9, 27),
catchup=False,
is_paused_upon_creation=True,
default_args=default_args,
params={
"ticker": Param(
"",
title="Ticker symbol from a US-listed public company.",
type="string",
description="US-listed companies can be found at https://www.sec.gov/file/company-tickers"
)
}
)
def FinSum_PgVector(ticker: str = None):
"""
This DAG extracts and splits financial reporting data from the US
[Securities and Exchanges Commision (SEC) EDGAR database](https://www.sec.gov/edgar) and ingests
the data to a PgVector vector database for generative question answering. The DAG also
creates and vectorizes summarizations of the 10-Q document.
"""
def check_tables() -> [str]:
"""
Check if tables exists.
"""
pgvector_hook = PgVectorHook(POSTGRES_CONN_ID)
exists = []
for table_name in table_names:
if pgvector_hook.get_records(
f"""SELECT * FROM pg_catalog.pg_tables
WHERE schemaname = 'public'
AND tablename = '{table_name}';"""):
exists.append(True)
else:
exists.append(False)
if all(exists):
return ["extract"]
else:
return ["create_tables"]
def create_tables():
pgvector_hook = PgVectorHook(POSTGRES_CONN_ID)
pgvector_hook.create_extension('vector')
for table_name in table_names:
pgvector_hook.create_table(
table_name=table_name,
columns=[
"id UUID PRIMARY KEY",
"docLink TEXT",
"tickerSymbol TEXT",
"cikNumber TEXT",
"fiscalYear TEXT",
"fiscalPeriod TEXT",
"vector VECTOR(768)"
]
)
def pgvector_ingest(df: pd.DataFrame, content_column_name: str, table_name: str):
"""
This task concatenates multiple dataframes from upstream dynamic tasks and vectorizes
with import to a pgvector database.
:param df: A dataframe from an upstream split task
:param content_column_name: The name of the column with text to embed and ingest
:param index_name: The name of the index to import data.
"""
pgvector_hook = PgVectorHook(POSTGRES_CONN_ID)
cohere_hook = CohereHook(COHERE_CONN_ID)
df["id"] = df[content_column_name].apply(
lambda x: str(uuid.uuid5(
name=x, namespace=uuid.NAMESPACE_DNS)
)
)
df["vector"] = df[content_column_name].apply(
lambda x: cohere_hook.create_embeddings(
texts=[x], model="embed-multilingual-v2.0"
)[0]
)
df.drop(content_column_name, axis=1).to_sql(
name=table_name,
con=pgvector_hook.get_sqlalchemy_engine(),
if_exists='replace',
chunksize=1000
)
_check_index = task.branch(check_tables)()
_create_index = task(create_tables)()
edgar_docs = task(extract.extract_10q)(ticker=ticker, edgar_headers=edgar_headers)
split_docs = task(split.split_html)(df=edgar_docs)
task(pgvector_ingest, task_id="ingest_chunks")(
table_name=table_names[0], content_column_name="content", df=split_docs)
generate_summary = task(summarize.summarize_cohere)(df=split_docs, cohere_conn_id=COHERE_CONN_ID)
task(pgvector_ingest, task_id="ingest_summaries")(
table_name=table_names[1], content_column_name="summary", df=generate_summary)
_check_index >> _create_index >> edgar_docs
FinSum_PgVector(ticker="")
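# --- Illustrative query sketch (not part of the original DAG) ---
# A hedged example of how the ingested vectors could be searched afterwards:
# embed a question with the same Cohere model and run a cosine-distance query
# (pgvector's `<=>` operator) against the `tenq` table created above. The
# f-string SQL is for illustration only and is not parameterized.
def query_tenq(question: str, top_k: int = 5):
    pgvector_hook = PgVectorHook(POSTGRES_CONN_ID)
    cohere_hook = CohereHook(COHERE_CONN_ID)
    query_vector = cohere_hook.create_embeddings(
        texts=[question], model="embed-multilingual-v2.0")[0]
    vector_literal = "[" + ",".join(str(v) for v in query_vector) + "]"
    return pgvector_hook.get_records(
        f"""SELECT docLink, tickerSymbol, fiscalYear, fiscalPeriod
            FROM {table_names[0]}
            ORDER BY vector <=> '{vector_literal}'
            LIMIT {top_k};"""
    )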
| [] |
2024-01-10 | astronomer/airflow-llm-providers-demo | include~streamlit~weaviate~FinSum.py | from airflow.providers.weaviate.hooks.weaviate import WeaviateHook
# from airflow.providers.openai.hooks.openai import OpenAIHook
from datetime import datetime
import json
# import openai as openai_client
from pathlib import Path
from PIL import Image
import requests
import streamlit as st
from time import sleep
from textwrap import dedent
WEAVIATE_CONN_ID = "weaviate_default"
OPENAI_CONN_ID = "openai_default"
edgar_headers={"User-Agent": "[email protected]"}
chunk_class = "TenQ"
summary_class = "TenQSummary"
dag_id="FinSum_Weaviate"
webserver_internal = "http://webserver:8080"
webserver_public = "http://localhost:8080"
webserver_username = "admin"
webserver_password = "admin"
st.set_page_config(layout="wide")
if "weaviate_client" not in st.session_state:
weaviate_client = WeaviateHook(WEAVIATE_CONN_ID).get_client()
st.session_state["weaviate_client"] = weaviate_client
else:
weaviate_client = st.session_state["weaviate_client"]
# if "openai_client" not in st.session_state:
# openai_client.api_key = OpenAIHook(OPENAI_CONN_ID)._get_api_key()
# st.session_state["openai_client"] = openai_client
# else:
# openai_client = st.session_state["openai_client"]
if "company_list" not in st.session_state:
company_list = requests.get(
url="https://www.sec.gov/files/company_tickers.json",
headers=edgar_headers)
if company_list.ok:
company_list = list(company_list.json().values())
else:
raise Exception(company_list.reason)
st.session_state["company_list"] = company_list
else:
company_list = st.session_state["company_list"]
header_image = Image.open(Path(__file__).parent.parent / "logo.png")
avatar_image = Path(__file__).parent.parent.joinpath("logo.png").as_posix()
try:
tickers = (
weaviate_client.query.get(
summary_class, ["tickerSymbol"])
.do()
)["data"]["Get"][summary_class]
tickers = {doc["tickerSymbol"].upper() for doc in tickers}
company_tickers = [company for company in company_list if company["ticker"].upper() in tickers]
except:
company_tickers = None
st.markdown(
"""
<style>
.small-font {
font-size:1px !important;
}
</style>""",
unsafe_allow_html=True,
)
disclaimer = dedent("""
<p><small>Disclaimer & Limitations\n\n
This FinSum Demo is solely for demonstration purposes and should not be
construed as financial advice. Nor are the summaries intended to be an
accurate representation of financial reports.</small></p>""")
def write_response(text: str):
col1, mid, col2 = st.columns([1, 1, 20])
with col1:
st.image(avatar_image, width=60)
with col2:
st.write(text)
st.markdown(disclaimer, unsafe_allow_html=True)
def format_tickers(ticker_dict:dict):
return str(list(ticker_dict.values()))[1:-1].replace("'","")
with st.container():
title_col, logo_col = st.columns([8, 2])
with title_col:
st.title("Welcome to FinSum!")
st.write(dedent("""
This Streamlit application is a simple interface for interacting with summaries of
financial statements. Apache Airflow is used to ingest quarterly financial
reporting documents from the US Securities and Exchange Commission (SEC)
[EDGAR database](https://www.sec.gov/edgar). Extracted documents are vectorized
and summarized using [OpenAI](https://openai.com) LLMs and stored in a [Weaviate](
https://weaviate.io) vector database."""))
with logo_col:
st.image(header_image)
with st.sidebar:
selected_company = fyfp = fy = fp = None
if company_tickers:
selected_company = st.selectbox(
label="Select a company's ticker.",
index=None,
options=company_tickers,
format_func=format_tickers
)
if selected_company:
fyfp = weaviate_client.query.get(
summary_class, ["fiscalYear", "fiscalPeriod"])\
.with_where({
"path": ["tickerSymbol"],
"operator": "Equal",
"valueText": selected_company["ticker"]
})\
.do()["data"]["Get"][summary_class]
if fyfp:
fy = st.selectbox(label="Select fiscal year",
index=None,
options={doc["fiscalYear"] for doc in fyfp}
)
if fy:
fp = st.selectbox(label="Select fiscal period",
index=None,
options={doc["fiscalPeriod"] for doc in fyfp if doc["fiscalYear"] == fy}
)
ingest_tab, finsum_tab, finsum_qna = st.tabs(
["Ingest New Ticker", "FinSum 10-Q Summarization", "FinSum Q&A"]
)
with finsum_tab:
st.header("FinSum 10-Q Summarization")
if not fp:
st.write("⚠️ Select a company, fiscal year and fiscal period in the side bar.")
else:
st.write(f"Summary for {selected_company['title']} in fiscal period FY{fy}{fp}.")
summary = (
weaviate_client.query.get(summary_class, ["summary"])
.with_where({
"operator": "And",
"operands": [
{
"path": ["tickerSymbol"],
"operator": "Equal",
"valueText": selected_company["ticker"]
},
{
"path": ["fiscalYear"],
"operator": "Equal",
"valueInt": fy
},
{
"path": ["fiscalPeriod"],
"operator": "Equal",
"valueText": fp
}
]
})
.do()
)["data"]["Get"][summary_class][0]["summary"]
st.markdown(summary)
with finsum_qna:
st.write(dedent("""
Ask a question regarding financial statements for the chosen company.
FinSum will vectorize the question, retrieve related documents from
the vector database and use that as context for OpenAI to generate
a response."""))
if not selected_company:
st.write("⚠️ Select a company in the side bar.")
question = None
else:
question = st.text_area("Question:", placeholder="")
if question:
ask = {
"question": question,
"properties": ["content", "tickerSymbol", "fiscalYear", "fiscalPeriod"],
# "certainty": 0.0
}
st.write("Showing search results for: " + question)
st.subheader("10-Q results")
results = (
weaviate_client.query.get(
chunk_class,
["docLink",
"tickerSymbol",
"_additional {answer {hasAnswer property result} }"
])
.with_where({
"path": ["tickerSymbol"],
"operator": "Equal",
"valueText": selected_company["ticker"]
})
.with_ask(ask)
.with_limit(3)
.with_additional(["certainty", "id", "distance"])
.do()
)
if results.get("errors"):
for error in results["errors"]:
if ("no api key found" or "remote client vectorize: failed with status: 401 error") in error["message"]:
raise Exception("Cannot vectorize. Check the OpenAI key in the airflow connection.")
else:
st.write(error["message"])
elif len(results["data"]["Get"][chunk_class]) > 0:
docLinks = []
link_count = 1
for result in results["data"]["Get"][chunk_class]:
if result["_additional"]["answer"]["hasAnswer"]:
write_response(result["_additional"]["answer"]["result"])
docLinks.append(f"[{link_count}]({result['docLink']})")
link_count = link_count + 1
st.write(",".join(docLinks))
st.markdown(disclaimer, unsafe_allow_html=True)
with ingest_tab:
st.header("Ingest new financial data")
st.write("""By selecting a company from the list below an Airflow DAG run will be
triggered to extract, embed and summarize financial statements. Search
by company name, ticker symbol or CIK number.""")
company_to_ingest = st.selectbox(
label="Select a company.",
index=None,
options=company_list,
format_func=format_tickers
)
if company_to_ingest:
if st.button(label="Start Ingest"):
response = requests.post(
url=f"{webserver_internal}/api/v1/dags/{dag_id}/dagRuns",
headers={"Content-Type": "application/json"},
auth=requests.auth.HTTPBasicAuth(
webserver_username, webserver_password),
data=json.dumps({
"conf": {
"run_date": str(datetime.now()),
"ticker": company_to_ingest["ticker"]
}
})
)
if response.ok:
run_id = json.loads(response.text)['dag_run_id']
link = f"{webserver_public}/dags/{dag_id}/grid?dag_run_id={run_id}&tab=graph"
status_link = f"{webserver_internal}/api/v1/dags/{dag_id}/dagRuns/{run_id}"
status = requests.get(
url=status_link,
headers={"Content-Type": "application/json"},
auth=requests.auth.HTTPBasicAuth(
webserver_username, webserver_password),
)
if status.ok:
state = json.loads(status.content).get("state")
if state in ["running", "queued"]:
st.markdown(dedent(f"""
Document ingest running for ticker {company_to_ingest["ticker"]}. \n
Check status in the [Airflow webserver]({link})"""))
st.write("⚠️ Do not refresh your browser.")
else:
st.error(f"Ingest not running: {state}")
with st.spinner():
while state in ["running", "queued"]:
sleep(5)
status = requests.get(url=status_link,
headers={"Content-Type": "application/json"},
auth=requests.auth.HTTPBasicAuth(
webserver_username, webserver_password),
)
if status.ok:
state = json.loads(status.content).get("state")
else:
st.error(status.reason)
if state == "success":
st.success(dedent(f"""
Ingest complete for ticker {company_to_ingest['ticker']}.
Please refresh your browser."""))
else:
st.error(f"Ingest failed: state {state}")
else:
st.error(f"Ingest failed: state {status.reason}")
else:
st.error(f"Could not start DAG: {response.reason}") | [] |
2024-01-10 | astronomer/airflow-llm-providers-demo | dags~ingestion~FinSum_cohere.py | """
## Summarize and search financial documents using Cohere's LLMs.
This DAG extracts and splits financial reporting data from the US
[Securities and Exchanges Commision (SEC) EDGAR database](https://www.sec.gov/edgar) and generates
vector embeddings with cohere embeddings for generative question answering. The DAG also
creates and vectorizes summarizations of the 10-Q document using Cohere Summarize.
"""
from __future__ import annotations
from airflow.decorators import dag, task
from airflow.models.param import Param
from airflow.providers.cohere.hooks.cohere import CohereHook
from include.tasks import extract, split, summarize
import datetime
import logging
import pandas as pd
from pathlib import Path
import uuid
COHERE_CONN_ID = "cohere_default"
logger = logging.getLogger("airflow.task")
edgar_headers={"User-Agent": "[email protected]"}
default_args = {"retries": 3, "retry_delay": 30, "trigger_rule": "none_failed"}
@dag(
schedule_interval=None,
start_date=datetime.datetime(2023, 9, 27),
catchup=False,
is_paused_upon_creation=True,
default_args=default_args,
params={
"ticker": Param(
"",
title="Ticker symbol from a US-listed public company.",
type="string",
description="US-listed companies can be found at https://www.sec.gov/file/company-tickers"
)
}
)
def FinSum_Cohere(ticker: str = None):
"""
This DAG extracts and splits financial reporting data from the US
[Securities and Exchange Commission (SEC) EDGAR database](https://www.sec.gov/edgar) and generates
vector embeddings with cohere embeddings for generative question answering. The DAG also
creates and vectorizes summarizations of the 10-Q document.
With very large datasets it may not be convenient to store embeddings in a vector database. This DAG
shows how to save documents with vectors on disk. Realistically these would be serialized in cloud object
storage but for the purpose of demo we store them on local disk.
"""
def vectorize(df: pd.DataFrame, content_column_name: str, output_file_name: Path) -> str:
"""
This task concatenates multiple dataframes from upstream dynamic tasks and vectorizes
with Cohere Embeddings. The vectorized dataset is written to disk.
:param df: A dataframe from an upstream split task
:param content_column_name: The name of the column with text to embed and ingest
:param output_file_name: Path for saving embeddings
:return: Location of saved file
"""
cohere_hook = CohereHook(COHERE_CONN_ID)
df["id"] = df[content_column_name].apply(
lambda x: str(uuid.uuid5(
name=x,
namespace=uuid.NAMESPACE_DNS))
)
df["vector"] = df[content_column_name].apply(
lambda x: cohere_hook.create_embeddings(
texts=[x], model="embed-multilingual-v2.0"
)[0]
)
df.to_parquet(output_file_name)
return output_file_name
edgar_docs = task(extract.extract_10q)(ticker=ticker, edgar_headers=edgar_headers)
split_docs = task(split.split_html)(df=edgar_docs)
embeddings_file = task(vectorize)(
output_file_name="include/data/html/cohere_embeddings.parquet",
content_column_name="content",
df=split_docs)
generate_summary = task(summarize.summarize_cohere)(df=split_docs, cohere_conn_id=COHERE_CONN_ID)
summaries_file = (
task(vectorize, task_id="vectorize_summaries")(
output_file_name="include/data/html/cohere_summary_embeddings.parquet",
content_column_name="summary",
df=generate_summary)
)
FinSum_Cohere(ticker="") | [] |
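# --- Illustrative search sketch (not part of the original DAG) ---
# A hedged example of using the on-disk embeddings: load the parquet file written
# by the vectorize task, embed a question with the same Cohere model, and rank
# chunks by cosine similarity with plain numpy. The file path and the presence of
# a "docLink" column follow the DAG above but are assumptions here.
import numpy as np

def search_local_embeddings(question: str, top_k: int = 5) -> pd.DataFrame:
    cohere_hook = CohereHook(COHERE_CONN_ID)
    df = pd.read_parquet("include/data/html/cohere_embeddings.parquet")
    query = np.array(cohere_hook.create_embeddings(
        texts=[question], model="embed-multilingual-v2.0")[0])
    vectors = np.stack(df["vector"].to_numpy())
    scores = vectors @ query / (
        np.linalg.norm(vectors, axis=1) * np.linalg.norm(query))
    return df.assign(score=scores).nlargest(top_k, "score")[["docLink", "score"]]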
2024-01-10 | astronomer/airflow-llm-providers-demo | include~tasks~summarize.py | from __future__ import annotations
from airflow.providers.openai.hooks.openai import OpenAIHook
from airflow.providers.cohere.hooks.cohere import CohereHook
from cohere.client import Client as CohereClient
import logging
import openai as openai_client
import pandas as pd
logger = logging.getLogger("airflow.task")
def chunk_summarization_openai(
openai_client: openai_client, content: str, ticker: str, fy: str, fp: str) -> str:
"""
This function uses openai gpt-3.5-turbo-1106 to summarize a chunk of text.
:param content: The text content to be summarized.
:param ticker: The company ticker symbol (for status printing).
:param fy: The fiscal year of the document chunk (for status printing).
:param fp: The fiscal period of the document chunk (for status printing).
:return: A summary string
"""
logger.info(f"Summarizing chunk for ticker {ticker} {fy}:{fp}")
response = openai_client.ChatCompletion().create(
model="gpt-3.5-turbo-1106",
messages=[
{
"role": "system",
"content": "You are a highly skilled AI trained in language comprehension and summarization. I would like you to read the following text and summarize it into a concise abstract paragraph. Aim to retain the most important points, providing a coherent and readable summary that could help a person understand the main points of the discussion without needing to read the entire text. Please avoid unnecessary details or tangential points."
},
{
"role": "user",
"content": content
}
],
temperature=0,
max_tokens=1000
)
if content:=response.get("choices")[0].get("message").get("content"):
return content
else:
return None
def doc_summarization_openai(
openai_client: openai_client, content: str, doc_link: str) -> str:
"""
This function uses openai gpt-4-1106-preview to summarize a concatenation of
document chunks.
:param content: The text content to be summarized.
:param doc_link: The URL of the document being summarized (for status printing).
:return: A summary string
"""
logger.info(f"Summarizing document for {doc_link}")
response = openai_client.ChatCompletion().create(
model="gpt-4-1106-preview",
messages=[
{
"role": "system",
"content": "You are a highly skilled AI trained in language comprehension and summarization. I would like you to read the following text and summarize it into a concise abstract paragraph. Aim to retain the most important points, providing a coherent and readable summary that could help a person understand the main points of the discussion without needing to read the entire text. Please avoid unnecessary details or tangential points."
},
{
"role": "user",
"content": content
}
],
temperature=0,
max_tokens=1000
)
if content:=response.get("choices")[0].get("message").get("content"):
return content
else:
return None
def summarize_openai(df: pd.DataFrame, openai_conn_id:str) -> pd.DataFrame:
"""
This task uses openai to recursively summarize extracted documents. First the individual
chunks of the document are summarized. Then the collection of chunk summaries are summarized.
:param df: A Pandas dataframe from upstream split tasks
:param openai_conn_id: The connection name to use for the openai hook.
:return: A Pandas dataframe with summaries for ingest to a vector DB.
"""
openai_client.api_key = OpenAIHook(openai_conn_id)._get_api_key()
df["chunk_summary"] = df.apply(lambda x: chunk_summarization_openai(
openai_client=openai_client,
content=x.content,
fy=x.fiscalYear,
fp=x.fiscalPeriod,
ticker=x.tickerSymbol), axis=1)
summaries_df = df.groupby("docLink").chunk_summary.apply("\n".join).reset_index()
summaries_df["summary"] = summaries_df.apply(lambda x: doc_summarization_openai(
openai_client=openai_client,
content=x.chunk_summary,
doc_link=x.docLink), axis=1)
summaries_df.drop("chunk_summary", axis=1, inplace=True)
summary_df = df.drop(["content", "chunk_summary"], axis=1).drop_duplicates().merge(summaries_df)
return summary_df
def chunk_summarization_cohere(
cohere_client: CohereClient, content: str, ticker: str, fy: str, fp: str) -> str:
"""
This function uses Cohere's "Summarize" endpoint to summarize a chunk of text.
:param content: The text content to be summarized.
:param ticker: The company ticker symbol (for status printing).
:param fy: The fiscal year of the document chunk (for status printing).
:param fp: The fiscal period of the document chunk (for status printing).
:return: A summary string
"""
logger.info(f"Summarizing chunk for ticker {ticker} {fy}:{fp}")
return cohere_client.summarize(
text=content,
model="command",
length="long",
extractiveness="medium",
temperature=1,
format="paragraph"
).summary
def doc_summarization_cohere(
cohere_client: CohereClient, content: str, doc_link: str) -> str:
"""
This function uses Cohere's "Summarize" endpoint to summarize a concatenation
of chunk summaries.
:param content: The text content to be summarized.
:param doc_link: The URL of the document being summarized (for status printing).
:return: A summary string
"""
logger.info(f"Summarizing document for {doc_link}")
return cohere_client.summarize(
text=content,
model="command",
length="long",
extractiveness="medium",
temperature=1,
format="paragraph"
).summary
def summarize_cohere(df: pd.DataFrame, cohere_conn_id:str) -> pd.DataFrame:
"""
This task uses cohere to recursively summarize extracted documents. First the individual
chunks of the document are summarized. Then the collection of chunk summaries are summarized.
:param df: A Pandas dataframe from upstream split tasks
:param cohere_conn_id: An Airflow connection ID for Cohere
:return: A Pandas dataframe with summaries for ingest to a vector DB.
"""
cohere_client = CohereHook(cohere_conn_id).get_conn
df["chunk_summary"] = df.apply(lambda x: chunk_summarization_cohere(
cohere_client=cohere_client,
content=x.content,
fy=x.fiscalYear,
fp=x.fiscalPeriod,
ticker=x.tickerSymbol), axis=1)
summaries_df = df.groupby("docLink").chunk_summary.apply("\n".join).reset_index()
summaries_df["summary"] = summaries_df.apply(lambda x: doc_summarization_cohere(
cohere_client=cohere_client,
content=x.chunk_summary,
doc_link=x.docLink), axis=1)
summaries_df.drop("chunk_summary", axis=1, inplace=True)
summary_df = df.drop(["content", "chunk_summary"], axis=1).drop_duplicates().merge(summaries_df)
return summary_df
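# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the input shape these functions expect: one row per document
# chunk with the metadata columns referenced above. The connection id and the toy
# content strings are assumptions for illustration only.
def _summarize_demo() -> pd.DataFrame:
    df = pd.DataFrame({
        "docLink": ["https://www.sec.gov/Archives/example-10q.htm"] * 2,
        "tickerSymbol": ["ACME"] * 2,
        "fiscalYear": ["2023"] * 2,
        "fiscalPeriod": ["Q2"] * 2,
        "content": [
            "Revenue increased 10% year over year driven by product sales.",
            "Operating expenses decreased due to lower marketing costs.",
        ],
    })
    return summarize_openai(df=df, openai_conn_id="openai_default")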
| [
"You are a highly skilled AI trained in language comprehension and summarization. I would like you to read the following text and summarize it into a concise abstract paragraph. Aim to retain the most important points, providing a coherent and readable summary that could help a person understand the main points of the discussion without needing to read the entire text. Please avoid unnecessary details or tangential points."
] |
2024-01-10 | astronomer/airflow-llm-providers-demo | dags~ingestion~FinSum_openai.py | """
## Summarize and search financial documents using OpenAI's LLMs.
This DAG extracts and splits financial reporting data from the US
[Securities and Exchanges Commision (SEC) EDGAR database](https://www.sec.gov/edgar) and generates
vector embeddings with OpenAI embeddings model for generative question answering. The DAG also
creates and vectorizes summarizations of the 10-Q document using OpenAI completions.
"""
from __future__ import annotations
from airflow.decorators import dag, task
from airflow.models.param import Param
from airflow.providers.openai.hooks.openai import OpenAIHook
from include.tasks import extract, split, summarize
import datetime
import logging
import pandas as pd
from pathlib import Path
import uuid
OPENAI_CONN_ID = "openai_default"
logger = logging.getLogger("airflow.task")
edgar_headers={"User-Agent": "[email protected]"}
default_args = {"retries": 3, "retry_delay": 30, "trigger_rule": "none_failed"}
@dag(
schedule_interval=None,
start_date=datetime.datetime(2023, 9, 27),
catchup=False,
is_paused_upon_creation=True,
default_args=default_args,
params={
"ticker": Param(
"",
title="Ticker symbol from a US-listed public company.",
type="string",
description="US-listed companies can be found at https://www.sec.gov/file/company-tickers"
)
}
)
def FinSum_OpenAI(ticker: str = None):
"""
This DAG extracts and splits financial reporting data from the US
[Securities and Exchange Commission (SEC) EDGAR database](https://www.sec.gov/edgar) and generates
vector embeddings with openai embeddings model for generative question answering. The DAG also
creates and vectorizes summarizations of the 10-Q document.
With very large datasets it may not be convenient to store embeddings in a vector database. This DAG
shows how to save documents with vectors on disk. Realistically these would be serialized in cloud object
storage but for the purpose of demo we store them on local disk.
"""
def vectorize(df: pd.DataFrame, content_column_name: str, output_file_name: Path) -> str:
"""
This task concatenates multiple dataframes from upstream dynamic tasks and
vectorizes with OpenAI Embeddings.
:param df: A Pandas dataframes from upstream split tasks
:param content_column_name: The name of the column with text to embed and ingest
:param output_file_name: Path for saving embeddings as a parquet file
:return: Location of saved file
"""
openai_hook = OpenAIHook(OPENAI_CONN_ID)
df["id"] = df[content_column_name].apply(
lambda x: str(uuid.uuid5(
name=x,
namespace=uuid.NAMESPACE_DNS)))
df["vector"] = df[content_column_name].apply(
lambda x: openai_hook.create_embeddings(
text=x,
model="text-embedding-ada-002")
)
df.to_parquet(output_file_name)
return output_file_name
edgar_docs = task(extract.extract_10q)(ticker=ticker, edgar_headers=edgar_headers)
split_docs = task(split.split_html)(df=edgar_docs)
embeddings_file = (
task(vectorize, task_id="vectorize_chunks")(
output_file_name='include/data/html/openai_embeddings.parquet',
content_column_name="content",
df=split_docs)
)
generate_summary = task(summarize.summarize_openai)(df=split_docs, openai_conn_id=OPENAI_CONN_ID)
summaries_file = (
task(vectorize, task_id="vectorize_summaries")(
output_file_name='include/data/html/openai_summary_embeddings.parquet',
content_column_name="summary",
df=generate_summary)
)
FinSum_OpenAI(ticker="")
| [] |
2024-01-10 | astronomer/airflow-llm-providers-demo | include~tasks~split.py | from __future__ import annotations
from langchain.schema import Document
from langchain.text_splitter import (
HTMLHeaderTextSplitter,
RecursiveCharacterTextSplitter,
)
import pandas as pd
def split_html(df: pd.DataFrame) -> pd.DataFrame:
"""
This task concatenates multiple dataframes from upstream dynamic tasks and splits the content
first with an html splitter and then with a text splitter.
:param df: A dataframe from an upstream extract task
:return: A dataframe
"""
headers_to_split_on = [
("h2", "h2"),
]
html_splitter = HTMLHeaderTextSplitter(headers_to_split_on)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=10000, chunk_overlap=200, separators=["\n\n", "\n", " ", ""]
)
df["doc_chunks"] = df["content"].apply(lambda x: html_splitter.split_text(text=x))
df = df.explode("doc_chunks", ignore_index=True)
df["content"] = df["doc_chunks"].apply(lambda x: x.page_content)
df["doc_chunks"] = df["content"].apply(
lambda x: text_splitter.split_documents([Document(page_content=x)])
)
df = df.explode("doc_chunks", ignore_index=True)
df["content"] = df["doc_chunks"].apply(lambda x: x.page_content)
df.drop(["doc_chunks"], inplace=True, axis=1)
df.drop_duplicates(subset=["docLink", "content"], keep="first", inplace=True)
df.reset_index(inplace=True, drop=True)
return df
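# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the expected input: one row per 10-Q filing with an HTML
# "content" column; split_html explodes it into one row per text chunk. The URL
# and HTML snippet are placeholders.
def _split_demo() -> pd.DataFrame:
    df = pd.DataFrame({
        "docLink": ["https://www.sec.gov/Archives/example-10q.htm"],
        "content": [
            "<html><body><h2>Item 2</h2><p>" + "Revenue grew this quarter. " * 50 + "</p></body></html>"
        ],
    })
    return split_html(df)[["docLink", "content"]]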
| [] |
2024-01-10 | astronomer/airflow-llm-providers-demo | dags~ingestion~FinSum_pinecone.py | """
## Summarize and search financial documents using OpenAI's LLMs and Pinecone vector database
This DAG extracts and splits financial reporting data from the US
[Securities and Exchanges Commision (SEC) EDGAR database](https://www.sec.gov/edgar) and
ingests the data to a Pinecone vector database for generative question answering. The DAG
also creates and vectorizes summarizations of the 10-Q document using OpenAI completions.
"""
from __future__ import annotations
from airflow.decorators import dag, task
from airflow.models.param import Param
from airflow.providers.pinecone.hooks.pinecone import PineconeHook
from airflow.providers.openai.hooks.openai import OpenAIHook
from include.tasks import extract, split, summarize
import datetime
import logging
import pandas as pd
import uuid
PINECONE_CONN_ID = "pinecone_default"
OPENAI_CONN_ID = "openai_default"
logger = logging.getLogger("airflow.task")
edgar_headers={"User-Agent": "[email protected]"}
index_names = ["tenq", "tenq-summary"]
default_args = {"retries": 3, "retry_delay": 30, "trigger_rule": "none_failed"}
@dag(
schedule_interval=None,
start_date=datetime.datetime(2023, 9, 27),
catchup=False,
is_paused_upon_creation=True,
default_args=default_args,
params={
"ticker": Param(
default="",
title="Ticker symbol from a US-listed public company.",
type="string",
description="US-listed companies can be found at https://www.sec.gov/file/company-tickers"
)
}
)
def FinSum_Pinecone(ticker: str = None):
"""
This DAG extracts and splits financial reporting data from the US
[Securities and Exchange Commission (SEC) EDGAR database](https://www.sec.gov/edgar) and
ingests the data to a Pinecone vector database for generative question answering. The DAG
also creates and vectorizes summarizations of the 10-Q document.
"""
def check_indexes() -> [str]:
"""
Check if indexes exists.
"""
pinecone_hook = PineconeHook(PINECONE_CONN_ID)
if set(index_names).issubset(set(pinecone_hook.list_indexes())):
return ["extract"]
else:
return ["create_indexes"]
def create_indexes(existing: str = "ignore", pod_type: str = "s1"):
pinecone_hook = PineconeHook(PINECONE_CONN_ID)
for index_name in index_names:
if index_name in pinecone_hook.list_indexes():
if existing == "replace":
pinecone_hook.delete_index(index_name=index_name)
elif existing == "ignore":
continue
else:
pinecone_hook.create_index(
index_name=index_name,
metric="cosine",
replicas=1,
dimension=1536,
shards=1,
pods=1,
pod_type=pod_type,
source_collection='',
)
def pinecone_ingest(df: pd.DataFrame, content_column_name: str, index_name: str):
"""
This task concatenates multiple dataframes from upstream dynamic tasks and vectorizes
with import to pinecone.
:param df: A dataframe from an upstream split task
:param content_column_name: The name of the column with text to embed and ingest
:param index_name: The name of the index to import data.
"""
openai_hook = OpenAIHook(OPENAI_CONN_ID)
pinecone_hook = PineconeHook(PINECONE_CONN_ID)
df["metadata"] = df.drop([content_column_name], axis=1).to_dict('records')
df["id"] = df[content_column_name].apply(lambda x: str(
uuid.uuid5(name=x+index_name, namespace=uuid.NAMESPACE_DNS)))
df["values"] = df[content_column_name].apply(
lambda x: openai_hook.create_embeddings(text=x,
model="text-embedding-ada-002"))
data = list(df[["id", "values", "metadata"]].itertuples(index=False, name=None))
pinecone_hook.upsert_data_async(
data=data,
index_name=index_name,
async_req=True,
pool_threads=30,
)
_check_index = task.branch(check_indexes)()
_create_index = task(create_indexes)()
edgar_docs = task(extract.extract_10q)(ticker=ticker, edgar_headers=edgar_headers)
split_docs = task(split.split_html)(df=edgar_docs)
task(pinecone_ingest, task_id="import_chunks")(
index_name=index_names[0], content_column_name="content", df=split_docs
)
generate_summary = task(summarize.summarize_openai)(df=split_docs, openai_conn_id=OPENAI_CONN_ID)
task(pinecone_ingest, task_id="import_summary")(
index_name=index_names[1], content_column_name="summary", df=generate_summary
)
_check_index >> _create_index >> edgar_docs
FinSum_Pinecone(ticker="")
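# --- Illustrative query sketch (not part of the original DAG) ---
# A hedged example of querying the "tenq" index created above with the pinecone
# client library directly: embed a question with the same OpenAI model and ask
# for the nearest chunks. The API key/environment placeholders are assumptions;
# in this project they would come from the Airflow Pinecone connection instead.
import pinecone

def query_tenq(question: str, top_k: int = 5):
    openai_hook = OpenAIHook(OPENAI_CONN_ID)
    query_vector = openai_hook.create_embeddings(
        text=question, model="text-embedding-ada-002")
    pinecone.init(api_key="<PINECONE_API_KEY>", environment="<PINECONE_ENVIRONMENT>")
    index = pinecone.Index(index_names[0])
    return index.query(vector=query_vector, top_k=top_k, include_metadata=True)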
| [] |
2024-01-10 | lcn-kul/sentencefmricomparison | src~sentencefmricomparison~models~gpt3_embeddings.py | """Generate frozen/fixed embeddings for GPT-3 with OpenAI's API."""
import os
import pickle as pkl # noqa
import time
from typing import List, Optional
import click
import openai
import pandas as pd
import torch
from tqdm import tqdm
from sentencefmricomparison.constants import (
OPENAI_API_KEY,
OPENAI_DEFAULT_MODEL,
PEREIRA_OUTPUT_DIR,
PEREIRA_PERMUTED_SENTENCES_PATH,
)
@click.command()
@click.option(
"--column_names",
multiple=True,
default=["center_sents", "paragraphs", "permuted_sents"],
)
@click.option("--pereira_input_file", default=PEREIRA_PERMUTED_SENTENCES_PATH, type=str)
@click.option("--openai_api_key", default=OPENAI_API_KEY, type=str)
@click.option("--openai_model", default=OPENAI_DEFAULT_MODEL, type=str)
@click.option("--output_dir", default=PEREIRA_OUTPUT_DIR, type=str)
def generate_gpt3_embeddings(
column_names: List[str] = ["center_sents", "paragraphs", "permuted_sents"], # noqa
pereira_input_file: str = PEREIRA_PERMUTED_SENTENCES_PATH,
openai_api_key: Optional[str] = OPENAI_API_KEY,
openai_model: str = OPENAI_DEFAULT_MODEL,
output_dir: str = PEREIRA_OUTPUT_DIR,
):
"""Generate frozen/fixed embeddings for the Pereira paragraphs/middle sentences/permuted paragraphs.
:param column_names: Names of the columns in the input file that contain the paragraphs/middle sentences/permuted
sentences, defaults to ["center_sents", "paragraphs", "permuted_sents"]
:type column_names: List[str]
:param pereira_input_file: Path to the Pereira input file containing all three types of paragraphs
:type pereira_input_file: str
:param openai_api_key: OpenAI API key specific to your account
:type openai_api_key: str
:param openai_model: OpenAI model to use for generating the embeddings
:type openai_model: str
:param output_dir: Output directory to save the embeddings to
:type output_dir: str
"""
# Load the input file
pereira_paragraphs = pd.read_csv(pereira_input_file)
# Set the OpenAI API key
openai.api_key = openai_api_key
# Generate the embeddings for all three types of paragraphs
# The resulting embeds dictionary has the following structure: {sentence: embedding vector}
embeds = {}
for col in column_names:
for _, row in tqdm(
pereira_paragraphs.iterrows(), desc=f"Generating embeddings for {col}"
):
# Sleep for 1 second to avoid hitting the OpenAI API rate limit
time.sleep(1)
# Generate the embedding for the current input by calling the OpenAI API
embeds[row[col]] = torch.tensor(
openai.Embedding.create(input=[row[col]], model=openai_model)["data"][
0
]["embedding"]
)
# Save the embeddings to the output directory
with open(os.path.join(output_dir, "gpt3_embeds.pkl"), "wb") as f:
pkl.dump(embeds, f)
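# --- Illustrative loading sketch (not part of the original script) ---
# A hedged example of reading the pickled embeddings back: the file maps each
# paragraph string to a torch tensor, so the cosine similarity between any two
# paragraphs can be computed directly.
def load_and_compare(output_dir: str = PEREIRA_OUTPUT_DIR):
    with open(os.path.join(output_dir, "gpt3_embeds.pkl"), "rb") as f:
        embeds = pkl.load(f)
    texts = list(embeds)[:2]
    sim = torch.nn.functional.cosine_similarity(
        embeds[texts[0]], embeds[texts[1]], dim=0
    )
    return texts, sim.item()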
@click.group()
def cli() -> None:
"""Generate embeddings for GPT-3 with OpenAI's API."""
if __name__ == "__main__":
cli.add_command(generate_gpt3_embeddings)
cli()
| [] |