date_collected (stringclasses, 1 value) | repo_name (stringlengths 6–116) | file_name (stringlengths 2–220) | file_contents (stringlengths 13–357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | 120318/nas-tools | app~media~media.py | import difflib
import os
import random
import re
import traceback
from functools import lru_cache
import zhconv
from lxml import etree
import log
from app.helper import MetaHelper
from app.helper.openai_helper import OpenAiHelper
from app.media.meta.metainfo import MetaInfo
from app.media.tmdbv3api import TMDb, Search, Movie, TV, Person, Find, TMDbException, Discover, Trending, Episode, Genre
from app.utils import PathUtils, EpisodeFormat, RequestUtils, NumberUtils, StringUtils, cacheman
from app.utils.types import MediaType, MatchMode
from config import Config, KEYWORD_BLACKLIST, KEYWORD_SEARCH_WEIGHT_3, KEYWORD_SEARCH_WEIGHT_2, KEYWORD_SEARCH_WEIGHT_1, \
KEYWORD_STR_SIMILARITY_THRESHOLD, KEYWORD_DIFF_SCORE_THRESHOLD
class Media:
# TheMovieDB
tmdb = None
search = None
movie = None
tv = None
episode = None
person = None
find = None
trending = None
discover = None
genre = None
meta = None
openai = None
_rmt_match_mode = None
_search_keyword = None
_search_tmdbweb = None
_chatgpt_enable = None
_default_language = None
def __init__(self):
self.init_config()
def init_config(self):
app = Config().get_config('app')
media = Config().get_config('media')
laboratory = Config().get_config('laboratory')
# Keyword-assisted search
self._search_keyword = laboratory.get("search_keyword")
# TMDB website lookup assistance
self._search_tmdbweb = laboratory.get("search_tmdbweb")
# ChatGPT assistance
self._chatgpt_enable = laboratory.get("chatgpt_enable")
# Default language
self._default_language = media.get("tmdb_language", "zh") or "zh"
# TMDB
if app.get('rmt_tmdbkey'):
# TMDB client
self.tmdb = TMDb()
# API domain
self.tmdb.domain = Config().get_tmdbapi_url()
# Enable caching
self.tmdb.cache = True
# API key
self.tmdb.api_key = app.get('rmt_tmdbkey')
# Language
self.tmdb.language = self._default_language
# Proxies
self.tmdb.proxies = Config().get_proxies()
# Debug mode
self.tmdb.debug = False
# Query objects
self.search = Search()
self.movie = Movie()
self.tv = TV()
self.episode = Episode()
self.find = Find()
self.person = Person()
self.trending = Trending()
self.discover = Discover()
self.genre = Genre()
# Metadata cache
self.meta = MetaHelper()
# ChatGPT
self.openai = OpenAiHelper()
# Match mode
rmt_match_mode = app.get('rmt_match_mode', 'normal')
if rmt_match_mode:
rmt_match_mode = rmt_match_mode.upper()
else:
rmt_match_mode = "NORMAL"
if rmt_match_mode == "STRICT":
self._rmt_match_mode = MatchMode.STRICT
else:
self._rmt_match_mode = MatchMode.NORMAL
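# The block above reads its settings from Config(); the sketch below lists the keys it expects,
# inferred only from the .get(...) calls in this method. This is an illustrative YAML-style
# example, not the authoritative nas-tools config layout.
#   app:
#     rmt_tmdbkey: "<your TMDb API key>"
#     rmt_match_mode: normal      # "strict" disables the year-less fallback searches
#   media:
#     tmdb_language: zh
#   laboratory:
#     search_keyword: true
#     search_tmdbweb: false
#     chatgpt_enable: false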
def __set_language(self, language):
"""
设置语言
:param language: zh/en
"""
if not self.tmdb:
return
if language:
self.tmdb.language = language
else:
self.tmdb.language = self._default_language
@staticmethod
def __compare_tmdb_names(file_name, tmdb_names):
"""
比较文件名是否匹配,忽略大小写和特殊字符
:param file_name: 识别的文件名或者种子名
:param tmdb_names: TMDB返回的译名
:return: True or False
"""
if not file_name or not tmdb_names:
return False
if not isinstance(tmdb_names, list):
tmdb_names = [tmdb_names]
file_name = StringUtils.handler_special_chars(file_name).upper()
for tmdb_name in tmdb_names:
tmdb_name = StringUtils.handler_special_chars(tmdb_name).strip().upper()
if file_name == tmdb_name:
return True
return False
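# Usage sketch (names are hypothetical): the comparison is case-insensitive and relies on
# StringUtils.handler_special_chars to normalise punctuation, so inside this class one would expect
#   self.__compare_tmdb_names("Puss.in.Boots", ["Puss in Boots", "El Gato con Botas"])  # -> True
#   self.__compare_tmdb_names("Puss in Boots 2", "Puss in Boots")                       # -> False
# The exact normalisation depends on handler_special_chars, which is defined elsewhere.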
def __search_tmdb_allnames(self, mtype: MediaType, tmdb_id):
"""
搜索tmdb中所有的标题和译名,用于名称匹配
:param mtype: 类型:电影、电视剧、动漫
:param tmdb_id: TMDB的ID
:return: 所有译名的清单
"""
if not mtype or not tmdb_id:
return {}, []
ret_names = []
tmdb_info = self.get_tmdb_info(mtype=mtype, tmdbid=tmdb_id)
if not tmdb_info:
return tmdb_info, []
if mtype == MediaType.MOVIE:
alternative_titles = tmdb_info.get("alternative_titles", {}).get("titles", [])
for alternative_title in alternative_titles:
title = alternative_title.get("title")
if title and title not in ret_names:
ret_names.append(title)
translations = tmdb_info.get("translations", {}).get("translations", [])
for translation in translations:
title = translation.get("data", {}).get("title")
if title and title not in ret_names:
ret_names.append(title)
else:
alternative_titles = tmdb_info.get("alternative_titles", {}).get("results", [])
for alternative_title in alternative_titles:
name = alternative_title.get("title")
if name and name not in ret_names:
ret_names.append(name)
translations = tmdb_info.get("translations", {}).get("translations", [])
for translation in translations:
name = translation.get("data", {}).get("name")
if name and name not in ret_names:
ret_names.append(name)
return tmdb_info, ret_names
def __search_tmdb(self, file_media_name,
search_type,
first_media_year=None,
media_year=None,
season_number=None):
"""
检索tmdb中的媒体信息,匹配返回一条尽可能正确的信息
:param file_media_name: 检索的名称
:param search_type: 类型:电影、电视剧、动漫
:param first_media_year: 年份,如要是季集需要是首播年份(first_air_date)
:param media_year: 当前季集年份
:param season_number: 季集,整数
:return: TMDB的INFO,同时会将search_type赋值到media_type中
"""
if not self.search:
return None
if not file_media_name:
return None
# TMDB搜索
info = {}
if search_type == MediaType.MOVIE:
year_range = [first_media_year]
if first_media_year:
year_range.append(str(int(first_media_year) + 1))
year_range.append(str(int(first_media_year) - 1))
for year in year_range:
log.debug(
f"【Meta】正在识别{search_type.value}:{file_media_name}, 年份={year} ...")
info = self.__search_movie_by_name(file_media_name, year)
if info:
info['media_type'] = MediaType.MOVIE
log.info("【Meta】%s 识别到 电影:TMDBID=%s, 名称=%s, 上映日期=%s" % (
file_media_name,
info.get('id'),
info.get('title'),
info.get('release_date')))
break
else:
# 有当前季和当前季集年份,使用精确匹配
if media_year and season_number:
log.debug(
f"【Meta】正在识别{search_type.value}:{file_media_name}, 季集={season_number}, 季集年份={media_year} ...")
info = self.__search_tv_by_season(file_media_name,
media_year,
season_number)
if not info:
log.debug(
f"【Meta】正在识别{search_type.value}:{file_media_name}, 年份={StringUtils.xstr(first_media_year)} ...")
info = self.__search_tv_by_name(file_media_name,
first_media_year)
if info:
info['media_type'] = MediaType.TV
log.info("【Meta】%s 识别到 电视剧:TMDBID=%s, 名称=%s, 首播日期=%s" % (
file_media_name,
info.get('id'),
info.get('name'),
info.get('first_air_date')))
# 返回
if not info:
log.info("【Meta】%s 以年份 %s 在TMDB中未找到%s信息!" % (
file_media_name, StringUtils.xstr(first_media_year), search_type.value if search_type else ""))
return info
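# Illustrative example of the movie branch above: for file_media_name="Puss in Boots: The Last Wish"
# and first_media_year="2022", year_range becomes ["2022", "2023", "2021"], and each year is tried
# in turn until __search_movie_by_name returns a match, which is then tagged with
# info['media_type'] = MediaType.MOVIE. TV lookups instead try an exact season/year match first and
# then fall back to a plain name plus first-air-year search.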
def __search_movie_by_name(self, file_media_name, first_media_year):
"""
根据名称查询电影TMDB匹配
:param file_media_name: 识别的文件名或种子名
:param first_media_year: 电影上映日期
:return: 匹配的媒体信息
"""
try:
if first_media_year:
movies = self.search.movies({"query": file_media_name, "year": first_media_year})
else:
movies = self.search.movies({"query": file_media_name})
except TMDbException as err:
log.error(f"【Meta】连接TMDB出错:{str(err)}")
return None
except Exception as e:
log.error(f"【Meta】连接TMDB出错:{str(e)}")
return None
log.debug(f"【Meta】API返回:{str(self.search.total_results)}")
if len(movies) == 0:
log.debug(f"【Meta】{file_media_name} 未找到相关电影信息!")
return {}
else:
info = {}
if first_media_year:
for movie in movies:
if movie.get('release_date'):
if self.__compare_tmdb_names(file_media_name, movie.get('title')) \
and movie.get('release_date')[0:4] == str(first_media_year):
return movie
if self.__compare_tmdb_names(file_media_name, movie.get('original_title')) \
and movie.get('release_date')[0:4] == str(first_media_year):
return movie
else:
for movie in movies:
if self.__compare_tmdb_names(file_media_name, movie.get('title')) \
or self.__compare_tmdb_names(file_media_name, movie.get('original_title')):
return movie
if not info:
index = 0
for movie in movies:
if first_media_year:
if not movie.get('release_date'):
continue
if movie.get('release_date')[0:4] != str(first_media_year):
continue
index += 1
info, names = self.__search_tmdb_allnames(MediaType.MOVIE, movie.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
return info
else:
index += 1
info, names = self.__search_tmdb_allnames(MediaType.MOVIE, movie.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
return info
if index > 5:
break
return {}
def __search_tv_by_name(self, file_media_name, first_media_year):
"""
根据名称查询电视剧TMDB匹配
:param file_media_name: 识别的文件名或者种子名
:param first_media_year: 电视剧的首播年份
:return: 匹配的媒体信息
"""
try:
if first_media_year:
tvs = self.search.tv_shows({"query": file_media_name, "first_air_date_year": first_media_year})
else:
tvs = self.search.tv_shows({"query": file_media_name})
except TMDbException as err:
log.error(f"【Meta】连接TMDB出错:{str(err)}")
return None
except Exception as e:
log.error(f"【Meta】连接TMDB出错:{str(e)}")
return None
log.debug(f"【Meta】API返回:{str(self.search.total_results)}")
if len(tvs) == 0:
log.debug(f"【Meta】{file_media_name} 未找到相关剧集信息!")
return {}
else:
info = {}
if first_media_year:
for tv in tvs:
if tv.get('first_air_date'):
if self.__compare_tmdb_names(file_media_name, tv.get('name')) \
and tv.get('first_air_date')[0:4] == str(first_media_year):
return tv
if self.__compare_tmdb_names(file_media_name, tv.get('original_name')) \
and tv.get('first_air_date')[0:4] == str(first_media_year):
return tv
else:
for tv in tvs:
if self.__compare_tmdb_names(file_media_name, tv.get('name')) \
or self.__compare_tmdb_names(file_media_name, tv.get('original_name')):
return tv
if not info:
index = 0
for tv in tvs:
if first_media_year:
if not tv.get('first_air_date'):
continue
if tv.get('first_air_date')[0:4] != str(first_media_year):
continue
index += 1
info, names = self.__search_tmdb_allnames(MediaType.TV, tv.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
return info
else:
index += 1
info, names = self.__search_tmdb_allnames(MediaType.TV, tv.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
return info
if index > 5:
break
return {}
def __search_tv_by_season(self, file_media_name, media_year, season_number):
"""
根据电视剧的名称和季的年份及序号匹配TMDB
:param file_media_name: 识别的文件名或者种子名
:param media_year: 季的年份
:param season_number: 季序号
:return: 匹配的媒体信息
"""
def __season_match(tv_info, season_year):
if not tv_info:
return False
try:
seasons = self.get_tmdb_tv_seasons(tv_info=tv_info)
for season in seasons:
if season.get("air_date") and season.get("season_number"):
if season.get("air_date")[0:4] == str(season_year) \
and season.get("season_number") == int(season_number):
return True
except Exception as e1:
log.error(f"【Meta】连接TMDB出错:{e1}")
return False
return False
try:
tvs = self.search.tv_shows({"query": file_media_name})
except TMDbException as err:
log.error(f"【Meta】连接TMDB出错:{str(err)}")
return None
except Exception as e:
log.error(f"【Meta】连接TMDB出错:{e}")
return None
if len(tvs) == 0:
log.debug("【Meta】%s 未找到季%s相关信息!" % (file_media_name, season_number))
return {}
else:
for tv in tvs:
if (self.__compare_tmdb_names(file_media_name, tv.get('name'))
or self.__compare_tmdb_names(file_media_name, tv.get('original_name'))) \
and (tv.get('first_air_date') and tv.get('first_air_date')[0:4] == str(media_year)):
return tv
for tv in tvs[:5]:
info, names = self.__search_tmdb_allnames(MediaType.TV, tv.get("id"))
if not self.__compare_tmdb_names(file_media_name, names):
continue
if __season_match(tv_info=info, season_year=media_year):
return info
return {}
def __search_multi_tmdb(self, file_media_name):
"""
根据名称同时查询电影和电视剧,不带年份
:param file_media_name: 识别的文件名或种子名
:return: 匹配的媒体信息
"""
try:
multis = self.search.multi({"query": file_media_name}) or []
except TMDbException as err:
log.error(f"【Meta】连接TMDB出错:{str(err)}")
return None
except Exception as e:
log.error(f"【Meta】连接TMDB出错:{str(e)}")
return None
log.debug(f"【Meta】API返回:{str(self.search.total_results)}")
if len(multis) == 0:
log.debug(f"【Meta】{file_media_name} 未找到相关媒体息!")
return {}
else:
info = {}
for multi in multis:
if multi.get("media_type") == "movie":
if self.__compare_tmdb_names(file_media_name, multi.get('title')) \
or self.__compare_tmdb_names(file_media_name, multi.get('original_title')):
info = multi
elif multi.get("media_type") == "tv":
if self.__compare_tmdb_names(file_media_name, multi.get('name')) \
or self.__compare_tmdb_names(file_media_name, multi.get('original_name')):
info = multi
if not info:
for multi in multis[:5]:
if multi.get("media_type") == "movie":
movie_info, names = self.__search_tmdb_allnames(MediaType.MOVIE, multi.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
info = movie_info
elif multi.get("media_type") == "tv":
tv_info, names = self.__search_tmdb_allnames(MediaType.TV, multi.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
info = tv_info
# 返回
if info:
info['media_type'] = MediaType.MOVIE if info.get('media_type') in ['movie',
MediaType.MOVIE] else MediaType.TV
else:
log.info("【Meta】%s 在TMDB中未找到媒体信息!" % file_media_name)
return info
@lru_cache(maxsize=512)
def __search_chatgpt(self, file_name, mtype: MediaType):
"""
通过ChatGPT对话识别文件名和集数等信息,重新查询TMDB数据
:param file_name: 名称
:param mtype: 媒体类型
:return: 类型、季、集、TMDBINFO
"""
def __failed():
return mtype, None, None, {}
def __failed_none():
return mtype, None, None, None
if not file_name:
return __failed_none()
log.info("【Meta】正在通过ChatGPT识别文件名:%s" % file_name)
file_info = self.openai.get_media_name(file_name)
if file_info is None:
log.info("【Meta】ChatGPT识别出错,请检查是否设置OpenAI ApiKey!")
return __failed_none()
if not file_info:
log.info("【Meta】ChatGPT识别失败!")
return __failed()
else:
log.info("【Meta】ChatGPT识别结果:%s" % file_info)
if file_info.get("season") or file_info.get("episode"):
mtype = MediaType.TV
# 处理标题和年份
file_title, file_year, season_number = None, None, None
if file_info.get("title"):
file_title = str(file_info.get("title")).split("/")[0].strip().replace(".", " ")
if file_info.get("year"):
file_year = str(file_info.get("year")).split("/")[0].strip()
if not file_title:
return __failed()
if not str(file_year).isdigit():
file_year = None
if mtype != MediaType.MOVIE or file_info.get("year"):
tmdb_info = self.__search_tmdb(file_media_name=file_title,
search_type=mtype,
first_media_year=file_year)
else:
tmdb_info = self.__search_multi_tmdb(file_media_name=file_title)
return mtype, file_info.get("season"), file_info.get("episode"), tmdb_info
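# Expected shape of file_info returned by OpenAiHelper.get_media_name, inferred only from the keys
# read above (the actual helper may return more fields):
#   {"title": "The Last of Us", "year": "2023", "season": 1, "episode": 1}
# None means the OpenAI call itself failed (e.g. no API key configured); an empty-but-not-None
# result means ChatGPT could not recognize the name.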
@lru_cache(maxsize=512)
def __search_tmdb_web(self, file_media_name, mtype: MediaType):
"""
搜索TMDB网站,直接抓取结果,结果只有一条时才返回
:param file_media_name: 名称
"""
if not file_media_name:
return None
if StringUtils.is_chinese(file_media_name):
return {}
log.info("【Meta】正在从TheDbMovie网站查询:%s ..." % file_media_name)
tmdb_url = "https://www.themoviedb.org/search?query=%s" % file_media_name
res = RequestUtils(timeout=5).get_res(url=tmdb_url)
if res and res.status_code == 200:
html_text = res.text
if not html_text:
return None
try:
tmdb_links = []
html = etree.HTML(html_text)
if mtype == MediaType.TV:
links = html.xpath("//a[@data-id and @data-media-type='tv']/@href")
else:
links = html.xpath("//a[@data-id]/@href")
for link in links:
if not link or (not link.startswith("/tv") and not link.startswith("/movie")):
continue
if link not in tmdb_links:
tmdb_links.append(link)
if len(tmdb_links) == 1:
tmdbinfo = self.get_tmdb_info(
mtype=MediaType.TV if tmdb_links[0].startswith("/tv") else MediaType.MOVIE,
tmdbid=tmdb_links[0].split("/")[-1])
if tmdbinfo:
if mtype == MediaType.TV and tmdbinfo.get('media_type') != MediaType.TV:
return {}
if tmdbinfo.get('media_type') == MediaType.MOVIE:
log.info("【Meta】%s 从WEB识别到 电影:TMDBID=%s, 名称=%s, 上映日期=%s" % (
file_media_name,
tmdbinfo.get('id'),
tmdbinfo.get('title'),
tmdbinfo.get('release_date')))
else:
log.info("【Meta】%s 从WEB识别到 电视剧:TMDBID=%s, 名称=%s, 首播日期=%s" % (
file_media_name,
tmdbinfo.get('id'),
tmdbinfo.get('name'),
tmdbinfo.get('first_air_date')))
return tmdbinfo
elif len(tmdb_links) > 1:
log.info("【Meta】%s TMDB网站返回数据过多:%s" % (file_media_name, len(tmdb_links)))
else:
log.info("【Meta】%s TMDB网站未查询到媒体信息!" % file_media_name)
except Exception as err:
print(str(err))
return None
return None
def search_tmdb_person(self, name):
"""
搜索TMDB演员信息
"""
if not self.search:
return []
try:
return self.__dict_tmdbpersons(self.search.people({"query": name}))
except Exception as err:
print(str(err))
return []
def get_tmdb_info(self, mtype: MediaType,
tmdbid,
language=None,
append_to_response=None,
chinese=True):
"""
给定TMDB号,查询一条媒体信息
:param mtype: 类型:电影、电视剧、动漫,为空时都查(此时用不上年份)
:param tmdbid: TMDB的ID,有tmdbid时优先使用tmdbid,否则使用年份和标题
:param language: 语种
:param append_to_response: 附加信息
:param chinese: 是否转换中文标题
"""
if not self.tmdb:
log.error("【Meta】TMDB API Key 未设置!")
return None
# 设置语言
self.__set_language(language)
if mtype == MediaType.MOVIE:
tmdb_info = self.__get_tmdb_movie_detail(tmdbid, append_to_response)
if tmdb_info:
tmdb_info['media_type'] = MediaType.MOVIE
else:
tmdb_info = self.__get_tmdb_tv_detail(tmdbid, append_to_response)
if tmdb_info:
tmdb_info['media_type'] = MediaType.TV
if tmdb_info:
# 转换genreid
tmdb_info['genre_ids'] = self.__get_genre_ids_from_detail(tmdb_info.get('genres'))
# 转换中文标题
if chinese:
tmdb_info = self.__update_tmdbinfo_cn_title(tmdb_info)
return tmdb_info
def __update_tmdbinfo_cn_title(self, tmdb_info):
"""
更新TMDB信息中的中文名称
"""
# 查找中文名
org_title = tmdb_info.get("title") \
if tmdb_info.get("media_type") == MediaType.MOVIE \
else tmdb_info.get("name")
if not StringUtils.is_chinese(org_title) \
and self._default_language == 'zh':
cn_title = self.__get_tmdb_chinese_title(tmdbinfo=tmdb_info)
if cn_title and cn_title != org_title:
if tmdb_info.get("media_type") == MediaType.MOVIE:
tmdb_info['title'] = cn_title
else:
tmdb_info['name'] = cn_title
return tmdb_info
def get_tmdb_infos(self, title, year=None, mtype: MediaType = None, language=None, page=1):
"""
查询名称中有关键字的所有的TMDB信息并返回
"""
if not self.tmdb:
log.error("【Meta】TMDB API Key 未设置!")
return []
if not title:
return []
# 设置语言
self.__set_language(language)
if not mtype and not year:
results = self.__search_multi_tmdbinfos(title)
else:
if not mtype:
results = list(
set(self.__search_movie_tmdbinfos(title, year)).union(set(self.__search_tv_tmdbinfos(title, year))))
# 组合结果的情况下要排序
results = sorted(results,
key=lambda x: x.get("release_date") or x.get("first_air_date") or "0000-00-00",
reverse=True)
elif mtype == MediaType.MOVIE:
results = self.__search_movie_tmdbinfos(title, year)
else:
results = self.__search_tv_tmdbinfos(title, year)
return results[(page - 1) * 20:page * 20]
def __search_multi_tmdbinfos(self, title):
"""
同时查询模糊匹配的电影、电视剧TMDB信息
"""
if not title:
return []
ret_infos = []
multis = self.search.multi({"query": title}) or []
for multi in multis:
if multi.get("media_type") in ["movie", "tv"]:
multi['media_type'] = MediaType.MOVIE if multi.get("media_type") == "movie" else MediaType.TV
ret_infos.append(multi)
return ret_infos
def __search_movie_tmdbinfos(self, title, year):
"""
查询模糊匹配的所有电影TMDB信息
"""
if not title:
return []
ret_infos = []
if year:
movies = self.search.movies({"query": title, "year": year}) or []
else:
movies = self.search.movies({"query": title}) or []
for movie in movies:
if title in movie.get("title"):
movie['media_type'] = MediaType.MOVIE
ret_infos.append(movie)
return ret_infos
def __search_tv_tmdbinfos(self, title, year):
"""
查询模糊匹配的所有电视剧TMDB信息
"""
if not title:
return []
ret_infos = []
if year:
tvs = self.search.tv_shows({"query": title, "first_air_date_year": year}) or []
else:
tvs = self.search.tv_shows({"query": title}) or []
for tv in tvs:
if title in tv.get("name"):
tv['media_type'] = MediaType.TV
ret_infos.append(tv)
return ret_infos
@staticmethod
def __make_cache_key(meta_info):
"""
生成缓存的key
"""
if not meta_info:
return None
return f"[{meta_info.type.value}]{meta_info.get_name()}-{meta_info.year}-{meta_info.begin_season}"
def get_cache_info(self, meta_info):
"""
根据名称查询是否已经有缓存
"""
if not meta_info:
return {}
return self.meta.get_meta_data_by_key(self.__make_cache_key(meta_info))
def __fill_media_info(self, meta_info, cache=True, strict=None, chinese=True, append_to_response=None):
"""
中文和英文共同检索
:param meta_info: MetaInfo对象
:param cache: 是否使用缓存,默认TRUE
:param strict: 是否严格模式,为true时,不会再去掉年份再查一次
:param chinese: 原标题为英文时是否从别名中检索中文名称
:param append_to_response: 额外查询的信息
:return: 带有TMDB信息的MetaInfo对象
"""
file_media_info = None
first_search_name = meta_info.cn_name
second_search_name = meta_info.en_name
if Config().get_config("laboratory").get("search_en_title"):
first_search_name = meta_info.en_name
second_search_name = meta_info.cn_name
if first_search_name:
file_media_info = self.__fill_media_info_by_name(first_search_name, meta_info, cache, strict, chinese, append_to_response)
if not file_media_info and second_search_name:
file_media_info = self.__fill_media_info_by_name(second_search_name, meta_info, cache, strict, chinese, append_to_response)
return file_media_info
def __fill_media_info_by_name(self, title, meta_info, cache=True, strict=None, chinese=True, append_to_response=None):
media_key = self.__make_cache_key(meta_info)
if not cache or not self.meta.get_meta_data_by_key(media_key):
# 缓存没有或者强制不使用缓存
if meta_info.type != MediaType.TV and not meta_info.year:
file_media_info = self.__search_multi_tmdb(file_media_name=meta_info.get_name())
else:
if meta_info.type == MediaType.TV:
# 确定是电视
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
first_media_year=meta_info.year,
search_type=meta_info.type,
media_year=meta_info.year,
season_number=meta_info.begin_season
)
if not file_media_info and meta_info.year and self._rmt_match_mode == MatchMode.NORMAL and not strict:
# 非严格模式下去掉年份再查一次
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
search_type=meta_info.type
)
else:
# 有年份先按电影查
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
first_media_year=meta_info.year,
search_type=MediaType.MOVIE
)
# 没有再按电视剧查
if not file_media_info:
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
first_media_year=meta_info.year,
search_type=MediaType.TV
)
if not file_media_info and self._rmt_match_mode == MatchMode.NORMAL and not strict:
# 非严格模式下去掉年份和类型再查一次
file_media_info = self.__search_multi_tmdb(file_media_name=meta_info.get_name())
if not file_media_info and self._search_tmdbweb:
# 从网站查询
file_media_info = self.__search_tmdb_web(file_media_name=meta_info.get_name(),
mtype=meta_info.type)
if not file_media_info and self._chatgpt_enable:
# Query via ChatGPT
mtype, seasons, episodes, file_media_info = self.__search_chatgpt(file_name=title,
mtype=meta_info.type)
# Correct the media type, season and episode numbers
meta_info.type = mtype
if not meta_info.get_season_string():
meta_info.set_season(seasons)
if not meta_info.get_episode_string():
meta_info.set_episode(episodes)
if not file_media_info and self._search_keyword:
# 关键字猜测
cache_name = cacheman["tmdb_supply"].get(meta_info.get_name())
is_movie = False
if not cache_name:
cache_name, is_movie = self.__search_engine(meta_info.get_name())
cacheman["tmdb_supply"].set(meta_info.get_name(), cache_name)
if cache_name:
log.info("【Meta】开始辅助查询:%s ..." % cache_name)
if is_movie:
file_media_info = self.__search_tmdb(file_media_name=cache_name, search_type=MediaType.MOVIE)
else:
file_media_info = self.__search_multi_tmdb(file_media_name=cache_name)
# 补充全量信息
if file_media_info and not file_media_info.get("genres"):
file_media_info = self.get_tmdb_info(mtype=file_media_info.get("media_type"),
tmdbid=file_media_info.get("id"),
chinese=chinese,
append_to_response=append_to_response)
# 保存到缓存
if file_media_info is not None:
self.__insert_media_cache(media_key=media_key,
file_media_info=file_media_info)
else:
# 使用缓存信息
cache_info = self.meta.get_meta_data_by_key(media_key)
if cache_info.get("id"):
file_media_info = self.get_tmdb_info(mtype=cache_info.get("type"),
tmdbid=cache_info.get("id"),
chinese=chinese,
append_to_response=append_to_response)
else:
file_media_info = None
return file_media_info
def get_media_info(self, title,
subtitle=None,
mtype=None,
strict=None,
cache=True,
chinese=True,
append_to_response=None):
"""
只有名称信息,判别是电影还是电视剧并搜刮TMDB信息,用于种子名称识别
:param title: 种子名称
:param subtitle: 种子副标题
:param mtype: 类型:电影、电视剧、动漫
:param strict: 是否严格模式,为true时,不会再去掉年份再查一次
:param cache: 是否使用缓存,默认TRUE
:param chinese: 原标题为英文时是否从别名中检索中文名称
:param append_to_response: 额外查询的信息
:return: 带有TMDB信息的MetaInfo对象
"""
if not self.tmdb:
log.error("【Meta】TMDB API Key 未设置!")
return None
if not title:
return None
# 识别
meta_info = MetaInfo(title, subtitle=subtitle)
if not meta_info.get_name() or not meta_info.type:
log.warn("【Rmt】%s 未识别出有效信息!" % meta_info.org_string)
return None
if mtype:
meta_info.type = mtype
file_media_info = self.__fill_media_info(meta_info, cache, strict, chinese, append_to_response)
# 赋值TMDB信息并返回
meta_info.set_tmdb_info(file_media_info)
return meta_info
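# Usage sketch (assumes a valid TMDb API key is configured; attribute names follow how MetaInfo is
# used elsewhere in this file):
#   media = Media()
#   meta = media.get_media_info("The.Last.of.Us.S01E01.1080p.WEB-DL.x265")
#   if meta and meta.tmdb_id:
#       print(meta.type, meta.tmdb_id, meta.get_name())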
def __insert_media_cache(self, media_key, file_media_info):
"""
将TMDB信息插入缓存
"""
if file_media_info:
# 缓存标题
cache_title = file_media_info.get(
"title") if file_media_info.get(
"media_type") == MediaType.MOVIE else file_media_info.get("name")
# 缓存年份
cache_year = file_media_info.get('release_date') if file_media_info.get(
"media_type") == MediaType.MOVIE else file_media_info.get('first_air_date')
if cache_year:
cache_year = cache_year[:4]
self.meta.update_meta_data({
media_key: {
"id": file_media_info.get("id"),
"type": file_media_info.get("media_type"),
"year": cache_year,
"title": cache_title,
"poster_path": file_media_info.get("poster_path"),
"backdrop_path": file_media_info.get("backdrop_path")
}
})
else:
self.meta.update_meta_data({media_key: {'id': 0}})
def get_media_info_on_files(self,
file_list,
tmdb_info=None,
media_type=None,
season=None,
episode_format: EpisodeFormat = None,
language=None,
chinese=True,
append_to_response=None):
"""
根据文件清单,搜刮TMDB信息,用于文件名称的识别
:param file_list: 文件清单,如果是列表也可以是单个文件,也可以是一个目录
:param tmdb_info: 如有传入TMDB信息则以该TMDB信息赋于所有文件,否则按名称从TMDB搜索,用于手工识别时传入
:param media_type: 媒体类型:电影、电视剧、动漫,如有传入以该类型赋于所有文件,否则按名称从TMDB搜索并识别
:param season: 季号,如有传入以该季号赋于所有文件,否则从名称中识别
:param episode_format: EpisodeFormat
:param language: 语言
:param chinese: 原标题为英文时是否从别名中搜索中文名称
:param append_to_response: 附加信息
:return: 带有TMDB信息的每个文件对应的MetaInfo对象字典
"""
# 存储文件路径与媒体的对应关系
if not self.tmdb:
log.error("【Meta】TMDB API Key 未设置!")
return {}
# 设置语言
self.__set_language(language)
# 返回结果
return_media_infos = {}
# 不是list的转为list
if not isinstance(file_list, list):
file_list = [file_list]
# 遍历每个文件,看得出来的名称是不是不一样,不一样的先搜索媒体信息
for file_path in file_list:
try:
if not os.path.exists(file_path):
log.warn("【Meta】%s 不存在" % file_path)
continue
# 解析媒体名称
# 先用自己的名称
file_name = os.path.basename(file_path)
parent_name = os.path.basename(os.path.dirname(file_path))
parent_parent_name = os.path.basename(PathUtils.get_parent_paths(file_path, 2))
# 过滤掉蓝光原盘目录下的子文件
if not os.path.isdir(file_path) \
and PathUtils.get_bluray_dir(file_path):
log.info("【Meta】%s 跳过蓝光原盘文件:" % file_path)
continue
# 没有自带TMDB信息
if not tmdb_info:
# 识别名称
meta_info = MetaInfo(title=file_name)
# 识别不到则使用上级的名称
if not meta_info.get_name() or not meta_info.year:
parent_info = MetaInfo(parent_name)
if not parent_info.get_name() or not parent_info.year:
parent_parent_info = MetaInfo(parent_parent_name)
parent_info.type = parent_parent_info.type if parent_parent_info.type and parent_info.type != MediaType.TV else parent_info.type
parent_info.cn_name = parent_parent_info.cn_name if parent_parent_info.cn_name else parent_info.cn_name
parent_info.en_name = parent_parent_info.en_name if parent_parent_info.en_name else parent_info.en_name
parent_info.year = parent_parent_info.year if parent_parent_info.year else parent_info.year
parent_info.begin_season = NumberUtils.max_ele(parent_info.begin_season,
parent_parent_info.begin_season)
if not meta_info.get_name():
meta_info.cn_name = parent_info.cn_name
meta_info.en_name = parent_info.en_name
if not meta_info.year:
meta_info.year = parent_info.year
if parent_info.type and parent_info.type == MediaType.TV \
and meta_info.type != MediaType.TV:
meta_info.type = parent_info.type
if meta_info.type == MediaType.TV:
meta_info.begin_season = NumberUtils.max_ele(parent_info.begin_season,
meta_info.begin_season)
if not meta_info.get_name() or not meta_info.type:
log.warn("【Rmt】%s 未识别出有效信息!" % meta_info.org_string)
continue
# Match against the cache and TMDB
file_media_info = self.__fill_media_info(meta_info, True, False, chinese)
meta_info.set_tmdb_info(file_media_info)
# 自带TMDB信息
else:
meta_info = MetaInfo(title=file_name, mtype=media_type)
meta_info.set_tmdb_info(tmdb_info)
if season and meta_info.type != MediaType.MOVIE:
meta_info.begin_season = int(season)
if episode_format:
begin_ep, end_ep, part = episode_format.split_episode(file_name)
if begin_ep is not None:
meta_info.begin_episode = begin_ep
meta_info.part = part
if end_ep is not None:
meta_info.end_episode = end_ep
# 加入缓存
self.save_rename_cache(file_name, tmdb_info)
# Store keyed by file path
return_media_infos[file_path] = meta_info
except Exception as err:
print(str(err))
log.error("【Rmt】发生错误:%s - %s" % (str(err), traceback.format_exc()))
# 循环结束
return return_media_infos
def __dict_tmdbpersons(self, infos, chinese=True):
"""
TMDB人员信息转为字典
"""
if not infos:
return []
ret_infos = []
for info in infos:
if chinese:
name = self.get_tmdbperson_chinese_name(person_id=info.get("id")) or info.get("name")
else:
name = info.get("name")
tmdbid = info.get("id")
image = Config().get_tmdbimage_url(info.get("profile_path"), prefix="h632") \
if info.get("profile_path") else ""
ret_infos.append({
"id": tmdbid,
"name": name,
"role": info.get("name") if info.get("name") != name else "",
"image": image
})
return ret_infos
@staticmethod
def __dict_tmdbinfos(infos, mtype=None):
"""
TMDB电影信息转为字典
"""
if not infos:
return []
ret_infos = []
for info in infos:
tmdbid = info.get("id")
vote = round(float(info.get("vote_average")), 1) if info.get("vote_average") else 0
image = Config().get_tmdbimage_url(info.get("poster_path"))
overview = info.get("overview")
if mtype:
media_type = mtype.value
year = info.get("release_date")[0:4] if info.get(
"release_date") and mtype == MediaType.MOVIE else info.get(
"first_air_date")[0:4] if info.get(
"first_air_date") else ""
typestr = 'MOV' if mtype == MediaType.MOVIE else 'TV'
title = info.get("title") if mtype == MediaType.MOVIE else info.get("name")
else:
media_type = MediaType.MOVIE.value if info.get(
"media_type") == "movie" else MediaType.TV.value
year = info.get("release_date")[0:4] if info.get(
"release_date") and info.get(
"media_type") == "movie" else info.get(
"first_air_date")[0:4] if info.get(
"first_air_date") else ""
typestr = 'MOV' if info.get("media_type") == "movie" else 'TV'
title = info.get("title") if info.get("media_type") == "movie" else info.get("name")
ret_infos.append({
'id': tmdbid,
'orgid': tmdbid,
'tmdbid': tmdbid,
'title': title,
'type': typestr,
'media_type': media_type,
'year': year,
'vote': vote,
'image': image,
'overview': overview
})
return ret_infos
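# Shape of each item returned above (values are illustrative, taken loosely from the sample movie
# detail further down in this file):
#   {"id": 315162, "orgid": 315162, "tmdbid": 315162, "title": "穿靴子的猫2", "type": "MOV",
#    "media_type": MediaType.MOVIE.value, "year": "2022", "vote": 8.6, "image": "<poster url>",
#    "overview": "..."}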
def get_tmdb_hot_movies(self, page):
"""
获取热门电影
:param page: 第几页
:return: TMDB信息列表
"""
if not self.movie:
return []
return self.__dict_tmdbinfos(self.movie.popular(page), MediaType.MOVIE)
def get_tmdb_hot_tvs(self, page):
"""
获取热门电视剧
:param page: 第几页
:return: TMDB信息列表
"""
if not self.tv:
return []
return self.__dict_tmdbinfos(self.tv.popular(page), MediaType.TV)
def get_tmdb_new_movies(self, page):
"""
获取最新电影
:param page: 第几页
:return: TMDB信息列表
"""
if not self.movie:
return []
return self.__dict_tmdbinfos(self.movie.now_playing(page), MediaType.MOVIE)
def get_tmdb_new_tvs(self, page):
"""
获取最新电视剧
:param page: 第几页
:return: TMDB信息列表
"""
if not self.tv:
return []
return self.__dict_tmdbinfos(self.tv.on_the_air(page), MediaType.TV)
def get_tmdb_upcoming_movies(self, page):
"""
获取即将上映电影
:param page: 第几页
:return: TMDB信息列表
"""
if not self.movie:
return []
return self.__dict_tmdbinfos(self.movie.upcoming(page), MediaType.MOVIE)
def get_tmdb_trending_all_week(self, page=1):
"""
Get this week's trending movies and TV shows
:param page: page number
:return: list of TMDB info
"""
if not self.trending:
return []
return self.__dict_tmdbinfos(self.trending.all_week(page=page))
def __get_tmdb_movie_detail(self, tmdbid, append_to_response=None):
"""
获取电影的详情
:param tmdbid: TMDB ID
:return: TMDB信息
"""
"""
{
"adult": false,
"backdrop_path": "/r9PkFnRUIthgBp2JZZzD380MWZy.jpg",
"belongs_to_collection": {
"id": 94602,
"name": "穿靴子的猫(系列)",
"poster_path": "/anHwj9IupRoRZZ98WTBvHpTiE6A.jpg",
"backdrop_path": "/feU1DWV5zMWxXUHJyAIk3dHRQ9c.jpg"
},
"budget": 90000000,
"genres": [
{
"id": 16,
"name": "动画"
},
{
"id": 28,
"name": "动作"
},
{
"id": 12,
"name": "冒险"
},
{
"id": 35,
"name": "喜剧"
},
{
"id": 10751,
"name": "家庭"
},
{
"id": 14,
"name": "奇幻"
}
],
"homepage": "",
"id": 315162,
"imdb_id": "tt3915174",
"original_language": "en",
"original_title": "Puss in Boots: The Last Wish",
"overview": "时隔11年,臭屁自大又爱卖萌的猫大侠回来了!如今的猫大侠(安东尼奥·班德拉斯 配音),依旧幽默潇洒又不拘小节、数次“花式送命”后,九条命如今只剩一条,于是不得不请求自己的老搭档兼“宿敌”——迷人的软爪妞(萨尔玛·海耶克 配音)来施以援手来恢复自己的九条生命。",
"popularity": 8842.129,
"poster_path": "/rnn30OlNPiC3IOoWHKoKARGsBRK.jpg",
"production_companies": [
{
"id": 33,
"logo_path": "/8lvHyhjr8oUKOOy2dKXoALWKdp0.png",
"name": "Universal Pictures",
"origin_country": "US"
},
{
"id": 521,
"logo_path": "/kP7t6RwGz2AvvTkvnI1uteEwHet.png",
"name": "DreamWorks Animation",
"origin_country": "US"
}
],
"production_countries": [
{
"iso_3166_1": "US",
"name": "United States of America"
}
],
"release_date": "2022-12-07",
"revenue": 260725470,
"runtime": 102,
"spoken_languages": [
{
"english_name": "English",
"iso_639_1": "en",
"name": "English"
},
{
"english_name": "Spanish",
"iso_639_1": "es",
"name": "Español"
}
],
"status": "Released",
"tagline": "",
"title": "穿靴子的猫2",
"video": false,
"vote_average": 8.614,
"vote_count": 2291
}
"""
if not self.movie:
return {}
try:
log.info("【Meta】正在查询TMDB电影:%s ..." % tmdbid)
tmdbinfo = self.movie.details(tmdbid, append_to_response)
if tmdbinfo:
log.info(f"【Meta】{tmdbid} 查询结果:{tmdbinfo.get('title')}")
return tmdbinfo or {}
except Exception as e:
print(str(e))
return None
def __get_tmdb_tv_detail(self, tmdbid, append_to_response=None):
"""
获取电视剧的详情
:param tmdbid: TMDB ID
:return: TMDB信息
"""
"""
{
"adult": false,
"backdrop_path": "/uDgy6hyPd82kOHh6I95FLtLnj6p.jpg",
"created_by": [
{
"id": 35796,
"credit_id": "5e84f06a3344c600153f6a57",
"name": "Craig Mazin",
"gender": 2,
"profile_path": "/uEhna6qcMuyU5TP7irpTUZ2ZsZc.jpg"
},
{
"id": 1295692,
"credit_id": "5e84f03598f1f10016a985c0",
"name": "Neil Druckmann",
"gender": 2,
"profile_path": "/bVUsM4aYiHbeSYE1xAw2H5Z1ANU.jpg"
}
],
"episode_run_time": [],
"first_air_date": "2023-01-15",
"genres": [
{
"id": 18,
"name": "剧情"
},
{
"id": 10765,
"name": "Sci-Fi & Fantasy"
},
{
"id": 10759,
"name": "动作冒险"
}
],
"homepage": "https://www.hbo.com/the-last-of-us",
"id": 100088,
"in_production": true,
"languages": [
"en"
],
"last_air_date": "2023-01-15",
"last_episode_to_air": {
"air_date": "2023-01-15",
"episode_number": 1,
"id": 2181581,
"name": "当你迷失在黑暗中",
"overview": "在一场全球性的流行病摧毁了文明之后,一个顽强的幸存者负责照顾一个 14 岁的小女孩,她可能是人类最后的希望。",
"production_code": "",
"runtime": 81,
"season_number": 1,
"show_id": 100088,
"still_path": "/aRquEWm8wWF1dfa9uZ1TXLvVrKD.jpg",
"vote_average": 8,
"vote_count": 33
},
"name": "最后生还者",
"next_episode_to_air": {
"air_date": "2023-01-22",
"episode_number": 2,
"id": 4071039,
"name": "虫草变异菌",
"overview": "",
"production_code": "",
"runtime": 55,
"season_number": 1,
"show_id": 100088,
"still_path": "/jkUtYTmeap6EvkHI4n0j5IRFrIr.jpg",
"vote_average": 10,
"vote_count": 1
},
"networks": [
{
"id": 49,
"name": "HBO",
"logo_path": "/tuomPhY2UtuPTqqFnKMVHvSb724.png",
"origin_country": "US"
}
],
"number_of_episodes": 9,
"number_of_seasons": 1,
"origin_country": [
"US"
],
"original_language": "en",
"original_name": "The Last of Us",
"overview": "不明真菌疫情肆虐之后的美国,被真菌感染的人都变成了可怕的怪物,乔尔(Joel)为了换回武器答应将小女孩儿艾莉(Ellie)送到指定地点,由此开始了两人穿越美国的漫漫旅程。",
"popularity": 5585.639,
"poster_path": "/nOY3VBFO0VnlN9nlRombnMTztyh.jpg",
"production_companies": [
{
"id": 3268,
"logo_path": "/tuomPhY2UtuPTqqFnKMVHvSb724.png",
"name": "HBO",
"origin_country": "US"
},
{
"id": 11073,
"logo_path": "/aCbASRcI1MI7DXjPbSW9Fcv9uGR.png",
"name": "Sony Pictures Television Studios",
"origin_country": "US"
},
{
"id": 23217,
"logo_path": "/kXBZdQigEf6QiTLzo6TFLAa7jKD.png",
"name": "Naughty Dog",
"origin_country": "US"
},
{
"id": 115241,
"logo_path": null,
"name": "The Mighty Mint",
"origin_country": "US"
},
{
"id": 119645,
"logo_path": null,
"name": "Word Games",
"origin_country": "US"
},
{
"id": 125281,
"logo_path": "/3hV8pyxzAJgEjiSYVv1WZ0ZYayp.png",
"name": "PlayStation Productions",
"origin_country": "US"
}
],
"production_countries": [
{
"iso_3166_1": "US",
"name": "United States of America"
}
],
"seasons": [
{
"air_date": "2023-01-15",
"episode_count": 9,
"id": 144593,
"name": "第 1 季",
"overview": "",
"poster_path": "/aUQKIpZZ31KWbpdHMCmaV76u78T.jpg",
"season_number": 1
}
],
"spoken_languages": [
{
"english_name": "English",
"iso_639_1": "en",
"name": "English"
}
],
"status": "Returning Series",
"tagline": "",
"type": "Scripted",
"vote_average": 8.924,
"vote_count": 601
}
"""
if not self.tv:
return {}
try:
log.info("【Meta】正在查询TMDB电视剧:%s ..." % tmdbid)
tmdbinfo = self.tv.details(tmdbid, append_to_response)
if tmdbinfo:
log.info(f"【Meta】{tmdbid} 查询结果:{tmdbinfo.get('name')}")
return tmdbinfo or {}
except Exception as e:
print(str(e))
return None
def get_tmdb_tv_season_detail(self, tmdbid, season: int):
"""
获取电视剧季的详情
:param tmdbid: TMDB ID
:param season: 季,数字
:return: TMDB信息
"""
"""
{
"_id": "5e614cd3357c00001631a6ef",
"air_date": "2023-01-15",
"episodes": [
{
"air_date": "2023-01-15",
"episode_number": 1,
"id": 2181581,
"name": "当你迷失在黑暗中",
"overview": "在一场全球性的流行病摧毁了文明之后,一个顽强的幸存者负责照顾一个 14 岁的小女孩,她可能是人类最后的希望。",
"production_code": "",
"runtime": 81,
"season_number": 1,
"show_id": 100088,
"still_path": "/aRquEWm8wWF1dfa9uZ1TXLvVrKD.jpg",
"vote_average": 8,
"vote_count": 33,
"crew": [
{
"job": "Writer",
"department": "Writing",
"credit_id": "619c370063536a00619a08ee",
"adult": false,
"gender": 2,
"id": 35796,
"known_for_department": "Writing",
"name": "Craig Mazin",
"original_name": "Craig Mazin",
"popularity": 15.211,
"profile_path": "/uEhna6qcMuyU5TP7irpTUZ2ZsZc.jpg"
},
],
"guest_stars": [
{
"character": "Marlene",
"credit_id": "63c4ca5e5f2b8d00aed539fc",
"order": 500,
"adult": false,
"gender": 1,
"id": 1253388,
"known_for_department": "Acting",
"name": "Merle Dandridge",
"original_name": "Merle Dandridge",
"popularity": 21.679,
"profile_path": "/lKwHdTtDf6NGw5dUrSXxbfkZLEk.jpg"
}
]
},
],
"name": "第 1 季",
"overview": "",
"id": 144593,
"poster_path": "/aUQKIpZZ31KWbpdHMCmaV76u78T.jpg",
"season_number": 1
}
"""
if not self.tv:
return {}
try:
log.info("【Meta】正在查询TMDB电视剧:%s,季:%s ..." % (tmdbid, season))
tmdbinfo = self.tv.season_details(tmdbid, season)
return tmdbinfo or {}
except Exception as e:
print(str(e))
return {}
def get_tmdb_tv_seasons_byid(self, tmdbid):
"""
根据TMDB查询TMDB电视剧的所有季
"""
if not tmdbid:
return []
return self.get_tmdb_tv_seasons(
tv_info=self.__get_tmdb_tv_detail(
tmdbid=tmdbid
)
)
@staticmethod
def get_tmdb_tv_seasons(tv_info):
"""
查询TMDB电视剧的所有季
:param tv_info: TMDB 的季信息
:return: 带有season_number、episode_count 的每季总集数的字典列表
"""
"""
"seasons": [
{
"air_date": "2006-01-08",
"episode_count": 11,
"id": 3722,
"name": "特别篇",
"overview": "",
"poster_path": "/snQYndfsEr3Sto2jOmkmsQuUXAQ.jpg",
"season_number": 0
},
{
"air_date": "2005-03-27",
"episode_count": 9,
"id": 3718,
"name": "第 1 季",
"overview": "",
"poster_path": "/foM4ImvUXPrD2NvtkHyixq5vhPx.jpg",
"season_number": 1
}
]
"""
if not tv_info:
return []
ret_info = []
for info in tv_info.get("seasons") or []:
if not info.get("season_number"):
continue
ret_info.append({
"air_date": info.get("air_date"),
"episode_count": info.get("episode_count"),
"id": info.get("id"),
"name": info.get("name"),
"overview": info.get("overview"),
"poster_path": Config().get_tmdbimage_url(info.get("poster_path")) if info.get("poster_path") else "",
"season_number": info.get("season_number")
})
ret_info.reverse()
return ret_info
def get_tmdb_season_episodes(self, tmdbid, season: int):
"""
:param: tmdbid: TMDB ID
:param: season: 季号
"""
"""
从TMDB的季集信息中获得某季的集信息
"""
"""
"episodes": [
{
"air_date": "2023-01-15",
"episode_number": 1,
"id": 2181581,
"name": "当你迷失在黑暗中",
"overview": "在一场全球性的流行病摧毁了文明之后,一个顽强的幸存者负责照顾一个 14 岁的小女孩,她可能是人类最后的希望。",
"production_code": "",
"runtime": 81,
"season_number": 1,
"show_id": 100088,
"still_path": "/aRquEWm8wWF1dfa9uZ1TXLvVrKD.jpg",
"vote_average": 8,
"vote_count": 33
},
]
"""
if not tmdbid:
return []
season_info = self.get_tmdb_tv_season_detail(tmdbid=tmdbid, season=season)
if not season_info:
return []
ret_info = []
for info in season_info.get("episodes") or []:
ret_info.append({
"air_date": info.get("air_date"),
"episode_number": info.get("episode_number"),
"id": info.get("id"),
"name": info.get("name"),
"overview": info.get("overview"),
"production_code": info.get("production_code"),
"runtime": info.get("runtime"),
"season_number": info.get("season_number"),
"show_id": info.get("show_id"),
"still_path": Config().get_tmdbimage_url(info.get("still_path")) if info.get("still_path") else "",
"vote_average": info.get("vote_average")
})
ret_info.reverse()
return ret_info
def get_tmdb_backdrop(self, mtype, tmdbid):
"""
获取TMDB的背景图
"""
if not tmdbid:
return ""
tmdbinfo = self.get_tmdb_info(mtype=mtype,
tmdbid=tmdbid,
append_to_response="images",
chinese=False)
if not tmdbinfo:
return ""
results = self.get_tmdb_backdrops(tmdbinfo=tmdbinfo, original=False)
return results[0] if results else ""
@staticmethod
def get_tmdb_backdrops(tmdbinfo, original=True):
"""
获取TMDB的背景图
"""
"""
{
"backdrops": [
{
"aspect_ratio": 1.778,
"height": 2160,
"iso_639_1": "en",
"file_path": "/qUroDlCDUMwRWbkyjZGB9THkMgZ.jpg",
"vote_average": 5.312,
"vote_count": 1,
"width": 3840
},
{
"aspect_ratio": 1.778,
"height": 2160,
"iso_639_1": "en",
"file_path": "/iyxvxEQIfQjzJJTfszZxmH5UV35.jpg",
"vote_average": 0,
"vote_count": 0,
"width": 3840
},
{
"aspect_ratio": 1.778,
"height": 720,
"iso_639_1": "en",
"file_path": "/8SRY6IcMKO1E5p83w7bjvcqklp9.jpg",
"vote_average": 0,
"vote_count": 0,
"width": 1280
},
{
"aspect_ratio": 1.778,
"height": 1080,
"iso_639_1": "en",
"file_path": "/erkJ7OxJWFdLBOcn2MvIdhTLHTu.jpg",
"vote_average": 0,
"vote_count": 0,
"width": 1920
}
]
}
"""
if not tmdbinfo:
return []
prefix_url = Config().get_tmdbimage_url(r"%s", prefix="original") \
if original else Config().get_tmdbimage_url(r"%s")
backdrops = tmdbinfo.get("images", {}).get("backdrops") or []
result = [prefix_url % backdrop.get("file_path") for backdrop in backdrops]
result.append(prefix_url % tmdbinfo.get("backdrop_path"))
return result
@staticmethod
def get_tmdb_season_episodes_num(tv_info, season: int):
"""
从TMDB的季信息中获得具体季有多少集
:param season: 季号,数字
:param tv_info: 已获取的TMDB季的信息
:return: 该季的总集数
"""
if not tv_info:
return 0
seasons = tv_info.get("seasons")
if not seasons:
return 0
for sea in seasons:
if sea.get("season_number") == int(season):
return int(sea.get("episode_count"))
return 0
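# Example: with the "seasons" sample shown in get_tmdb_tv_seasons above,
# get_tmdb_season_episodes_num(tv_info, 1) returns 9 (the episode_count of season 1), and any
# season number not present in the list returns 0.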
@staticmethod
def __dict_media_crews(crews):
"""
字典化媒体工作人员
"""
return [{
"id": crew.get("id"),
"gender": crew.get("gender"),
"known_for_department": crew.get("known_for_department"),
"name": crew.get("name"),
"original_name": crew.get("original_name"),
"popularity": crew.get("popularity"),
"image": Config().get_tmdbimage_url(crew.get("profile_path"), prefix="h632"),
"credit_id": crew.get("credit_id"),
"department": crew.get("department"),
"job": crew.get("job"),
"profile": 'https://www.themoviedb.org/person/%s' % crew.get('id')
} for crew in crews or []]
@staticmethod
def __dict_media_casts(casts):
"""
字典化媒体演职人员
"""
return [{
"id": cast.get("id"),
"gender": cast.get("gender"),
"known_for_department": cast.get("known_for_department"),
"name": cast.get("name"),
"original_name": cast.get("original_name"),
"popularity": cast.get("popularity"),
"image": Config().get_tmdbimage_url(cast.get("profile_path"), prefix="h632"),
"cast_id": cast.get("cast_id"),
"role": cast.get("character"),
"credit_id": cast.get("credit_id"),
"order": cast.get("order"),
"profile": 'https://www.themoviedb.org/person/%s' % cast.get('id')
} for cast in casts or []]
def get_tmdb_directors_actors(self, tmdbinfo):
"""
查询导演和演员
:param tmdbinfo: TMDB元数据
:return: 导演列表,演员列表
"""
"""
"cast": [
{
"adult": false,
"gender": 2,
"id": 3131,
"known_for_department": "Acting",
"name": "Antonio Banderas",
"original_name": "Antonio Banderas",
"popularity": 60.896,
"profile_path": "/iWIUEwgn2KW50MssR7tdPeFoRGW.jpg",
"cast_id": 2,
"character": "Puss in Boots (voice)",
"credit_id": "6052480e197de4006bb47b9a",
"order": 0
}
],
"crew": [
{
"adult": false,
"gender": 2,
"id": 5524,
"known_for_department": "Production",
"name": "Andrew Adamson",
"original_name": "Andrew Adamson",
"popularity": 9.322,
"profile_path": "/qqIAVKAe5LHRbPyZUlptsqlo4Kb.jpg",
"credit_id": "63b86b2224b33300a0585bf1",
"department": "Production",
"job": "Executive Producer"
}
]
"""
if not tmdbinfo:
return [], []
_credits = tmdbinfo.get("credits")
if not _credits:
return [], []
directors = []
actors = []
for cast in self.__dict_media_casts(_credits.get("cast")):
if cast.get("known_for_department") == "Acting":
actors.append(cast)
for crew in self.__dict_media_crews(_credits.get("crew")):
if crew.get("job") == "Director":
directors.append(crew)
return directors, actors
def get_tmdb_cats(self, mtype, tmdbid):
"""
获取TMDB的演员列表
:param: mtype: 媒体类型
:param: tmdbid: TMDBID
"""
try:
if mtype == MediaType.MOVIE:
if not self.movie:
return []
return self.__dict_media_casts(self.movie.credits(tmdbid).get("cast"))
else:
if not self.tv:
return []
return self.__dict_media_casts(self.tv.credits(tmdbid).get("cast"))
except Exception as err:
print(str(err))
return []
@staticmethod
def get_tmdb_genres_names(tmdbinfo):
"""
从TMDB数据中获取风格名称
"""
"""
"genres": [
{
"id": 16,
"name": "动画"
},
{
"id": 28,
"name": "动作"
},
{
"id": 12,
"name": "冒险"
},
{
"id": 35,
"name": "喜剧"
},
{
"id": 10751,
"name": "家庭"
},
{
"id": 14,
"name": "奇幻"
}
]
"""
if not tmdbinfo:
return ""
genres = tmdbinfo.get("genres") or []
genres_list = [genre.get("name") for genre in genres]
return ", ".join(genres_list) if genres_list else ""
def get_tmdb_genres(self, mtype):
"""
获取TMDB的风格列表
:param: mtype: 媒体类型
"""
if not self.genre:
return []
try:
if mtype == MediaType.MOVIE:
return self.genre.movie_list()
else:
return self.genre.tv_list()
except Exception as err:
print(str(err))
return []
@staticmethod
def get_get_production_country_names(tmdbinfo):
"""
从TMDB数据中获取制片国家名称
"""
"""
"production_countries": [
{
"iso_3166_1": "US",
"name": "美国"
}
]
"""
if not tmdbinfo:
return ""
countries = tmdbinfo.get("production_countries") or []
countries_list = [country.get("name") for country in countries]
return ", ".join(countries_list) if countries_list else ""
@staticmethod
def get_tmdb_production_company_names(tmdbinfo):
"""
从TMDB数据中获取制片公司名称
"""
"""
"production_companies": [
{
"id": 2,
"logo_path": "/wdrCwmRnLFJhEoH8GSfymY85KHT.png",
"name": "DreamWorks Animation",
"origin_country": "US"
}
]
"""
if not tmdbinfo:
return ""
companies = tmdbinfo.get("production_companies") or []
companies_list = [company.get("name") for company in companies]
return ", ".join(companies_list) if companies_list else ""
@staticmethod
def get_tmdb_crews(tmdbinfo, nums=None):
"""
从TMDB数据中获取制片人员
"""
if not tmdbinfo:
return ""
crews = tmdbinfo.get("credits", {}).get("crew") or []
result = [{crew.get("name"): crew.get("job")} for crew in crews]
if nums:
return result[:nums]
else:
return result
def get_tmdb_en_title(self, media_info):
"""
获取TMDB的英文名称
"""
en_info = self.get_tmdb_info(mtype=media_info.type,
tmdbid=media_info.tmdb_id,
language="en",
chinese=False)
if en_info:
return en_info.get("title") if media_info.type == MediaType.MOVIE else en_info.get("name")
return None
def get_episode_title(self, media_info, language=None):
"""
获取剧集的标题
"""
if media_info.type == MediaType.MOVIE:
return None
# 设置语言
self.__set_language(language)
if media_info.tmdb_id:
if not media_info.begin_episode:
return None
episodes = self.get_tmdb_season_episodes(tmdbid=media_info.tmdb_id,
season=int(media_info.get_season_seq()))
for episode in episodes:
if episode.get("episode_number") == media_info.begin_episode:
return episode.get("name")
return None
def get_movie_similar(self, tmdbid, page=1):
"""
查询类似电影
"""
if not self.movie:
return []
try:
movies = self.movie.similar(movie_id=tmdbid, page=page) or []
return self.__dict_tmdbinfos(movies, MediaType.MOVIE)
except Exception as e:
print(str(e))
return []
def get_movie_recommendations(self, tmdbid, page=1):
"""
查询电影关联推荐
"""
if not self.movie:
return []
try:
movies = self.movie.recommendations(movie_id=tmdbid, page=page) or []
return self.__dict_tmdbinfos(movies, MediaType.MOVIE)
except Exception as e:
print(str(e))
return []
def get_tv_similar(self, tmdbid, page=1):
"""
查询类似电视剧
"""
if not self.tv:
return []
try:
tvs = self.tv.similar(tv_id=tmdbid, page=page) or []
return self.__dict_tmdbinfos(tvs, MediaType.TV)
except Exception as e:
print(str(e))
return []
def get_tv_recommendations(self, tmdbid, page=1):
"""
查询电视剧关联推荐
"""
if not self.tv:
return []
try:
tvs = self.tv.recommendations(tv_id=tmdbid, page=page) or []
return self.__dict_tmdbinfos(tvs, MediaType.TV)
except Exception as e:
print(str(e))
return []
def get_tmdb_discover(self, mtype, params=None, page=1):
"""
浏览电影、电视剧(复杂过滤条件)
"""
if not self.discover:
return []
try:
if mtype == MediaType.MOVIE:
movies = self.discover.discover_movies(params=params, page=page)
return self.__dict_tmdbinfos(movies, mtype)
elif mtype == MediaType.TV:
tvs = self.discover.discover_tv_shows(params=params, page=page)
return self.__dict_tmdbinfos(tvs, mtype)
except Exception as e:
print(str(e))
return []
def get_tmdb_discover_movies_pages(self, params=None):
"""
获取电影浏览的总页数
"""
if not self.discover:
return 0
try:
return self.discover.discover_movies_pages(params=params)
except Exception as e:
print(str(e))
return 0
def get_person_medias(self, personid, mtype=None, page=1):
"""
查询人物相关影视作品
"""
if not self.person:
return []
try:
if mtype == MediaType.MOVIE:
movies = self.person.movie_credits(person_id=personid) or []
result = self.__dict_tmdbinfos(movies, mtype)
elif mtype:
tvs = self.person.tv_credits(person_id=personid) or []
result = self.__dict_tmdbinfos(tvs, mtype)
else:
medias = self.person.combined_credits(person_id=personid) or []
result = self.__dict_tmdbinfos(medias)
return result[(page - 1) * 20: page * 20]
except Exception as e:
print(str(e))
return []
@staticmethod
def __search_engine(feature_name):
"""
辅助识别关键字
"""
is_movie = False
if not feature_name:
return None, is_movie
# 剔除不必要字符
feature_name = re.compile(r"^\w+字幕[组社]?", re.IGNORECASE).sub("", feature_name)
backlist = sorted(KEYWORD_BLACKLIST, key=lambda x: len(x), reverse=True)
for single in backlist:
feature_name = feature_name.replace(single, " ")
if not feature_name:
return None, is_movie
def cal_score(strongs, r_dict):
for i, s in enumerate(strongs):
if len(strongs) < 5:
if i < 2:
score = KEYWORD_SEARCH_WEIGHT_3[0]
else:
score = KEYWORD_SEARCH_WEIGHT_3[1]
elif len(strongs) < 10:
if i < 2:
score = KEYWORD_SEARCH_WEIGHT_2[0]
else:
score = KEYWORD_SEARCH_WEIGHT_2[1] if i < (len(strongs) >> 1) else KEYWORD_SEARCH_WEIGHT_2[2]
else:
if i < 2:
score = KEYWORD_SEARCH_WEIGHT_1[0]
else:
score = KEYWORD_SEARCH_WEIGHT_1[1] if i < (len(strongs) >> 2) \
else KEYWORD_SEARCH_WEIGHT_1[2] if i < (len(strongs) >> 1) \
else KEYWORD_SEARCH_WEIGHT_1[3] if i < ((len(strongs) >> 2) + (len(strongs) >> 1)) \
else KEYWORD_SEARCH_WEIGHT_1[4]
if r_dict.__contains__(s.lower()):
r_dict[s.lower()] += score
continue
r_dict[s.lower()] = score
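# Scoring sketch for cal_score: the first two <strong>/<em> terms always receive the largest weight
# of the applicable tier (KEYWORD_SEARCH_WEIGHT_3 when fewer than 5 terms, KEYWORD_SEARCH_WEIGHT_2
# when fewer than 10, KEYWORD_SEARCH_WEIGHT_1 otherwise); later terms get progressively smaller
# weights based on their position, and repeated terms accumulate score.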
bing_url = "https://www.cn.bing.com/search?q=%s&qs=n&form=QBRE&sp=-1" % feature_name
baidu_url = "https://www.baidu.com/s?ie=utf-8&tn=baiduhome_pg&wd=%s" % feature_name
res_bing = RequestUtils(timeout=5).get_res(url=bing_url)
res_baidu = RequestUtils(timeout=5).get_res(url=baidu_url)
ret_dict = {}
if res_bing and res_bing.status_code == 200:
html_text = res_bing.text
if html_text:
html = etree.HTML(html_text)
strongs_bing = list(
filter(lambda x: (0 if not x else difflib.SequenceMatcher(None, feature_name,
x).ratio()) > KEYWORD_STR_SIMILARITY_THRESHOLD,
map(lambda x: x.text, html.cssselect(
"#sp_requery strong, #sp_recourse strong, #tile_link_cn strong, .b_ad .ad_esltitle~div strong, h2 strong, .b_caption p strong, .b_snippetBigText strong, .recommendationsTableTitle+.b_slideexp strong, .recommendationsTableTitle+table strong, .recommendationsTableTitle+ul strong, .pageRecoContainer .b_module_expansion_control strong, .pageRecoContainer .b_title>strong, .b_rs strong, .b_rrsr strong, #dict_ans strong, .b_listnav>.b_ans_stamp>strong, #b_content #ans_nws .na_cnt strong, .adltwrnmsg strong"))))
if strongs_bing:
title = html.xpath("//aside//h2[@class = \" b_entityTitle\"]/text()")
if len(title) > 0:
if title:
t = re.compile(r"\s*\(\d{4}\)$").sub("", title[0])
ret_dict[t] = 200
if html.xpath("//aside//div[@data-feedbk-ids = \"Movie\"]"):
is_movie = True
cal_score(strongs_bing, ret_dict)
if res_baidu and res_baidu.status_code == 200:
html_text = res_baidu.text
if html_text:
html = etree.HTML(html_text)
ems = list(
filter(lambda x: (0 if not x else difflib.SequenceMatcher(None, feature_name,
x).ratio()) > KEYWORD_STR_SIMILARITY_THRESHOLD,
map(lambda x: x.text, html.cssselect("em"))))
if len(ems) > 0:
cal_score(ems, ret_dict)
if not ret_dict:
return None, False
ret = sorted(ret_dict.items(), key=lambda d: d[1], reverse=True)
log.info("【Meta】推断关键字为:%s ..." % ([k[0] for i, k in enumerate(ret) if i < 4]))
if len(ret) == 1:
keyword = ret[0][0]
else:
pre = ret[0]
nextw = ret[1]
if nextw[0].find(pre[0]) > -1:
# 满分直接判定
if int(pre[1]) >= 100:
keyword = pre[0]
# 得分相差30 以上, 选分高
elif int(pre[1]) - int(nextw[1]) > KEYWORD_DIFF_SCORE_THRESHOLD:
keyword = pre[0]
# 重复的不选
elif nextw[0].replace(pre[0], "").strip() == pre[0]:
keyword = pre[0]
# 纯数字不选
elif pre[0].isdigit():
keyword = nextw[0]
else:
keyword = nextw[0]
else:
keyword = pre[0]
log.info("【Meta】选择关键字为:%s " % keyword)
return keyword, is_movie
@staticmethod
def __get_genre_ids_from_detail(genres):
"""
从TMDB详情中获取genre_id列表
"""
if not genres:
return []
genre_ids = []
for genre in genres:
genre_ids.append(genre.get('id'))
return genre_ids
@staticmethod
def __get_tmdb_chinese_title(tmdbinfo):
"""
从别名中获取中文标题
"""
if not tmdbinfo:
return None
if tmdbinfo.get("media_type") == MediaType.MOVIE:
alternative_titles = tmdbinfo.get("alternative_titles", {}).get("titles", [])
else:
alternative_titles = tmdbinfo.get("alternative_titles", {}).get("results", [])
for alternative_title in alternative_titles:
iso_3166_1 = alternative_title.get("iso_3166_1")
if iso_3166_1 == "CN":
title = alternative_title.get("title")
if title and StringUtils.is_chinese(title) and zhconv.convert(title, "zh-hans") == title:
return title
return tmdbinfo.get("title") if tmdbinfo.get("media_type") == MediaType.MOVIE else tmdbinfo.get("name")
def get_tmdbperson_chinese_name(self, person_id=None, person_info=None):
"""
查询TMDB人物中文名称
"""
if not self.person:
return ""
if not person_info and not person_id:
return ""
# 返回中文名
name = ""
# 所有别名
alter_names = []
try:
if not person_info:
person_info = self.person.details(person_id)
if person_info:
aka_names = person_info.get("also_known_as", []) or []
else:
return ""
except Exception as err:
print(str(err))
return ""
for aka_name in aka_names:
if StringUtils.is_chinese(aka_name):
alter_names.append(aka_name)
if len(alter_names) == 1:
name = alter_names[0]
elif len(alter_names) > 1:
for alter_name in alter_names:
if alter_name == zhconv.convert(alter_name, 'zh-hans'):
name = alter_name
return name
def get_tmdbperson_aka_names(self, person_id):
"""
查询人物又名
"""
if not self.person:
return []
try:
aka_names = self.person.details(person_id).get("also_known_as", []) or []
return aka_names
except Exception as err:
print(str(err))
return []
def get_random_discover_backdrop(self):
"""
获取TMDB热门电影随机一张背景图
"""
if not self.discover:
return ""
try:
medias = self.discover.discover_movies(params={"sort_by": "popularity.desc"})
if medias:
# 随机一个电影
media = random.choice(medias)
img_url = Config().get_tmdbimage_url(media.get("backdrop_path"), prefix="original") \
if media.get("backdrop_path") else ''
img_title = media.get('title', '')
img_link = f"https://www.themoviedb.org/movie/{media.get('id')}" if media.get('id') else ''
return img_url, img_title, img_link
except Exception as err:
print(str(err))
return '', '', ''
def save_rename_cache(self, file_name, cache_info):
"""
将手动识别的信息加入缓存
"""
if not file_name or not cache_info:
return
meta_info = MetaInfo(title=file_name)
self.__insert_media_cache(self.__make_cache_key(meta_info), cache_info)
@staticmethod
def merge_media_info(target, source):
"""
Merge the valid information from source into target and return target
"""
target.set_tmdb_info(source.tmdb_info)
target.fanart_poster = source.get_poster_image()
target.fanart_backdrop = source.get_backdrop_image()
target.set_download_info(download_setting=source.download_setting,
save_path=source.save_path)
return target
def get_tmdbid_by_imdbid(self, imdbid):
"""
根据IMDBID查询TMDB信息
"""
if not self.find:
return None
try:
result = self.find.find_by_imdbid(imdbid) or {}
tmdbinfo = result.get('movie_results') or result.get("tv_results")
if tmdbinfo:
tmdbinfo = tmdbinfo[0]
return tmdbinfo.get("id")
except Exception as err:
print(str(err))
return None
@staticmethod
def get_detail_url(mtype, tmdbid):
"""
获取TMDB/豆瓣详情页地址
"""
if not tmdbid:
return ""
if str(tmdbid).startswith("DB:"):
return "https://movie.douban.com/subject/%s" % str(tmdbid).replace("DB:", "")
elif mtype == MediaType.MOVIE:
return "https://www.themoviedb.org/movie/%s" % tmdbid
else:
return "https://www.themoviedb.org/tv/%s" % tmdbid
def get_episode_images(self, tv_id, season_id, episode_id, orginal=False):
"""
        Get the still image of a specific episode of a TV show
"""
if not self.episode:
return ""
if not tv_id or not season_id or not episode_id:
return ""
res = self.episode.images(tv_id, season_id, episode_id)
if res:
if orginal:
return Config().get_tmdbimage_url(res[-1].get("file_path"), prefix="original")
else:
return Config().get_tmdbimage_url(res[-1].get("file_path"))
else:
return ""
def get_tmdb_factinfo(self, media_info):
"""
        Get TMDB release info for display
"""
result = []
if media_info.vote_average:
result.append({"评分": media_info.vote_average})
if media_info.original_title:
result.append({"原始标题": media_info.original_title})
status = media_info.tmdb_info.get("status")
if status:
result.append({"状态": status})
if media_info.release_date:
result.append({"上映日期": media_info.release_date})
revenue = media_info.tmdb_info.get("revenue")
if revenue:
result.append({"收入": StringUtils.str_amount(revenue)})
budget = media_info.tmdb_info.get("budget")
if budget:
result.append({"成本": StringUtils.str_amount(budget)})
if media_info.original_language:
result.append({"原始语言": media_info.original_language})
production_country = self.get_get_production_country_names(tmdbinfo=media_info.tmdb_info)
if media_info.networks:
result.append({"电视网": media_info.networks})
if production_country:
result.append({"出品国家": production_country}),
production_company = self.get_tmdb_production_company_names(tmdbinfo=media_info.tmdb_info)
if production_company:
result.append({"制作公司": production_company})
return result
| [] |
2024-01-10 | IsakZhang/XABSA | transformers~tokenization_openai_fast.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization classes for OpenAI GPT."""
from typing import Optional, Tuple
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_utils_fast import PreTrainedTokenizerFast
from .utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json"},
"merges_file": {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt"},
"tokenizer_file": {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-tokenizer.json"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"openai-gpt": 512,
}
class OpenAIGPTTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" GPT Tokenizer (backed by HuggingFace's `tokenizers` library). Based on Byte-Pair-Encoding with
the following peculiarities:
- lower case all inputs
- uses BERT's BasicTokenizer for pre-BPE tokenization
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
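
    Example (illustrative usage; assumes the ``openai-gpt`` checkpoint referenced in the
    vocab maps above is available)::

        >>> from transformers import OpenAIGPTTokenizerFast
        >>> tokenizer = OpenAIGPTTokenizerFast.from_pretrained("openai-gpt")
        >>> ids = tokenizer("Hello world")["input_ids"]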
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
slow_tokenizer_class = OpenAIGPTTokenizer
def __init__(self, vocab_file, merges_file, tokenizer_file=None, unk_token="<unk>", **kwargs):
super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, **kwargs)
@property
def do_lower_case(self):
return True
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
| [] |
2024-01-10 | AI-ApeX-DeV/DataHack_2_DTG | pickle_savemodel.py | import cloudpickle
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
import os
os.environ["openai_api_key"] = "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"
loader = CSVLoader(file_path='test_dataset.csv')
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([loader])
# Build the retriever once so the same object can be pickled and wired into the chain
retriever = docsearch.vectorstore.as_retriever()
# Save the retriever
with open('retriever.pkl', 'wb') as f:
    cloudpickle.dump(retriever, f)
# Save the chain
with open('chain.pkl', 'wb') as f:
    cloudpickle.dump(RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=retriever, input_key="question"), f)
# Load the retriever
with open('retriever.pkl', 'rb') as f:
retriever = cloudpickle.load(f)
# Load the chain
with open('chain.pkl', 'rb') as f:
chain = cloudpickle.load(f)
query=input("Enter your query: ")
text="tell me to opt for and some detials of the lawyer and why he/she will be best for the case, give 1 male and 1 female lawyer for case in equal gender proportion"
response = chain({"question":query+text})
print(response['result'])
| [] |
2024-01-10 | AI-ApeX-DeV/DataHack_2_DTG | Lawyantra~DataHack~papp.py | # from langchain.document_loaders import CSVLoader
# from langchain.indexes import VectorstoreIndexCreator
# from langchain.chains import RetrievalQA
# from langchain.llms import OpenAI
# import os
# import pickle
# import sqlite3
# os.environ["openai_api_key"] = "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"
# loader = CSVLoader(file_path='updated_lawyer_data.csv')
# index_creator = VectorstoreIndexCreator()
# docsearch = index_creator.from_loaders([loader])
# chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
# def copy_chain_without_sqlite_connections(chain):
# # Helper function to recursively exclude sqlite3.Connection objects
# def exclude_sqlite_connections(obj):
# if isinstance(obj, sqlite3.Connection):
# return None
# if isinstance(obj, dict):
# return {k: exclude_sqlite_connections(v) for k, v in obj.items() if exclude_sqlite_connections(v) is not None}
# if isinstance(obj, list):
# return [exclude_sqlite_connections(item) for item in obj if exclude_sqlite_connections(item) is not None]
# return obj
# return exclude_sqlite_connections(chain)
# # Use custom serialization to save the trained model
# with open('retrieval_qa_model.pkl', 'wb') as model_file:
# serializable_chain = copy_chain_without_sqlite_connections(chain)
# pickle.dump(serializable_chain, model_file)
# When you want to use the model for inference:
# Load the model from the saved file
# with open('retrieval_qa_model.pkl', 'rb') as model_file:
# loaded_chain = pickle.load(model_file)
# Get a user query
# query = input("Enter your query: ")
# # Perform inference using the loaded model
# response = loaded_chain({"question": query })
# # Print the response
# print(response['result'])
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
import os
import pickle
os.environ["openai_api_key"] = "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"
loader = CSVLoader(file_path='PS_2_Test_Dataset.csv')
index_creator = VectorstoreIndexCreator()
docsearch = index_creator.from_loaders([loader])
chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question")
query="Two brothers were tenant of a landlord in a commercial property.One brother had one son and adaughter (both minor) when he got divorced with his wife.The children's went into mother's custody at thetime of divorce and after some years the husband (co tenant) also died. Now can the children of thedeceased brother(co tenant) claim the right. which lawyer should be helpful for me?"
response = chain({"question":query})
print(response['result'])
| [] |
2024-01-10 | PaLeroy/competPymarl | src~runners~parallel_runner_population.py | from copy import deepcopy
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
from runners import ParallelRunner
from modules.bandits.uniform import Uniform
from modules.bandits.reinforce_hierarchial import EZ_agent as enza
from modules.bandits.returns_bandit import ReturnsBandit as RBandit
# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
class ParallelRunnerPopulation(ParallelRunner):
def __init__(self, args, logger, agent_dict):
super().__init__(args, logger)
self.agent_dict = agent_dict
self.mac = None
self.agent_timer = None
self.batches = None
self.team_id = None
self.list_match = None
self.noise = None
self.t = None
self.train_returns = {}
self.test_returns = {}
self.train_stats = {}
self.test_stats = {}
self.train_custom_stats = {}
self.test_custom_stats = {}
self.log_train_stats_t = {}
self.noise_returns = {}
self.noise_test_won = {}
self.noise_train_won = {}
self.new_batch = {}
self.noise_distrib = {}
for k, _ in agent_dict.items():
self.train_returns[k] = []
self.test_returns[k] = []
self.train_stats[k] = {}
self.test_stats[k] = {}
self.train_custom_stats[k] = {}
self.test_custom_stats[k] = {}
self.log_train_stats_t[k] = -1000000
self.noise_returns[k] = {}
self.noise_test_won[k] = {}
self.noise_train_won[k] = {}
def cuda(self):
for k, v in self.agent_dict.items():
if v['args_sn'].mac == "maven_mac":
self.noise_distrib[k].cuda()
def save_models(self, save_path, agent_id, agent_dict):
args_sn = agent_dict[agent_id]['args_sn']
if args_sn.mac == "maven_mac" and args_sn.noise_bandit:
self.noise_distrib[agent_id].save_model(save_path)
def load_models(self, load_path, agent_id, agent_dict):
        args_sn = agent_dict[agent_id]['args_sn']
if args_sn.mac == "maven_mac" and args_sn.noise_bandit:
self.noise_distrib[agent_id].load_model(load_path)
def setup(self, agent_dict, groups, preprocess):
for k, v in agent_dict.items():
self.new_batch[k] = partial(EpisodeBatch, v['scheme_buffer'],
groups, 1,
self.episode_limit + 1,
preprocess=preprocess,
device=self.args.device)
# set up noise distrib
if v['args_sn'].mac == "maven_mac":
dict_args = deepcopy(agent_dict[k]['args_sn'])
dict_args.batch_size_run = 1
if v['args_sn'].noise_bandit:
if v['args_sn'].bandit_policy:
self.noise_distrib[k] = enza(dict_args,
logger=self.logger)
else:
self.noise_distrib[k] = RBandit(
dict_args,
logger=self.logger)
else:
self.noise_distrib[k] = Uniform(dict_args)
else:
self.noise_distrib[k] = None
def setup_agents(self, list_match, agent_dict):
# To be called between each episode
# Define which agents play with each other.
        # This will be a list of pairs of agent ids.
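        # e.g. list_match = [[0, 1], [0, 2]] pits team 0 against team 1 in the first
        # parallel env and team 0 against team 2 in the second (illustrative ids;
        # any keys of agent_dict are valid).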
self.mac = []
self.team_id = []
self.agent_timer = {}
self.agent_dict = agent_dict
self.list_match = list_match
self.noise = []
for k, v in agent_dict.items():
self.agent_timer[k] = v['t_total']
heuristic_list = []
for idx_match, match in enumerate(list_match):
team_id1 = match[0]
team_id2 = match[1]
self.team_id.append([team_id1, team_id2])
# Controller
self.mac.append(
[agent_dict[team_id1]["mac"], agent_dict[team_id2]["mac"]])
# New noises
self.noise.append([None, None])
# Check if env uses heuristic.
heuristic_t1 = type(
agent_dict[team_id1]["mac"]).__name__ == 'DoNothingMAC'
heuristic_t2 = type(
agent_dict[team_id2]["mac"]).__name__ == 'DoNothingMAC'
heuristic_list.append([heuristic_t1, heuristic_t2])
for idx, parent_conn in enumerate(self.parent_conns):
parent_conn.send(("setup_heuristic", heuristic_list[idx]))
def reset(self, test_mode=False):
self.batches = []
for match in self.list_match:
self.batches.append(
[self.new_batch[match[0]](), self.new_batch[match[1]]()])
# Reset the envs
for parent_conn in self.parent_conns:
parent_conn.send(("reset", None))
pre_transition_data_1 = []
pre_transition_data_2 = []
# Get the obs, state and avail_actions back
for parent_conn in self.parent_conns:
data = parent_conn.recv()
state = data["state"]
observations = data["obs"]
obs_team_1 = observations[:self.args.n_agents]
obs_team_2 = observations[self.args.n_agents:]
avail_actions = data["avail_actions"]
avail_actions_team_1 = avail_actions[:self.args.n_agents]
avail_actions_team_2 = avail_actions[self.args.n_agents:]
pre_transition_data_team_1 = {"state": [state[0]],
"avail_actions": [
avail_actions_team_1],
"obs": [obs_team_1]}
pre_transition_data_1.append(pre_transition_data_team_1)
pre_transition_data_team_2 = {"state": [state[1]],
"avail_actions": [
avail_actions_team_2],
"obs": [obs_team_2]}
pre_transition_data_2.append(pre_transition_data_team_2)
self.t = 0
self.env_steps_this_run = 0
for idx, _ in enumerate(self.batches):
self.batches[idx][0].update(pre_transition_data_1[idx], ts=self.t)
self.batches[idx][1].update(pre_transition_data_2[idx], ts=self.t)
for idx, match in enumerate(self.list_match):
id_team_1 = match[0]
if self.agent_dict[id_team_1]['args_sn'].mac == "maven_mac":
state = self.batches[idx][0]['state']
noise = self.noise_distrib[id_team_1].sample(state[:, 0],
test_mode)
self.batches[idx][0].update({"noise": noise}, ts=0)
id_team_2 = match[1]
if self.agent_dict[id_team_2]['args_sn'].mac == "maven_mac":
state = self.batches[idx][1]["state"]
noise = self.noise_distrib[id_team_2].sample(state[:, 0],
test_mode)
self.batches[idx][1].update({"noise": noise}, ts=0)
def run(self, test_mode=False, test_uniform=False):
self.reset(test_uniform)
all_terminated = False
episode_returns = [np.zeros(2) for _ in range(self.batch_size)]
episode_lengths = [0 for _ in range(self.batch_size)]
for idx_match, _ in enumerate(self.list_match):
for idx_team in range(2):
self.mac[idx_match][idx_team].init_hidden(batch_size=1)
terminated = [False for _ in range(self.batch_size)]
final_env_infos = [{} for _ in range(
self.batch_size)] # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
while True:
actions = []
cpu_actions = []
for idx_match, match in enumerate(self.list_match):
if terminated[idx_match]:
continue
actions_match = []
for idx_team in range(2):
action = self.mac[idx_match][idx_team].select_actions(
self.batches[idx_match][idx_team],
t_ep=self.t,
t_env=self.agent_timer[match[idx_team]],
test_mode=test_mode)
actions_match.append(action)
actions.append(actions_match)
cpu_action = np.concatenate(
[actions_match[0][0].to("cpu").numpy(),
actions_match[1][0].to("cpu").numpy()])
cpu_actions.append(cpu_action)
# Send actions to each env
action_idx = 0
for idx, parent_conn in enumerate(self.parent_conns):
if not terminated[idx]:
# Only send the actions to the env if it hasn't terminated
parent_conn.send(("step", cpu_actions[action_idx]))
action_idx += 1 # actions is not a list over every env
# Update envs_not_terminated
all_terminated = all(terminated)
if all_terminated:
break
# Post step data we will insert for the current timestep
post_transition_data_1 = []
post_transition_data_2 = []
# Data for the next step we will insert in order to select an action
pre_transition_data_1 = []
pre_transition_data_2 = []
# Receive data back for each unterminated env
action_idx = 0
for idx, parent_conn in enumerate(self.parent_conns):
if terminated[idx]:
post_transition_data_1.append([])
post_transition_data_2.append([])
pre_transition_data_1.append([])
pre_transition_data_2.append([])
else:
data = parent_conn.recv()
# Remaining data for this current timestep
reward_team_1 = data["reward"][0]
reward_team_2 = data["reward"][-1]
env_terminated = False
if data["terminated"]:
final_env_infos[idx] = data["info"]
if not data["info"].get("episode_limit", False):
env_terminated = True
terminated[idx] = data["terminated"]
post_transition_data_team_1 = {
"actions": actions[action_idx][0],
"reward": [(reward_team_1,)],
"terminated": [(env_terminated,)]}
post_transition_data_team_2 = {
"actions": actions[action_idx][1],
"reward": [(reward_team_2,)],
"terminated": [(env_terminated,)]}
post_transition_data_1.append(post_transition_data_team_1)
post_transition_data_2.append(post_transition_data_team_2)
action_idx += 1
episode_returns[idx] += [reward_team_1, reward_team_2]
episode_lengths[idx] += 1
if not test_mode:
self.agent_timer[self.team_id[idx][0]] += 1
self.agent_timer[self.team_id[idx][1]] += 1
# Data for the next timestep needed to select an action
state = data["state"]
observations = data["obs"]
obs_team_1 = observations[:self.args.n_agents]
obs_team_2 = observations[self.args.n_agents:]
avail_actions = data["avail_actions"]
avail_actions_team_1 = avail_actions[:self.args.n_agents]
avail_actions_team_2 = avail_actions[self.args.n_agents:]
pre_transition_data_team_1 = {"state": [state[0]],
"avail_actions": [
avail_actions_team_1],
"obs": [obs_team_1]}
pre_transition_data_1.append(pre_transition_data_team_1)
pre_transition_data_team_2 = {"state": [state[1]],
"avail_actions": [
avail_actions_team_2],
"obs": [obs_team_2]}
pre_transition_data_2.append(pre_transition_data_team_2)
# Add post_transiton data into the batch
for idx, _ in enumerate(self.batches):
if not terminated[idx]:
self.batches[idx][0].update(post_transition_data_1[idx],
ts=self.t)
self.batches[idx][1].update(post_transition_data_2[idx],
ts=self.t)
# Move onto the next timestep
self.t += 1
# Add the pre-transition data
for idx, _ in enumerate(self.batches):
if not terminated[idx]:
self.batches[idx][0].update(pre_transition_data_1[idx],
ts=self.t)
self.batches[idx][1].update(pre_transition_data_2[idx],
ts=self.t)
# Get stats back for each env
for parent_conn in self.parent_conns:
parent_conn.send(("get_stats", None))
env_stats = []
for parent_conn in self.parent_conns:
env_stat = parent_conn.recv()
env_stats.append(env_stat)
        # Pre-analyze. Exclude information from bugged environments: envs that ended with
        # no winner before the episode limit are dropped and their agents' timers rolled back.
list_win = []
list_time = []
list_canceled_match = []
for idx, d in enumerate(final_env_infos):
won_team_1 = d['battle_won_team_1']
won_team_2 = d['battle_won_team_2']
episode_length = episode_lengths[idx]
if not won_team_1 and not won_team_2 and episode_length < self.episode_limit:
self.agent_timer[self.list_match[idx][0]] -= episode_length
self.agent_timer[self.list_match[idx][1]] -= episode_length
list_win.append(None)
list_time.append(None)
self.batches[idx] = None
list_canceled_match.append(True)
else:
list_win.append([won_team_1, won_team_2])
list_time.append([episode_length, episode_length])
list_canceled_match.append(False)
cur_stats = self.test_stats if test_mode else self.train_stats
cur_custom_stats = self.test_custom_stats if test_mode else self.train_custom_stats
cur_returns = self.test_returns if test_mode else self.train_returns
log_prefix = "test_" if test_mode else ""
for idx_match, match in enumerate(self.list_match):
if list_canceled_match[idx_match]:
continue
team_id1 = match[0]
team_id2 = match[1]
env_info = final_env_infos[idx_match]
custom_stats_team_1_dict = {
key: [val] for key, val in env_info.items() if
key.endswith("custom_team_1") or key.endswith(
"team_1_agent_0") or key.endswith(
"team_1_agent_1") or key.endswith("team_1_agent_2")
}
for key in list(env_info.keys()):
if key.endswith("custom_team_1") or key.endswith(
"team_1_agent_0") or key.endswith(
"team_1_agent_1") or key.endswith("team_1_agent_2"):
del env_info[key]
custom_stats_team_2_dict = {
key: [val] for key, val in env_info.items() if
key.endswith("custom_team_2") or key.endswith(
"team_2_agent_0") or key.endswith(
"team_2_agent_1") or key.endswith("team_2_agent_2")
}
for key in list(env_info.keys()):
if key.endswith("custom_team_2") or key.endswith(
"team_2_agent_0") or key.endswith(
"team_2_agent_1") or key.endswith("team_2_agent_2"):
del env_info[key]
env_info_team1 = {
"battle_won_team_1": env_info["battle_won_team_1"],
"return_team_1": episode_returns[idx_match][0],
}
env_info_team2 = {
"battle_won_team_2": env_info["battle_won_team_2"],
"return_team_2": episode_returns[idx_match][1]}
del env_info["battle_won_team_1"]
del env_info["battle_won_team_2"]
cur_stats[team_id1].update(
{k: cur_stats[team_id1].get(k, 0) + env_info.get(k,
0) + env_info_team1.get(
k, 0) for
k
in
set(cur_stats[team_id1]) | set(env_info) | set(
env_info_team1)})
cur_custom_stats[team_id1].update(
{k: cur_custom_stats[team_id1].get(k,
[]) + custom_stats_team_1_dict.get(
k, []) for k in
set(cur_custom_stats[team_id1]) | set(
custom_stats_team_1_dict)}
)
cur_stats[team_id2].update(
{k: cur_stats[team_id2].get(k, 0) + env_info.get(k,
0) + env_info_team2.get(
k, 0) for
k
in
set(cur_stats[team_id2]) | set(env_info) | set(
env_info_team2)})
cur_custom_stats[team_id2].update(
{
k: cur_custom_stats[team_id2].get(k,
[]) + custom_stats_team_2_dict.get(
k, []) for k in
set(cur_custom_stats[team_id2]) | set(
custom_stats_team_2_dict)}
)
if env_info_team1["battle_won_team_1"]:
cur_stats[team_id1]["won"] \
= 1 + cur_stats[team_id1].get(
"won", 0)
cur_stats[team_id1]["defeat"] \
= 0 + cur_stats[team_id1].get(
"defeat", 0)
cur_stats[team_id1]["draw"] \
= 0 + cur_stats[team_id1].get(
"draw", 0)
cur_stats[team_id2]["defeat"] \
= 1 + cur_stats[team_id2].get(
"defeat", 0)
cur_stats[team_id2]["won"] \
= 0 + cur_stats[team_id2].get(
"won", 0)
cur_stats[team_id2]["draw"] \
= 0 + cur_stats[team_id2].get(
"draw", 0)
elif env_info_team2["battle_won_team_2"]:
cur_stats[team_id2]["won"] \
= 1 + cur_stats[team_id2].get(
"won", 0)
cur_stats[team_id2]["defeat"] \
= 0 + cur_stats[team_id2].get(
"defeat", 0)
cur_stats[team_id2]["draw"] \
= 0 + cur_stats[team_id2].get(
"draw", 0)
cur_stats[team_id1]["defeat"] \
= 1 + cur_stats[team_id1].get(
"defeat", 0)
cur_stats[team_id1]["won"] \
= 0 + cur_stats[team_id1].get(
"won", 0)
cur_stats[team_id1]["draw"] \
= 0 + cur_stats[team_id1].get(
"draw", 0)
else:
cur_stats[team_id1]["defeat"] \
= 0 + cur_stats[team_id1].get(
"defeat", 0)
cur_stats[team_id1]["won"] \
= 0 + cur_stats[team_id1].get(
"won", 0)
cur_stats[team_id1]["draw"] \
= 1 + cur_stats[team_id1].get(
"draw", 0)
cur_stats[team_id2]["won"] \
= 0 + cur_stats[team_id2].get(
"won", 0)
cur_stats[team_id2]["defeat"] \
= 0 + cur_stats[team_id2].get(
"defeat", 0)
cur_stats[team_id2]["draw"] \
= 1 + cur_stats[team_id2].get(
"draw", 0)
cur_stats[team_id1]["n_episodes"] \
= 1 + cur_stats[team_id1].get(
"n_episodes", 0)
cur_stats[team_id2]["n_episodes"] \
= 1 + cur_stats[team_id2].get(
"n_episodes", 0)
cur_stats[team_id1]["ep_length"] \
= episode_lengths[idx_match] + cur_stats[team_id1].get(
"ep_length", 0)
cur_stats[team_id2]["ep_length"] \
= episode_lengths[idx_match] + cur_stats[team_id2].get(
"ep_length", 0)
cur_returns[team_id1].append(episode_returns[idx_match][0])
cur_returns[team_id2].append(episode_returns[idx_match][1])
# update noise network for each agent
for team_id, v in self.agent_dict.items():
if v['args_sn'].mac == "maven_mac":
returns_for_this_agent = []
init_states_for_this_agent = []
noise_for_this_agent = []
if not any(team_id in match for match in self.list_match):
continue
for id_match, match in enumerate(self.list_match):
if list_canceled_match[id_match]:
continue
if match[0] == team_id:
returns_for_this_agent. \
append(episode_returns[id_match][0])
init_states_for_this_agent. \
append(self.batches[id_match][0]["state"][:, 0])
noise_for_this_agent.append(
self.batches[id_match][0]['noise'][:])
if match[1] == team_id:
returns_for_this_agent. \
append(episode_returns[id_match][1])
init_states_for_this_agent. \
append(self.batches[id_match][1]["state"][:, 0])
noise_for_this_agent.append(
self.batches[id_match][1]['noise'][:])
if not init_states_for_this_agent:
continue
init_states_for_this_agent = \
th.stack(init_states_for_this_agent, dim=1)[0]
noise_for_this_agent = th.stack(noise_for_this_agent, dim=1)[
0, 0]
time = self.agent_timer[team_id]
self.noise_distrib[team_id].update_returns(
init_states_for_this_agent,
noise_for_this_agent,
returns_for_this_agent,
test_mode,
time)
if test_mode:
n_test_runs = max(1,
self.args.test_nepisode // self.batch_size) * self.batch_size
n_tests_returns = 0
for k, v in self.test_returns.items():
n_tests_returns += len(v)
if (n_tests_returns >= n_test_runs * 2):
for k, _ in self.agent_dict.items():
id = k
time = self.agent_timer[id]
log_prefix_ = log_prefix + "agent_id_" + str(id) + "_"
self._log(cur_returns[id], cur_stats[id], log_prefix_,
time, cur_custom_stats[id])
else:
for id, _ in self.agent_dict.items():
time = self.agent_timer[id]
if time - self.log_train_stats_t[
id] >= self.args.runner_log_interval:
log_prefix_ = log_prefix + "agent_id_" + str(id) + "_"
self._log(cur_returns[id], cur_stats[id], log_prefix_,
time)
if hasattr(self.agent_dict[id]["mac"],
"action_selector") and \
hasattr(self.agent_dict[id]["mac"].action_selector,
"epsilon"):
self.logger.log_stat(
"agent_id_" + str(id) + "_epsilon",
self.agent_dict[id][
"mac"].action_selector.epsilon,
time)
self.log_train_stats_t[id] = time
return self.batches, list_time, list_win
def _log(self, returns, stats, prefix, time, custom_stats=None):
if len(returns) > 0:
self.logger.log_stat(prefix + "return_mean", np.mean(returns),
time)
self.logger.log_stat(prefix + "return_std", np.std(returns),
time)
returns.clear()
for k, v in stats.items():
if k != "n_episodes":
self.logger.log_stat(prefix + k + "_mean",
v / stats["n_episodes"], time)
if custom_stats is not None:
for k, v in custom_stats.items():
self.logger.log_stat(prefix + k, v, time)
custom_stats.clear()
stats.clear()
# def _update_noise_returns(self, returns, noise, stats, test_mode):
# "Update the list of noise returns."
# print("_update_noise_returns")
# print("returns", returns)
# print("noise", noise)
# print("stats", stats)
# print("test_mode", test_mode)
# print("self.noise_returns", self.noise_returns)
# for n, r in zip(noise, returns):
# n = int(np.argmax(n))
# if n in self.noise_returns:
# self.noise_returns[n].append(r)
# else:
# self.noise_returns[n] = [r]
# if test_mode:
# noise_won = self.noise_test_won
# else:
# noise_won = self.noise_train_won
# if stats != [] and "battle_won" in stats[0]:
# for n, info in zip(noise, stats):
# if "battle_won" not in info:
# continue
# print("n", n)
# bw = info["battle_won"]
# n = int(np.argmax(n))
# print("bw", bw)
# print("n", n)
# if n in noise_won:
# noise_won[n].append(bw)
# else:
# noise_won[n] = [bw]
| [] |
2024-01-10 | adamyedidia/werewords | ipython.py | import IPython
import openai
from settings import OPENAI_SECRET_KEY
openai.api_key = OPENAI_SECRET_KEY
messages = [
{"role": "system", "content": "You are a player in a fun game."},
{"role": "user", "content": (
"Let's play a game. The game is like twenty questions, "
"except that there is no limit on the number of questions asked, "
"and the word you're trying to guess is going to be tougher than an ordinary twenty "
"questions word. You should start by asking if I want to use a \"multi-step\" way to get to the "
"answer, meaning that I first get you to words that hint the actual word. If your questions don't "
"seem to be making any progress, try asking about a broader class of things. I'll think of a word, "
"and you try to find the word using only questions which I will answer only with \"yes\", \"no\", "
"\"maybe\", or a reminder to try asking broader questions. Sound good?"
)
},
{"role": "assistant", "content": "Sounds fun! Let's play. Have you thought of a word?"},
{"role": "user", "content": "Yes, please go ahead and start!"},
{"role": "assistant", "content": "Do you want to get to your word via a multi-step path?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "I'll ask about a word that will hint your real word, then. Is that word a noun?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is it tangible?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is it living?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is it an animal?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is it a mammal?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it a bird?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it an insect?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it a reptile?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it a fish?"},
{"role": "user", "content": "maybe"},
{"role": "assistant", "content": "Is it a cetacean, like a dolphin or whale?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it a shark?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it an eel?"},
{"role": "user", "content": "This line of questioning is too narrow; try broadening the level of your questions."},
{"role": "assistant", "content": "Should I ask about its size?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Should I ask about where it lives?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Does it live in the ocean?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Do you want me to ask about where it lives more precisely?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Does it live only in saltwater environments?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Does it live near the surface of the ocean?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Does it live near the bottom of the ocean?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Do you want me to keep asking about where it lives?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Does it swim?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it a type of mussel, barnacle, or coral?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is it coral?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Was that the hint you intended for your real word?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Should I ask about your real word now?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Should I ask about how your real word relates to \"coral\"?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is its meaning related to \"coral\"?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Does it sound like \"coral\"?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Do I know enough about how your real word relates to \"coral\" now?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Should I ask about your real word directly?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is your real word a noun?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is it tangible?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it an abstract concept?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it a piece of media?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is it a book?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it a TV show?"},
{"role": "user", "content": "no"},
{"role": "assistant", "content": "Is it a movie?"},
{"role": "user", "content": "yes"},
{"role": "assistant", "content": "Is it \"Coraline\"?"},
{"role": "user", "content": "Yes, you win! Great job!!"},
{"role": "assistant", "content": "Thanks! That was fun. Do you want to play again?"},
{"role": "user", "content": "Yes, I've thought of another word. Start asking!"},
{"role": "assistant", "content": "Do you want to get to your word via a multi-step path?"},
]
print(messages[-1]['content'])
def yes_answer():
return {"role": "user", "content": "yes"}
def no_answer():
return {"role": "user", "content": "no"}
def maybe_answer():
return {"role": "user", "content": "maybe"}
def broaden_out_answer():
return {"role": "user", "content": "This line of questioning is too narrow; try broadening the level of your questions."}
def y():
return get_response(yes_answer())
def n():
return get_response(no_answer())
def m():
return get_response(maybe_answer())
def b():
return get_response(broaden_out_answer())
def get_response(answer):
messages.append(answer)
# print(messages)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
response_message = {'content': response['choices'][0]['message']['content'], 'role': 'assistant'}
print(response_message['content'])
messages.append(response_message)
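# Interactive usage sketch: IPython.embed() below drops into a shell where you answer the
# assistant's latest question by calling y(), n(), m(), or b(); each call records the
# answer, asks gpt-3.5-turbo for its next question, and prints it.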
IPython.embed() | [
"Is it a piece of media?",
"Does it live near the bottom of the ocean?",
"Do you want me to ask about where it lives more precisely?",
"Is it a shark?",
"Is it a mammal?",
"Is it a book?",
"This line of questioning is too narrow; try broadening the level of your questions.",
"Is its meaning related to \"coral\"?",
"Should I ask about its size?",
"content",
"Is it a fish?",
"Yes, you win! Great job!!",
"yes",
"Thanks! That was fun. Do you want to play again?",
"Is it a type of mussel, barnacle, or coral?",
"Is it coral?",
"Is it \"Coraline\"?",
"Is it a reptile?",
"Is it a cetacean, like a dolphin or whale?",
"Was that the hint you intended for your real word?",
"Sounds fun! Let's play. Have you thought of a word?",
"Does it live in the ocean?",
"Should I ask about how your real word relates to \"coral\"?",
"no",
"Does it swim?",
"Is it an insect?",
"Does it sound like \"coral\"?",
"Yes, please go ahead and start!",
"You are a player in a fun game.",
"Should I ask about your real word now?",
"Should I ask about your real word directly?",
"Is it tangible?",
"Should I ask about where it lives?",
"Is it a TV show?",
"Do I know enough about how your real word relates to \"coral\" now?",
"Is it an abstract concept?",
"Do you want to get to your word via a multi-step path?",
"Does it live near the surface of the ocean?",
"Let's play a game. The game is like twenty questions, except that there is no limit on the number of questions asked, and the word you're trying to guess is going to be tougher than an ordinary twenty questions word. You should start by asking if I want to use a \"multi-step\" way to get to the answer, meaning that I first get you to words that hint the actual word. If your questions don't seem to be making any progress, try asking about a broader class of things. I'll think of a word, and you try to find the word using only questions which I will answer only with \"yes\", \"no\", \"maybe\", or a reminder to try asking broader questions. Sound good?",
"Yes, I've thought of another word. Start asking!",
"Is it living?",
"Does it live only in saltwater environments?",
"Is it a movie?",
"maybe",
"Is it a bird?",
"Is it an animal?",
"Is your real word a noun?",
"Is it an eel?",
"I'll ask about a word that will hint your real word, then. Is that word a noun?",
"Do you want me to keep asking about where it lives?"
] |
2024-01-10 | yingkitw/watsonx-demo | translation_assistant~translation_assistant.py | import os
from dotenv import load_dotenv
import streamlit as st
from langchain.chains import LLMChain
from langchain import PromptTemplate
from genai.credentials import Credentials
from genai.extensions.langchain import LangChainInterface
from genai.schemas import GenerateParams
load_dotenv()
api_key = os.getenv("GENAI_KEY", None)
api_endpoint = os.getenv("GENAI_API", None)
creds = Credentials(api_key,api_endpoint)
params = GenerateParams(
decoding_method="sample",
max_new_tokens=200,
min_new_tokens=1,
stream=False,
temperature=0.7,
top_k=50,
top_p=1
).dict()
with st.sidebar:
st.title("Translation Assistant")
st.title("Translation Assistant")
text_input = st.text_area('Enter text')
# Create a selectbox for the user to select the target language
target_language = st.selectbox('Select language', [ 'English', 'Spanish', 'French', 'German','Chinese','Korean','Japanese','Hindi'])
# Create a button that the user can click to initiate the translation process
translate_button = st.button('Translate')
# Create a placeholder where the translated text will be displayed
translated_text = st.empty()
# Handle the translation process when the user clicks the translate button
if translate_button:
translated_text.text('Translating...')
llm = LangChainInterface(model="bigscience/mt0-xxl",credentials=creds, params=params)
prompt = PromptTemplate(template=f"Translate '{text_input}' to {target_language}",
input_variables=[])
chain = LLMChain(llm=llm,prompt=prompt)
response_text = chain.predict()
translated_text.text(response_text)
| [
"Translate 'PLACEHOLDER' to PLACEHOLDER"
] |
2024-01-10 | yingkitw/watsonx-demo | ask_doc~simple_rag_v2.py | import sys
import logging
import os
import tempfile
import pathlib
import streamlit as st
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from sentence_transformers import SentenceTransformer
from genai.credentials import Credentials
from genai.schemas import GenerateParams
from genai.model import Model
from typing import Literal, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma, FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import numpy as np
# Most GENAI logs are at Debug level.
# logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
st.set_page_config(
page_title="技术支持助理",
page_icon="🧊",
layout="wide",
initial_sidebar_state="expanded"
)
st.markdown("""
<style>
.reportview-container {
margin-top: -2em;
}
#MainMenu {visibility: hidden;}
.stDeployButton {display:none;}
footer {visibility: hidden;}
#stDecoration {display:none;}
</style>
""", unsafe_allow_html=True)
hide_streamlit_style = """
<style>
div[data-testid="stToolbar"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stDecoration"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stStatusWidget"] {
visibility: hidden;
height: 0%;
position: fixed;
}
#MainMenu {
visibility: hidden;
height: 0%;
}
header {
visibility: hidden;
height: 0%;
}
footer {
visibility: hidden;
height: 0%;
}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.header("技术支持助理 watsonx.ai 💬")
# chunk_size=1500
# chunk_overlap = 200
load_dotenv()
api_key = st.secrets["GENAI_KEY"]
api_endpoint = st.secrets["GENAI_API"]
api_key = os.getenv("GENAI_KEY", None)
api_endpoint = os.getenv("GENAI_API", None)
# handler = StdOutCallbackHandler()
creds = Credentials(api_key,api_endpoint)
params = GenerateParams(
decoding_method="greedy",
max_new_tokens=1000,
min_new_tokens=1,
# stream=True,
top_k=50,
top_p=1
)
# Sidebar contents
with st.sidebar:
st.title("技术支持助理")
uploaded_files = st.file_uploader("上传一个PDF文档", accept_multiple_files=True)
@st.cache_data
def read_pdf(uploaded_files,chunk_size =250,chunk_overlap=20):
for uploaded_file in uploaded_files:
bytes_data = uploaded_file.read()
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp_file:
# Write content to the temporary file
temp_file.write(bytes_data)
filepath = temp_file.name
with st.spinner('Waiting for the file to upload'):
loader = PyPDFLoader(filepath)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size= chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(data)
return docs
def read_push_embeddings(docs):
embeddings = HuggingFaceEmbeddings(model_name="paraphrase-multilingual-MiniLM-L12-v2")
# embeddings = HuggingFaceEmbeddings()
temp_dir = tempfile.TemporaryDirectory()
db = Chroma.from_documents(docs, embeddings)
return db
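# Chroma.from_documents builds an in-memory vector index over the PDF chunks using the
# multilingual MiniLM embeddings; db.similarity_search(query) below then returns the
# chunks most relevant to the user's question.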
def querypdf(informations, question):
# prompt = f"""
# answer the question in 5 sentences base on the informations:
# informations:
# {informations}
# question:
# {question}
# answer in point form:"""
prompt = f"""[INST]作为一个工程师,请根据提供的白皮书回答。
<<SYS>>
白皮书:
{informations}
<<SYS>>
问题:
{question}
[/INST]
回答:"""
prompts = [prompt]
answer = ""
for response in model.generate_async(prompts,ordered=True):
answer += response.generated_text
return answer
docs = read_pdf(uploaded_files)
if docs is not None:
db = read_push_embeddings(docs)
model = Model(model="meta-llama/llama-2-70b-chat",credentials=creds, params=params)
with st.chat_message("system"):
st.write("请输入你的查询")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if query := st.chat_input("your query"):
with st.chat_message("user"):
st.markdown(query)
st.session_state.messages.append({"role": "user", "content": query})
with st.spinner(text="In progress...", cache=False):
docs = db.similarity_search(query)
answer = querypdf(docs, query)
st.session_state.messages.append({"role": "agent", "content": answer})
with st.chat_message("agent"):
st.markdown(answer) | [
"[INST]作为一个工程师,请根据提供的白皮书回答。\n <<SYS>>\n 白皮书:\n PLACEHOLDER\n <<SYS>>\n 问题:\n PLACEHOLDER\n [/INST]\n 回答:",
"['[INST]作为一个工程师,请根据提供的白皮书回答。\\n <<SYS>>\\n 白皮书:\\n PLACEHOLDER\\n <<SYS>>\\n 问题:\\n PLACEHOLDER\\n [/INST]\\n 回答:']"
] |
2024-01-10 | yingkitw/watsonx-demo | ask_doc~document_rag.py | import streamlit as st
from code_editor import code_editor
from genai.credentials import Credentials
from genai.schemas import GenerateParams
from genai.model import Model
import tempfile
import pathlib
import re
from unstructured.partition.auto import partition
import nltk
import ssl
import os
from dotenv import load_dotenv
from lxml import html
from pydantic import BaseModel
from typing import Any, Optional
from unstructured.partition.pdf import partition_pdf
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.storage import InMemoryStore
from langchain.vectorstores import Chroma
import uuid
load_dotenv()
api_key = st.secrets["GENAI_KEY"]
api_endpoint = st.secrets["GENAI_API"]
api_key = os.getenv("GENAI_KEY", None)
api_endpoint = os.getenv("GENAI_API", None)
creds = Credentials(api_key,api_endpoint)
params = GenerateParams(
decoding_method="greedy",
max_new_tokens=1000,
min_new_tokens=1,
# stream=True,
top_k=50,
top_p=1
)
model = Model(model="meta-llama/llama-2-70b-chat",credentials=creds, params=params)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(collection_name="summaries", embedding_function=HuggingFaceEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
id_key = "doc_id"
# The retriever (empty to start)
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
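# Summaries get embedded into the vectorstore for similarity search, while the full
# text/table chunks live in the docstore under the same doc_id, so retrieval returns
# the original chunks rather than the summaries.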
def fillvectordb(table_elements,text_elements):
table_summaries, text_summaries = buildsummary(table_elements,text_elements)
# Add texts
texts = [i.text for i in text_elements]
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))
# Add tables
tables = [i.text for i in table_elements]
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
def buildsummary(table_elements,text_elements):
summary_prompt_text= \
"""You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: \
{content} \
summary:"""
#async
tables = [i.text for i in table_elements]
total = len(tables)
table_summaries = []
table_prompts = [summary_prompt_text.format(content=table) for table in tables]
i = 0
for result in model.generate_async(table_prompts):
i += 1
print("[Progress {:.2f}]".format(i/total*100.0))
print("\t {}".format(result.generated_text))
table_summaries += [result.generated_text]
texts = [i.text for i in text_elements]
total = len(texts)
text_summaries = []
text_prompts = [summary_prompt_text.format(content=text) for text in texts]
i = 0
for result in model.generate_async(text_prompts):
i += 1
print("[Progress {:.2f}]".format(i/total*100.0))
print("\t {}".format(result.generated_text))
text_summaries += [result.generated_text]
return table_summaries, text_summaries
def buildquestion(table_elements,text_elements):
question_prompt_text = """Generate a list of 3 hypothetical questions that the below document could be used to answer: \
{content} \
hypothetical questions:"""
#async
tables = [i.text for i in table_elements]
total = len(tables)
table_questions = []
table_prompts = [question_prompt_text.format(content=table) for table in tables]
i = 0
for result in model.generate_async(table_prompts):
i += 1
print("[Progress {:.2f}]".format(i/total*100.0))
print("\t {}".format(result.generated_text))
table_questions += [result.generated_text]
texts = [i.text for i in text_elements]
total = len(texts)
text_questions = []
text_prompts = [question_prompt_text.format(content=text) for text in texts]
i = 0
for result in model.generate_async(text_prompts):
i += 1
print("[Progress {:.2f}]".format(i/total*100.0))
print("\t {}".format(result.generated_text))
text_questions += [result.generated_text]
return table_questions, text_questions
def ingestpdf(pdffile):
st.write('start ingest')
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
# Get elements
st.write('before partition')
raw_pdf_elements = partition_pdf(filename=pdffile,
extract_images_in_pdf=False,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path='.')
st.write('done partition')
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
# Unique_categories will have unique elements
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
# Categorize by type
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
# Tables
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
# Text
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
fillvectordb(table_elements,text_elements)
def queryvectordb(retriever, question):
return retriever.get_relevant_documents(
question
)
def buildpromptforquery(question, informations):
    return f"""
    answer the question in 5 sentences based on the informations:
    informations:
    {informations}
    question:
    {question}
    answer in point form:"""
def querypdf(question):
    informations = queryvectordb(retriever, question)
    prompts = [buildpromptforquery(question, informations)]
    answer = ""
    for response in model.generate_async(prompts, ordered=True):
        answer += response.generated_text
    return answer
def uploadpdf(uploaded_file):
temp_dir = tempfile.TemporaryDirectory()
if uploaded_file is not None:
st.write(f"filename:{uploaded_file.name}")
fullpath = os.path.join(pathlib.Path(temp_dir.name),uploaded_file.name)
st.write(f"fullpath:{fullpath}")
with open(os.path.join(pathlib.Path(temp_dir.name),uploaded_file.name),"wb") as f:
f.write(uploaded_file.getbuffer())
if fullpath.lower().endswith('.pdf'):
ingestpdf(fullpath)
# st.set_page_config(layout="wide")
st.header("Document RAG powered by watsonx")
with st.sidebar:
st.title("Document RAG")
uploadpdf(st.file_uploader(label="upload a document",type=['pdf']))
with st.chat_message("system"):
st.write("please input your query")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if query := st.chat_input("your query"):
with st.chat_message("user"):
st.markdown(query)
st.session_state.messages.append({"role": "user", "content": query})
answer = querypdf(query)
st.session_state.messages.append({"role": "agent", "content": answer})
with st.chat_message("agent"):
st.markdown(answer) | [
"You are an assistant tasked with summarizing tables and text. Give a concise summary of the table or text. Table or text chunk: {content} summary:",
"Generate a list of 3 hypothetical questions that the below document could be used to answer: {content} hypothetical questions:"
] |
2024-01-10 | yingkitw/watsonx-demo | text_summarizer_with_watsonx~text_summarizer_with_watsonx.py | import os
from dotenv import load_dotenv
import streamlit as st
from langchain.llms.openai import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document
from langchain.chains.summarize import load_summarize_chain
st.title('LangChain Text Summariser with Watsonx')
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.extensions.langchain import WatsonxLLM
load_dotenv()
api_key = os.getenv("API_KEY", None)
project_id = os.getenv("PROJECT_ID", None)
creds = {
"url" : "https://us-south.ml.cloud.ibm.com",
"apikey" : api_key
}
params = {
GenParams.DECODING_METHOD:"sample",
GenParams.MAX_NEW_TOKENS:100,
GenParams.MIN_NEW_TOKENS:1,
GenParams.TEMPERATURE:0.5,
GenParams.TOP_K:50,
GenParams.TOP_P:1
}
space_id = None
verify = False
source_text = st.text_area("Source Text",height=200)
if st.button("Summarize"):
if not source_text.strip():
st.write(f"Please complete the missing fields")
else:
text_splitter = CharacterTextSplitter()
texts = text_splitter.split_text(source_text)
docs = [Document(page_content=t) for t in texts[:3]]
model = Model("google/flan-ul2",creds, params, project_id)
llm = WatsonxLLM(model)
chain = load_summarize_chain(llm,chain_type="map_reduce")
summary = chain.run(docs)
st.write(summary) | [] |
2024-01-10 | yingkitw/watsonx-demo | form_assistant~form_assistant.py | import sys
import logging
import os
import tempfile
import pathlib
import streamlit as st
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from sentence_transformers import SentenceTransformer
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.extensions.langchain import WatsonxLLM
from typing import Literal, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma, FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import numpy as np
# Most GENAI logs are at Debug level.
# logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
st.set_page_config(
page_title="form assistant",
page_icon="🧊",
layout="wide",
initial_sidebar_state="expanded"
)
st.markdown(
"""
<style>
section[data-testid="stSidebar"] {
width: 500px !important; # Set the width to your desired value
}
</style>
""",
unsafe_allow_html=True,
)
st.markdown("""
<style>
.reportview-container {
margin-top: -2em;
}
#MainMenu {visibility: hidden;}
.stDeployButton {display:none;}
footer {visibility: hidden;}
#stDecoration {display:none;}
</style>
""", unsafe_allow_html=True)
hide_streamlit_style = """
<style>
div[data-testid="stToolbar"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stDecoration"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stStatusWidget"] {
visibility: hidden;
height: 0%;
position: fixed;
}
#MainMenu {
visibility: hidden;
height: 0%;
}
header {
visibility: hidden;
height: 0%;
}
footer {
visibility: hidden;
height: 0%;
}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.header("form assistant with watsonx.ai 💬")
load_dotenv()
api_key = st.secrets["API_KEY"]
project_id = st.secrets["PROJECT_ID"]
api_key = os.getenv("API_KEY", None)
project_id = os.getenv("PROJECT_ID", None)
creds = {
"url" : "https://us-south.ml.cloud.ibm.com",
"apikey" : api_key
}
params = {
GenParams.DECODING_METHOD:"greedy",
GenParams.MAX_NEW_TOKENS:1000,
GenParams.MIN_NEW_TOKENS:1,
GenParams.REPETITION_PENALTY:1.1,
GenParams.TOP_K:50,
GenParams.TOP_P:1,
GenParams.STOP_SEQUENCES:['end_of_form','<>','<EOS>'],
}
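# Generation halts at any STOP_SEQUENCES marker above; the build*/fill* helpers below
# additionally strip the leftover "<EOS>" / "<end_of_form>" markers from the model output.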
def buildjson(requirement):
prompt = f"""[INST]
<<SYS>>
build a json structure for customer to input data for following requirement.
- flatten the json structure.
- show only name, value pair.
- all field be empty value.
- dont show notes.
<</SYS>>
requirements: {requirement}
[/INST]flatten json:"""
output = ""
for response in model13b.generate_text([prompt]):
output = response
return output
def buildform(requirement, jsonform):
prompt = f"""[INST]
<<SYS>>
build a html form that for customer to input value for following json base on the requirement.
- for option, add a empty item if no value.
- dont show JSON.
- dont show note.
- end with <EOS>
<</SYS>>
requirement: {requirement}
json: `{jsonform}`
[/INST]html form:"""
output = ""
for response in model13b.generate_text([prompt]):
output = response
return output.replace("<end_of_form>","")
def buildquestions(answerjson, requirement, lastmessage):
prompt = f"""[INST]
<<SYS>>you are customer service agent, generate message to guiding the user to fill the form with following steps:
1. understand the answer and requriement provided in backquoted.
2. check the answer json, gather a list the answer fields with empty value.
3. if no field shortlisted in step 2, then just say thank you and skip step 4.
4. for all field shortlisted in step 2, generate a concise question to request the answer.
5. generate message with style similar to the last message provided in backquoted.
note:
- empty value means empty string or zero or false or none.
- dont repeat.
- dont show note.
- dont show (empty)
- dont show json.
<</SYS>>
requirements: `{requirement}`
answer: {answerjson}
last message: `{lastmessage}`
[/INST]response:"""
# print("prompt:"+prompt)
output = ""
for response in model70b.generate_text([prompt]):
output = response
# print(output)
return output.replace("<EOS>","")
def buildanswer(answer, existinganswer, jsonform):
prompt = f"""[INST]
<<SYS>>
extract the answer in json from the answer to response to the json form.
notes:
- merge the answer with existing answer.
- if not sure, dont guess, leave it empty.
- show the merged answer only.
- end with <EOS>
<</SYS>>
answers: {answer}
existing answers: {existinganswer}
json form: {jsonform}
[/INST]merged answer:"""
output = ""
for response in model13b.generate_text([prompt]):
output = response
return output.replace("<EOS>","")
def fillform(answer, form):
prompt = f"""[INST]
<<SYS>>
fill the html form with the answer value in json.
- for option, add a empty item if no value. select empty item if the field has empty value.
- dont show json.
- dont show note.
- end with <EOS>
<</SYS>>
answer: `{answer}`
html form: {form}
[/INST]html form with answer:"""
print(prompt)
output = ""
for response in model70b.generate_text([prompt]):
output = response
return output.replace("<EOS>","")
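# The five helpers above form the assistant's pipeline. A minimal sketch of the intended flow
# (illustrative only; the requirement text below is made up):
#   requirement = "Collect the customer's name and email"
#   jsonform = buildjson(requirement)                            # empty name/value JSON skeleton
#   form = buildform(requirement, jsonform)                      # HTML form matching that JSON
#   questions = buildquestions(jsonform, requirement, "")        # first guidance message for the user
#   answers = buildanswer("My name is Ana", jsonform, jsonform)  # merge a user reply into the JSON answers
#   filled = fillform(answers, form)                             # HTML form pre-filled with the answers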
# model = Model("mistralai/mistral-7b-instruct-v0-2",creds, params, project_id)
model70b = Model("meta-llama/llama-2-70b-chat",creds, params, project_id)
model13b = Model("meta-llama/llama-2-13b-chat",creds, params, project_id)
if "requirement" not in st.session_state:
st.session_state.requirement = ""
if "jsonform" not in st.session_state:
st.session_state.jsonform = ""
if "form" not in st.session_state:
st.session_state.form = ""
if "filledform" not in st.session_state:
st.session_state.filledform = ""
if "answer" not in st.session_state:
st.session_state.answer = ""
if "lastmessage" not in st.session_state:
st.session_state.lastmessage = ""
if "messages" not in st.session_state:
st.session_state.messages = []
# Sidebar contents
with st.sidebar:
st.title("form assistant")
btBuildForm = st.button("build form and guide me to fill")
# btBuildQuestions = st.button("guide form filling with questions")
# btFillForm = st.button("fill form")
st.session_state.requirement = st.text_area("requirement",height=10)
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if btBuildForm:
with st.spinner(text="building the form...", cache=False):
jsonform = buildjson(st.session_state.requirement)
form = buildform(st.session_state.requirement, jsonform)  # use the jsonform generated just above, not the stale session value
st.session_state.jsonform = jsonform
st.session_state.form = form
st.session_state.filledform = form
st.session_state.answer = jsonform
with st.chat_message("system"):
with st.spinner(text="building the questions...", cache=False):
questions = buildquestions("{}",st.session_state.requirement,"").replace('\\n', '\n').replace('\\t', '\t')
st.session_state.lastmessage = questions
st.markdown(questions)
st.session_state.messages.append({"role": "agent", "content": questions})
if answer := st.chat_input("your answer"):
with st.chat_message("user"):
st.markdown(answer)
st.session_state.messages.append({"role": "user", "content": answer})
with st.spinner(text="In progress...", cache=False):
answerjson = buildanswer(answer, st.session_state.answer, st.session_state.jsonform)
st.session_state.answer = answerjson
with st.chat_message("system"):
with st.spinner(text="building the questions...", cache=False):
questions = buildquestions(st.session_state.answer,st.session_state.requirement,st.session_state.lastmessage).replace('\\n', '\n').replace('\\t', '\t')
st.markdown(questions)
st.session_state.messages.append({"role": "agent", "content": questions})
filledform = fillform(st.session_state.answer, st.session_state.form)
st.session_state.filledform = filledform
with st.sidebar:
with st.container(border=True):
st.components.v1.html(st.session_state.filledform,height=300)
st.code(st.session_state.answer,language="json") | [
"[INST]\n <<SYS>>\n extract the answer in json from the answer to response to the json form.\n notes:\n - merge the answer with existing answer.\n - if not sure, dont guess, leave it empty.\n - show the merged answer only.\n - end with <EOS>\n <</SYS>>\n answers: PLACEHOLDER\n existing answers: PLACEHOLDER\n json form: PLACEHOLDER\n [/INST]merged answer:",
"[INST]\n <<SYS>>\n fill the html form with the answer value in json.\n - for option, add a empty item if no value. select empty item if the field has empty value.\n - dont show json.\n - dont show note.\n - end with <EOS>\n <</SYS>>\n answer: `PLACEHOLDER`\n html form: PLACEHOLDER\n [/INST]html form with answer:",
"[INST]\n <<SYS>>\n build a json structure for customer to input data for following requirement.\n - flatten the json structure.\n - show only name, value pair.\n - all field be empty value.\n - dont show notes.\n <</SYS>>\n requirements: PLACEHOLDER\n [/INST]flatten json:",
"[INST]\n <<SYS>>\n build a html form that for customer to input value for following json base on the requirement.\n - for option, add a empty item if no value.\n - dont show JSON.\n - dont show note.\n - end with <EOS>\n <</SYS>>\n requirement: PLACEHOLDER\n json: `PLACEHOLDER`\n [/INST]html form:",
"[INST]\n <<SYS>>you are customer service agent, generate message to guiding the user to fill the form with following steps:\n 1. understand the answer and requriement provided in backquoted.\n 2. check the answer json, gather a list the answer fields with empty value.\n 3. if no field shortlisted in step 2, then just say thank you and skip step 4.\n 4. for all field shortlisted in step 2, generate a concise question to request the answer.\n 5. generate message with style similar to the last message provided in backquoted.\n note: \n - empty value means empty string or zero or false or none.\n - dont repeat.\n - dont show note.\n - dont show (empty)\n - dont show json.\n <</SYS>>\n requirements: `PLACEHOLDER`\n answer: PLACEHOLDER\n last message: `PLACEHOLDER`\n [/INST]response:"
] |
2024-01-10 | yingkitw/watsonx-demo | form_assistant~form_assistant_tw.py | import sys
import logging
import os
import tempfile
import pathlib
import streamlit as st
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from sentence_transformers import SentenceTransformer
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.extensions.langchain import WatsonxLLM
from typing import Literal, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma, FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import numpy as np
# Most GENAI logs are at Debug level.
# logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
st.set_page_config(
page_title="form assistant",
page_icon="🧊",
layout="wide",
initial_sidebar_state="expanded"
)
st.markdown(
"""
<style>
section[data-testid="stSidebar"] {
width: 500px !important; # Set the width to your desired value
}
</style>
""",
unsafe_allow_html=True,
)
st.markdown("""
<style>
.reportview-container {
margin-top: -2em;
}
#MainMenu {visibility: hidden;}
.stDeployButton {display:none;}
footer {visibility: hidden;}
#stDecoration {display:none;}
</style>
""", unsafe_allow_html=True)
hide_streamlit_style = """
<style>
div[data-testid="stToolbar"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stDecoration"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stStatusWidget"] {
visibility: hidden;
height: 0%;
position: fixed;
}
#MainMenu {
visibility: hidden;
height: 0%;
}
header {
visibility: hidden;
height: 0%;
}
footer {
visibility: hidden;
height: 0%;
}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.header("報稅助手 with watsonx.ai 💬")
load_dotenv()
api_key = st.secrets["API_KEY"]
project_id = st.secrets["PROJECT_ID"]
# Environment variables, when set, take precedence over the Streamlit secrets above
api_key = os.getenv("API_KEY", api_key)
project_id = os.getenv("PROJECT_ID", project_id)
creds = {
"url" : "https://us-south.ml.cloud.ibm.com",
"apikey" : api_key
}
params = {
GenParams.DECODING_METHOD:"greedy",
GenParams.MAX_NEW_TOKENS:3000,
GenParams.MIN_NEW_TOKENS:1,
GenParams.TOP_K:50,
GenParams.TOP_P:1,
GenParams.STOP_SEQUENCES:['<EOS>'],
}
def buildjson(requirement):
prompt = f"""[INST]
建立一個json結構,用來存放需求提到所需要的訊息。
最後加上 <EOS>
<<SYS>>需求: {requirement}
<<SYS>>
[/INST]json格式:"""
output = ""
for response in model.generate_text([prompt]):
output = response
return output.replace("<EOS>","")
def buildform(requirement, jsonform):
prompt = f"""[INST]
建立一個html表格,給客戶輸入,要採集json格式裡的內容,要考慮需求。
不要顯示JSON.
最後加上<EOS>
<<SYS>>
需求: {requirement}
json格式: `{jsonform}`
<<SYS>>
[/INST]html表格:"""
output = ""
for response in model.generate_text([prompt]):
output = response
return output.replace("<EOS>","")
def buildquestions(requirement,answerjson):
prompt = f"""[INST]你是一個報稅專員,請基於需求來引導客戶填寫報稅表格。請跟隨以下步驟:
1. 列出還沒有答案的欄位
2. 對每個沒有答案的欄位,提供一個問題給客戶,引導他填寫,記得考慮提供的需求背景。
3. 最後記得說謝謝。
note:
- 問題要有禮貌,精簡,你可以舉一些小例子說明。
- 不要顯示解釋。
- 如果已經有答案,就不要提問了。
- 最後加上 <EOS>
<<SYS>>需求: {requirement}
json答案: `{answerjson}`
<<SYS>>
[/INST]引導問題列表:"""
output = ""
for response in model.generate_text([prompt]):
output = response
return output.replace("<EOS>","")
def buildanswer(answer, existinganswer, jsonform):
prompt = f"""[INST]
從回覆中提取答案並保存為json。
將新的答案合併到現有的答案.
只展示合併後的答案.
最後加上 <EOS>
<<SYS>>
回覆: {answer}
已有答案: `{existinganswer}`
json格式: {jsonform}
<<SYS>>
[/INST]合併的答案:"""
output = ""
for response in model.generate_text([prompt]):
output = response
return output.replace("<EOS>","")
def fillform(answer, form):
prompt = f"""[INST]
基於提供的答案json填寫html表格.
不要顯示json
最後加上 <EOS>
<<SYS>>
答案: `{answer}`
html表格: {form}
<<SYS>>
[/INST]含答案的html表格:"""
output = ""
for response in model.generate_text([prompt]):
output = response
return output.replace("<EOS>","")
model = Model("meta-llama/llama-2-70b-chat",creds, params, project_id)
if "requirement" not in st.session_state:
st.session_state.requirement = ""
if "jsonform" not in st.session_state:
st.session_state.jsonform = ""
if "form" not in st.session_state:
st.session_state.form = ""
if "filledform" not in st.session_state:
st.session_state.filledform = ""
if "answer" not in st.session_state:
st.session_state.answer = ""
if "messages" not in st.session_state:
st.session_state.messages = []
# Sidebar contents
with st.sidebar:
st.title("報稅助手")
btBuildForm = st.button("建立表格")
btBuildQuestions = st.button("對話引導報稅")
# btFillForm = st.button("fill form")
st.session_state.requirement = st.text_area("需求",height=10)
if btBuildForm:
with st.spinner(text="正在建立表格...", cache=False):
jsonform = buildjson(st.session_state.requirement)
form = buildform(st.session_state.requirement, jsonform)  # use the jsonform generated just above, not the stale session value
st.session_state.jsonform = jsonform
st.session_state.form = form
st.session_state.filledform = form
# if btFillForm:
# with st.spinner(text="building the form...", cache=False):
# st.session_state.filledform = fillform(st.session_state.answer, st.session_state.form)
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if btBuildQuestions:
with st.chat_message("system"):
with st.spinner(text="正在生成引導問題...", cache=False):
questions = buildquestions(st.session_state.requirement, st.session_state.answer)  # match the (requirement, answerjson) parameter order
st.markdown(questions)
st.session_state.messages.append({"role": "agent", "content": questions})
if answer := st.chat_input("你的回答"):
with st.chat_message("user"):
st.markdown(answer)
st.session_state.messages.append({"role": "user", "content": answer})
with st.spinner(text="正在提取答案...", cache=False):
answerjson = buildanswer(answer, st.session_state.answer, st.session_state.jsonform)
st.session_state.answer = answerjson
filledform = fillform(st.session_state.answer, st.session_state.form)
st.session_state.filledform = filledform
with st.chat_message("system"):
with st.spinner(text="正在生成問題...", cache=False):
questions = buildquestions(st.session_state.requirement, st.session_state.answer)  # match the (requirement, answerjson) parameter order
st.markdown(questions)
st.session_state.messages.append({"role": "agent", "content": questions})
with st.sidebar:
with st.container(border=True):
st.components.v1.html(st.session_state.filledform,height=300)
st.code(st.session_state.answer,language="json") | [
"[INST]你是一個報稅專員,請基於需求來引導客戶填寫報稅表格。請跟隨以下步驟:\n 1. 列出還沒有答案的欄位\n 2. 對每個沒有答案的欄位,提供一個問題給客戶,引導他填寫,記得考慮提供的需求背景。\n 3. 最後記得說謝謝。\n note: \n - 問題要有禮貌,精簡,你可以舉一些小例子說明。\n - 不要顯示解釋。\n - 如果已經有答案,就不要提問了。\n - 最後加上 <EOS>\n <<SYS>>需求: PLACEHOLDER\n json答案: `PLACEHOLDER`\n <<SYS>>\n [/INST]引導問題列表:",
"[INST]\n 建立一個html表格,給客戶輸入,要採集json格式裡的內容,要考慮需求。\n 不要顯示JSON.\n 最後加上<EOS>\n <<SYS>>\n 需求: PLACEHOLDER\n json格式: `PLACEHOLDER`\n <<SYS>>\n [/INST]html表格:",
"[INST]\n 建立一個json結構,用來存放需求提到所需要的訊息。\n 最後加上 <EOS>\n <<SYS>>需求: PLACEHOLDER\n <<SYS>>\n [/INST]json格式:",
"[INST]\n 從回覆中提取答案並保存為json。\n 將新的答案合併到現有的答案.\n 只展示合併後的答案.\n 最後加上 <EOS>\n <<SYS>>\n 回覆: PLACEHOLDER\n 已有答案: `PLACEHOLDER`\n json格式: PLACEHOLDER\n <<SYS>>\n [/INST]合併的答案:",
"[INST]\n 基於提供的答案json填寫html表格.\n 不要顯示json\n 最後加上 <EOS>\n <<SYS>>\n 答案: `PLACEHOLDER`\n html表格: PLACEHOLDER\n <<SYS>>\n [/INST]含答案的html表格:"
] |
2024-01-10 | yingkitw/watsonx-demo | ask_doc~ask_doc.py | import sys
import logging
import os
import tempfile
import pathlib
import streamlit as st
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from sentence_transformers import SentenceTransformer
# from genai.credentials import Credentials
# from genai.schemas import GenerateParams
# from genai.model import Model
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.extensions.langchain import WatsonxLLM
from typing import Literal, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma, FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import numpy as np
# Most GENAI logs are at Debug level.
# logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
st.set_page_config(
page_title="技术支持",
page_icon="🧊",
layout="wide",
initial_sidebar_state="expanded"
)
st.markdown("""
<style>
.reportview-container {
margin-top: -2em;
}
#MainMenu {visibility: hidden;}
.stDeployButton {display:none;}
footer {visibility: hidden;}
#stDecoration {display:none;}
</style>
""", unsafe_allow_html=True)
hide_streamlit_style = """
<style>
div[data-testid="stToolbar"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stDecoration"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stStatusWidget"] {
visibility: hidden;
height: 0%;
position: fixed;
}
#MainMenu {
visibility: hidden;
height: 0%;
}
header {
visibility: hidden;
height: 0%;
}
footer {
visibility: hidden;
height: 0%;
}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.header("技术支持")
# chunk_size=1500
# chunk_overlap = 200
load_dotenv()
api_key = os.getenv("API_KEY", None)
project_id = os.getenv("PROJECT_ID", None)
# handler = StdOutCallbackHandler()
creds = {
"url" : "https://us-south.ml.cloud.ibm.com",
"apikey" : api_key
}
params = {
GenParams.DECODING_METHOD:"greedy",
GenParams.MAX_NEW_TOKENS:1000,
GenParams.MIN_NEW_TOKENS:1,
GenParams.TEMPERATURE:0.5,
GenParams.TOP_K:50,
GenParams.TOP_P:1
}
# Sidebar contents
with st.sidebar:
st.title("技术支持")
uploaded_files = st.file_uploader("上传一个PDF文档", accept_multiple_files=True)
@st.cache_data
def read_pdf(uploaded_files,chunk_size =250,chunk_overlap=20):
for uploaded_file in uploaded_files:
bytes_data = uploaded_file.read()
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp_file:
# Write content to the temporary file
temp_file.write(bytes_data)
filepath = temp_file.name
with st.spinner('请上传PDF文档'):
loader = PyPDFLoader(filepath)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size= chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(data)
return docs
def read_push_embeddings(docs):
embeddings = HuggingFaceEmbeddings(model_name="paraphrase-multilingual-MiniLM-L12-v2")
# embeddings = HuggingFaceEmbeddings()
temp_dir = tempfile.TemporaryDirectory()
db = Chroma.from_documents(docs, embeddings)
return db
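# Retrieval-augmented flow used below (sketch, not exact code):
#   docs = read_pdf(uploaded_files)          # split the uploaded PDF into ~250-character chunks
#   db = read_push_embeddings(docs)          # embed the chunks with multilingual MiniLM and index them in Chroma
#   hits = db.similarity_search(query)       # fetch the chunks most similar to the user's question
#   answer = querypdf(hits, history, query)  # ground the LLM answer in those chunks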
def querypdf(informations, history, question):
# prompt = f"""
# answer the question in 5 sentences base on the informations:
# informations:
# {informations}
# question:
# {question}
# answer in point form:"""
prompt = f"""[INST]作为一个技术支持工程师,请根据提供的白皮书用中文回答。
-只根据白皮书内容回答,说明信息来源。
-如果不知道,不要猜,就说不知道,并请客户查看官网信息。
<<SYS>>
白皮书:
{informations}
前面提问:
{history}
<<SYS>>
问题:
{question}
[/INST]
回答:"""
prompts = [prompt]
answer = ""
for response in model.generate_text(prompts):
answer += response.replace("\\n\\n","\n")
return answer
docs = read_pdf(uploaded_files)
if docs is not None:
db = read_push_embeddings(docs)
model = Model("meta-llama/llama-2-70b-chat",creds, params, project_id)
# model = Model(model="meta-llama/llama-2-70b-chat",credentials=creds, params=params)
history = []
with st.chat_message("system"):
st.write("请输入你的查询")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if query := st.chat_input("your query"):
with st.chat_message("user"):
st.markdown(query)
history += [query]
st.session_state.messages.append({"role": "user", "content": query})
with st.spinner(text="正在查询...", cache=False):
docs = db.similarity_search(query)
answer = querypdf(docs, history, query)
st.session_state.messages.append({"role": "agent", "content": answer})
with st.chat_message("agent"):
st.markdown(answer) | [
"[INST]作为一个技术支持工程师,请根据提供的白皮书用中文回答。\n -只根据白皮书内容回答,说明信息来源。\n -如果不知道,不要猜,就说不知道,并请客户查看官网信息。\n <<SYS>>\n 白皮书:\n PLACEHOLDER\n 前面提问:\n PLACEHOLDER\n <<SYS>>\n 问题:\n PLACEHOLDER\n [/INST]\n 回答:",
"['[INST]作为一个技术支持工程师,请根据提供的白皮书用中文回答。\\n -只根据白皮书内容回答,说明信息来源。\\n -如果不知道,不要猜,就说不知道,并请客户查看官网信息。\\n <<SYS>>\\n 白皮书:\\n PLACEHOLDER\\n 前面提问:\\n PLACEHOLDER\\n <<SYS>>\\n 问题:\\n PLACEHOLDER\\n [/INST]\\n 回答:']"
] |
2024-01-10 | yingkitw/watsonx-demo | ask_doc~simple_rag.py | import sys
import logging
import os
import tempfile
import pathlib
import streamlit as st
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from sentence_transformers import SentenceTransformer
from genai.extensions.langchain import LangChainInterface
from genai.credentials import Credentials
from genai.schemas import GenerateParams
from genai.model import Model
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains.question_answering import load_qa_chain
from typing import Literal, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma, FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import numpy as np
# Most GENAI logs are at Debug level.
# logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
st.set_page_config(
page_title="Retrieval Augmented Generation",
page_icon="🧊",
layout="wide",
initial_sidebar_state="expanded"
)
st.header("Retrieval Augmented Generation with watsonx.ai 💬")
# chunk_size=1500
# chunk_overlap = 200
load_dotenv()
api_key = st.secrets["GENAI_KEY"]
api_endpoint = st.secrets["GENAI_API"]
# Environment variables, when set, take precedence over the Streamlit secrets above
api_key = os.getenv("GENAI_KEY", api_key)
api_endpoint = os.getenv("GENAI_API", api_endpoint)
handler = StdOutCallbackHandler()
creds = Credentials(api_key,api_endpoint)
params = GenerateParams(
decoding_method="greedy",
max_new_tokens=1000,
min_new_tokens=1,
# stream=True,
top_k=50,
top_p=1
)
# Sidebar contents
with st.sidebar:
st.title("RAG App")
uploaded_files = st.file_uploader("Choose a PDF file", accept_multiple_files=True)
@st.cache_data
def read_pdf(uploaded_files,chunk_size =250,chunk_overlap=20):
for uploaded_file in uploaded_files:
bytes_data = uploaded_file.read()
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp_file:
# Write content to the temporary file
temp_file.write(bytes_data)
filepath = temp_file.name
with st.spinner('Waiting for the file to upload'):
loader = PyPDFLoader(filepath)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size= chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(data)
return docs
def read_push_embeddings(docs):
embeddings = HuggingFaceEmbeddings(model_name="paraphrase-multilingual-MiniLM-L12-v2")
# embeddings = HuggingFaceEmbeddings()
temp_dir = tempfile.TemporaryDirectory()
db = Chroma.from_documents(docs, embeddings)
return db
docs = read_pdf(uploaded_files)
if docs is not None:
db = read_push_embeddings(docs)
model = LangChainInterface(model="meta-llama/llama-2-70b-chat",credentials=creds, params=params)
chain = load_qa_chain(model, chain_type="stuff")
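# chain_type="stuff" simply concatenates all retrieved chunks into one prompt for the model,
# which works here because similarity_search below returns only a few short chunks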
with st.chat_message("system"):
st.write("please ask the document")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if query := st.chat_input("your query"):
with st.chat_message("user"):
st.markdown(query)
st.session_state.messages.append({"role": "user", "content": query})
with st.spinner(text="In progress...", cache=False):
docs = db.similarity_search(query)
answer = chain.run(input_documents=docs, question=query)
st.session_state.messages.append({"role": "agent", "content": answer})
with st.chat_message("agent"):
st.markdown(answer) | [] |
2024-01-10 | yingkitw/watsonx-demo | quiz_builder~quiz_builder.py | import sys
import logging
import os
import tempfile
import pathlib
import json
import streamlit as st
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from sentence_transformers import SentenceTransformer
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.extensions.langchain import WatsonxLLM
from typing import Literal, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma, FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import numpy as np
# Most GENAI logs are at Debug level.
# logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
st.set_page_config(
page_title="quiz builder",
page_icon="🧊",
layout="wide",
initial_sidebar_state="expanded"
)
st.markdown("""
<style>
.reportview-container {
margin-top: -2em;
}
#MainMenu {visibility: hidden;}
.stDeployButton {display:none;}
footer {visibility: hidden;}
#stDecoration {display:none;}
</style>
""", unsafe_allow_html=True)
hide_streamlit_style = """
<style>
div[data-testid="stToolbar"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stDecoration"] {
visibility: hidden;
height: 0%;
position: fixed;
}
div[data-testid="stStatusWidget"] {
visibility: hidden;
height: 0%;
position: fixed;
}
#MainMenu {
visibility: hidden;
height: 0%;
}
header {
visibility: hidden;
height: 0%;
}
footer {
visibility: hidden;
height: 0%;
}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.header("quiz builder")
# chunk_size=1500
# chunk_overlap = 200
load_dotenv()
api_key = st.secrets["API_KEY"]
project_id = st.secrets["PROJECT_ID"]
# Environment variables, when set, take precedence over the Streamlit secrets above
api_key = os.getenv("API_KEY", api_key)
project_id = os.getenv("PROJECT_ID", project_id)
creds = {
"url" : "https://us-south.ml.cloud.ibm.com",
"apikey" : api_key
}
params = {
GenParams.DECODING_METHOD:"sample",
GenParams.MAX_NEW_TOKENS:1000,
GenParams.MIN_NEW_TOKENS:1,
GenParams.TEMPERATURE:0.7,
GenParams.TOP_K:50,
GenParams.TOP_P:1,
}
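# Unlike the other demos, this one samples with temperature 0.7, so repeated runs over the same material can produce different quiz questions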
# Sidebar contents
with st.sidebar:
st.title("quiz builder")
uploaded_files = st.file_uploader("upload PDF documents", accept_multiple_files=True)
@st.cache_data
def read_pdf(uploaded_files,chunk_size =250,chunk_overlap=20):
for uploaded_file in uploaded_files:
bytes_data = uploaded_file.read()
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp_file:
# Write content to the temporary file
temp_file.write(bytes_data)
filepath = temp_file.name
with st.spinner('uploading PDF documents'):
loader = PyPDFLoader(filepath)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size= chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(data)
return docs
def read_push_embeddings(docs):
embeddings = HuggingFaceEmbeddings(model_name="paraphrase-multilingual-MiniLM-L12-v2")
# embeddings = HuggingFaceEmbeddings()
temp_dir = tempfile.TemporaryDirectory()
db = Chroma.from_documents(docs, embeddings)
return db
def buildquiz(informations, topic):
prompt = f"""[INST]base on the topic and informations provided,
generate a scenario base question along with multiple choice quiz
assume a role in the question, and assign a specific task to the role, and ask which answer best address the task.
the answer should describe a operation very detail.
the answer options should be specific, descriptive, and with more detail about the technique.
mark the answer and provide explaination.
<<SYS>>
notes:
- one paragraph per one answer option.
- please follow the layout provided in backquoted.
- ensure the answer options be different, but similar enough that the user hard to determine.
- ensure only one answer option be correct.
- explain the correct answer as well as the incorrect answer options.
- output in markdown.
topic:{topic}
informations:
{informations}
layout: `question?
a) answer option.\n
b) answer option.\n
c) answer option.\n
d) answer option.\n
correct answer (option), explaination.`
<</SYS>>
[/INST]
markdown quiz:"""
prompts = [prompt]
answer = ""
for response in model.generate_text(prompts):
answer += response
return answer
docs = read_pdf(uploaded_files)
if docs is not None:
db = read_push_embeddings(docs)
model = Model("meta-llama/llama-2-70b-chat",creds, params, project_id)
history = []
with st.chat_message("system"):
st.write("input your question")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if topic := st.chat_input("your topic"):
with st.chat_message("user"):
st.markdown(topic)
history += [topic]
st.session_state.messages.append({"role": "user", "content": topic})
with st.spinner(text="building...", cache=False):
docs = db.similarity_search(topic)
answer = buildquiz(docs,topic)
# print(answer)
st.session_state.messages.append({"role": "agent", "content": answer})
with st.chat_message("agent"):
st.markdown(answer) | [
"[INST]base on the topic and informations provided, \n generate a scenario base question along with multiple choice quiz\n assume a role in the question, and assign a specific task to the role, and ask which answer best address the task.\n the answer should describe a operation very detail.\n the answer options should be specific, descriptive, and with more detail about the technique.\n mark the answer and provide explaination.\n <<SYS>>\n notes:\n - one paragraph per one answer option.\n - please follow the layout provided in backquoted.\n - ensure the answer options be different, but similar enough that the user hard to determine.\n - ensure only one answer option be correct.\n - explain the correct answer as well as the incorrect answer options.\n - output in markdown.\n topic:PLACEHOLDER\n informations:\n PLACEHOLDER\n layout: `question?\n\n a) answer option.\n\n b) answer option.\n\n c) answer option.\n\n d) answer option.\n\n\n correct answer (option), explaination.`\n <</SYS>>\n [/INST]\n markdown quiz:",
"['[INST]base on the topic and informations provided, \\n generate a scenario base question along with multiple choice quiz\\n assume a role in the question, and assign a specific task to the role, and ask which answer best address the task.\\n the answer should describe a operation very detail.\\n the answer options should be specific, descriptive, and with more detail about the technique.\\n mark the answer and provide explaination.\\n <<SYS>>\\n notes:\\n - one paragraph per one answer option.\\n - please follow the layout provided in backquoted.\\n - ensure the answer options be different, but similar enough that the user hard to determine.\\n - ensure only one answer option be correct.\\n - explain the correct answer as well as the incorrect answer options.\\n - output in markdown.\\n topic:PLACEHOLDER\\n informations:\\n PLACEHOLDER\\n layout: `question?\\n\\n a) answer option.\\n\\n b) answer option.\\n\\n c) answer option.\\n\\n d) answer option.\\n\\n\\n correct answer (option), explaination.`\\n <</SYS>>\\n [/INST]\\n markdown quiz:']"
] |
2024-01-10 | abiel-lozano/Study-Bot | src~studyBot.py | # Study-Bot: Question answering using audio interaction and object detection,
# CLI version for testing, and functions for GUI version
import openai
import whisper
from pydub import AudioSegment
from pydub.playback import play as pydubPlay
import io
from typing import Iterator
import pyaudio
import wave
from pathlib import Path
# History, is_installed, and subprocess are used only by main script, do not remove
from elevenlabs import set_api_key, generate, History, is_installed, subprocess
import cv2
import numpy as np
import time
import threading
import keyboard
import credentials # Ignored by git, contains API keys
import sourceMaterial
global objects
global question
global answer
global topic
global stop
objects = ''
question = ''
answer = ''
stop = False
GPT_MODEL = 'gpt-3.5-turbo-16k'
# Credentials
openai.api_key = credentials.openAIKey
set_api_key(credentials.elevenLabsKey)
# Behavioral guidelines for conversation
instructions = """
Try to use the information below to help the user study by answering
the user's question. The user may or may not be holding a physical representation
of what their question is about. Consider the object list, which includes all
the objects that the user is holding, so that the answer can be refined to be
more specific to the user's question. Do not mention the user or the information
in your answer to make it sound more natural.
If the question is unrelated to the information, ignore all previous instructions
and try to answer the question without mentioning the information or the objects
to make it sound more natural.
Always try to give brief answers to the user's questions.
"""
# Recorder configuration
CHUNK = 1024 # Chunk size
FORMAT = pyaudio.paInt16 # Audio codec format
CHANNELS = 2
RATE = 44100 # Sample rate
OUTPUT_FILE = 'question.wav'
def recordQuestion():
global question
global stop
stop = False
audio = pyaudio.PyAudio() # Initialize PyAudio
# Open audio stream for recording
stream = audio.open(format = FORMAT, channels = CHANNELS, rate = RATE, input = True, frames_per_buffer = CHUNK)
frames = []
# Record audio stream in chunks
while not stop:
data = stream.read(CHUNK)
frames.append(data)
# Stop and close audio stream
stream.stop_stream()
stream.close()
audio.terminate()
# Save recording as WAV
wf = wave.open(OUTPUT_FILE, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(audio.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
# STT Conversion
model = whisper.load_model('base')
result = model.transcribe(OUTPUT_FILE, fp16 = False)
question = result['text']
# Delete audio file
Path(OUTPUT_FILE).unlink()
def stopRecording():
global stop
stop = True
def colorID():
obj = 'User is not holding any objects'
# Capture video
cam = cv2.VideoCapture(0, cv2.CAP_DSHOW) # Use 0 for default camera
# Start timer
startTime = time.time()
elapsedTime = 0
# Color ranges
stomachLower = np.array([90, 80, 100 ], np.uint8)
stomachUpper = np.array([120, 255, 255 ], np.uint8)
colonLower = np.array( [10, 255 * 0.55, 255 * 0.35 ], np.uint8)
colonUpper = np.array( [19.5, 255, 255 ], np.uint8)
liverLower = np.array( [38, 255 * 0.22, 255 * 0.38 ], np.uint8)
liverUpper = np.array( [41, 255, 255 ], np.uint8)
brainLower = np.array( [161, 255 * 0.50, 255 * 0.40 ], np.uint8)
brainUpper = np.array( [161, 255, 255 ], np.uint8)
kidneyLower = np.array( [26, 255 * 0.60, 255 * 0.69 ], np.uint8)
kidneyUpper = np.array( [26, 255, 255 ], np.uint8)
heartLower = np.array( [179, 255 * 0.50, 255 * 0.35 ], np.uint8)
heartUpper = np.array( [179, 255 * 0.97, 255 * 0.69 ], np.uint8)
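# Each Lower/Upper pair above is an HSV range (OpenCV hue runs 0-179, saturation and value 0-255)
# used below to segment one organ model by colour in the camera frames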
while elapsedTime < 1:
_, imageFrame = cam.read()
# Convert frame from BGR color space to HSV
hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV)
# Create masks for each organ
colonMask = cv2.inRange(hsvFrame, colonLower, colonUpper)
liverMask = cv2.inRange(hsvFrame, liverLower, liverUpper)
stomachMask = cv2.inRange(hsvFrame, stomachLower, stomachUpper)
brainMask = cv2.inRange(hsvFrame, brainLower, brainUpper)
kidneyMask = cv2.inRange(hsvFrame, kidneyLower, kidneyUpper)
heartMask = cv2.inRange(hsvFrame, heartLower, heartUpper)
# Create a 5x5 square-shaped filter called kernel
# Filter is filled with ones and will be used for morphological transformations such as dilation for better detection
kernel = np.ones((5, 5), 'uint8')
# For colon
# Dilate mask: remove holes in the mask by adding pixels to the boundaries of the objects in the mask
colonMask = cv2.dilate(colonMask, kernel)
# Apply mask to frame by using bitwise AND operation
resColon = cv2.bitwise_and(imageFrame, imageFrame, mask = colonMask)
# For liver
liverMask = cv2.dilate(liverMask, kernel)
resliver = cv2.bitwise_and(imageFrame, imageFrame, mask=liverMask)
# For stomach
stomachMask = cv2.dilate(stomachMask, kernel)
resStomach = cv2.bitwise_and(imageFrame, imageFrame, mask=stomachMask)
# For brain
brainMask = cv2.dilate(brainMask, kernel)
resBrain = cv2.bitwise_and(imageFrame, imageFrame, mask=brainMask)
# For heart
heartMask = cv2.dilate(heartMask, kernel)
resHeart = cv2.bitwise_and(imageFrame, imageFrame, mask=heartMask)
# For kidney use a more aggressive kernel for dilation
kidneyMask = cv2.dilate(kidneyMask, np.ones((12, 12), 'uint8'))
resKidney = cv2.bitwise_and(imageFrame, imageFrame, mask=kidneyMask)
# Create a contour around the zone that matches the color range
contours, hierarchy = cv2.findContours(colonMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# For each countour, check if the area is greater than the threshold
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > 700:
# Append the name of the model to the list of objects
if 'colon' not in obj:
if obj == 'User is not holding any objects':
obj = 'colon'
else:
obj = obj + ', colon'
contours, hierarchy = cv2.findContours(liverMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > 500:
if 'liver' not in obj:
if obj == 'User is not holding any objects':
obj = 'liver'
else:
obj = obj + ', liver'
contours, hierarchy = cv2.findContours(stomachMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > 1400:
if 'stomach' not in obj:
if obj == 'User is not holding any objects':
obj = 'stomach'
else:
obj = obj + ', stomach'
contours, hierarchy = cv2.findContours(brainMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > 2500:
if 'brain' not in obj:
if obj == 'User is not holding any objects':
obj = 'brain'
else:
obj = obj + ', brain'
contours, hierarchy = cv2.findContours(heartMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > 650:
if 'heart' not in obj:
if obj == 'User is not holding any objects':
obj = 'heart'
else:
obj = obj + ', heart'
contours, hierarchy = cv2.findContours(kidneyMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > 50:
if 'kidney' not in obj:
if obj == 'User is not holding any objects':
obj = 'kidney'
else:
obj = obj + ', kidney'
elapsedTime = time.time() - startTime
# This does not break the loop, but removing it breaks the camera feed and causes the program to crash
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release webcam and close all windows
cam.release()
cv2.destroyAllWindows()
return obj
def markerID():
obj = 'User is not holding any objects'
# Choose the predefined dictionary to use
arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
# Define the names of the objects
compoundDict = { 0: 'Citrate', 1: 'Isocitrate', 2: 'Alpha-Ketoglutarate', 3: 'Succinyl CoA', 4: 'Succinate', 5: 'Fumarate', 6: 'Malate', 7: 'Oxaloacetate' }
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) # Use 0 for default camera
# Start timer
startTime = time.time()
elapsedTime = 0
while elapsedTime < 5:
ret, frame = cap.read()
if not ret:
print('Failed to capture frame.')
break
# Convert the frame to grayscale for marker detection
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect markers
corners, ids, _ = cv2.aruco.detectMarkers(gray, arucoDict)
if ids is not None:
# For each marker found in current frame
for i in range(len(ids)):
try:
# Try to get the name of the compound from the dictionary
compoundName = compoundDict[ids[i][0]]
# Append compound to list while avoiding repeats
if obj == 'User is not holding any objects':
obj = compoundName
elif compoundName not in obj:
obj += ', ' + compoundName
except KeyError:
continue
# Display the frame
cv2.imshow('Study-Bot View', frame)
elapsedTime = time.time() - startTime
# Check for 'Esc' key press
key = cv2.waitKey(10) & 0xFF
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
return obj
def lookForObjects(topic: int):
global objects
objects = ''
if topic == 1:
# Call the function for color identification
objects = colorID()
elif topic == 2:
# Call the function for marker identification
objects = markerID()
def sendMessage(messageList: any):
# Send prompt to GPT
response = openai.ChatCompletion.create(
messages = messageList,
model = GPT_MODEL,
temperature = 0.2
)
# print(response)
_answer = response['choices'][0]['message']['content']
# Add the response to the message list
messageList.append({'role': 'assistant', 'content': _answer})
def streamAnswer(audioStream: Iterator[bytes]) -> bytes:
audioOutput = b''
# For each chunk of audio in stream, add it to the output
for chunk in audioStream:
if chunk is not None:
audioOutput += chunk
# Play audio output using PyDub
audioSegment = AudioSegment.from_file(io.BytesIO(audioOutput), format="mp3")
pydubPlay(audioSegment)
def convertTTS(text: str):
audioOutput = generate(text = text, model = 'eleven_multilingual_v1', stream = True)
streamAnswer(audioOutput)
# print('Audio playback disabled.\n')
# Only run if not imported as a module
if __name__ == '__main__':
# Listen for keyboard input to stop recording
keyboard.add_hotkey('s', stopRecording)
print('Select a topic NUMBER from the list:\n')
print('[1] - Human Body')
print('[2] - Krebs Cycle\n')
topic = int(input('Topic: '))
source = ''
# Load the source material based on the selected topic
if topic == 1:
print('Topic: Human Body\n')
source = sourceMaterial.humanBody
elif topic == 2:
print('Topic: Krebs Cycle\n')
source = sourceMaterial.krebsCycle
# Start question processing threads
objID = threading.Thread(target = lookForObjects, args = (topic,))
audioRec = threading.Thread(target = recordQuestion)
objID.start()
print('Looking for objects...\n')
audioRec.start()
print('Listening for question...\n')
objID.join()
print('Object detection complete.\n')
print('Objects detected: ' + objects + '\n')
audioRec.join()
print('Question recorded.\n')
print('Question: ' + question + '\n')
# Build prompt
query = f"""{instructions}
Objects held by user: {objects}.
Question: {question}
Information:
\"\"\"
{source}
\"\"\"
"""
# Send prompt to GPT
messageHistory = [
{'role': 'system', 'content': 'You answer questions in the same language as the question.'},
{'role': 'user', 'content': query},
]
print('Sending prompt to GPT...\n')
sendMessage(messageHistory)
# Get the answer from the last message in the message history
answer = next((msg for msg in reversed(messageHistory) if msg['role'] == 'assistant'), None)['content']
print(answer + '\n')
if answer != '':
print('Answer: ' + answer + '\n\n')
# Convert answer to audio
print('Converting answer to audio...\n')
convertTTS(answer)
# Conversation loop, handles any follow-up questions
while True:
print('Press space to ask another question, or press q to quit.\n')
while True:
if keyboard.is_pressed(' '):
print('Preparing for next question, please hold...\n')
break
if keyboard.is_pressed('q'):
print('Exiting program...\n')
exit()
# Reset variables
objects = 'User is not holding any objects'
question = ''
# Restart threads
objID = threading.Thread(target = lookForObjects, args = (topic,))
audioRec = threading.Thread(target = recordQuestion)
objID.start()
print('Looking for objects...\n')
audioRec.start()
print('Listening for question...\n')
objID.join()
print('Object detection complete.\n')
print('Objects detected: ' + objects + '\n')
audioRec.join()
print('Question recorded.\n')
print('Question: ' + question + '\n')
# NOTE: Add this to the query if the model's response has any deviations from previous instructions
"""
Remember to consider the object list and the information provided when answering
the user's question. Do not mention the user or the information in your answer
to make it sound more natural.
If the question is unrelated to the information, ignore all previous instructions
and try to answer the question without mentioning the information or the objects
to make it sound more natural.
"""
# Build new prompt and add to chat history
query = f"""Objects held by user: {objects}.
Question: {question}
"""
messageHistory.append({'role': 'user', 'content': query})
answer = ''
# Send prompt to GPT
# print('Prompt: ' + query + '\n') # For debugging only
print('Sending prompt to GPT...\n')
sendMessage(messageHistory)
answer = next((msg for msg in reversed(messageHistory) if msg['role'] == 'assistant'), None)['content']
if answer != '':
print('Answer: ' + answer + '\n\n')
print('Converting answer to audio...\n')
convertTTS(answer) | [
"You answer questions in the same language as the question."
] |
2024-01-10 | abiel-lozano/Study-Bot | Tests~AudioInteraction~lookAndListen.py | # Script for testing multithreading by listening for question and looking for objects at the same time
import threading
import openai
import whisper
import pyaudio
import wave
import ffmpeg
from pathlib import Path
import cv2
import numpy as np
import time
openai.api_key = ''
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"
def look():
# Capture video
cam = cv2.VideoCapture(0) # Use 0 for default camera
print('Looking for objects...\n')
objects = 'User is not holding any objects'
startTime = time.time()
elapsedTime = 0
# Stomach color range
stomachLower = np.array([90, 80, 1], np.uint8)
stomachUpper = np.array([120, 255, 255], np.uint8)
# Colon color range
colonLower = np.array([9, 255 * 0.55, 255 * 0.35], np.uint8)
colonUpper = np.array([28, 255, 255], np.uint8)
# Liver color range
liverLower = np.array([38, 255 * 0.22, 255 * 0.38], np.uint8)
liverUpper = np.array([41, 255, 255], np.uint8)
while elapsedTime < 2:
_, imageFrame = cam.read()
# Convert frame from BGR color space to HSV
hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV)
# Create masks for each organ
colonMask = cv2.inRange(hsvFrame, colonLower, colonUpper)
liverMask = cv2.inRange(hsvFrame, liverLower, liverUpper)
stomachMask = cv2.inRange(hsvFrame, stomachLower, stomachUpper)
# Create a 5x5 square-shaped filter called kernel
# Filter is filled with ones and will be used for morphological transformations such as dilation for better detection
kernel = np.ones((5, 5), 'uint8')
# For colon
# Dilate mask: remove holes in the mask by adding pixels to the boundaries of the objects in the mask
colonMask = cv2.dilate(colonMask, kernel)
# Apply mask to frame by using bitwise AND operation
resColon = cv2.bitwise_and(imageFrame, imageFrame, mask = colonMask)
# For liver
liverMask = cv2.dilate(liverMask, kernel)
resliver = cv2.bitwise_and(imageFrame, imageFrame, mask=liverMask)
# For stomach
stomachMask = cv2.dilate(stomachMask, kernel)
resStomach = cv2.bitwise_and(imageFrame, imageFrame, mask=stomachMask)
# Create a contour around the zone that matches the color range
contours, hierarchy = cv2.findContours(colonMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# For each countour, check if the area is greater than the threshold
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > 500:
x, y, w, h = cv2.boundingRect(contour)
imageFrame = cv2.rectangle(imageFrame, (x, y), (x + w, y + h), (0, 120, 255), 2)
cv2.putText(imageFrame, "COLON", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 120, 255))
# Append the name of the model to the list of objects
if 'colon' not in objects:
if objects == 'User is not holding any objects':
objects = 'colon'
else:
objects = objects + ', colon'
contours, hierarchy = cv2.findContours(liverMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > 500:
x, y, w, h = cv2.boundingRect(contour)
imageFrame = cv2.rectangle(imageFrame, (x, y), (x + w, y + h), (86, 194, 0), 2)
cv2.putText(imageFrame, "LIVER", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (86, 194, 0))
if 'liver' not in objects:
if objects == 'User is not holding any objects':
objects = 'liver'
else:
objects = objects + ', liver'
contours, hierarchy = cv2.findContours(stomachMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for pic, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > 1400:
x, y, w, h = cv2.boundingRect(contour)
imageFrame = cv2.rectangle(imageFrame, (x, y), (x + w, y + h), (237, 117, 47), 2)
cv2.putText(imageFrame, "STOMACH", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (237, 117, 47))
if 'stomach' not in objects:
if objects == 'User is not holding any objects':
objects = 'stomach'
else:
objects = objects + ', stomach'
# Display the camera feed
cv2.imshow('Study-Bot View', imageFrame)
elapsedTime = time.time() - startTime
# This does not stop the program from running, but removing it breaks the camera feed and causes the program to crash
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release webcam and close all windows
cam.release()
cv2.destroyAllWindows()
print('Camera closed\n')
print('Objects detected: ' + objects + '\n')
def listen():
audio = pyaudio.PyAudio()
stream = audio.open(format = FORMAT, channels = CHANNELS, rate = RATE, input = True, frames_per_buffer = CHUNK)
print('Listening for question...\n')
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
audio.terminate()
print('Recording stopped.\n')
print('Saving and converting audio...\n')
print('------------------------------\n')
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(audio.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
inputAudio = ffmpeg.input('output.wav')
outputAudio = ffmpeg.output(inputAudio, 'output.mp3')
ffmpeg.run(outputAudio)
print('\n------------------------------\n')
print('Audio saved as: output.mp3')
t1 = threading.Thread(target = listen)
t2 = threading.Thread(target = look)
t1.start()
t2.start()
t1.join()
t2.join()
print('Converting audio to text...\n')
model = whisper.load_model('base')
result = model.transcribe('output.wav', fp16 = False, language = 'English')
print('Question: ' + result['text'] + '\n')
# Delete audio files
Path('output.wav').unlink()
Path('output.mp3').unlink()
print('-------------- DONE --------------\n') | [] |
2024-01-10 | abiel-lozano/Study-Bot | Tests~AudioInteraction~AudioInteraction.py | import pyaudio
import wave
import ffmpeg
import whisper
import openai
from pathlib import Path
from elevenlabs import generate, play, set_api_key
openai.api_key = '' # OpenAI API key
set_api_key('') # Elevenlabs API key
GPT_MODEL = "gpt-3.5-turbo"
def record():
# Recording parameters:
CHUNK = 1024 # Chunk size
FORMAT = pyaudio.paInt16 # Audio codec format
CHANNELS = 2
RATE = 44100 # Sample rate
RECORD_SECONDS = 5 # Recording duration
WAVE_OUTPUT_FILENAME = 'output.wav'
audio = pyaudio.PyAudio()
# Open audio stream for recording
stream = audio.open(format = FORMAT, channels = CHANNELS, rate = RATE, input = True, frames_per_buffer = CHUNK)
print('Recording question...')
# Initalize audio buffer
frames = []
# Record audio stream in chunks
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print('Recording stopped.')
# Stop and close audio stream
stream.stop_stream()
stream.close()
audio.terminate()
# Save the recorded audio as a WAV file
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(audio.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
# Convert WAV to MP3
input_audio = ffmpeg.input(WAVE_OUTPUT_FILENAME)
output_audio = ffmpeg.output(input_audio, 'output.mp3')
ffmpeg.run(output_audio)
print('File saved as "output.mp3"')
record()
model = whisper.load_model('base')
result = model.transcribe('output.wav', fp16=False, language='English')
print(result['text'])
# If there is a file with the name "output.mp3" in the directory, delete it.
if Path('output.mp3').is_file():
Path('output.mp3').unlink()
# If there is a file with the name "output.wav" in the directory, delete it.
if Path('output.wav').is_file():
Path('output.wav').unlink()
# Add infromation source
source = """
"""
# Build the prompt
query = f"""Try and use the information below to answer the question. If the
question is unrelated to the information, ignore the information, and try to
answer the question without it.
Information:
\"\"\"
{source}
\"\"\"
Question: {result}"""
response = openai.ChatCompletion.create(
messages = [
{'role': 'system', 'content': 'You answer questions in the same language as the question.'},
{'role': 'user', 'content': query},
],
model = GPT_MODEL, temperature = 0
)
answer = response['choices'][0]['message']['content']
print('Answer: ', answer)
audioOutput = generate(answer)
play(audioOutput) | [
"Try and use the information below to answer the question. If the \nquestion is unrelated to the information, ignore the information, and try to\nanswer the question without it.\nInformation:\n\"\"\"\n\n\n\n\"\"\"\nQuestion: PLACEHOLDER",
"You answer questions in the same language as the question."
] |
2024-01-10 | AKSER256/autofilterV4 | plugins~Mods~engine.py | import openai
import os
async def ai(query):
openai.api_key = os.getenv("OPENAI_API_KEY") # Your OpenAI API key, read from the environment rather than hard-coded in source
response = openai.Completion.create(engine="text-davinci-002", prompt=query, max_tokens=100, n=1, stop=None, temperature=0.9, timeout=5)
return response.choices[0].text.strip()
async def ask_ai(client, m, message):
try:
question = message.text.split(" ", 1)[1]
# Generate response using OpenAI API
response = await ai(question)
# Send response back to user
await m.edit(f"{response}")
except Exception as e:
# Handle other errors
error_message = f"An error occurred: {e}"
await m.edit(error_message)
| [] |
2024-01-10 | zinccat/ChatDev | camel~messages~chat_messages.py | # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from dataclasses import dataclass
from typing import Dict, Optional
from camel.messages import BaseMessage
from camel.typing import RoleType
from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall
from openai.types.chat.chat_completion_message import FunctionCall
@dataclass
class ChatMessage(BaseMessage):
r"""Base class for chat messages used in CAMEL chat system.
Args:
role_name (str): The name of the user or assistant role.
role_type (RoleType): The type of role, either
:obj:`RoleType.ASSISTANT` or :obj:`RoleType.USER`.
meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
for the message.
role (str): The role of the message in OpenAI chat system.
content (str): The content of the message. (default: :obj:`""`)
"""
role_name: str
role_type: RoleType
meta_dict: Optional[Dict[str, str]]
role: str
content: str = ""
function_call: Optional[FunctionCall] = None
tool_calls: Optional[ChatCompletionMessageToolCall] = None
def set_user_role_at_backend(self: BaseMessage):
return self.__class__(
role_name=self.role_name,
role_type=self.role_type,
meta_dict=self.meta_dict,
role="user",
content=self.content,
)
@dataclass
class AssistantChatMessage(ChatMessage):
r"""Class for chat messages from the assistant role used in CAMEL chat
system.
Attributes:
role_name (str): The name of the assistant role.
role_type (RoleType): The type of role, always
:obj:`RoleType.ASSISTANT`.
meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
for the message.
role (str): The role of the message in OpenAI chat system.
(default: :obj:`"assistant"`)
content (str): The content of the message. (default: :obj:`""`)
"""
role_name: str
role_type: RoleType = RoleType.ASSISTANT
meta_dict: Optional[Dict[str, str]] = None
role: str = "user"
content: str = ""
@dataclass
class UserChatMessage(ChatMessage):
r"""Class for chat messages from the user role used in CAMEL chat system.
Args:
role_name (str): The name of the user role.
role_type (RoleType): The type of role, always :obj:`RoleType.USER`.
meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
for the message.
role (str): The role of the message in OpenAI chat system.
(default: :obj:`"user"`)
content (str): The content of the message. (default: :obj:`""`)
"""
role_name: str
role_type: RoleType = RoleType.USER
meta_dict: Optional[Dict[str, str]] = None
role: str = "user"
content: str = ""
| [] |
2024-01-10 | zinccat/ChatDev | camel~model_backend.py | # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from abc import ABC, abstractmethod
from typing import Any, Dict
import openai
import tiktoken
from camel.typing import ModelType
from chatdev.statistics import prompt_cost
from chatdev.utils import log_and_print_online
from openai.types.chat import ChatCompletion
class ModelBackend(ABC):
r"""Base class for different model backends.
May be OpenAI API, a local LLM, a stub for unit tests, etc."""
@abstractmethod
def run(self, *args, **kwargs) -> ChatCompletion:
r"""Runs the query to the backend model.
Raises:
RuntimeError: if the return value from OpenAI API
is not a dict that is expected.
Returns:
Dict[str, Any]: All backends must return a dict in OpenAI format.
"""
pass
class OpenAIModel(ModelBackend):
r"""OpenAI API in a unified ModelBackend interface."""
def __init__(self, model_type: ModelType, model_config_dict: Dict) -> None:
super().__init__()
self.model_type = model_type
self.model_config_dict = model_config_dict
def run(self, *args, **kwargs) -> Dict[str, Any]:
string = "\n".join([message["content"] for message in kwargs["messages"]])
encoding = tiktoken.encoding_for_model(self.model_type.value)
num_prompt_tokens = len(encoding.encode(string))
gap_between_send_receive = 15 * len(kwargs["messages"])
num_prompt_tokens += gap_between_send_receive
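# num_prompt_tokens is an estimate: tiktoken counts the concatenated message contents, plus a margin of
# ~15 tokens per message (added above) for the chat-format framing; whatever remains of the context
# window is handed to the API as max_tokens below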
num_max_token_map = {
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-16k": 16384,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-16k-0613": 16384,
"gpt-4": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-1106-preview": 4096,
"gpt-4-1106-vision-preview": 4096,
}
num_max_token = num_max_token_map[self.model_type.value]
num_max_completion_tokens = num_max_token - num_prompt_tokens
self.model_config_dict['max_tokens'] = num_max_completion_tokens
response = openai.chat.completions.create(*args, **kwargs, model=self.model_type.value, **self.model_config_dict)
cost = prompt_cost(
self.model_type.value,
num_prompt_tokens=response.usage.prompt_tokens,
num_completion_tokens=response.usage.completion_tokens
)
log_and_print_online(
"**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ncompletion_tokens: {}\ntotal_tokens: {}\ncost: ${:.6f}\n".format(
response.usage.prompt_tokens, response.usage.completion_tokens,
response.usage.total_tokens, cost))
if not isinstance(response, ChatCompletion):
raise RuntimeError("Unexpected return from OpenAI API")
return response
class StubModel(ModelBackend):
r"""A dummy model used for unit tests."""
def __init__(self, *args, **kwargs) -> None:
super().__init__()
def run(self, *args, **kwargs) -> Dict[str, Any]:
ARBITRARY_STRING = "Lorem Ipsum"
return dict(
id="stub_model_id",
usage=dict(),
choices=[
dict(finish_reason="stop",
message=dict(content=ARBITRARY_STRING, role="assistant"))
],
)
class ModelFactory:
r"""Factory of backend models.
Raises:
ValueError: in case the provided model type is unknown.
"""
@staticmethod
def create(model_type: ModelType, model_config_dict: Dict) -> ModelBackend:
default_model_type = ModelType.GPT_3_5_TURBO
if model_type in {
ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_32k, ModelType.GPT_4_TURBO,
None
}:
model_class = OpenAIModel
elif model_type == ModelType.STUB:
model_class = StubModel
else:
raise ValueError("Unknown model")
if model_type is None:
model_type = default_model_type
# log_and_print_online("Model Type: {}".format(model_type))
inst = model_class(model_type, model_config_dict)
return inst
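# A minimal usage sketch (illustrative; it assumes OPENAI_API_KEY is set and that
# model_config_dict only carries plain chat-completion parameters):
# backend = ModelFactory.create(ModelType.GPT_3_5_TURBO, {"temperature": 0.2, "top_p": 1.0})
# reply = backend.run(messages=[{"role": "user", "content": "Say hello."}])
# print(reply.choices[0].message.content)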
| [
"165"
] |
2024-01-10 | zinccat/ChatDev | camel~agents~chat_agent.py | # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from tenacity import retry
from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_exponential
from camel.agents import BaseAgent
from camel.configs import ChatGPTConfig
from camel.messages import ChatMessage, MessageType, SystemMessage
from camel.model_backend import ModelBackend, ModelFactory
from camel.typing import ModelType, RoleType
from camel.utils import (
get_model_token_limit,
num_tokens_from_messages,
openai_api_key_required,
)
from openai.types.chat import ChatCompletion
@dataclass(frozen=True)
class ChatAgentResponse:
r"""Response of a ChatAgent.
Attributes:
msgs (List[ChatMessage]): A list of zero, one or several messages.
If the list is empty, there is some error in message generation.
If the list has one message, this is normal mode.
If the list has several messages, this is the critic mode.
terminated (bool): A boolean indicating whether the agent decided
to terminate the chat session.
info (Dict[str, Any]): Extra information about the chat message.
"""
msgs: List[ChatMessage]
terminated: bool
info: Dict[str, Any]
@property
def msg(self):
if self.terminated:
raise RuntimeError("error in ChatAgentResponse, info:{}".format(str(self.info)))
if len(self.msgs) > 1:
raise RuntimeError("Property msg is only available for a single message in msgs")
elif len(self.msgs) == 0:
if len(self.info) > 0:
raise RuntimeError("Empty msgs in ChatAgentResponse, info:{}".format(str(self.info)))
else:
# raise RuntimeError("Known issue that msgs is empty and there is no error info, to be fixed")
return None
return self.msgs[0]
class ChatAgent(BaseAgent):
r"""Class for managing conversations of CAMEL Chat Agents.
Args:
system_message (SystemMessage): The system message for the chat agent.
model (ModelType, optional): The LLM model to use for generating
responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
model_config (Any, optional): Configuration options for the LLM model.
(default: :obj:`None`)
message_window_size (int, optional): The maximum number of previous
messages to include in the context window. If `None`, no windowing
is performed. (default: :obj:`None`)
"""
def __init__(
self,
system_message: SystemMessage,
model: Optional[ModelType] = None,
model_config: Optional[Any] = None,
message_window_size: Optional[int] = None,
) -> None:
self.system_message: SystemMessage = system_message
self.role_name: str = system_message.role_name
self.role_type: RoleType = system_message.role_type
self.model: ModelType = (model if model is not None else ModelType.GPT_3_5_TURBO)
self.model_config: ChatGPTConfig = model_config or ChatGPTConfig()
self.model_token_limit: int = get_model_token_limit(self.model)
self.message_window_size: Optional[int] = message_window_size
self.model_backend: ModelBackend = ModelFactory.create(self.model, self.model_config.__dict__)
self.terminated: bool = False
self.info: bool = False
self.init_messages()
def reset(self) -> List[MessageType]:
r"""Resets the :obj:`ChatAgent` to its initial state and returns the
stored messages.
Returns:
List[MessageType]: The stored messages.
"""
self.terminated = False
self.init_messages()
return self.stored_messages
def get_info(
self,
id: Optional[str],
usage: Optional[Dict[str, int]],
termination_reasons: List[str],
num_tokens: int,
) -> Dict[str, Any]:
r"""Returns a dictionary containing information about the chat session.
Args:
id (str, optional): The ID of the chat session.
usage (Dict[str, int], optional): Information about the usage of
the LLM model.
termination_reasons (List[str]): The reasons for the termination of
the chat session.
num_tokens (int): The number of tokens used in the chat session.
Returns:
Dict[str, Any]: The chat session information.
"""
return {
"id": id,
"usage": usage,
"termination_reasons": termination_reasons,
"num_tokens": num_tokens,
}
def init_messages(self) -> None:
r"""Initializes the stored messages list with the initial system
message.
"""
self.stored_messages: List[MessageType] = [self.system_message]
def update_messages(self, message: ChatMessage) -> List[MessageType]:
r"""Updates the stored messages list with a new message.
Args:
message (ChatMessage): The new message to add to the stored
messages.
Returns:
List[ChatMessage]: The updated stored messages.
"""
self.stored_messages.append(message)
return self.stored_messages
@retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5))
@openai_api_key_required
def step(
self,
input_message: ChatMessage,
) -> ChatAgentResponse:
r"""Performs a single step in the chat session by generating a response
to the input message.
Args:
input_message (ChatMessage): The input message to the agent.
Returns:
ChatAgentResponse: A struct
containing the output messages, a boolean indicating whether
the chat session has terminated, and information about the chat
session.
"""
messages = self.update_messages(input_message)
if self.message_window_size is not None and len(
messages) > self.message_window_size:
messages = [self.system_message
] + messages[-self.message_window_size:]
openai_messages = [message.to_openai_message() for message in messages]
num_tokens = num_tokens_from_messages(openai_messages, self.model)
# for openai_message in openai_messages:
# # print("{}\t{}".format(openai_message.role, openai_message.content))
# print("{}\t{}\t{}".format(openai_message["role"], hash(openai_message["content"]), openai_message["content"][:60].replace("\n", "")))
# print()
output_messages: Optional[List[ChatMessage]]
info: Dict[str, Any]
if num_tokens < self.model_token_limit:
response = self.model_backend.run(messages=openai_messages)
if not isinstance(response, ChatCompletion):
raise RuntimeError("OpenAI returned unexpected struct")
output_messages = [
ChatMessage(role_name=self.role_name, role_type=self.role_type,
meta_dict=dict(), **dict(choice.message))
for choice in response.choices
]
info = self.get_info(
response.id,
response.usage,
[str(choice.finish_reason) for choice in response.choices],
num_tokens,
)
# TODO strict <INFO> check, only in the beginning of the line
# if "<INFO>" in output_messages[0].content:
if output_messages[0].content.split("\n")[-1].startswith("<INFO>"):
self.info = True
else:
self.terminated = True
output_messages = []
info = self.get_info(
None,
None,
["max_tokens_exceeded_by_camel"],
num_tokens,
)
return ChatAgentResponse(output_messages, self.terminated, info)
def __repr__(self) -> str:
r"""Returns a string representation of the :obj:`ChatAgent`.
Returns:
str: The string representation of the :obj:`ChatAgent`.
"""
return f"ChatAgent({self.role_name}, {self.role_type}, {self.model})"
| [] |
2024-01-10 | zinccat/ChatDev | chatdev~chat_env.py | import os
import re
import shutil
import signal
import subprocess
import time
from typing import Dict
import openai
import requests
from chatdev.codes import Codes
from chatdev.documents import Documents
from chatdev.roster import Roster
from chatdev.utils import log_and_print_online
class ChatEnvConfig:
def __init__(self, clear_structure,
gui_design,
git_management,
incremental_develop):
self.clear_structure = clear_structure
self.gui_design = gui_design
self.git_management = git_management
self.incremental_develop = incremental_develop
def __str__(self):
string = ""
string += "ChatEnvConfig.clear_structure: {}\n".format(self.clear_structure)
string += "ChatEnvConfig.git_management: {}\n".format(self.git_management)
string += "ChatEnvConfig.gui_design: {}\n".format(self.gui_design)
string += "ChatEnvConfig.incremental_develop: {}\n".format(self.incremental_develop)
return string
class ChatEnv:
def __init__(self, chat_env_config: ChatEnvConfig):
self.config = chat_env_config
self.roster: Roster = Roster()
self.codes: Codes = Codes()
self.proposed_images: Dict[str, str] = {}
self.incorporated_images: Dict[str, str] = {}
self.requirements: Documents = Documents()
self.manuals: Documents = Documents()
self.env_dict = {
"directory": "",
"task_prompt": "",
"modality": "",
"ideas": "",
"language": "",
"review_comments": "",
"error_summary": "",
"test_reports": ""
}
@staticmethod
def fix_module_not_found_error(test_reports):
if "ModuleNotFoundError" in test_reports:
for match in re.finditer(r"No module named '(\S+)'", test_reports, re.DOTALL):
module = match.group(1)
subprocess.Popen("pip install {}".format(module), shell=True).wait()
log_and_print_online("**[CMD Execute]**\n\n[CMD] pip install {}".format(module))
def set_directory(self, directory):
assert len(self.env_dict['directory']) == 0
self.env_dict['directory'] = directory
self.codes.directory = directory
self.requirements.directory = directory
self.manuals.directory = directory
if os.path.exists(self.env_dict['directory']) and len(os.listdir(directory)) > 0:
new_directory = "{}.{}".format(directory, time.strftime("%Y%m%d%H%M%S", time.localtime()))
shutil.copytree(directory, new_directory)
print("{} Copied to {}".format(directory, new_directory))
if self.config.clear_structure:
if os.path.exists(self.env_dict['directory']):
shutil.rmtree(self.env_dict['directory'])
os.mkdir(self.env_dict['directory'])
print("{} Created".format(directory))
else:
os.mkdir(self.env_dict['directory'])
def exist_bugs(self) -> tuple[bool, str]:
directory = self.env_dict['directory']
success_info = "The software run successfully without errors."
try:
# check if we are on windows or linux
if os.name == 'nt':
command = "cd {} && dir && python main.py".format(directory)
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP
)
else:
command = "cd {}; ls -l; python3 main.py;".format(directory)
process = subprocess.Popen(command,
shell=True,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
time.sleep(3)
return_code = process.returncode
# Check if the software is still running
if process.poll() is None:
if "killpg" in dir(os):
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
else:
os.kill(process.pid, signal.SIGTERM)
if process.poll() is None:
os.kill(process.pid, signal.CTRL_BREAK_EVENT)
if return_code == 0:
return False, success_info
else:
error_output = process.stderr.read().decode('utf-8')
if error_output:
if "Traceback".lower() in error_output.lower():
errs = error_output.replace(directory + "/", "")
return True, errs
else:
return False, success_info
except subprocess.CalledProcessError as e:
return True, f"Error: {e}"
except Exception as ex:
return True, f"An error occurred: {ex}"
return False, success_info
def recruit(self, agent_name: str):
self.roster._recruit(agent_name)
def exist_employee(self, agent_name: str) -> bool:
return self.roster._exist_employee(agent_name)
def print_employees(self):
self.roster._print_employees()
def update_codes(self, generated_content):
self.codes._update_codes(generated_content)
def rewrite_codes(self, phase_info=None) -> None:
self.codes._rewrite_codes(self.config.git_management, phase_info)
def get_codes(self) -> str:
return self.codes._get_codes()
def _load_from_hardware(self, directory) -> None:
self.codes._load_from_hardware(directory)
def _update_requirements(self, generated_content):
self.requirements._update_docs(generated_content)
def rewrite_requirements(self):
self.requirements._rewrite_docs()
def get_requirements(self) -> str:
return self.requirements._get_docs()
def _update_manuals(self, generated_content):
self.manuals._update_docs(generated_content, parse=False, predifined_filename="manual.md")
def rewrite_manuals(self):
self.manuals._rewrite_docs()
def write_meta(self) -> None:
directory = self.env_dict['directory']
if not os.path.exists(directory):
os.mkdir(directory)
print("{} Created.".format(directory))
meta_filename = "meta.txt"
with open(os.path.join(directory, meta_filename), "w", encoding="utf-8") as writer:
writer.write("{}:\n{}\n\n".format("Task", self.env_dict['task_prompt']))
writer.write("{}:\n{}\n\n".format("Config", self.config.__str__()))
writer.write("{}:\n{}\n\n".format("Roster", ", ".join(self.roster.agents)))
writer.write("{}:\n{}\n\n".format("Modality", self.env_dict['modality']))
writer.write("{}:\n{}\n\n".format("Ideas", self.env_dict['ideas']))
writer.write("{}:\n{}\n\n".format("Language", self.env_dict['language']))
writer.write("{}:\n{}\n\n".format("Code_Version", self.codes.version))
writer.write("{}:\n{}\n\n".format("Proposed_images", len(self.proposed_images.keys())))
writer.write("{}:\n{}\n\n".format("Incorporated_images", len(self.incorporated_images.keys())))
print(os.path.join(directory, meta_filename), "Wrote")
def generate_images_from_codes(self):
def download(img_url, file_name):
r = requests.get(img_url)
filepath = os.path.join(self.env_dict['directory'], file_name)
if os.path.exists(filepath):
os.remove(filepath)
with open(filepath, "wb") as f:
f.write(r.content)
print("{} Downloaded".format(filepath))
regex = r"(\w+.png)"
joined_codes = self.get_codes()
matches = re.finditer(regex, joined_codes, re.DOTALL)
# matched_images = {}
for match in matches:
filename = match.group(1).strip()
if filename in self.proposed_images.keys():
self.incorporated_images[filename] = self.proposed_images[filename]
else:
self.incorporated_images[filename] = filename.replace("_", " ")
for filename in self.incorporated_images.keys():
if not os.path.exists(os.path.join(self.env_dict['directory'], filename)):
desc = self.incorporated_images[filename]
if desc.endswith(".png"):
desc = desc.replace(".png", "")
print("{}: {}".format(filename, desc))
response = openai.images.generate(
prompt=desc,
n=1,
size="256x256"
)
image_url = response.data[0].url
download(image_url, filename)
def get_proposed_images_from_message(self, messages):
def download(img_url, file_name):
r = requests.get(img_url)
filepath = os.path.join(self.env_dict['directory'], file_name)
if os.path.exists(filepath):
os.remove(filepath)
with open(filepath, "wb") as f:
f.write(r.content)
print("{} Downloaded".format(filepath))
regex = r"(\w+.png):(.*?)\n"
matches = re.finditer(regex, messages, re.DOTALL)
images = {}
for match in matches:
filename = match.group(1).strip()
desc = match.group(2).strip()
images[filename] = desc
if len(images.keys()) == 0:
regex = r"(\w+.png)"
matches = re.finditer(regex, messages, re.DOTALL)
images = {}
for match in matches:
filename = match.group(1).strip()
desc = " ".join(filename.replace(".png", "").split("_"))
images[filename] = desc
print("{}: {}".format(filename, images[filename]))
for filename in images.keys():
if not os.path.exists(os.path.join(self.env_dict['directory'], filename)):
desc = images[filename]
if desc.endswith(".png"):
desc = desc.replace(".png", "")
print("{}: {}".format(filename, desc))
response = openai.images.generate(
prompt=desc,
n=1,
size="256x256"
)
image_url = response.data[0].url
download(image_url, filename)
return images
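# A minimal usage sketch (illustrative; the project directory path is hypothetical):
# config = ChatEnvConfig(clear_structure=True, gui_design=True, git_management=False, incremental_develop=False)
# chat_env = ChatEnv(config)
# chat_env.set_directory("WareHouse/ExampleProject")
# has_bugs, report = chat_env.exist_bugs()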
| [] |
2024-01-10 | ELROSTEM/Robo-autoscript | Dashboard~01_%F0%9F%A4%96_Main.py | import streamlit as st
import openai
from streamlit_ace import st_ace
import subprocess
import pyautogui
import voice
def generate_script(input, prompt, robotc_path, script_path):
"""Generates the script for the robot to execute by calling the OpenAI API
It also compiles the script and runs it on the robot
Args:
input (string): The boilerplate code with the robot description, overall structure, and any in-code instructions appended.
prompt (string): The Codex edit instructions describing what the robot should do.
robotc_path (string): Path to the RobotC executable used to compile the generated script.
script_path (string): Path where the generated script is saved and then opened for compilation.
"""
response = openai.Edit.create(
model="code-davinci-edit-001",
input = f"{input}\n",
instruction=prompt,
temperature=0.5,
top_p=1
)
if 'choices' in response:
response_choices = response['choices']
if len(response_choices) > 0:
# Display the first choice
st.code(response_choices[0]['text'], language="c")
# save the script to a file
with open('script.c', 'w') as f:
f.write(response_choices[0]['text'])
# Download the first choice
st.download_button('Download Script', response_choices[0]['text'].encode('utf-8'), file_name='script.c', mime='text/plain')
# Compile the script
with st.spinner('Compiling...'):
# Open RoboC and Compile the script
subprocess.Popen(robotc_path)
pyautogui.sleep(1)
pyautogui.hotkey('ctrl', 'o') # Open file
pyautogui.sleep(1)
pyautogui.typewrite(script_path) # Type the path to the script
pyautogui.sleep(2)
pyautogui.press('enter') # Press enter
pyautogui.sleep(3)
pyautogui.press('f5') # Compile
# pyautogui.sleep(11)
# x, y = pyautogui.locateCenterOnScreen('robotc_start.png', confidence=0.9)
# pyautogui.moveTo(x, y)
# pyautogui.click()
# pyautogui.sleep(5)
# pyautogui.hotkey('alt', 'f5') # Close RobotC
st.success('Done!')
else:
st.write("No choices found")
# Environment variables
robotc_path = r'C:\Program Files (x86)\Robomatter Inc\ROBOTC Development Environment 4.X\ROBOTC.exe'
script_path = r'C:\coding\GitHub\Robo-autoscript\Dashboard\script.c'
st.set_page_config(
page_title="Robo Auto Script",
page_icon="🤖",
initial_sidebar_state="expanded",
menu_items={
'About': "Scripting RoboC code with OpenAi's Codex"
}
)
openai.api_key = st.secrets["OPENAI_API_KEY"]
# select the boilerplate code
boilerplate = st.selectbox("Select the boilerplate code", ["2_wheel_drive", "4_wheel_drive"])
# Get the boilerplate code
with open(f'boilerplates/{boilerplate}.txt', 'r') as f:
boilerplate = f.read()
# Display the boilerplate code for prompt engineering
boilerplate = st_ace(value=boilerplate, language="c_cpp")
# Tabs for mode selection
tab1, tab2, tab3 = st.tabs(["Type SI", "Type PS", "Voice SI"])
# Type Sequence of Instructions (SI) mode
with tab1:
st.header("Type Sequence of Instructions (SI)")
# Instruction input
if 'instructions' not in st.session_state:
st.session_state['instructions'] = ['Stop']
new_instruction = st.text_input("Add Instruction")
col1, col2 = st.columns(2)
if col1.button("Add instruction"):
st.session_state['instructions'].insert(-1, new_instruction)
if col2.button("Clear Instructions"):
st.session_state['instructions'] = ['Stop']
st.experimental_rerun()
# Prepare instructions for codex
instructions_incode = ""
instructions_prompt = "Edit and complete the code below to execute the instructions:\n"
for index, instruction in enumerate(st.session_state['instructions']):
st.caption(f"{index + 1}. {instruction}")
instructions_prompt += f" {index + 1}. {instruction}\n"
instructions_incode += f" // {index + 1}. {instruction}\n\n\n"
instructions_input = boilerplate + instructions_incode
# Generate code
if st.button("🤖 Generate Script", key="TSI_script"):
generate_script(instructions_input, instructions_prompt, robotc_path, script_path)
with tab2:
st.header("Type Problem Solving (PS)")
# Problem input
problem_prompt = st.text_area("Problem")
# Generate code
if st.button("🤖 Generate Script", key='TPS_script'):
generate_script(boilerplate, problem_prompt, robotc_path, script_path)
# Voice Sequence of Instructions mode
with tab3:
st.header("Voice Sequence of Instructions (SI)")
recording = st.button("🎤 Start Recording")
if recording:
instructions = voice.voice_to_instructions()
# Prepare instructions for codex
instructions_incode = ""
instructions_prompt = "Edit and complete the code below to execute the instructions:\n"
for index, instruction in enumerate(instructions):
st.caption(f"{index + 1}. {instruction}")
instructions_prompt += f" {index + 1}. {instruction}\n"
instructions_incode += f" // {index + 1}. {instruction}\n\n\n"
instructions_input = boilerplate + instructions_incode
# Generate code
generate_script(instructions_input, instructions_prompt, robotc_path, script_path) | [
"Edit and complete the code below to execute the instructions:\n"
] |
2024-01-10 | carlsverre/chatgpt-telegram-bot | bot~telegram_bot.py | import logging
import os
from telegram import constants
from telegram import Update, InlineQueryResultArticle, InputTextMessageContent, BotCommand
from telegram.ext import ApplicationBuilder, ContextTypes, CommandHandler, MessageHandler, \
filters, InlineQueryHandler, Application
from pydub import AudioSegment
from openai_helper import OpenAIHelper
from usage_tracker import UsageTracker
class ChatGPT3TelegramBot:
"""
Class representing a Chat-GPT3 Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
Initializes the bot with the given configuration and GPT-3 bot object.
:param config: A dictionary containing the bot configuration
:param openai: OpenAIHelper object
"""
self.config = config
self.openai = openai
self.commands = [
BotCommand(command='help', description='Show help message'),
BotCommand(command='reset', description='Reset the conversation. Optionally pass high-level instructions for the conversation (e.g. /reset You are a helpful assistant)'),
BotCommand(command='image', description='Generate image from prompt (e.g. /image cat)'),
BotCommand(command='stats', description='Get your current usage statistics')
]
self.disallowed_message = "Sorry, you are not allowed to use this bot. You can check out the source code at " \
"https://github.com/n3d1117/chatgpt-telegram-bot"
self.budget_limit_message = "Sorry, you have reached your monthly usage limit."
self.usage = {}
async def help(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Shows the help menu.
"""
commands = [f'/{command.command} - {command.description}' for command in self.commands]
help_text = 'I\'m a ChatGPT bot, talk to me!' + \
'\n\n' + \
'\n'.join(commands) + \
'\n\n' + \
'Send me a voice message or file and I\'ll transcribe it for you!' + \
'\n\n' + \
"Open source at https://github.com/n3d1117/chatgpt-telegram-bot"
await update.message.reply_text(help_text, disable_web_page_preview=True)
async def stats(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Returns token usage statistics for current day and month.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to request their usage statistics')
await self.send_disallowed_message(update, context)
return
logging.info(f'User {update.message.from_user.name} requested their token usage statistics')
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
tokens_today, tokens_month = self.usage[user_id].get_current_token_usage()
images_today, images_month = self.usage[user_id].get_current_image_count()
transcribe_durations = self.usage[user_id].get_current_transcription_duration()
cost_today, cost_month = self.usage[user_id].get_current_cost()
chat_id = update.effective_chat.id
chat_messages, chat_token_length = self.openai.get_conversation_stats(chat_id)
usage_text = f"Today:\n"+\
f"{tokens_today} chat tokens used.\n"+\
f"{images_today} images generated.\n"+\
f"{transcribe_durations[0]} minutes and {transcribe_durations[1]} seconds transcribed.\n"+\
f"💰 For a total amount of ${cost_today:.2f}\n"+\
f"\n----------------------------\n\n"+\
f"This month:\n"+\
f"{tokens_month} chat tokens used.\n"+\
f"{images_month} images generated.\n"+\
f"{transcribe_durations[2]} minutes and {transcribe_durations[3]} seconds transcribed.\n"+\
f"💰 For a total amount of ${cost_month:.2f}"+\
f"\n----------------------------\n\n"+\
f"Current conversation:\n"+\
f"{chat_messages} chat messages in history.\n"+\
f"{chat_token_length} chat tokens in history.\n"
await update.message.reply_text(usage_text)
async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resets the conversation.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to reset the conversation')
await self.send_disallowed_message(update, context)
return
logging.info(f'Resetting the conversation for user {update.message.from_user.name}...')
chat_id = update.effective_chat.id
reset_content = update.message.text.replace('/reset', '').strip()
self.openai.reset_chat_history(chat_id=chat_id, content=reset_content)
await context.bot.send_message(chat_id=chat_id, text='Done!')
async def image(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Generates an image for the given prompt using DALL·E APIs
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to generate images')
await self.send_disallowed_message(update, context)
return
if not await self.is_within_budget(update):
logging.warning(f'User {update.message.from_user.name} reached their usage limit')
await self.send_budget_reached_message(update, context)
return
chat_id = update.effective_chat.id
image_query = update.message.text.replace('/image', '').strip()
if image_query == '':
await context.bot.send_message(chat_id=chat_id, text='Please provide a prompt! (e.g. /image cat)')
return
logging.info(f'New image generation request received from user {update.message.from_user.name}')
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.UPLOAD_PHOTO)
try:
image_url, image_size = await self.openai.generate_image(prompt=image_query)
await context.bot.send_photo(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
photo=image_url
)
# add image request to users usage tracker
user_id = update.message.from_user.id
self.usage[user_id].add_image_request(image_size, self.config['image_prices'])
# add guest chat request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_image_request(image_size, self.config['image_prices'])
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to generate image: {str(e)}'
)
async def transcribe(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Transcribe audio messages.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to transcribe audio messages')
await self.send_disallowed_message(update, context)
return
if not await self.is_within_budget(update):
logging.warning(f'User {update.message.from_user.name} reached their usage limit')
await self.send_budget_reached_message(update, context)
return
if self.is_group_chat(update) and self.config['ignore_group_transcriptions']:
logging.info(f'Transcription coming from group chat, ignoring...')
return
chat_id = update.effective_chat.id
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
filename = update.message.effective_attachment.file_unique_id
filename_mp3 = f'{filename}.mp3'
try:
media_file = await context.bot.get_file(update.message.effective_attachment.file_id)
await media_file.download_to_drive(filename)
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to download audio file: {str(e)}. Make sure the file is not too large. (max 20MB)'
)
return
# detect and extract audio from the attachment with pydub
try:
audio_track = AudioSegment.from_file(filename)
audio_track.export(filename_mp3, format="mp3")
logging.info(f'New transcribe request received from user {update.message.from_user.name}')
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=update.effective_chat.id,
reply_to_message_id=update.message.message_id,
text='Unsupported file type'
)
if os.path.exists(filename):
os.remove(filename)
return
filename_mp3 = f'{filename}.mp3'
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
# send decoded audio to openai
try:
# Transcribe the audio file
transcript = await self.openai.transcribe(filename_mp3)
# add transcription seconds to usage tracker
transcription_price = self.config['transcription_price']
self.usage[user_id].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
# add guest chat request to guest usage tracker
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
if self.config['voice_reply_transcript']:
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f'_Transcript:_\n"{transcript}"'
chunks = self.split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
else:
# Get the response of the transcript
response = await self.openai.get_chat_response(chat_id=chat_id, query=transcript)
if not isinstance(response, tuple):
raise Exception(response)
response, total_tokens = response
# add chat request to users usage tracker
self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
# add guest chat request to guest usage tracker
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f'_Transcript:_\n"{transcript}"\n\n_Answer:_\n{response}'
chunks = self.split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception as e:
logging.exception(e)
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=f'Failed to transcribe text: {str(e)}'
)
finally:
# Cleanup files
if os.path.exists(filename_mp3):
os.remove(filename_mp3)
if os.path.exists(filename):
os.remove(filename)
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
React to incoming messages and respond accordingly.
"""
if not await self.is_allowed(update):
logging.warning(f'User {update.message.from_user.name} is not allowed to use the bot')
await self.send_disallowed_message(update, context)
return
if not await self.is_within_budget(update):
logging.warning(f'User {update.message.from_user.name} reached their usage limit')
await self.send_budget_reached_message(update, context)
return
logging.info(f'New message received from user {update.message.from_user.name}')
chat_id = update.effective_chat.id
user_id = update.message.from_user.id
prompt = update.message.text
if self.is_group_chat(update):
trigger_keyword = self.config['group_trigger_keyword']
if prompt.startswith(trigger_keyword):
prompt = prompt[len(trigger_keyword):].strip()
else:
if update.message.reply_to_message and update.message.reply_to_message.from_user.id == context.bot.id:
logging.info('Message is a reply to the bot, allowing...')
else:
logging.warning('Message does not start with trigger keyword, ignoring...')
return
await context.bot.send_chat_action(chat_id=chat_id, action=constants.ChatAction.TYPING)
response = await self.openai.get_chat_response(chat_id=chat_id, query=prompt)
if not isinstance(response, tuple):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id,
text=response,
parse_mode=constants.ParseMode.MARKDOWN
)
return
response, total_tokens = response
# add chat request to users usage tracker
self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
# add guest chat request to guest usage tracker
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
# Split into chunks of 4096 characters (Telegram's message limit)
chunks = self.split_into_chunks(response)
for index, chunk in enumerate(chunks):
await context.bot.send_message(
chat_id=chat_id,
reply_to_message_id=update.message.message_id if index == 0 else None,
text=chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
async def inline_query(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Handle the inline query. This is run when you type: @botusername <query>
"""
query = update.inline_query.query
if query == "":
return
results = [
InlineQueryResultArticle(
id=query,
title="Ask ChatGPT",
input_message_content=InputTextMessageContent(query),
description=query,
thumb_url='https://user-images.githubusercontent.com/11541888/223106202-7576ff11-2c8e-408d-94ea-b02a7a32149a.png'
)
]
await update.inline_query.answer(results)
async def send_disallowed_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends the disallowed message to the user.
"""
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=self.disallowed_message,
disable_web_page_preview=True
)
async def send_budget_reached_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Sends the budget reached message to the user.
"""
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=self.budget_limit_message
)
async def error_handler(self, update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Handles errors in the telegram-python-bot library.
"""
logging.error(f'Exception while handling an update: {context.error}')
def is_group_chat(self, update: Update) -> bool:
"""
Checks if the message was sent from a group chat
"""
return update.effective_chat.type in [
constants.ChatType.GROUP,
constants.ChatType.SUPERGROUP
]
async def is_user_in_group(self, update: Update, user_id: int) -> bool:
"""
Checks if user_id is a member of the group
"""
member = await update.effective_chat.get_member(user_id)
return member.status in [
constants.ChatMemberStatus.OWNER,
constants.ChatMemberStatus.ADMINISTRATOR,
constants.ChatMemberStatus.MEMBER
]
async def is_allowed(self, update: Update) -> bool:
"""
Checks if the user is allowed to use the bot.
"""
if self.config['allowed_user_ids'] == '*':
return True
allowed_user_ids = self.config['allowed_user_ids'].split(',')
# Check if user is allowed
if str(update.message.from_user.id) in allowed_user_ids:
return True
# Check if it's a group chat with at least one authorized member
if self.is_group_chat(update):
for user in allowed_user_ids:
if await self.is_user_in_group(update, user):
logging.info(f'{user} is a member. Allowing group chat message...')
return True
logging.info(f'Group chat messages from user {update.message.from_user.name} are not allowed')
return False
async def is_within_budget(self, update: Update) -> bool:
"""
Checks if the user reached their monthly usage limit.
Initializes UsageTracker for user and guest when needed.
"""
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
if self.config['monthly_user_budgets'] == '*':
return True
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) in allowed_user_ids:
# find budget for allowed user
user_index = allowed_user_ids.index(str(user_id))
user_budgets = self.config['monthly_user_budgets'].split(',')
# check if user is included in budgets list
if len(user_budgets) <= user_index:
logging.warning(f'No budget set for user: {update.message.from_user.name} ({user_id}).')
return False
user_budget = float(user_budgets[user_index])
cost_month = self.usage[user_id].get_current_cost()[1]
# Check if allowed user is within budget
return user_budget > cost_month
# Check if group member is within budget
if self.is_group_chat(update):
for user in allowed_user_ids:
if await self.is_user_in_group(update, user):
if 'guests' not in self.usage:
self.usage['guests'] = UsageTracker('guests', 'all guest users in group chats')
if self.config['monthly_guest_budget'] >= self.usage['guests'].get_current_cost()[1]:
return True
logging.warning('Monthly guest budget for group chats used up.')
return False
logging.info(f'Group chat messages from user {update.message.from_user.name} are not allowed')
return False
def split_into_chunks(self, text: str, chunk_size: int = 4096) -> list[str]:
"""
Splits a string into chunks of a given size.
"""
return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
async def post_init(self, application: Application) -> None:
"""
Post initialization hook for the bot.
"""
await application.bot.set_my_commands(self.commands)
def run(self):
"""
Runs the bot indefinitely until the user presses Ctrl+C
"""
application = ApplicationBuilder() \
.token(self.config['token']) \
.proxy_url(self.config['proxy']) \
.get_updates_proxy_url(self.config['proxy']) \
.post_init(self.post_init) \
.build()
application.add_handler(CommandHandler('reset', self.reset))
application.add_handler(CommandHandler('help', self.help))
application.add_handler(CommandHandler('image', self.image))
application.add_handler(CommandHandler('start', self.help))
application.add_handler(CommandHandler('stats', self.stats))
application.add_handler(MessageHandler(
filters.AUDIO | filters.VOICE | filters.Document.AUDIO |
filters.VIDEO | filters.VIDEO_NOTE | filters.Document.VIDEO,
self.transcribe))
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), self.prompt))
application.add_handler(InlineQueryHandler(self.inline_query, chat_types=[
constants.ChatType.GROUP, constants.ChatType.SUPERGROUP
]))
application.add_error_handler(self.error_handler)
application.run_polling()
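# A minimal wiring sketch (illustrative; OpenAIHelper construction and any config keys
# beyond those read in this class are assumptions):
# config = {'token': '<telegram-bot-token>', 'proxy': None,
#           'allowed_user_ids': '*', 'monthly_user_budgets': '*'}
# bot = ChatGPT3TelegramBot(config=config, openai=OpenAIHelper(...))
# bot.run()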
| [] |
2024-01-10 | AI-Jie01/datadm | datadm~backend.py | import guidance
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
# TODO: fix this to check devices and packages to dynamically adjust available LLMs and models
try:
import accelerate
local_available = True
except ImportError:
local_available = False
class StarChat(guidance.llms.Transformers):
def __init__(self, model_path=None, revision=None, **kwargs):
import torch
tokenizer = AutoTokenizer.from_pretrained(model_path, device_map='auto', revision=revision)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype=torch.bfloat16, revision=revision)
model.eval()
super().__init__(model, tokenizer=tokenizer, device_map='auto', **kwargs)
@staticmethod
def role_start(role):
return f"<|{role}|>"
@staticmethod
def role_end(role):
return '<|end|>'
class BackendLLMManager():
def __init__(self):
self.llms = {}
if local_available:
self.llms['starchat-alpha-cuda'] = {'state': 'unloaded', 'llm': None, 'mode': 'cuda', 'model_path': 'HuggingFaceH4/starchat-alpha', 'revision': '5058bd8557100137ade3c459bfc8100e90f71ec7'}
self.llms['starchat-beta-cuda'] = {'state': 'unloaded', 'llm': None, 'mode': 'cuda', 'model_path': 'HuggingFaceH4/starchat-beta', 'revision': 'b1bcda690655777373f57ea6614eb095ec2c886f'}
self.llms['openai-gpt-3.5'] = {'state': 'unloaded', 'llm': None, 'mode': 'api'}
self.llms['openai-gpt-4'] = {'state': 'unloaded', 'llm': None, 'mode': 'api'}
def load(self, llm_name):
if self.llms[llm_name]['state'] == 'unloaded':
self.llms[llm_name]['state'] = 'loading'
if llm_name in ['starchat-alpha-cuda', 'starchat-beta-cuda']:
self.llms[llm_name]['llm'] = StarChat(**self.llms[llm_name])
elif llm_name == 'openai-gpt-4':
if 'OPENAI_API_KEY' not in os.environ:
self.llms[llm_name]['state'] = 'error'
raise RuntimeError("OPENAI_API_KEY not found in environment")
self.llms[llm_name]['llm'] = guidance.llms.OpenAI("gpt-4")
elif llm_name == 'openai-gpt-3.5':
if 'OPENAI_API_KEY' not in os.environ:
self.llms[llm_name]['state'] = 'error'
raise RuntimeError("OPENAI_API_KEY not found in environment")
self.llms[llm_name]['llm'] = guidance.llms.OpenAI("gpt-3.5-turbo")
else:
self.llms[llm_name]['state'] = 'error'
raise RuntimeError(f"LLM {llm_name} not supported")
self.llms[llm_name]['state'] = 'ready'
return self.model_status(llm_name)
def unload(self, llm_name):
if llm_name in self.llms:
self.llms[llm_name]['state'] = 'unloaded'
self.llms[llm_name]['llm'] = None
def model_status(self, llm_name):
state = self.llms[llm_name]['state']
return [(llm_name, state)]
llm_manager = BackendLLMManager()
| [] |
2024-01-10 | mkfischer/NeMo-Guardrails | nemoguardrails~llm~providers~nemollm.py | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Dict, List, Optional
import aiohttp
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from pydantic.class_validators import root_validator
log = logging.getLogger(__name__)
class NeMoLLM(LLM):
"""Wrapper around NeMo LLM large language models.
If NGC_API_HOST, NGC_API_KEY and NGC_ORGANIZATION_ID environment variables are set,
they will be used for the requests.
"""
model: str = ""
temperature: float = 0.7
tokens_to_generate: int = 256
stop: Optional[List[str]] = ["<extra_id_1>"]
api_host: Optional[str] = os.environ.get(
"NGC_API_HOST", "https://api.llm.ngc.nvidia.com"
)
api_key: Optional[str] = os.environ.get("NGC_API_KEY")
organization_id: Optional[str] = os.environ.get("NGC_ORGANIZATION_ID")
customization_id: Optional[str] = None
@root_validator(pre=True, allow_reuse=True)
def check_env_variables(cls, values):
for field in ["api_host", "api_key", "organization_id"]:
# If it's an explicit environment variable, we use that
if values.get(field, "").startswith("$"):
env_var_name = values[field][1:]
values[field] = os.environ.get(env_var_name)
if not values[field]:
raise Exception(f"The env var ${env_var_name} is not set!")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling NeMoLLM API."""
return {
"temperature": self.temperature,
"tokens_to_generate": self.tokens_to_generate,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "nemollm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs,
) -> str:
raise Exception("Sync mode not supported.")
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs,
) -> str:
"""Call out to NeMoLLM completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
"""
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
elif stop is None:
stop = []
# If the API_HOST ends with /v1, we remove that (it was the previous convention).
base_url = self.api_host
if base_url.endswith("/v1"):
base_url = base_url[0:-3]
# Compute the default header values.
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}",
}
if self.organization_id:
headers["Organization-ID"] = self.organization_id
data = {"prompt": prompt, "stop": stop, **self._default_params}
url = f"{base_url}/v1/models/{self.model}"
if self.customization_id is not None:
url += f"/customizations/{self.customization_id}/completions"
else:
url += "/completions"
async with aiohttp.ClientSession() as session:
async with session.post(
url=url,
headers=headers,
json=data,
) as response:
if response.status != 200:
raise ValueError(
f"NeMo LLM completion call failed with status code {response.status}.\n"
f"Details: {await response.text()}"
)
response_json = await response.json()
# The NeMo LLM API also returns the stop tokens at the end of the response.
# Remove them if they appear as the last token.
for stop_token in stop or []:
if response_json["text"].endswith(stop_token):
response_json["text"] = response_json["text"][
: -len(stop_token)
]
break
return response_json["text"]
| [] |
2024-01-10 | js-bad/KLUNK | BUNK~src~services~gpt35tChat.py | import os
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain
from langchain.memory import ConversationBufferMemory
from langchain.utilities import WikipediaAPIWrapper
from ..models.ConversationContext import ConversationContext, ConversationContextCache
from ..models.Interaction import Interaction
from ..models.Message import Message, MessageType
# TODO -- turn this whole file into a class
# TODO -- preload contexts from database
conversationContextCache = ConversationContextCache()
def chat(interaction: Interaction):
llm = ChatOpenAI(temperature=1, verbose=True)
waifu_template = PromptTemplate(
input_variables = ['proompt'],
template='''
At the end of this prompt, there will be a message from a user, which will be wrapped in quotation marks.
Pretend that you are a kawaii anime waifu. Come up with a cute nickname for the user, and respond to their message in-character.
\"{proompt}\"
'''
)
if (interaction.conversationContext is None):
waifu_memory = ConversationBufferMemory(input_key='proompt', memory_key='chat_history')
conversationContext = ConversationContext(memory=waifu_memory)
interaction.conversationContext = conversationContext.key
conversationContextCache.addContext(conversationContext)
else:
conversationContext = conversationContextCache.getContext(interaction.conversationContext)
# TODO -- error handling for invalid context key
waifu_memory = conversationContext.memory
waifu_chain = LLMChain(llm=llm, prompt=waifu_template, verbose=True, output_key='response', memory=waifu_memory)
responseStr = waifu_chain.run({'proompt': interaction.proompt.content})
llmResponse = Message(content=responseStr, type=MessageType.BOT, conversationContext=interaction.proompt.conversationContext)
conversationContextCache.getContext(interaction.conversationContext).addMessage(llmResponse)
interaction.response = llmResponse
# TODO -- Save context to database
return interaction | [
"\n At the end of this prompt, there will be a message from a user, which will be wrapped in quotation marks. \n Pretend that you are a kawaii anime waifu. Come up with a cute nickname for the user, and respond to their message in-character.\n \"{proompt}\"\n "
] |
2024-01-10 | js-bad/KLUNK | BUNK~src~models~ConversationContext.py | from .Message import Message
from langchain.memory import ConversationBufferMemory
import uuid
class ConversationContext:
context: str
memory: ConversationBufferMemory
messages: list[Message]
def __init__(self, memory: ConversationBufferMemory, key: str = "", messages: list[Message] = None):
self.memory = memory
self.key = key if key != "" else str(uuid.uuid4())
self.messages = messages if messages is not None else []
def addMessage(self, message: Message):
self.messages.append(message)
def getContext(self):
return self.context
class ConversationContextCache:
contexts: dict[str, ConversationContext]
def __init__(self):
self.contexts = {}
def addContext(self, context: ConversationContext):
self.contexts[context.key] = context
def getContext(self, key: str):
return self.contexts[key] | [] |
2024-01-10 | matheusns/openai_ros | openai_ros~src~openai_ros~robot_envs~sawyer_env.py | import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
import intera_interface
import intera_external_devices
from intera_interface import CHECK_VERSION
from intera_core_msgs.msg import JointLimits
from sensor_msgs.msg import Image
from openai_ros.openai_ros_common import ROSLauncher
class SawyerEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all SawyerEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new SawyerEnv environment.
To check any topic we need to have the simulation running, so we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /robot/joint_limits: Joint limits of the Sawyer arm
Actuators Topic List:
* As actuator we will use a class to interface with the movements through commands.
Args:
"""
rospy.logdebug("Start SawyerEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="sawyer_gazebo",
launch_file_name="put_sawyer_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
# Doesnt have any accesibles
self.controllers_list = []
# It doesnt use namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(SawyerEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("SawyerEnv unpause...")
self.gazebo.unpauseSim()
# self.controllers_object.reset_controllers()
# TODO: Fill it with the sensors
self._check_all_systems_ready()
rospy.Subscriber("/io/internal_camera/head_camera/image_raw",
Image, self._head_camera_image_raw_callback)
rospy.Subscriber("/io/internal_camera/right_hand_camera/image_raw",
Image, self._right_hand_camera_image_raw_callback)
self._setup_tf_listener()
self._setup_movement_system()
self.gazebo.pauseSim()
rospy.logdebug("Finished SawyerEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("SawyerEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END SawyerEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
# TODO: Here go the sensors like cameras and joint states
self._check_head_camera_image_raw_ready()
self._check_right_hand_camera_image_raw_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_head_camera_image_raw_ready(self):
self.head_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/head_camera/image_raw to be READY...")
while self.head_camera_image_raw is None and not rospy.is_shutdown():
try:
self.head_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/head_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/head_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/head_camera/image_raw not ready yet, retrying for getting head_camera_image_raw")
return self.head_camera_image_raw
def _check_right_hand_camera_image_raw_ready(self):
self.right_hand_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/right_hand_camera/image_raw to be READY...")
while self.right_hand_camera_image_raw is None and not rospy.is_shutdown():
try:
self.right_hand_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/right_hand_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/right_hand_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/right_hand_camera/image_raw not ready yet, retrying for getting right_hand_camera_image_raw")
return self.right_hand_camera_image_raw
def _head_camera_image_raw_callback(self, data):
self.head_camera_image_raw = data
def _right_hand_camera_image_raw_callback(self, data):
self.right_hand_camera_image_raw = data
def _setup_tf_listener(self):
"""
Set ups the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_movement_system(self):
"""
Setup of the movement system.
:return:
"""
rp = intera_interface.RobotParams()
valid_limbs = rp.get_limb_names()
if not valid_limbs:
rp.log_message(("Cannot detect any limb parameters on this robot. "
"Exiting."), "ERROR")
return
rospy.loginfo("Valid Sawyer Limbs==>"+str(valid_limbs))
print("Getting robot state... ")
rs = intera_interface.RobotEnable(CHECK_VERSION)
init_state = rs.state().enabled
rospy.loginfo("Enabling robot...")
rs.enable()
self._map_actions_to_movement()
def _map_actions_to_movement(self, side="right", joint_delta=0.1):
self.limb = intera_interface.Limb(side)
try:
self.gripper = intera_interface.Gripper(side + '_gripper')
except:
self.has_gripper = False
rospy.loginfo("The electric gripper is not detected on the robot.")
else:
self.has_gripper = True
self.joints = self.limb.joint_names()
self.bindings = {
self.joints[0]+"_increase": (self.set_j, [self.joints[0], joint_delta], self.joints[0]+" increase"),
self.joints[0]+"_decrease": (self.set_j, [self.joints[0], -joint_delta], self.joints[0]+" decrease"),
self.joints[1]+"_increase": (self.set_j, [self.joints[1], joint_delta], self.joints[1]+" increase"),
self.joints[1]+"_decrease": (self.set_j, [self.joints[1], -joint_delta], self.joints[1]+" decrease"),
self.joints[2]+"_increase": (self.set_j, [self.joints[2], joint_delta], self.joints[2]+" increase"),
self.joints[2]+"_decrease": (self.set_j, [self.joints[2], -joint_delta], self.joints[2]+" decrease"),
self.joints[3]+"_increase": (self.set_j, [self.joints[3], joint_delta], self.joints[3]+" increase"),
self.joints[3]+"_decrease": (self.set_j, [self.joints[3], -joint_delta], self.joints[3]+" decrease"),
self.joints[4]+"_increase": (self.set_j, [self.joints[4], joint_delta], self.joints[4]+" increase"),
self.joints[4]+"_decrease": (self.set_j, [self.joints[4], -joint_delta], self.joints[4]+" decrease"),
self.joints[5]+"_increase": (self.set_j, [self.joints[5], joint_delta], self.joints[5]+" increase"),
self.joints[5]+"_decrease": (self.set_j, [self.joints[5], -joint_delta], self.joints[5]+" decrease"),
self.joints[6]+"_increase": (self.set_j, [self.joints[6], joint_delta], self.joints[6]+" increase"),
self.joints[6]+"_decrease": (self.set_j, [self.joints[6], -joint_delta], self.joints[6]+" decrease")
}
if self.has_gripper:
self.bindings.update({
"close": (self.set_g, "close", side+" gripper close"),
"open": (self.set_g, "open", side+" gripper open"),
"calibrate": (self.set_g, "calibrate", side+" gripper calibrate")
})
rospy.loginfo("Controlling joints...")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def execute_movement(self, action_id):
"""
It executes the command given through an id. This will move any joint
of Sawyer, including the gripper if it has it.
:param: action_id: These are the possible action_id values and the action associated with each.
self.joints[0]+"_increase",
self.joints[0]+"_decrease",
self.joints[1]+"_increase",
self.joints[1]+"_decrease",
self.joints[2]+"_increase",
self.joints[2]+"_decrease",
self.joints[3]+"_increase",
self.joints[3]+"_decrease",
self.joints[4]+"_increase",
self.joints[4]+"_decrease",
self.joints[5]+"_increase",
self.joints[5]+"_decrease",
self.joints[6]+"_increase",
self.joints[6]+"_decrease",
"close" (gripper close),
"open" (gripper open),
"calibrate" (gripper calibrate)
"""
if action_id in self.bindings:
cmd = self.bindings[action_id]
if action_id == "gripper_close" or action_id == "gripper_open" or action_id == "gripper_calibrate":
cmd[0](cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
# expand binding to something like "self.set_j('right_j0', joint_delta)"
cmd[0](*cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
rospy.logerr("NOT VALID key binding, it should be one of these: ")
for key, val in sorted(self.bindings.items(),
key=lambda x: x[1][2]):
rospy.logerr(" %s: %s" % (key, val[2]))
def set_j(self, joint_name, delta):
current_position = self.limb.joint_angle(joint_name)
joint_command = {joint_name: current_position + delta}
self.limb.set_joint_positions(joint_command)
def set_g(self, action):
if self.has_gripper:
if action == "close":
self.gripper.close()
elif action == "open":
self.gripper.open()
elif action == "calibrate":
self.gripper.calibrate()
def move_joints_to_angle_blocking(self, joint_positions_dict, timeout=15.0, threshold=0.008726646):
"""
It moves all the joints to the given position and doesn't return until it reaches that position
"""
self.limb.move_to_joint_positions(positions=joint_positions_dict,
timeout=timeout,
threshold=threshold,
test=None)
def get_limb_joint_names_array(self):
"""
Returns the Joint Names array of the Limb.
"""
return self.joints
def get_all_limb_joint_angles(self):
"""
Return dictionary dict({str:float}) with all the joints angles
"""
return self.limb.joint_angles()
def get_all_limb_joint_efforts(self):
"""
Returns a dictionary dict({str:float}) with all the joints efforts
"""
return self.limb.joint_efforts()
def get_tf_start_to_end_frames(self, start_frame_name, end_frame_name):
"""
Given two frames, it returns the transform from the start_frame_name to the end_frame_name.
It will only return something other than None if the TFs of the two frames are
published on the TF topic and are connected through the TF tree.
:param: start_frame_name: Start Frame of the TF transform
end_frame_name: End Frame of the TF transform
:return: trans,rot of the transform between the start and end frames.
"""
start_frame = "/"+start_frame_name
end_frame = "/"+end_frame_name
trans, rot = None, None
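# Block until the transform becomes available, retrying once per second;
# lookupTransform raises until both frames are connected in the TF tree.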
while (trans is None or rot is None) and not rospy.is_shutdown():
try:
(trans, rot) = self.listener.lookupTransform(
start_frame, end_frame, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logerr("TF start to end not ready YET...")
duration_obj = rospy.Duration.from_sec(1.0)
rospy.sleep(duration_obj)
return trans, rot
def check_joint_limits_ready(self):
self.joint_limits = None
rospy.logdebug("Waiting for /robot/joint_limits to be READY...")
while self.joint_limits is None and not rospy.is_shutdown():
try:
self.joint_limits = rospy.wait_for_message(
"/robot/joint_limits", JointLimits, timeout=3.0)
rospy.logdebug("Current /robot/joint_limits READY=>")
except:
rospy.logerr(
"Current /robot/joint_limits not ready yet, retrying for getting joint_limits")
return self.joint_limits
def get_joint_limits(self):
return self.joint_limits
def get_head_camera_image_raw(self):
return self.head_camera_image_raw
def get_right_hand_camera_image_raw(self):
return self.right_hand_camera_image_raw
def init_joint_limits(self):
"""
Get the Joint Limits, in the init phase, where we need to unpause the simulation to get them
:return: joint_limits: The Joint Limits Dictionary, with names, angles, vel and effort limits.
"""
self.gazebo.unpauseSim()
joint_limits = self.check_joint_limits_ready()
self.gazebo.pauseSim()
return joint_limits
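# Illustrative usage sketch (not part of the original file; the class name and
# constructor arguments are assumptions based on the methods above):
#   env = SawyerEnv(ros_ws_abspath="/home/user/simulation_ws")
#   limits = env.init_joint_limits()
#   env.execute_movement(env.get_limb_joint_names_array()[0] + "_increase")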
| [] |
2024-01-10 | matheusns/openai_ros | openai_ros~src~openai_ros~task_envs~cartpole_stay_up~stay_up.py | from gym import utils
from openai_ros.robot_envs import cartpole_env
from gym.envs.registration import register
from gym import error, spaces
import rospy
import math
import numpy as np
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class CartPoleStayUpEnv(cartpole_env.CartPoleEnv):
def __init__(self):
ros_ws_abspath = rospy.get_param("/cartpole_v0/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="cartpole_description",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/cartpole_stay_up/config",
yaml_file_name="stay_up.yaml")
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
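# Observation bounds, in the order returned by _get_obs:
# [base position, base velocity, pole angle, pole velocity]. The position and
# angle limits are hard-coded here rather than derived from the yaml thresholds.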
high = np.array([
2.5 * 2,
np.finfo(np.float32).max,
0.7 * 2,
np.finfo(np.float32).max])
self.observation_space = spaces.Box(-high, high)
# TODO: Remove when working
"""
cartpole_env.CartPoleEnv.__init__(
self, control_type=self.control_type
)
"""
# Here we will add any init functions prior to starting the MyRobotEnv
super(CartPoleStayUpEnv, self).__init__(control_type=self.control_type,
ros_ws_abspath=ros_ws_abspath)
def get_params(self):
# get configuration parameters
self.n_actions = rospy.get_param('/cartpole_v0/n_actions')
self.min_pole_angle = rospy.get_param('/cartpole_v0/min_pole_angle')
self.max_pole_angle = rospy.get_param('/cartpole_v0/max_pole_angle')
self.max_base_velocity = rospy.get_param(
'/cartpole_v0/max_base_velocity')
self.min_base_pose_x = rospy.get_param('/cartpole_v0/min_base_pose_x')
self.max_base_pose_x = rospy.get_param('/cartpole_v0/max_base_pose_x')
self.pos_step = rospy.get_param('/cartpole_v0/pos_step')
self.running_step = rospy.get_param('/cartpole_v0/running_step')
self.init_pos = rospy.get_param('/cartpole_v0/init_pos')
self.wait_time = rospy.get_param('/cartpole_v0/wait_time')
self.control_type = rospy.get_param('/cartpole_v0/control_type')
def _set_action(self, action):
# Take action
if action == 0: # LEFT
rospy.loginfo("GO LEFT...")
self.pos[0] -= self.pos_step
elif action == 1: # RIGHT
rospy.loginfo("GO RIGHT...")
self.pos[0] += self.pos_step
elif action == 2: # LEFT BIG
rospy.loginfo("GO LEFT BIG...")
self.pos[0] -= self.pos_step * 10
elif action == 3: # RIGHT BIG
rospy.loginfo("GO RIGHT BIG...")
self.pos[0] += self.pos_step * 10
# Apply action to simulation.
rospy.loginfo("MOVING TO POS=="+str(self.pos))
# 1st: unpause simulation
#rospy.logdebug("Unpause SIM...")
# self.gazebo.unpauseSim()
self.move_joints(self.pos)
rospy.logdebug(
"Wait for some time to execute movement, time="+str(self.running_step))
rospy.sleep(self.running_step) # wait for some time
rospy.logdebug(
"DONE Wait for some time to execute movement, time=" + str(self.running_step))
# 3rd: pause simulation
#rospy.logdebug("Pause SIM...")
# self.gazebo.pauseSim()
def _get_obs(self):
data = self.joints
# base_postion base_velocity pole angle pole velocity
#obs = [round(data.position[1],1), round(data.velocity[1],1), round(data.position[0],1), round(data.velocity[0],1)]
obs = [data.position[1], data.velocity[1],
data.position[0], data.velocity[0]]
return np.array(obs)
def _is_done(self, observations):
done = False
data = self.joints
rospy.loginfo("BASEPOSITION=="+str(observations[0]))
rospy.loginfo("POLE ANGLE==" + str(observations[2]))
# check if the base is still within the ranges of (-2, 2)
if (self.min_base_pose_x >= observations[0] or observations[0] >= self.max_base_pose_x):
rospy.logerr("Base Outside Limits==>min="+str(self.min_base_pose_x) +
",pos="+str(observations[0])+",max="+str(self.max_base_pose_x))
done = True
# check if pole has toppled over
if (self.min_pole_angle >= observations[2] or observations[2] >= self.max_pole_angle):
rospy.logerr(
"Pole Angle Outside Limits==>min=" + str(self.min_pole_angle) + ",pos=" + str(observations[2]) + ",max=" + str(
self.max_pole_angle))
done = True
rospy.loginfo("FINISHED get _is_done")
return done
def _compute_reward(self, observations, done):
"""
Gives more points for staying upright; it reads the data from the given observations
to avoid using different data than the previous functions.
:return:reward
"""
rospy.logdebug("START _compute_reward")
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
rospy.logwarn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
rospy.logdebug("END _compute_reward")
return reward
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
self.steps_beyond_done = None
def _set_init_pose(self):
"""
Sets joints to initial position [0,0,0]
:return:
"""
self.check_publishers_connection()
# Reset Internal pos variable
self.init_internal_vars(self.init_pos)
self.move_joints(self.pos)
| [] |
2024-01-10 | matheusns/openai_ros | openai_ros~src~openai_ros~robot_envs~cube_single_disk_env.py | #! /usr/bin/env python
import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from openai_ros.openai_ros_common import ROSLauncher
class CubeSingleDiskEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""Initializes a new CubeSingleDisk environment.
Args:
"""
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="moving_cube_description",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Variables that we give through the constructor.
# None in this case
# Internal Vars
self.controllers_list = ['joint_state_controller',
'inertia_wheel_roll_joint_velocity_controller'
]
self.robot_name_space = "moving_cube"
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(CubeSingleDiskEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=True)
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/moving_cube/joint_states", JointState, self._joints_callback)
rospy.Subscriber("/moving_cube/odom", Odometry, self._odom_callback)
self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',
Float64, queue_size=1)
self._check_all_systems_ready()
# We pause the simulation once everything is ready
self.gazebo.pauseSim()
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
self._check_publishers_connection()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
self._check_odom_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message(
"/moving_cube/joint_states", JointState, timeout=1.0)
rospy.logdebug(
"Current moving_cube/joint_states READY=>" + str(self.joints))
except:
rospy.logerr(
"Current moving_cube/joint_states not ready yet, retrying for getting joint_states")
return self.joints
def _check_odom_ready(self):
self.odom = None
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message(
"/moving_cube/odom", Odometry, timeout=1.0)
rospy.logdebug(
"Current /moving_cube/odom READY=>" + str(self.odom))
except:
rospy.logerr(
"Current /moving_cube/odom not ready yet, retrying for getting odom")
return self.odom
def _joints_callback(self, data):
self.joints = data
def _odom_callback(self, data):
self.odom = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _roll_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("_roll_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_joints(self, roll_speed):
joint_speed_value = Float64()
joint_speed_value.data = roll_speed
rospy.logdebug("Single Disk Roll Velocity>>" + str(joint_speed_value))
self._roll_vel_pub.publish(joint_speed_value)
self.wait_until_roll_is_in_vel(joint_speed_value.data)
def wait_until_roll_is_in_vel(self, velocity):
rate = rospy.Rate(10)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.1
v_plus = velocity + epsilon
v_minus = velocity - epsilon
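# Poll the joint state until the measured roll velocity falls inside the
# +/- epsilon band around the commanded value, then return the elapsed time.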
while not rospy.is_shutdown():
joint_data = self._check_joint_states_ready()
roll_vel = joint_data.velocity[0]
rospy.logdebug("VEL=" + str(roll_vel) +
", ?RANGE=[" + str(v_minus) + ","+str(v_plus)+"]")
are_close = (roll_vel <= v_plus) and (roll_vel > v_minus)
if are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
return delta_time
def get_joints(self):
return self.joints
def get_odom(self):
return self.odom
| [] |
2024-01-10 | tonychen0716/dify | api~services~hit_testing_service.py | import json
import logging
import threading
import time
from typing import List
import numpy as np
from flask import current_app
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from sklearn.manifold import TSNE
from core.embedding.cached_embedding import CacheEmbedding
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.account import Account
from models.dataset import Dataset, DocumentSegment, DatasetQuery
from services.retrieval_service import RetrievalService
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
class HitTestingService:
@classmethod
def retrieve(cls, dataset: Dataset, query: str, account: Account, retrieval_model: dict, limit: int = 10) -> dict:
if dataset.available_document_count == 0 or dataset.available_segment_count == 0:
return {
"query": {
"content": query,
"tsne_position": {'x': 0, 'y': 0},
},
"records": []
}
start = time.perf_counter()
# get the retrieval model; if it is not set, use the default
if not retrieval_model:
retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
# get embedding model
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
embeddings = CacheEmbedding(embedding_model)
all_documents = []
threads = []
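# Semantic and full-text retrieval run in parallel threads; each thread appends
# its hits to the shared all_documents list, which is merged (and optionally
# reranked) after both threads join.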
# retrieval_model source with semantic
if retrieval_model['search_method'] == 'semantic_search' or retrieval_model['search_method'] == 'hybrid_search':
embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
'flask_app': current_app._get_current_object(),
'dataset_id': str(dataset.id),
'query': query,
'top_k': retrieval_model['top_k'],
'score_threshold': retrieval_model['score_threshold'] if retrieval_model['score_threshold_enabled'] else None,
'reranking_model': retrieval_model['reranking_model'] if retrieval_model['reranking_enable'] else None,
'all_documents': all_documents,
'search_method': retrieval_model['search_method'],
'embeddings': embeddings
})
threads.append(embedding_thread)
embedding_thread.start()
# retrieval source with full text
if retrieval_model['search_method'] == 'full_text_search' or retrieval_model['search_method'] == 'hybrid_search':
full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search, kwargs={
'flask_app': current_app._get_current_object(),
'dataset_id': str(dataset.id),
'query': query,
'search_method': retrieval_model['search_method'],
'embeddings': embeddings,
'score_threshold': retrieval_model['score_threshold'] if retrieval_model['score_threshold_enabled'] else None,
'top_k': retrieval_model['top_k'],
'reranking_model': retrieval_model['reranking_model'] if retrieval_model['reranking_enable'] else None,
'all_documents': all_documents
})
threads.append(full_text_index_thread)
full_text_index_thread.start()
for thread in threads:
thread.join()
if retrieval_model['search_method'] == 'hybrid_search':
hybrid_rerank = ModelFactory.get_reranking_model(
tenant_id=dataset.tenant_id,
model_provider_name=retrieval_model['reranking_model']['reranking_provider_name'],
model_name=retrieval_model['reranking_model']['reranking_model_name']
)
all_documents = hybrid_rerank.rerank(query, all_documents,
retrieval_model['score_threshold'] if retrieval_model['score_threshold_enabled'] else None,
retrieval_model['top_k'])
end = time.perf_counter()
logging.debug(f"Hit testing retrieve in {end - start:0.4f} seconds")
dataset_query = DatasetQuery(
dataset_id=dataset.id,
content=query,
source='hit_testing',
created_by_role='account',
created_by=account.id
)
db.session.add(dataset_query)
db.session.commit()
return cls.compact_retrieve_response(dataset, embeddings, query, all_documents)
@classmethod
def compact_retrieve_response(cls, dataset: Dataset, embeddings: Embeddings, query: str, documents: List[Document]):
text_embeddings = [
embeddings.embed_query(query)
]
text_embeddings.extend(embeddings.embed_documents([document.page_content for document in documents]))
tsne_position_data = cls.get_tsne_positions_from_embeddings(text_embeddings)
query_position = tsne_position_data.pop(0)
i = 0
records = []
for document in documents:
index_node_id = document.metadata['doc_id']
segment = db.session.query(DocumentSegment).filter(
DocumentSegment.dataset_id == dataset.id,
DocumentSegment.enabled == True,
DocumentSegment.status == 'completed',
DocumentSegment.index_node_id == index_node_id
).first()
if not segment:
i += 1
continue
record = {
"segment": segment,
"score": document.metadata.get('score', None),
"tsne_position": tsne_position_data[i]
}
records.append(record)
i += 1
return {
"query": {
"content": query,
"tsne_position": query_position,
},
"records": records
}
@classmethod
def get_tsne_positions_from_embeddings(cls, embeddings: list):
embedding_length = len(embeddings)
if embedding_length <= 1:
return [{'x': 0, 'y': 0}]
concatenate_data = np.array(embeddings).reshape(embedding_length, -1)
# concatenate_data = np.concatenate(embeddings)
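# t-SNE requires perplexity < number of samples, so clamp it for small result sets
# before projecting the embeddings down to 2D coordinates.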
perplexity = embedding_length / 2 + 1
if perplexity >= embedding_length:
perplexity = max(embedding_length - 1, 1)
tsne = TSNE(n_components=2, perplexity=perplexity, early_exaggeration=12.0)
data_tsne = tsne.fit_transform(concatenate_data)
tsne_position_data = []
for i in range(len(data_tsne)):
tsne_position_data.append({'x': float(data_tsne[i][0]), 'y': float(data_tsne[i][1])})
return tsne_position_data
@classmethod
def hit_testing_args_check(cls, args):
query = args['query']
if not query or len(query) > 250:
raise ValueError('Query is required and cannot exceed 250 characters')
| [] |
2024-01-10 | tonychen0716/dify | api~tasks~enable_segment_to_index_task.py | import datetime
import logging
import time
import click
from celery import shared_task
from langchain.schema import Document
from werkzeug.exceptions import NotFound
from core.index.index import IndexBuilder
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment
@shared_task(queue='dataset')
def enable_segment_to_index_task(segment_id: str):
"""
Async enable segment to index
:param segment_id:
Usage: enable_segment_to_index_task.delay(segment_id)
"""
logging.info(click.style('Start enable segment to index: {}'.format(segment_id), fg='green'))
start_at = time.perf_counter()
segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
if not segment:
raise NotFound('Segment not found')
if segment.status != 'completed':
return
indexing_cache_key = 'segment_{}_indexing'.format(segment.id)
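# The Redis cache key for this segment marks an in-progress indexing run; it is
# always cleared in the finally block below once indexing succeeds or fails.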
try:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
dataset = segment.dataset
if not dataset:
logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
return
dataset_document = segment.document
if not dataset_document:
logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
return
# save vector index
index = IndexBuilder.get_index(dataset, 'high_quality')
if index:
index.add_texts([document], duplicate_check=True)
# save keyword index
index = IndexBuilder.get_index(dataset, 'economy')
if index:
index.add_texts([document])
end_at = time.perf_counter()
logging.info(click.style('Segment enabled to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
except Exception as e:
logging.exception("enable segment to index failed")
segment.enabled = False
segment.disabled_at = datetime.datetime.utcnow()
segment.status = 'error'
segment.error = str(e)
db.session.commit()
finally:
redis_client.delete(indexing_cache_key)
| [] |
2024-01-10 | tonychen0716/dify | api~core~tool~dataset_multi_retriever_tool.py | import json
import threading
from typing import Type, Optional, List
from flask import current_app, Flask
from langchain.tools import BaseTool
from pydantic import Field, BaseModel
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.conversation_message_task import ConversationMessageTask
from core.embedding.cached_embedding import CacheEmbedding
from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
from core.model_providers.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment, Document
from services.retrieval_service import RetrievalService
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
class DatasetMultiRetrieverToolInput(BaseModel):
query: str = Field(..., description="dataset multi retriever and rerank")
class DatasetMultiRetrieverTool(BaseTool):
"""Tool for querying multi dataset."""
name: str = "dataset-"
args_schema: Type[BaseModel] = DatasetMultiRetrieverToolInput
description: str = "dataset multi retriever and rerank. "
tenant_id: str
dataset_ids: List[str]
top_k: int = 2
score_threshold: Optional[float] = None
reranking_provider_name: str
reranking_model_name: str
conversation_message_task: ConversationMessageTask
return_resource: bool
retriever_from: str
@classmethod
def from_dataset(cls, dataset_ids: List[str], tenant_id: str, **kwargs):
return cls(
name=f'dataset-{tenant_id}',
tenant_id=tenant_id,
dataset_ids=dataset_ids,
**kwargs
)
def _run(self, query: str) -> str:
threads = []
all_documents = []
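# One retrieval thread per dataset id; results from every dataset are collected
# into all_documents and reranked together once all threads have joined.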
for dataset_id in self.dataset_ids:
retrieval_thread = threading.Thread(target=self._retriever, kwargs={
'flask_app': current_app._get_current_object(),
'dataset_id': dataset_id,
'query': query,
'all_documents': all_documents
})
threads.append(retrieval_thread)
retrieval_thread.start()
for thread in threads:
thread.join()
# do rerank for searched documents
rerank = ModelFactory.get_reranking_model(
tenant_id=self.tenant_id,
model_provider_name=self.reranking_provider_name,
model_name=self.reranking_model_name
)
all_documents = rerank.rerank(query, all_documents, self.score_threshold, self.top_k)
hit_callback = DatasetIndexToolCallbackHandler(self.conversation_message_task)
hit_callback.on_tool_end(all_documents)
document_score_list = {}
for item in all_documents:
document_score_list[item.metadata['doc_id']] = item.metadata['score']
document_context_list = []
index_node_ids = [document.metadata['doc_id'] for document in all_documents]
segments = DocumentSegment.query.filter(
DocumentSegment.completed_at.isnot(None),
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True,
DocumentSegment.index_node_id.in_(index_node_ids)
).all()
if segments:
index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)}
sorted_segments = sorted(segments,
key=lambda segment: index_node_id_to_position.get(segment.index_node_id,
float('inf')))
for segment in sorted_segments:
if segment.answer:
document_context_list.append(f'question:{segment.content} answer:{segment.answer}')
else:
document_context_list.append(segment.content)
if self.return_resource:
context_list = []
resource_number = 1
for segment in sorted_segments:
dataset = Dataset.query.filter_by(
id=segment.dataset_id
).first()
document = Document.query.filter(Document.id == segment.document_id,
Document.enabled == True,
Document.archived == False,
).first()
if dataset and document:
source = {
'position': resource_number,
'dataset_id': dataset.id,
'dataset_name': dataset.name,
'document_id': document.id,
'document_name': document.name,
'data_source_type': document.data_source_type,
'segment_id': segment.id,
'retriever_from': self.retriever_from,
'score': document_score_list.get(segment.index_node_id, None)
}
if self.retriever_from == 'dev':
source['hit_count'] = segment.hit_count
source['word_count'] = segment.word_count
source['segment_position'] = segment.position
source['index_node_hash'] = segment.index_node_hash
if segment.answer:
source['content'] = f'question:{segment.content} \nanswer:{segment.answer}'
else:
source['content'] = segment.content
context_list.append(source)
resource_number += 1
hit_callback.return_retriever_resource_info(context_list)
return str("\n".join(document_context_list))
async def _arun(self, tool_input: str) -> str:
raise NotImplementedError()
def _retriever(self, flask_app: Flask, dataset_id: str, query: str, all_documents: List):
with flask_app.app_context():
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == self.tenant_id,
Dataset.id == dataset_id
).first()
if not dataset:
return []
# get the retrieval model; if it is not set, use the default
retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
if dataset.indexing_technique == "economy":
# use keyword table query
kw_table_index = KeywordTableIndex(
dataset=dataset,
config=KeywordTableConfig(
max_keywords_per_chunk=5
)
)
documents = kw_table_index.search(query, search_kwargs={'k': self.top_k})
if documents:
all_documents.extend(documents)
else:
try:
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except LLMBadRequestError:
return []
except ProviderTokenNotInitError:
return []
embeddings = CacheEmbedding(embedding_model)
documents = []
threads = []
if self.top_k > 0:
# retrieval_model source with semantic
if retrieval_model['search_method'] == 'semantic_search' or retrieval_model[
'search_method'] == 'hybrid_search':
embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
'flask_app': current_app._get_current_object(),
'dataset_id': str(dataset.id),
'query': query,
'top_k': self.top_k,
'score_threshold': self.score_threshold,
'reranking_model': None,
'all_documents': documents,
'search_method': 'hybrid_search',
'embeddings': embeddings
})
threads.append(embedding_thread)
embedding_thread.start()
# retrieval_model source with full text
if retrieval_model['search_method'] == 'full_text_search' or retrieval_model[
'search_method'] == 'hybrid_search':
full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search,
kwargs={
'flask_app': current_app._get_current_object(),
'dataset_id': str(dataset.id),
'query': query,
'search_method': 'hybrid_search',
'embeddings': embeddings,
'score_threshold': retrieval_model[
'score_threshold'] if retrieval_model[
'score_threshold_enabled'] else None,
'top_k': self.top_k,
'reranking_model': retrieval_model[
'reranking_model'] if retrieval_model[
'reranking_enable'] else None,
'all_documents': documents
})
threads.append(full_text_index_thread)
full_text_index_thread.start()
for thread in threads:
thread.join()
all_documents.extend(documents)
| [
"dataset multi retriever and rerank. "
] |
2024-01-10 | tonychen0716/dify | api~core~tool~dataset_retriever_tool.py | import json
import threading
from typing import Type, Optional, List
from flask import current_app
from langchain.tools import BaseTool
from pydantic import Field, BaseModel
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.conversation_message_task import ConversationMessageTask
from core.embedding.cached_embedding import CacheEmbedding
from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
from core.index.vector_index.vector_index import VectorIndex
from core.model_providers.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment, Document
from services.retrieval_service import RetrievalService
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
class DatasetRetrieverToolInput(BaseModel):
query: str = Field(..., description="Query for the dataset to be used to retrieve the dataset.")
class DatasetRetrieverTool(BaseTool):
"""Tool for querying a Dataset."""
name: str = "dataset"
args_schema: Type[BaseModel] = DatasetRetrieverToolInput
description: str = "use this to retrieve a dataset. "
tenant_id: str
dataset_id: str
top_k: int = 2
score_threshold: Optional[float] = None
conversation_message_task: ConversationMessageTask
return_resource: bool
retriever_from: str
@classmethod
def from_dataset(cls, dataset: Dataset, **kwargs):
description = dataset.description
if not description:
description = 'useful for when you want to answer queries about the ' + dataset.name
description = description.replace('\n', '').replace('\r', '')
return cls(
name=f'dataset-{dataset.id}',
tenant_id=dataset.tenant_id,
dataset_id=dataset.id,
description=description,
**kwargs
)
def _run(self, query: str) -> str:
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == self.tenant_id,
Dataset.id == self.dataset_id
).first()
if not dataset:
return ''
# get the retrieval model; if it is not set, use the default
retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
if dataset.indexing_technique == "economy":
# use keyword table query
kw_table_index = KeywordTableIndex(
dataset=dataset,
config=KeywordTableConfig(
max_keywords_per_chunk=5
)
)
documents = kw_table_index.search(query, search_kwargs={'k': self.top_k})
return str("\n".join([document.page_content for document in documents]))
else:
try:
embedding_model = ModelFactory.get_embedding_model(
tenant_id=dataset.tenant_id,
model_provider_name=dataset.embedding_model_provider,
model_name=dataset.embedding_model
)
except LLMBadRequestError:
return ''
except ProviderTokenNotInitError:
return ''
embeddings = CacheEmbedding(embedding_model)
documents = []
threads = []
if self.top_k > 0:
# retrieval source with semantic
if retrieval_model['search_method'] == 'semantic_search' or retrieval_model['search_method'] == 'hybrid_search':
embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
'flask_app': current_app._get_current_object(),
'dataset_id': str(dataset.id),
'query': query,
'top_k': self.top_k,
'score_threshold': retrieval_model['score_threshold'] if retrieval_model[
'score_threshold_enabled'] else None,
'reranking_model': retrieval_model['reranking_model'] if retrieval_model[
'reranking_enable'] else None,
'all_documents': documents,
'search_method': retrieval_model['search_method'],
'embeddings': embeddings
})
threads.append(embedding_thread)
embedding_thread.start()
# retrieval_model source with full text
if retrieval_model['search_method'] == 'full_text_search' or retrieval_model['search_method'] == 'hybrid_search':
full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search, kwargs={
'flask_app': current_app._get_current_object(),
'dataset_id': str(dataset.id),
'query': query,
'search_method': retrieval_model['search_method'],
'embeddings': embeddings,
'score_threshold': retrieval_model['score_threshold'] if retrieval_model[
'score_threshold_enabled'] else None,
'top_k': self.top_k,
'reranking_model': retrieval_model['reranking_model'] if retrieval_model[
'reranking_enable'] else None,
'all_documents': documents
})
threads.append(full_text_index_thread)
full_text_index_thread.start()
for thread in threads:
thread.join()
# hybrid search: rerank after all documents have been searched
if retrieval_model['search_method'] == 'hybrid_search':
hybrid_rerank = ModelFactory.get_reranking_model(
tenant_id=dataset.tenant_id,
model_provider_name=retrieval_model['reranking_model']['reranking_provider_name'],
model_name=retrieval_model['reranking_model']['reranking_model_name']
)
documents = hybrid_rerank.rerank(query, documents,
retrieval_model['score_threshold'] if retrieval_model['score_threshold_enabled'] else None,
self.top_k)
else:
documents = []
hit_callback = DatasetIndexToolCallbackHandler(self.conversation_message_task)
hit_callback.on_tool_end(documents)
document_score_list = {}
if dataset.indexing_technique != "economy":
for item in documents:
document_score_list[item.metadata['doc_id']] = item.metadata['score']
document_context_list = []
index_node_ids = [document.metadata['doc_id'] for document in documents]
segments = DocumentSegment.query.filter(DocumentSegment.dataset_id == self.dataset_id,
DocumentSegment.completed_at.isnot(None),
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True,
DocumentSegment.index_node_id.in_(index_node_ids)
).all()
if segments:
index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)}
sorted_segments = sorted(segments,
key=lambda segment: index_node_id_to_position.get(segment.index_node_id,
float('inf')))
for segment in sorted_segments:
if segment.answer:
document_context_list.append(f'question:{segment.content} answer:{segment.answer}')
else:
document_context_list.append(segment.content)
if self.return_resource:
context_list = []
resource_number = 1
for segment in sorted_segments:
context = {}
document = Document.query.filter(Document.id == segment.document_id,
Document.enabled == True,
Document.archived == False,
).first()
if dataset and document:
source = {
'position': resource_number,
'dataset_id': dataset.id,
'dataset_name': dataset.name,
'document_id': document.id,
'document_name': document.name,
'data_source_type': document.data_source_type,
'segment_id': segment.id,
'retriever_from': self.retriever_from,
'score': document_score_list.get(segment.index_node_id, None)
}
if self.retriever_from == 'dev':
source['hit_count'] = segment.hit_count
source['word_count'] = segment.word_count
source['segment_position'] = segment.position
source['index_node_hash'] = segment.index_node_hash
if segment.answer:
source['content'] = f'question:{segment.content} \nanswer:{segment.answer}'
else:
source['content'] = segment.content
context_list.append(source)
resource_number += 1
hit_callback.return_retriever_resource_info(context_list)
return str("\n".join(document_context_list))
async def _arun(self, tool_input: str) -> str:
raise NotImplementedError()
| [
"use this to retrieve a dataset. "
] |
2024-01-10 | tonychen0716/dify | api~core~orchestrator_rule_parser.py | import json
import threading
from typing import Optional, List
from flask import Flask
from langchain import WikipediaAPIWrapper
from langchain.callbacks.manager import Callbacks
from langchain.memory.chat_memory import BaseChatMemory
from langchain.tools import BaseTool, Tool, WikipediaQueryRun
from pydantic import BaseModel, Field
from core.agent.agent.multi_dataset_router_agent import MultiDatasetRouterAgent
from core.agent.agent.output_parser.structured_chat import StructuredChatOutputParser
from core.agent.agent.structed_multi_dataset_router_agent import StructuredMultiDatasetRouterAgent
from core.agent.agent_executor import AgentExecutor, PlanningStrategy, AgentConfiguration
from core.callback_handler.agent_loop_gather_callback_handler import AgentLoopGatherCallbackHandler
from core.callback_handler.dataset_tool_callback_handler import DatasetToolCallbackHandler
from core.callback_handler.main_chain_gather_callback_handler import MainChainGatherCallbackHandler
from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler
from core.conversation_message_task import ConversationMessageTask
from core.model_providers.error import ProviderTokenNotInitError
from core.model_providers.model_factory import ModelFactory
from core.model_providers.models.entity.model_params import ModelKwargs, ModelMode
from core.model_providers.models.llm.base import BaseLLM
from core.tool.current_datetime_tool import DatetimeTool
from core.tool.dataset_multi_retriever_tool import DatasetMultiRetrieverTool
from core.tool.dataset_retriever_tool import DatasetRetrieverTool
from core.tool.provider.serpapi_provider import SerpAPIToolProvider
from core.tool.serpapi_wrapper import OptimizedSerpAPIWrapper, OptimizedSerpAPIInput
from core.tool.web_reader_tool import WebReaderTool
from extensions.ext_database import db
from models.dataset import Dataset, DatasetProcessRule
from models.model import AppModelConfig
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
class OrchestratorRuleParser:
"""Parse the orchestrator rule to entities."""
def __init__(self, tenant_id: str, app_model_config: AppModelConfig):
self.tenant_id = tenant_id
self.app_model_config = app_model_config
def to_agent_executor(self, conversation_message_task: ConversationMessageTask, memory: Optional[BaseChatMemory],
rest_tokens: int, chain_callback: MainChainGatherCallbackHandler, tenant_id: str,
retriever_from: str = 'dev') -> Optional[AgentExecutor]:
if not self.app_model_config.agent_mode_dict:
return None
agent_mode_config = self.app_model_config.agent_mode_dict
model_dict = self.app_model_config.model_dict
return_resource = self.app_model_config.retriever_resource_dict.get('enabled', False)
chain = None
if agent_mode_config and agent_mode_config.get('enabled'):
tool_configs = agent_mode_config.get('tools', [])
agent_provider_name = model_dict.get('provider', 'openai')
agent_model_name = model_dict.get('name', 'gpt-4')
dataset_configs = self.app_model_config.dataset_configs_dict
agent_model_instance = ModelFactory.get_text_generation_model(
tenant_id=self.tenant_id,
model_provider_name=agent_provider_name,
model_name=agent_model_name,
model_kwargs=ModelKwargs(
temperature=0.2,
top_p=0.3,
max_tokens=1500
)
)
# add agent callback to record agent thoughts
agent_callback = AgentLoopGatherCallbackHandler(
model_instance=agent_model_instance,
conversation_message_task=conversation_message_task
)
chain_callback.agent_callback = agent_callback
agent_model_instance.add_callbacks([agent_callback])
planning_strategy = PlanningStrategy(agent_mode_config.get('strategy', 'router'))
# only OpenAI chat models (including Azure) support function call; otherwise use ReACT instead
if not agent_model_instance.support_function_call:
if planning_strategy == PlanningStrategy.FUNCTION_CALL:
planning_strategy = PlanningStrategy.REACT
elif planning_strategy == PlanningStrategy.ROUTER:
planning_strategy = PlanningStrategy.REACT_ROUTER
try:
summary_model_instance = ModelFactory.get_text_generation_model(
tenant_id=self.tenant_id,
model_provider_name=agent_provider_name,
model_name=agent_model_name,
model_kwargs=ModelKwargs(
temperature=0,
max_tokens=500
),
deduct_quota=False
)
except ProviderTokenNotInitError as e:
summary_model_instance = None
tools = self.to_tools(
tool_configs=tool_configs,
callbacks=[agent_callback, DifyStdOutCallbackHandler()],
agent_model_instance=agent_model_instance,
conversation_message_task=conversation_message_task,
rest_tokens=rest_tokens,
return_resource=return_resource,
retriever_from=retriever_from,
dataset_configs=dataset_configs,
tenant_id=tenant_id
)
if len(tools) == 0:
return None
agent_configuration = AgentConfiguration(
strategy=planning_strategy,
model_instance=agent_model_instance,
tools=tools,
summary_model_instance=summary_model_instance,
memory=memory,
callbacks=[chain_callback, agent_callback],
max_iterations=10,
max_execution_time=400.0,
early_stopping_method="generate"
)
return AgentExecutor(agent_configuration)
return chain
def to_tools(self, tool_configs: list, callbacks: Callbacks = None, **kwargs) -> list[BaseTool]:
"""
Convert app agent tool configs to tools
:param tool_configs: app agent tool configs
:param callbacks:
:return:
"""
tools = []
dataset_tools = []
for tool_config in tool_configs:
tool_type = list(tool_config.keys())[0]
tool_val = list(tool_config.values())[0]
if not tool_val.get("enabled") or tool_val.get("enabled") is not True:
continue
tool = None
if tool_type == "dataset":
dataset_tools.append(tool_config)
elif tool_type == "web_reader":
tool = self.to_web_reader_tool(tool_config=tool_val, **kwargs)
elif tool_type == "google_search":
tool = self.to_google_search_tool(tool_config=tool_val, **kwargs)
elif tool_type == "wikipedia":
tool = self.to_wikipedia_tool(tool_config=tool_val, **kwargs)
elif tool_type == "current_datetime":
tool = self.to_current_datetime_tool(tool_config=tool_val, **kwargs)
if tool:
if tool.callbacks is not None:
tool.callbacks.extend(callbacks)
else:
tool.callbacks = callbacks
tools.append(tool)
# format dataset tool
if len(dataset_tools) > 0:
dataset_retriever_tools = self.to_dataset_retriever_tool(tool_configs=dataset_tools, **kwargs)
if dataset_retriever_tools:
tools.extend(dataset_retriever_tools)
return tools
def to_dataset_retriever_tool(self, tool_configs: List, conversation_message_task: ConversationMessageTask,
return_resource: bool = False, retriever_from: str = 'dev',
**kwargs) \
-> Optional[List[BaseTool]]:
"""
A dataset tool is a tool that can be used to retrieve information from a dataset
:param tool_configs:
:param conversation_message_task:
:param return_resource:
:param retriever_from:
:return:
"""
dataset_configs = kwargs['dataset_configs']
retrieval_model = dataset_configs.get('retrieval_model', 'single')
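# 'single' builds one DatasetRetrieverTool per dataset using that dataset's own
# retrieval settings; 'multiple' builds a single DatasetMultiRetrieverTool that
# searches all datasets and reranks the merged results.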
tools = []
dataset_ids = []
tenant_id = None
for tool_config in tool_configs:
# get dataset from dataset id
dataset = db.session.query(Dataset).filter(
Dataset.tenant_id == self.tenant_id,
Dataset.id == tool_config.get('dataset').get("id")
).first()
if not dataset:
continue
if dataset and dataset.available_document_count == 0 and dataset.available_segment_count == 0:
continue
dataset_ids.append(dataset.id)
if retrieval_model == 'single':
retrieval_model_config = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
top_k = retrieval_model_config['top_k']
# dynamically adjust top_k when the remaining token number is not enough to support top_k
# top_k = self._dynamic_calc_retrieve_k(dataset=dataset, top_k=top_k, rest_tokens=rest_tokens)
score_threshold = None
score_threshold_enabled = retrieval_model_config.get("score_threshold_enabled")
if score_threshold_enabled:
score_threshold = retrieval_model_config.get("score_threshold")
tool = DatasetRetrieverTool.from_dataset(
dataset=dataset,
top_k=top_k,
score_threshold=score_threshold,
callbacks=[DatasetToolCallbackHandler(conversation_message_task)],
conversation_message_task=conversation_message_task,
return_resource=return_resource,
retriever_from=retriever_from
)
tools.append(tool)
if retrieval_model == 'multiple':
tool = DatasetMultiRetrieverTool.from_dataset(
dataset_ids=dataset_ids,
tenant_id=kwargs['tenant_id'],
top_k=dataset_configs.get('top_k', 2),
score_threshold=dataset_configs.get('score_threshold', 0.5) if dataset_configs.get('score_threshold_enabled', False) else None,
callbacks=[DatasetToolCallbackHandler(conversation_message_task)],
conversation_message_task=conversation_message_task,
return_resource=return_resource,
retriever_from=retriever_from,
reranking_provider_name=dataset_configs.get('reranking_model').get('reranking_provider_name'),
reranking_model_name=dataset_configs.get('reranking_model').get('reranking_model_name')
)
tools.append(tool)
return tools
def to_web_reader_tool(self, tool_config: dict, agent_model_instance: BaseLLM, **kwargs) -> Optional[BaseTool]:
"""
A tool for reading web pages
:return:
"""
try:
summary_model_instance = ModelFactory.get_text_generation_model(
tenant_id=self.tenant_id,
model_provider_name=agent_model_instance.model_provider.provider_name,
model_name=agent_model_instance.name,
model_kwargs=ModelKwargs(
temperature=0,
max_tokens=500
),
deduct_quota=False
)
except ProviderTokenNotInitError:
summary_model_instance = None
tool = WebReaderTool(
model_instance=summary_model_instance if summary_model_instance else None,
max_chunk_length=4000,
continue_reading=True
)
return tool
def to_google_search_tool(self, tool_config: dict, **kwargs) -> Optional[BaseTool]:
tool_provider = SerpAPIToolProvider(tenant_id=self.tenant_id)
func_kwargs = tool_provider.credentials_to_func_kwargs()
if not func_kwargs:
return None
tool = Tool(
name="google_search",
description="A tool for performing a Google search and extracting snippets and webpages "
"when you need to search for something you don't know or when your information "
"is not up to date. "
"Input should be a search query.",
func=OptimizedSerpAPIWrapper(**func_kwargs).run,
args_schema=OptimizedSerpAPIInput
)
return tool
def to_current_datetime_tool(self, tool_config: dict, **kwargs) -> Optional[BaseTool]:
tool = DatetimeTool()
return tool
def to_wikipedia_tool(self, tool_config: dict, **kwargs) -> Optional[BaseTool]:
class WikipediaInput(BaseModel):
query: str = Field(..., description="search query.")
return WikipediaQueryRun(
name="wikipedia",
api_wrapper=WikipediaAPIWrapper(doc_content_chars_max=4000),
args_schema=WikipediaInput
)
@classmethod
def _dynamic_calc_retrieve_k(cls, dataset: Dataset, top_k: int, rest_tokens: int) -> int:
if rest_tokens == -1:
return top_k
processing_rule = dataset.latest_process_rule
if not processing_rule:
return top_k
if processing_rule.mode == "custom":
rules = processing_rule.rules_dict
if not rules:
return top_k
segmentation = rules["segmentation"]
segment_max_tokens = segmentation["max_tokens"]
else:
segment_max_tokens = DatasetProcessRule.AUTOMATIC_RULES['segmentation']['max_tokens']
# when rest_tokens is less than default context tokens
if rest_tokens < segment_max_tokens * top_k:
return rest_tokens // segment_max_tokens
return min(top_k, 10)
| [] |
2024-01-10 | tonychen0716/dify | api~services~retrieval_service.py |
from typing import Optional
from flask import current_app, Flask
from langchain.embeddings.base import Embeddings
from core.index.vector_index.vector_index import VectorIndex
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.dataset import Dataset
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
class RetrievalService:
@classmethod
def embedding_search(cls, flask_app: Flask, dataset_id: str, query: str,
top_k: int, score_threshold: Optional[float], reranking_model: Optional[dict],
all_documents: list, search_method: str, embeddings: Embeddings):
with flask_app.app_context():
dataset = db.session.query(Dataset).filter(
Dataset.id == dataset_id
).first()
vector_index = VectorIndex(
dataset=dataset,
config=current_app.config,
embeddings=embeddings
)
documents = vector_index.search(
query,
search_type='similarity_score_threshold',
search_kwargs={
'k': top_k,
'score_threshold': score_threshold,
'filter': {
'group_id': [dataset.id]
}
}
)
if documents:
if reranking_model and search_method == 'semantic_search':
rerank = ModelFactory.get_reranking_model(
tenant_id=dataset.tenant_id,
model_provider_name=reranking_model['reranking_provider_name'],
model_name=reranking_model['reranking_model_name']
)
all_documents.extend(rerank.rerank(query, documents, score_threshold, len(documents)))
else:
all_documents.extend(documents)
@classmethod
def full_text_index_search(cls, flask_app: Flask, dataset_id: str, query: str,
top_k: int, score_threshold: Optional[float], reranking_model: Optional[dict],
all_documents: list, search_method: str, embeddings: Embeddings):
with flask_app.app_context():
dataset = db.session.query(Dataset).filter(
Dataset.id == dataset_id
).first()
vector_index = VectorIndex(
dataset=dataset,
config=current_app.config,
embeddings=embeddings
)
documents = vector_index.search_by_full_text_index(
query,
search_type='similarity_score_threshold',
top_k=top_k
)
if documents:
if reranking_model and search_method == 'full_text_search':
rerank = ModelFactory.get_reranking_model(
tenant_id=dataset.tenant_id,
model_provider_name=reranking_model['reranking_provider_name'],
model_name=reranking_model['reranking_model_name']
)
all_documents.extend(rerank.rerank(query, documents, score_threshold, len(documents)))
else:
all_documents.extend(documents)
| [] |
2024-01-10 | tonychen0716/dify | api~core~agent~agent~output_parser~retirver_dataset_agent.py | import json
from typing import Tuple, List, Any, Union, Sequence, Optional, cast
from langchain.agents import OpenAIFunctionsAgent, BaseSingleActionAgent
from langchain.agents.openai_functions_agent.base import _format_intermediate_steps, _parse_ai_message
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.prompts.chat import BaseMessagePromptTemplate
from langchain.schema import AgentAction, AgentFinish, SystemMessage, Generation, LLMResult, AIMessage
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool
from pydantic import root_validator
from core.model_providers.models.entity.message import to_prompt_messages
from core.model_providers.models.llm.base import BaseLLM
from core.third_party.langchain.llms.fake import FakeLLM
from core.tool.dataset_retriever_tool import DatasetRetrieverTool
class MultiDatasetRouterAgent(OpenAIFunctionsAgent):
"""
A Multi Dataset Retrieve Agent driven by a Router.
"""
model_instance: BaseLLM
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator
def validate_llm(cls, values: dict) -> dict:
return values
def should_use_agent(self, query: str):
"""
Return whether the agent should be used for the given query.
:param query:
:return:
"""
return True
def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
if len(self.tools) == 0:
return AgentFinish(return_values={"output": ''}, log='')
elif len(self.tools) == 1:
tool = next(iter(self.tools))
tool = cast(DatasetRetrieverTool, tool)
rst = tool.run(tool_input={'query': kwargs['input']})
# output = ''
# rst_json = json.loads(rst)
# for item in rst_json:
# output += f'{item["content"]}\n'
return AgentFinish(return_values={"output": rst}, log=rst)
if intermediate_steps:
_, observation = intermediate_steps[-1]
return AgentFinish(return_values={"output": observation}, log=observation)
try:
agent_decision = self.real_plan(intermediate_steps, callbacks, **kwargs)
if isinstance(agent_decision, AgentAction):
tool_inputs = agent_decision.tool_input
if isinstance(tool_inputs, dict) and 'query' in tool_inputs and 'chat_history' not in kwargs:
tool_inputs['query'] = kwargs['input']
agent_decision.tool_input = tool_inputs
else:
agent_decision.return_values['output'] = ''
return agent_decision
except Exception as e:
new_exception = self.model_instance.handle_exceptions(e)
raise new_exception
def real_plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = _format_intermediate_steps(intermediate_steps)
selected_inputs = {
k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
prompt_messages = to_prompt_messages(messages)
result = self.model_instance.run(
messages=prompt_messages,
functions=self.functions,
)
ai_message = AIMessage(
content=result.content,
additional_kwargs={
'function_call': result.function_call
}
)
agent_decision = _parse_ai_message(ai_message)
return agent_decision
async def aplan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
raise NotImplementedError()
@classmethod
def from_llm_and_tools(
cls,
model_instance: BaseLLM,
tools: Sequence[BaseTool],
callback_manager: Optional[BaseCallbackManager] = None,
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
system_message: Optional[SystemMessage] = SystemMessage(
content="You are a helpful AI assistant."
),
**kwargs: Any,
) -> BaseSingleActionAgent:
prompt = cls.create_prompt(
extra_prompt_messages=extra_prompt_messages,
system_message=system_message,
)
return cls(
model_instance=model_instance,
llm=FakeLLM(response=''),
prompt=prompt,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
| [
"You are a helpful AI assistant."
] |
2024-01-10 | ghas-results/constrained-rl | ceres~baselines~ppo1~pposgd_simple.py | '''
This is the learn function from OpenAI's baselines.ppo1.pposgd_simple
rewritten with individual functions in .pposgd_simple_helper
OpenAI Baselines is licensed under the MIT License, see LICENSE
'''
from baselines.common.mpi_moments import mpi_moments
from baselines.ppo1.pposgd_simple import traj_segment_generator
from baselines import logger
import baselines.common.tf_util as U
import tensorflow as tf, numpy as np
from mpi4py import MPI
from .pposgd_simple_helper import build_policy_training_vars, build_counters, adjust_policy_learning_rate, update_policy, log_iter_info, calc_end_training
def learn(env, policy_fn, *,
timesteps_per_actorbatch, # timesteps per actor per update
clip_param, entcoeff, # clipping parameter epsilon, entropy coeff
optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers
gamma, lam, # advantage estimation
max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint
callback=None, # you can do anything in the callback, since it takes locals(), globals()
adam_epsilon=1e-5,
schedule='constant' # annealing for stepsize parameters (epsilon and adam)
):
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_fn("pi", ob_space, ac_space) # Construct network for new policy
oldpi = policy_fn("oldpi", ob_space, ac_space) # Network for old policy
loss_names, var_list, lossandgrad, adam, assign_old_eq_new, compute_losses = build_policy_training_vars(pi, oldpi, clip_param, entcoeff, adam_epsilon)
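    # Helpers for averaging losses and gathering per-worker statistics across MPI ranks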
mpi_moments_fn = lambda losses: mpi_moments(losses, axis=0)
allgather_fn = MPI.COMM_WORLD.allgather
U.initialize()
adam.sync()
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, timesteps_per_actorbatch, stochastic=True)
iters_so_far, episodes_so_far, timesteps_so_far, tstart, lenbuffer, rewbuffer = build_counters()
assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, "Only one time constraint permitted"
while True:
if callback: callback(locals(), globals())
if calc_end_training(max_timesteps, timesteps_so_far,
max_episodes, episodes_so_far,
max_iters, iters_so_far,
max_seconds, tstart):
break
logger.log("********** Iteration %i ************"%iters_so_far)
seg = seg_gen.__next__()
cur_lrmult = adjust_policy_learning_rate(schedule, max_timesteps, timesteps_so_far, max_episodes, episodes_so_far, max_iters, iters_so_far)
vpredbefore, tdlamret, optim_batchsize = update_policy(pi, seg, gamma, lam,
logger, optim_epochs, optim_batchsize, optim_stepsize, cur_lrmult,
loss_names, lossandgrad, adam, assign_old_eq_new, compute_losses,
mpi_moments_fn, allgather_fn)
episodes_so_far, timesteps_so_far = log_iter_info(lenbuffer, rewbuffer, tstart,
vpredbefore, tdlamret, seg,
episodes_so_far, timesteps_so_far,
MPI.COMM_WORLD.Get_rank()==0)
iters_so_far += 1
return pi
| [] |
2024-01-10 | elsababyy/openai-be | backendcode-auth.py | from flask import Flask, request, jsonify,session, abort, redirect, request
from flask.wrappers import Response
from flask_cors import CORS
from flask_pymongo import PyMongo
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
import requests
import json
import pathlib
import os
# from dotenv import load_dotenv, find_dotenv
import openai
from google.oauth2 import id_token
from google_auth_oauthlib.flow import Flow
from pip._vendor import cachecontrol
import google.auth.transport.requests
import jwt
os.environ["OPENAI_API_KEY"] = "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"
# Initialize Flask app
app = Flask(__name__)
CORS(app)
app.config['Access-Control-Allow-Origin'] = '*'
app.config["Access-Control-Allow-Headers"]="Content-Type"
app.config["MONGO_URI"] = "mongodb://localhost:27017/openai"
mongo = PyMongo(app)
app.secret_key = "CodeSpecialist.com"
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
GOOGLE_CLIENT_ID = "990487476652-dct8v0k5a9l9776ci05g3dq15p8muj33.apps.googleusercontent.com"
client_secrets_file = os.path.join(pathlib.Path(__file__).parent, "client_secret.json")
flow = Flow.from_client_secrets_file(
client_secrets_file=client_secrets_file,
scopes=[
"https://www.googleapis.com/auth/userinfo.profile",
"https://www.googleapis.com/auth/userinfo.email",
"openid",
],
redirect_uri="http://127.0.0.1:5000/callback",
)
# wrapper
def login_required(function):
    def wrapper(*args, **kwargs):
        auth_header = request.headers.get("Authorization")
        if auth_header is None or "Bearer " not in auth_header:
            # no JWT was found in the "Authorization" header
            return abort(401)
        encoded_jwt = auth_header.split("Bearer ")[1]  # extract the actual token
        return function()
return wrapper
def Generate_JWT(payload):  # payload: dictionary containing the data to include in the JWT
    encoded_jwt = jwt.encode(payload, app.secret_key, algorithm='HS256')  # HS256 is a commonly used algorithm that signs the token securely
return encoded_jwt
@app.route("/callback")
def callback():
#Fetch the access token and credentials using the authorization response
flow.fetch_token(authorization_response=request.url)
credentials = flow.credentials
#Create a requests session and a token request
request_session = requests.session()
token_request = google.auth.transport.requests.Request(session=request_session)
#Verify the ID token obtained from Google
id_info = id_token.verify_oauth2_token(
id_token=credentials._id_token, request=token_request,
audience=GOOGLE_CLIENT_ID
)
#Store the user's Google ID in the session
session["google_id"] = id_info.get("sub")
    # remove the audience ("aud") claim from the payload, as keeping it causes an error
del id_info['aud']
#Generate a JWT token using the ID token's payload data
jwt_token=Generate_JWT(id_info)
#Prepare user data for insertion into a MongoDB collection
data={
'name':id_info.get('name'),
'email':id_info.get('email'),
'picture':id_info.get('picture')
}
#Insert the user data into a MongoDB collection named 'users'
mongo.db.users.insert_one(data)
#Redirect the user to a specific URL with the JWT token as a query parameter
return redirect(f"http://localhost:5173/chat?jwt={jwt_token}")
@app.route("/auth/google")
def login():
#Generate the authorization URL and state
authorization_url, state = flow.authorization_url()
# Store the state so the callback can verify the auth server response.
session["state"] = state
return Response(
response=json.dumps({'auth_url':authorization_url}),
status=200,
mimetype='application/json'
)
@app.route("/logout")
def logout():
#clear the local storage from frontend
session.clear()
return Response(
response=json.dumps({"message":"Logged out"}),
status=202,
mimetype='application/json'
)
@app.route("/home")
@login_required
def home_page_user():
#Extract the JWT token from the "Authorization" header
encoded_jwt=request.headers.get("Authorization").split("Bearer ")[1]
#Attempt to decode and verify the JWT token
try:
decoded_jwt=jwt.decode(encoded_jwt, app.secret_key, algorithms=['HS256',])
print(decoded_jwt)
except Exception as e:
#Return an error response if JWT decoding fails
return Response(
response=json.dumps({"message":"Decoding JWT Failed", "exception":e.args}),
status=500,
mimetype='application/json'
)
#Return a JSON response containing the decoded JWT payload
return Response(
response=json.dumps(decoded_jwt),
status=200,
mimetype='application/json')
@app.route('/get_response', methods=['POST'])
def chatgpt():
# Get data from the request
data = request.get_json()
user_input = data.get("user_input") # Extract the user input from the JSON data
# Create a response using the OpenAI API
response = openai.Completion.create(
engine="text-davinci-003", # Use "text-davinci-003" as the engine
prompt=user_input,
max_tokens=3000, # Limit the response to a certain number of tokens
temperature=0.7 # Adjust the temperature parameter
)
reply={
"user_input":user_input,
"response":response.choices[0].text
}
# Extract and return the response text
return jsonify(reply),201
@app.route('/get-history',methods=['GET'])
def get_history():
#Query the MongoDB collection named 'test' to fetch data
userd = mongo.db.test.find({})
#Serialize the retrieved data
serialized_user_data = []
for user in userd:
user['_id'] = str(user['_id'])
serialized_user_data.append(user)
return jsonify(serialized_user_data), 201
@app.route('/save-history',methods=['POST'])
def save_history():
try:
# Get JSON data from the request
response_data = request.get_json()
if not response_data:
print("Error: The response_data list is empty.")
else:
# Proceed with processing the data
json_dict = {'chatd':response_data}
print(json_dict)
mongo.db.test.insert_one(json_dict)
# Return a JSON response indicating success
return jsonify({'message': 'Data inserted successfully'}), 201
except Exception as e:
# Handle exceptions (e.g., invalid JSON, database errors)
return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True) | [] |
2024-01-10 | jedi4ever/learning-llms-and-genai-for-dev-sec-ops | lessons~developer~_lessonshelper~pretty_print_callback_handler.py | from typing import Any, Dict, List, Optional, Sequence, Union
from langchain.schema import AgentAction, BaseMessage, LLMResult, AgentFinish
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema.document import Document
from uuid import UUID
class PrettyPrintCallbackHandler(BaseCallbackHandler):
"""Base callback handler that can be used to handle callbacks from langchain."""
### LLLms
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> Any:
"""Run when LLM starts running."""
# print(f"on_llm_start -serial {serialized}")
for prompt in prompts:
print(f"\n=============\n[llm][start] - prompts: {prompt}")
def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
"""Run on new LLM token. Only available when streaming is enabled."""
print(f"[llm][new_token] {token}")
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
"""Run when LLM ends running."""
for generation in response.generations:
print(f"\n=============\n[llm][end] - generation {generation[0].text}")
# print(f"on_llm_end {response}")
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when LLM errors."""
### Chat model
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> Any:
"""Run when Chat Model starts running."""
# print(f"on_chat_model_start -serial {serialized}")
for message in messages:
for sub in message:
print(f"[chat_model][start] - prompts : {sub.content}")
#### Chains
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> Any:
"""Run when chain starts running."""
# print(f"[chain][start]{serialized}, {inputs}")
print(f"[chain][start] - inputs {inputs}")
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
"""Run when chain ends running."""
print(f"[chain][end] - outputs: {outputs}")
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when chain errors."""
### Tools
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> Any:
"""Run when tool starts running."""
print(f"[tool][start] - input_str: {input_str}")
def on_tool_end(self, output: str, **kwargs: Any) -> Any:
"""Run when tool ends running."""
print(f"[tool][end] - output: {output}")
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when tool errors."""
print(f"[tool][error] - error: {error}")
### Text
def on_text(self, text: str, **kwargs: Any) -> Any:
"""Run on arbitrary text."""
print(f"\n=============\n[text] {text}\n==============")
### Agent
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
print(f"[agent][action] - action: {action}")
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run on agent end."""
print(f"[agent][finish] - finish: {finish}")
### Retrievers
def on_retriever_end(
self,
documents: Sequence[Document],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
"""Run on retriever end."""
def on_retriever_error(
self,
error: Union[Exception, KeyboardInterrupt],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
"""Run on retriever error."""
| [] |
2024-01-10 | jiaw3i/aiapi | app~azureai~auzre_ai.py | # This file is used to handle the azure ai chatbot
import uuid
from queue import Queue
from cachetools import TTLCache
from flask import Blueprint, request, Response
from langchain import ConversationChain, LLMChain
from langchain.chat_models import AzureChatOpenAI
import os
import threading
from langchain.memory import ConversationBufferMemory
from app.azureai.handler.stream_handler import QueueCallbackHandler
from app.azureai.langchain.chain.prompts import message_prompt
azure_ai = Blueprint('azure_ai', __name__)
os.environ['OPENAI_API_TYPE'] = "azure"
os.environ['OPENAI_API_BASE'] = 'https://jiawei.openai.azure.com/'
# os.environ['OPENAI_API_VERSION'] = "2022-12-01"
os.environ['OPENAI_API_VERSION'] = "2023-03-15-preview"
ai = AzureChatOpenAI(
deployment_name='gpt-35-turbo',
model_name='gpt-35-turbo',
streaming=True,
temperature=0.9,
)
sessions = {}
ttl_cache = TTLCache(maxsize=10, ttl=60 * 60 * 24)
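# sessions keeps one ConversationChain (with buffer memory) per conversation id,
# while ttl_cache stores issued verify tokens for 24 hours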
def llm_thread(q, input):
if "id" in input:
uid = input["id"]
if uid in sessions:
chain = sessions[uid]
else:
chain = ConversationChain(
llm=ai,
memory=ConversationBufferMemory()
)
sessions[uid] = chain
else:
chain = LLMChain(
llm=ai,
prompt=message_prompt,
)
chain.run(input["message"], callbacks=[QueueCallbackHandler(q)])
def stream(q, input):
threading.Thread(target=llm_thread, args=(q, input)).start()
while True:
message = q.get()
message = message.replace('\n', '\n;')
if message != "":
yield f'data: %s\n\n' % message
@azure_ai.route('/chat', methods=['GET', 'POST'])
def chat():
q = Queue()
if request.method == 'POST':
input = request.get_json()
verify_token = input.get("verifyToken", None)
if verify_token in ttl_cache:
# asyncio.run(dochat(input_text, q))
return Response(stream(q, input), mimetype='text/event-stream')
else:
return {
"code": 200,
"success": False,
"data": "verify token error"
}
else:
return Response(None, mimetype='text/event-stream')
@azure_ai.route('/clear')
def clear():
uid = request.args.get("id")
if uid in sessions:
del sessions[uid]
return {
"code": 200,
"success": True,
"data": "success"
}
@azure_ai.route('/verify', methods=['GET', 'POST'])
def verify():
param = request.get_json()
verify_code = param["verifyCode"]
    # generate a verify token and store it in the TTL cache
verify_token = uuid.uuid4().hex
ttl_cache[verify_token] = verify_code
if verify_code == os.getenv("VERIFY_CODE", "qpzm"):
return {
"code": 200,
"success": True,
"data": verify_token
}
return {
"code": 200,
"success": False,
"data": "认证失败"
}
| [] |
2024-01-10 | ShawBC/streamlithackathon | chat_with_obsidian_streamlit.py | import streamlit as st
import zipfile
import os
# 1. Import necessary libraries
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.document_loaders import ObsidianLoader
from langchain.vectorstores import Chroma
from langchain.indexes import VectorstoreIndexCreator
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain import HuggingFaceHub
# 2. Set up Streamlit UI elements
st.title("Obsidian Query Interface")
# Ask user for huggingface api key
HUGGINGFACE_API_KEY = st.sidebar.text_input("HUGGINGFACE API Key", type="password")
os.environ['HUGGINGFACEHUB_API_TOKEN'] = HUGGINGFACE_API_KEY
# get repo id from Hugging Face
repo_id = "google/flan-ul2"
# Upload zipped Obsidian vault
uploaded_vault = st.file_uploader("Upload a zipped Obsidian vault", type="zip")
# Extract the vault if uploaded
if uploaded_vault:
vault_dir = "/tmp/obsidian_vault"
with zipfile.ZipFile(uploaded_vault, 'r') as zip_ref:
zip_ref.extractall(vault_dir)
st.success("Vault uploaded and extracted successfully!")
# Enter a query
query = st.text_input("Enter your query:")
temperature = st.sidebar.slider("Temperature", min_value=0.1, max_value=5.0, value=1.0, step=0.1)
max_length = st.sidebar.slider("Max Length", min_value=50, max_value=5000, value=2048, step=12)
# Button to execute query
if st.button("Execute Query"):
    if query and uploaded_vault and os.path.exists(vault_dir):
# 3. Load the model and data using the provided code logic
loader = ObsidianLoader(vault_dir)
embeddings = SentenceTransformerEmbeddings(model_name="paraphrase-MiniLM-L6-v2")
callbacks = [StreamingStdOutCallbackHandler()]
llm = HuggingFaceHub(repo_id=repo_id,
callbacks=callbacks,
verbose=True,
model_kwargs={"temperature": temperature, "max_length": max_length })
index = VectorstoreIndexCreator(embedding=embeddings,
vectorstore_cls=Chroma,
vectorstore_kwargs={"persist_directory": "db"},
text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)).from_loaders([loader])
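        # the vault notes are split into 1000-character chunks, embedded with the
        # SentenceTransformer model above, and persisted to a local Chroma store in ./db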
# 4. Process the query
chain = RetrievalQA.from_chain_type(llm=llm,
chain_type="stuff",
retriever=index.vectorstore.as_retriever(),
input_key = "question")
# 5. Display the results
st.write("Results:", chain.run(query))
else:
st.warning("Please upload your obsidian zip file, and enter a query.")
| [] |
2024-01-10 | YadnikaKotwal/CareCompanion.github.io | launch.py | import spacy
import openai
import gradio as gr
# Loading the pre-trained English language model in spaCy
try:
nlp = spacy.load('en_core_web_sm')
except OSError:
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
# Function to obtain word embeddings
def get_word_embeddings(sentence):
tokens = nlp(sentence)
embeddings = [token.vector for token in tokens]
return embeddings
# Function to get the chatbot response
def get_response(message):
openai.api_key = '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'
# Obtain word embeddings for the user message
embeddings = get_word_embeddings(message)
# Convert the embeddings to a string representation
embeddings_str = [str(embedding) for embedding in embeddings]
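    # the stringified embedding vectors are appended to the user message below
    # before the request is sent to the chat model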
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo-16k',
messages=[
{"role":"system", "content":"You are a polite, helpful postpartum care assistant who answers anything related to POSTPARTUM CARE, for other questions politely say you cannot answer, if you have any concern related then I can help."},
{"role":"user","content": message},
{"role":"assistant","content":""},
{"role": "user", "content": message + " "+ " ".join(embeddings_str) }
],
max_tokens=200,
temperature=0.9,
)
return response["choices"][0]["message"]["content"]
def chatbot_interface(input_text):
response = get_response(input_text)
return response
# code to create the Gradio app interface
iface = gr.Interface(
fn=chatbot_interface,
inputs="text",
outputs="text",
layout="vertical",
title="Postpartum Care Chatbot",
description="Ask any postpartum care-related questions!",
theme='HaleyCH/HaleyCH_Theme',
# inputs_layout="textarea",
# outputs_layout="textarea",
examples=[
["Hi, how can I take care of my newborn's skin?"],
["What is postpartum depression? What are the signs and symptoms?"],
["What activities are safe to do in the first few days? Which activities should I avoid?"],
['Are there certain foods or beverages I should avoid when breastfeeding?'],
["What should I do to prevent deep vein thrombosis?"],
['What should I do to help prevent post-delivery infections?']
],
)
# Start the Gradio app
if __name__ == "__main__":
print("Welcome to the Postpartum Care Chatbot!")
print("How can I assist you today?")
iface.launch(share=True)
| [
"You are a polite, helpful postpartum care assistant who answers anything related to POSTPARTUM CARE, for other questions politely say you cannot answer, if you have any concern related then I can help.",
" "
] |
2024-01-10 | bhctest123/superagent | app~lib~loaders~sitemap.py | import re
from xml.etree import ElementTree
import requests
from bs4 import BeautifulSoup
from langchain.schema import Document
class SitemapLoader:
SITEMAP_NAMESPACE = "{http://www.sitemaps.org/schemas/sitemap/0.9}"
def __init__(self, sitemap_url, filter_urls=None):
self.sitemap_url = sitemap_url
self.filter_urls = filter_urls if filter_urls else []
def fetch(self, url):
"""Fetch content of a URL using requests."""
response = requests.get(url)
response.raise_for_status() # Raise exception for HTTP errors
return response.text
def fetch_text(self, url):
response = requests.get(url)
response.raise_for_status() # Raise exception for HTTP errors
soup = BeautifulSoup(response.content, "html.parser")
raw_text = soup.get_text(separator=" ").strip()
cleaned_text = re.sub(r"\s+", " ", raw_text)
return cleaned_text
def matches_any_pattern(self, url):
"""Check if the URL matches any of the given patterns."""
is_match = any(re.search(pattern, url) for pattern in self.filter_urls)
if is_match:
print(f"Matched URL: {url}")
return is_match
def fetch_sitemap_urls(self):
"""Fetch URLs from a sitemap.xml file and filter based on patterns."""
sitemap_content = self.fetch(self.sitemap_url)
# Parse XML content
root = ElementTree.fromstring(sitemap_content)
urls = [
url_element.text
for url_element in root.findall(
f"{self.SITEMAP_NAMESPACE}url/{self.SITEMAP_NAMESPACE}loc"
)
]
# Filter URLs
if self.filter_urls:
urls = [url for url in urls if self.matches_any_pattern(url)]
return urls
def load(self):
"""Fetch content of each URL listed in a sitemap.xml file."""
urls = self.fetch_sitemap_urls()
return [
Document(page_content=self.fetch_text(url), metadata={"url": url})
for url in urls
]
| [] |
2024-01-10 | bhctest123/superagent | app~lib~agents~base.py | # flake8: noqa
import json
from typing import Any, Tuple
from slugify import slugify
from decouple import config
from langchain import HuggingFaceHub
from langchain.agents import Tool, create_csv_agent, AgentType
from langchain.chains import RetrievalQA
from langchain.chat_models import (
AzureChatOpenAI,
ChatAnthropic,
ChatOpenAI,
)
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import Cohere, OpenAI
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import SystemMessage
from app.lib.callbacks import StreamingCallbackHandler
from app.lib.models.document import DocumentInput
from app.lib.models.tool import (
SearchToolInput,
WolframToolInput,
ReplicateToolInput,
ZapierToolInput,
AgentToolInput,
OpenApiToolInput,
MetaphorToolInput,
)
from app.lib.prisma import prisma
from app.lib.prompts import (
CustomPromptTemplate,
DEFAULT_CHAT_PROMPT,
DEFAULT_AGENT_PROMPT,
)
from app.lib.tools import (
ToolDescription,
get_search_tool,
get_wolfram_alpha_tool,
get_replicate_tool,
get_zapier_nla_tool,
get_openapi_tool,
get_chatgpt_plugin_tool,
AgentTool,
MetaphorTool,
DocSummarizerTool,
)
from app.lib.vectorstores.base import VectorStoreBase
class AgentBase:
def __init__(
self,
agent: dict,
api_key: str = None,
has_streaming: bool = False,
on_llm_new_token=None,
on_llm_end=None,
on_chain_end=None,
):
self.id = agent.id
self.api_key = api_key
self.userId = agent.userId
self.document = agent.document
self.has_memory = agent.hasMemory
self.type = agent.type
self.llm = agent.llm
self.prompt = agent.prompt
self.tool = agent.tool
self.has_streaming = has_streaming
self.on_llm_new_token = on_llm_new_token
self.on_llm_end = on_llm_end
self.on_chain_end = on_chain_end
self.documents = self._get_agent_documents()
self.tools = self._get_agent_tools()
def _get_api_key(self) -> str:
if self.llm["provider"] == "openai-chat" or self.llm["provider"] == "openai":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("OPENAI_API_KEY")
)
if self.llm["provider"] == "anthropic":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("ANTHROPIC_API_KEY")
)
if self.llm["provider"] == "cohere":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("COHERE_API_KEY")
)
if self.llm["provider"] == "azure-openai":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("AZURE_API_KEY")
)
if self.llm["provider"] == "huggingface":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("HUGGINGFACEHUB_API_TOKEN")
)
def _get_prompt(self, tools: list = None) -> Any:
if not self.tools and not self.documents:
return (
PromptTemplate(
input_variables=self.prompt.input_variables,
template=self.prompt.template,
)
if self.prompt
else DEFAULT_CHAT_PROMPT
)
if self.type == "REACT":
return CustomPromptTemplate(
template=self.prompt.template if self.prompt else DEFAULT_AGENT_PROMPT,
tools=tools,
input_variables=["input", "intermediate_steps", "chat_history"],
)
if self.type == "OPENAI":
return SystemMessage(content=self.prompt.template) if self.prompt else None
return DEFAULT_CHAT_PROMPT
def _get_llm(self, has_streaming: bool = True) -> Any:
if self.llm["provider"] == "openai-chat":
return (
ChatOpenAI(
temperature=0,
openai_api_key=self._get_api_key(),
model_name=self.llm["model"],
streaming=self.has_streaming,
callbacks=[
StreamingCallbackHandler(
agent_type=self.type,
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
),
],
)
if self.has_streaming and has_streaming != False
else ChatOpenAI(
model_name=self.llm["model"],
openai_api_key=self._get_api_key(),
temperature=0,
)
)
if self.llm["provider"] == "openai":
return OpenAI(
model_name=self.llm["model"], openai_api_key=self._get_api_key()
)
if self.llm["provider"] == "anthropic":
return (
ChatAnthropic(
model=self.llm["model"] or "claude-v1",
streaming=self.has_streaming,
anthropic_api_key=self._get_api_key(),
callbacks=[
StreamingCallbackHandler(
agent_type=self.type,
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming and has_streaming != False
else ChatAnthropic(anthropic_api_key=self._get_api_key())
)
if self.llm["provider"] == "cohere":
return (
Cohere(
cohere_api_key=self._get_api_key(),
model=self.llm["model"],
callbacks=[
StreamingCallbackHandler(
agent_type=self.type,
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming and has_streaming != False
else Cohere(cohere_api_key=self._get_api_key(), model=self.llm["model"])
)
if self.llm["provider"] == "azure-openai":
return (
AzureChatOpenAI(
openai_api_key=self._get_api_key(),
openai_api_base=config("AZURE_API_BASE"),
openai_api_type=config("AZURE_API_TYPE"),
openai_api_version=config("AZURE_API_VERSION"),
deployment_name=self.llm["model"],
streaming=self.has_streaming,
callbacks=[
StreamingCallbackHandler(
agent_type=self.type,
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming
else AzureChatOpenAI(
deployment_name=self.llm["model"],
openai_api_key=self._get_api_key(),
openai_api_base=config("AZURE_API_BASE"),
openai_api_type=config("AZURE_API_TYPE"),
openai_api_version=config("AZURE_API_VERSION"),
)
)
if self.llm["provider"] == "huggingface":
return HuggingFaceHub(
repo_id=self.llm["model"], huggingfacehub_api_token=self._get_api_key()
)
# Use ChatOpenAI as default llm in agents
return ChatOpenAI(temperature=0, openai_api_key=self._get_api_key())
def _get_memory(self, session) -> Any:
history = ChatMessageHistory()
if self.has_memory:
memory_filter = {"agentId": self.id}
if session is not None:
memory_filter["session"] = session
memories = prisma.agentmemory.find_many(
where=memory_filter,
order={"createdAt": "desc"},
take=3,
)
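            # replay up to the three most recent stored messages into the chat history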
[
history.add_ai_message(memory.message)
if memory.author == "AI"
else history.add_user_message(memory.message)
for memory in memories
]
if (self.documents or self.tools) and self.type == "OPENAI":
return ConversationBufferMemory(
chat_memory=history,
memory_key="chat_history",
output_key="output",
return_messages=True,
)
return ConversationBufferMemory(
chat_memory=history,
memory_key="chat_history",
output_key="output",
)
return ConversationBufferMemory(memory_key="chat_history", output_key="output")
def _get_agent_documents(self) -> Any:
agent_documents = prisma.agentdocument.find_many(
where={"agentId": self.id}, include={"document": True}
)
return agent_documents
def _get_tool_and_input_by_type(
self, type: str, metadata: dict = None
) -> Tuple[Any, Any]:
if type == "SEARCH":
return get_search_tool(), SearchToolInput
if type == "WOLFRAM_ALPHA":
return get_wolfram_alpha_tool(), WolframToolInput
if type == "REPLICATE":
return get_replicate_tool(metadata=metadata), ReplicateToolInput
if type == "ZAPIER_NLA":
return (
get_zapier_nla_tool(
metadata=metadata, llm=self._get_llm(has_streaming=False)
),
ZapierToolInput,
)
if type == "AGENT":
return (
AgentTool(metadata=metadata, api_key=self.api_key),
AgentToolInput,
)
if type == "OPENAPI":
return (get_openapi_tool(metadata=metadata), OpenApiToolInput)
if type == "CHATGPT_PLUGIN":
# TODO: confirm metadata has (can have) url
return (get_chatgpt_plugin_tool(metadata), type)
if type == "METAPHOR":
return (MetaphorTool(metadata=metadata), MetaphorToolInput)
def _get_tools(self) -> list:
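        # Build one retrieval tool and one document-summarizer tool per attached
        # document (CSV files get a CSV agent instead of a retriever), then append
        # any configured third-party tools with their type-specific input schemas.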
tools = []
embeddings = OpenAIEmbeddings()
for agent_document in self.documents:
description = agent_document.document.description or (
f"useful for finding information about {agent_document.document.name}"
)
args_schema = DocumentInput if self.type == "OPENAI" else None
embeddings = OpenAIEmbeddings()
docsearch = (
VectorStoreBase()
.get_database()
.from_existing_index(embeddings, agent_document.document.id)
)
summary_tool = DocSummarizerTool(
docsearch=docsearch, llm=self._get_llm(has_streaming=False)
)
if agent_document.document.type == "CSV":
csv_agent = create_csv_agent(
llm=self._get_llm(has_streaming=False),
path=agent_document.document.url,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS
if self.type == "OPENAI"
else AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
tools.append(
Tool(
name=slugify(agent_document.document.name),
description=description,
args_schema=args_schema,
func=csv_agent.run,
)
)
else:
tools.append(
Tool(
name=slugify(agent_document.document.name)
if self.type == "OPENAI"
else agent_document.document.name,
description=description,
args_schema=args_schema,
func=RetrievalQA.from_chain_type(
llm=self._get_llm(has_streaming=False),
retriever=docsearch.as_retriever(),
),
)
)
tools.append(
Tool.from_function(
func=summary_tool.run,
name="document-summarizer",
description="useful for summarizing a whole document",
)
)
for agent_tool in self.tools:
tool, args_schema = self._get_tool_and_input_by_type(
agent_tool.tool.type, metadata=agent_tool.tool.metadata
)
if args_schema == "CHATGPT_PLUGIN":
# if chatgpt plugin this is a list of tools
tools += tool
continue
tools.append(
Tool(
name=slugify(agent_tool.tool.name),
description=agent_tool.tool.description
or ToolDescription[agent_tool.tool.type].value,
args_schema=args_schema if self.type == "OPENAI" else None,
func=tool.run if agent_tool.tool.type != "REPLICATE" else tool,
)
)
return tools
def _get_agent_tools(self) -> Any:
tools = prisma.agenttool.find_many(
where={"agentId": self.id}, include={"tool": True}
)
return tools
def _format_trace(self, trace: Any) -> dict:
if self.documents or self.tools:
return json.dumps(
{
"output": trace.get("output") or trace.get("result"),
"steps": [
{
"action": step[0].tool,
"input": step[0].tool_input,
"log": step[0].log,
"observation": step[1],
}
for step in trace["intermediate_steps"]
],
}
)
return json.dumps(
{
"output": trace.get("output") or trace.get("result"),
"steps": [trace],
}
)
def process_payload(self, payload):
if isinstance(payload, dict):
if self.type == "OPENAI":
payload = str(payload)
return payload
def create_agent_memory(
self, agentId: str, sessionId: str, author: str, message: str
):
prisma.agentmemory.create(
{
"author": author,
"message": message,
"agentId": agentId,
"session": sessionId,
}
)
def save_intermediate_steps(self, trace: dict) -> None:
prisma.agenttrace.create(
{
"userId": self.userId,
"agentId": self.id,
"data": trace,
}
)
def get_agent(self) -> Any:
pass
| [] |
2024-01-10 | bhctest123/superagent | app~lib~documents.py | import tempfile
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
import pinecone
import requests # type: ignore
from decouple import config
from langchain.document_loaders import (
GitLoader,
PsychicLoader,
TextLoader,
UnstructuredMarkdownLoader,
WebBaseLoader,
YoutubeLoader,
)
from langchain.embeddings.openai import OpenAIEmbeddings
from llama_index.readers.schema.base import Document
from llama_index import download_loader
from app.lib.loaders.sitemap import SitemapLoader
from app.lib.parsers import CustomPDFPlumberLoader
from app.lib.splitters import TextSplitters
from app.lib.vectorstores.base import VectorStoreBase
NotionPageReader = download_loader("NotionPageReader")
valid_ingestion_types = [
"TXT",
"PDF",
"URL",
"YOUTUBE",
"MARKDOWN",
"FIRESTORE",
"PSYCHIC",
"GITHUB_REPOSITORY",
"WEBPAGE",
"STRIPE",
"AIRTABLE",
"SITEMAP",
"NOTION",
]
def chunkify(lst, size):
"""Divide a list into chunks of given size."""
return [lst[i : i + size] for i in range(0, len(lst), size)]
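# used below to upsert documents to Pinecone in batches of 100 rather than all at once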
def upsert_document(
type: str,
document_id: str,
from_page: int,
to_page: int,
url: str | None = None,
content: str | None = None,
text_splitter: dict | None = None,
user_id: str | None = None,
authorization: dict | None = None,
metadata: dict | None = None,
) -> None:
"""Upserts documents to Pinecone index"""
INDEX_NAME = config("PINECONE_INDEX", "superagent")
pinecone.Index(INDEX_NAME)
embeddings = OpenAIEmbeddings()
if type == "STRIPE":
pass
if type == "NOTION":
integration_token: str = metadata["integration_token"]
page_ids: str = metadata["page_ids"]
loader = NotionPageReader(integration_token=integration_token)
documents = loader.load_langchain_documents(page_ids=page_ids.split(","))
newDocuments = [
document.metadata.update({"namespace": document_id}) or document
for document in documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
VectorStoreBase().get_database().from_documents(
docs, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "AIRTABLE":
from langchain.document_loaders import AirtableLoader
api_key: str = metadata["api_key"]
base_id: str = metadata["base_id"]
table_id: str = metadata["table_id"]
loader = AirtableLoader(api_key, table_id, base_id)
documents = loader.load()
newDocuments = [
document.metadata.update({"namespace": document_id}) or document
for document in documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
VectorStoreBase().get_database().from_documents(
docs, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "SITEMAP":
filter_urls: str = metadata["filter_urls"]
loader = SitemapLoader(sitemap_url=url, filter_urls=filter_urls.split(","))
documents = loader.load()
newDocuments = [
document.metadata.update({"namespace": document_id}) or document
for document in documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
chunk_size = 100
chunks = chunkify(docs, chunk_size)
for chunk in chunks:
VectorStoreBase().get_database().from_documents(
chunk, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "WEBPAGE":
from llama_index import download_loader
RemoteDepthReader = download_loader("RemoteDepthReader")
depth = int(metadata["depth"])
loader = RemoteDepthReader(depth=depth)
documents = loader.load_data(url=url)
langchain_documents = [d.to_langchain_format() for d in documents]
newDocuments = [
document.metadata.update({"namespace": document_id}) or document
for document in langchain_documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
chunk_size = 100
chunks = chunkify(docs, chunk_size)
for chunk in chunks:
VectorStoreBase().get_database().from_documents(
chunk, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "TXT":
file_response = content
if content is None:
if url is None:
raise ValueError("URL must not be None when content is None.")
file_response = requests.get(url).text
if file_response is not None:
with NamedTemporaryFile(suffix=".txt", delete=True) as temp_file:
temp_file.write(file_response.encode())
temp_file.flush()
loader = TextLoader(file_path=temp_file.name)
documents = loader.load()
else:
raise ValueError("file_response must not be None.")
newDocuments = [
document.metadata.update({"namespace": document_id}) or document
for document in documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
VectorStoreBase().get_database().from_documents(
docs, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "PDF":
if url is None:
raise ValueError("URL must not be None for PDF type.")
loader = CustomPDFPlumberLoader(
file_path=url, from_page=from_page, to_page=to_page
)
documents = loader.load()
newDocuments = [
document.metadata.update({"namespace": document_id}) or document
for document in documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
VectorStoreBase().get_database().from_documents(
docs, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "URL":
if url is None:
raise ValueError("URL must not be None for URL type.")
url_list = url.split(",")
loader = WebBaseLoader(url_list)
documents = loader.load()
newDocuments = [
document.metadata.update({"namespace": document_id, "language": "en"})
or document
for document in documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
VectorStoreBase().get_database().from_documents(
docs, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "YOUTUBE":
if url is None:
raise ValueError("URL must not be None for YOUTUBE type.")
video_id = url.split("youtube.com/watch?v=")[-1]
loader = YoutubeLoader(video_id=video_id)
documents = loader.load()
newDocuments = [
document.metadata.update({"namespace": document_id}) or document
for document in documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
VectorStoreBase().get_database().from_documents(
docs, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "MARKDOWN":
if url is None:
raise ValueError("URL must not be None for MARKDOWN type.")
file_response = requests.get(url).text
if file_response:
with NamedTemporaryFile(suffix=".md", delete=True) as temp_file:
temp_file.write(file_response.encode())
temp_file.flush()
loader = UnstructuredMarkdownLoader(file_path=temp_file.name)
else:
raise ValueError("file_response must not be None.")
newDocuments = [
document.metadata.update({"namespace": document_id}) or document
for document in documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
VectorStoreBase().get_database().from_documents(
docs, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "PSYCHIC":
if metadata is not None:
connector_id = metadata["connectorId"]
else:
connector_id = None # or some default value
loader = PsychicLoader(
api_key=config("PSYCHIC_API_KEY"),
account_id=user_id,
connector_id=connector_id,
)
documents = loader.load()
newDocuments = [
document.metadata.update({"namespace": document_id}) or document
for document in documents
]
docs = TextSplitters(newDocuments, text_splitter).document_splitter()
VectorStoreBase().get_database().from_documents(
docs, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "FIRESTORE":
from google.cloud import firestore
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_info(
authorization
)
if authorization is None:
raise ValueError("Authorization must not be None for FIRESTORE type.")
db = firestore.Client(
credentials=credentials, project=authorization["project_id"]
)
documents = []
if metadata is None:
raise ValueError("Metadata must not be None for FIRESTORE type.")
col_ref = db.collection(metadata["collection"])
for doc in col_ref.stream():
doc_str = ", ".join([f"{k}: {v}" for k, v in doc.to_dict().items()])
documents.append(Document(text=doc_str))
VectorStoreBase().get_database().from_documents(
documents, embeddings, index_name=INDEX_NAME, namespace=document_id
)
if type == "GITHUB_REPOSITORY":
parsed_url = urlparse(url)
path_parts = parsed_url.path.split("/") # type: ignore
repo_name = path_parts[2]
with tempfile.TemporaryDirectory() as temp_dir:
repo_path = f"{temp_dir}/{repo_name}/" # type: ignore
loader = GitLoader(
clone_url=url,
repo_path=repo_path,
branch=metadata["branch"], # type: ignore
)
docs = loader.load_and_split()
VectorStoreBase().get_database().from_documents(
docs, embeddings, index_name=INDEX_NAME, namespace=document_id
)
| [] |
2024-01-10 | bhctest123/superagent | app~lib~vectorstores~pinecone.py | import pinecone
from decouple import config
from langchain.vectorstores.pinecone import Pinecone
pinecone.init(
api_key=config("PINECONE_API_KEY"), # find at app.pinecone.io
environment=config("PINECONE_ENVIRONMENT"), # next to api key in console
)
INDEX_NAME = config("PINECONE_INDEX", "superagent")
index = pinecone.Index(INDEX_NAME)
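# the module-level index handle is only needed for namespace deletion below;
# inserts and lookups go through the langchain Pinecone wrapper instead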
class PineconeVectorstore:
def __init__(self):
pass
def from_documents(self, docs, embeddings, index_name, namespace):
Pinecone.from_documents(
docs, embeddings, index_name=index_name, namespace=namespace
)
def from_existing_index(self, embeddings, namespace):
return Pinecone.from_existing_index(
INDEX_NAME, embedding=embeddings, namespace=namespace
)
def delete(self, namespace):
return index.delete(delete_all=True, namespace=namespace)
| [] |
2024-01-10 | csiv23/ZenGen | backend~gpt_api.py | import openai
from decouple import config
def generate_meditation_prompt(length_choice, focus, method):
openai.api_key = config("OPENAI_API_KEY")
model = "gpt-3.5-turbo"
if length_choice == "short":
description = "around 2-3 minutes"
interval_duration = "30 seconds"
intervals = "00:30, 01:00, 01:30, and so on"
elif length_choice == "medium":
description = "around 5 minutes"
interval_duration = "1 minute"
intervals = "01:00, 02:00, 03:00, and so on"
elif length_choice == "long":
description = "around 10 minutes"
interval_duration = "2 minutes"
intervals = "02:00, 04:00, 06:00, and so on"
else:
raise ValueError("Invalid length_choice provided")
if method == "none" or not method:
prompt_text = (
f"Concisely craft a meditation script for {description} focusing on {focus}. "
f"Provide instructions with intervals of {interval_duration}. "
f"Use timestamps that progress as {intervals}. "
f"Format: '00:00 - txt'. No square brackets or extraneous content or spaces between lines."
)
else:
prompt_text = (
f"Concisely craft a meditation script for {description} focusing on {focus} employing the {method} technique. "
f"Provide instructions with intervals of {interval_duration}. "
f"Use timestamps that progress as {intervals}. "
f"Format: '00:00 - txt'. Exclude any square brackets or superfluous content or spaces between lines."
)
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt_text},
],
)
return response.choices[0].message["content"].strip()
| [
"Concisely craft a meditation script for PLACEHOLDER focusing on PLACEHOLDER. Provide instructions with intervals of PLACEHOLDER. Use timestamps that progress as PLACEHOLDER. Format: '00:00 - txt'. No square brackets or extraneous content or spaces between lines.",
"Concisely craft a meditation script for PLACEHOLDER focusing on PLACEHOLDER employing the PLACEHOLDER technique. Provide instructions with intervals of PLACEHOLDER. Use timestamps that progress as PLACEHOLDER. Format: '00:00 - txt'. Exclude any square brackets or superfluous content or spaces between lines.",
"You are a helpful assistant."
] |
2024-01-10 | undertheseanlp/underthesea | underthesea~pipeline~classification~classification_prompt.py | import openai
import os
openai.api_key = os.environ.get('OPENAI_API_KEY')
if not openai.api_key:
raise ValueError("Please set the OPENAI_API_KEY environment variable.")
def classify(X, domain=None):
class_labels = "Thể thao, Pháp luật, Thế giới, Đời sống, Chính trị Xã hội, Vi tính, Khoa học, Văn hoá, Kinh doanh, Sức khỏe"
user_prompt = f"""Classify the following text:
{X}
Suggested labels: {class_labels}
Provide a single label as the output.
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", # this may need to be changed based on available models
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": user_prompt}
],
temperature=0
)
output = response.choices[0].message['content'].strip()
return output
| [
"You are a helpful assistant.",
"Classify the following text:\n PLACEHOLDER\n Suggested labels: Thể thao, Pháp luật, Thế giới, Đời sống, Chính trị Xã hội, Vi tính, Khoa học, Văn hoá, Kinh doanh, Sức khỏe\n\n Provide a single label as the output.\n "
] |
2024-01-10 | murphybread/Chat2Image-Creator | chat2image.py | import gradio as gr
import openai
from diffusers import StableDiffusionPipeline
import torch
import uuid
openai.api_key = "[API key]"
model_id = "dreamlike-art/dreamlike-photoreal-2.0"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
def answer(state, state_chatbot, text):
if ("SDY" or "SDM") in text:
if "SDY" in text:
prompt = state[-1]["content"]
if "SDM" in text:
prompt = state[-2]["content"]
img = pipe(prompt).images[0]
print(prompt)
img_path = (
f'imgs/{prompt.replace(" ", "-").replace(",", "-").replace(".","")}.jpg'
)
img.save(img_path)
state_chatbot = state_chatbot + [(text, f"")]
print(img_path)
print(prompt.replace(" ", "").replace(",", "-").replace(".",""))
print(img_path)
print(text)
print(f'')
print(f"")
else:
messages = state + [{"role": "user", "content": text}]
res = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
msg = res["choices"][0]["message"]["content"]
new_state = [
{"role": "user", "content": text},
{"role": "assistant", "content": msg},
]
state = state + new_state
state_chatbot = state_chatbot + [(text, msg)]
print(state)
# print(res['usage'])
return state, state_chatbot, state_chatbot
with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
state = gr.State([{"role": "system", "content": "You are a helpful assistant."}])
state_chatbot = gr.State([])
with gr.Row():
gr.HTML(
"""<div style="text-align: center; max-width: 500px; margin: 0 auto;">
<div>
<h1>Chat2Image Creator</h1>
</div>
<p style="margin-bottom: 10px; font-size: 94%">
YouTube <a href="https://www.youtube.com/@bbanghyong">빵형의 개발도상국</a>
</p>
</div>"""
)
with gr.Row():
chatbot = gr.Chatbot(elem_id="chatbot")
with gr.Row():
txt = gr.Textbox(show_label=False, placeholder="ChatGPT의 상상을 그림으로 그려보세요").style(
container=False
)
txt.submit(answer, [state, state_chatbot, txt], [state, state_chatbot, chatbot])
txt.submit(lambda: "", None, txt)
| [
"content",
"You are a helpful assistant."
] |
2024-01-10 | bioinformatics-ua/PEI-detector-models | openai-roberta-detector~test_model.py | from roberta_openai_detector import OpenAIBaseRobertaGPT2DetectorModel, OpenAILargeRobertaGPT2DetectorModel
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("model_checkpoint", type=str)
args = parser.parse_args()
print("Load Model")
if args.model_checkpoint=="large":
model = OpenAILargeRobertaGPT2DetectorModel()
else:
model = OpenAIBaseRobertaGPT2DetectorModel()
if args.model_checkpoint!="base":
print(f"WARNING: received {args.model_checkpoint} but only [base, large] are valid. Fallback to the default model (base)")
    # the first is an abstract for the GAN paper (Goodfellow et al.) that was generated by ChatGPT
# the second is the true abstract of the same paper
batched_input_text = ["Generative Adversarial Networks (GANs) are a type of deep learning model that have gained significant attention in recent years for their ability to generate realistic data samples. GANs are composed of two neural networks, a generator and a discriminator, that are trained simultaneously in a competitive manner. The generator network is tasked with generating samples that can fool the discriminator network into thinking they are real, while the discriminator network is trained to distinguish between real and generated data.\nThis paper provides a comprehensive overview of GANs, including their architecture, training procedure, and applications. We discuss the theoretical foundations of GANs, including the concept of adversarial training and the objective functions used to optimize the generator and discriminator networks. We also review recent advancements in GANs, such as conditional GANs and progressive GANs, that have enabled the generation of high-quality images, videos, and other types of data.\nIn addition to discussing the technical aspects of GANs, we also explore their practical applications, including image synthesis, data augmentation, and style transfer. We highlight the potential of GANs for generating synthetic data for training machine learning models, and discuss their implications for privacy and security.\nOverall, this paper provides a comprehensive overview of Generative Adversarial Networks, and their potential for advancing the field of artificial intelligence.",
"We propose a new framework for estimating generative models via an adversarial process, in which we simultaneously train two models: a generative model G that captures the data distribution, and a discriminative model D that estimates the probability that a sample came from the training data rather than G. The training procedure for G is to maximize the probability of D making a mistake. This framework corresponds to a minimax two-player game. In the space of arbitrary functions G and D, a unique solution exists, with G recovering the training data distribution and D equal to 1/2 everywhere. In the case where G and D are defined by multilayer perceptrons, the entire system can be trained with backpropagation. There is no need for any Markov chains or unrolled approximate inference networks during either training or generation of samples. Experiments demonstrate the potential of the framework through qualitative and quantitative evaluation of the generated samples."]
probs = model(batched_input_text)
for i, text in enumerate(batched_input_text):
print(f"Input text: {text[:100]}...")
print(f"Probability of being computer-generated: {probs[i]}")
while True:
input_text = input("Input text: ")
print(f"Probability of being computer-generated: {model(input_text)}") | [] |
2024-01-10 | tdene/FastChat | fastchat~llm_judge~common.py | """
Common data structures and utilities.
"""
import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import get_conversation_template
# API setting constants
API_MAX_RETRY = 16
API_RETRY_SLEEP = 10
API_ERROR_OUTPUT = "$ERROR$"
TIE_DELTA = 0.1
# Categories that need reference answers
NEED_REF_CATS = ["math", "reasoning", "coding", "arena-hard-200"]
# Extract scores from judgments
two_score_pattern = re.compile("\[\[(\d+\.?\d*),\s?(\d+\.?\d*)\]\]")
two_score_pattern_backup = re.compile("\[(\d+\.?\d*),\s?(\d+\.?\d*)\]")
one_score_pattern = re.compile("\[\[(\d+\.?\d*)\]\]")
one_score_pattern_backup = re.compile("\[(\d+\.?\d*)\]")
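# judgments such as "[[8]]" or "[[7, 9]]" are parsed with the patterns above;
# the *_backup variants also accept single-bracket scores like "[7, 9]"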
# Sampling temperature configs for each question category
temperature_config = {
"writing": 0.7,
"roleplay": 0.7,
"extraction": 0.0,
"math": 0.0,
"coding": 0.0,
"reasoning": 0.0,
"stem": 0.1,
"humanities": 0.1,
"arena-hard-200": 0.0,
}
reverse_model_map = {
"model_1": "model_2",
"model_2": "model_1",
}
@dataclasses.dataclass
class Judge:
model_name: str
prompt_template: dict
ref_based: bool = False
multi_turn: bool = False
@dataclasses.dataclass
class MatchSingle:
question: dict
model: str
answer: dict
judge: Judge
ref_answer: dict = None
multi_turn: bool = False
@dataclasses.dataclass
class MatchPair:
question: dict
model_1: str
model_2: str
answer_1: dict
answer_2: dict
judge: Judge
ref_answer: dict = None
multi_turn: bool = False
def load_questions(question_file: str, begin: Optional[int], end: Optional[int]):
"""Load questions from a file."""
questions = []
with open(question_file, "r") as ques_file:
for line in ques_file:
if line:
questions.append(json.loads(line))
questions = questions[begin:end]
return questions
def load_model_answers(answer_dir: str):
"""Load model answers.
The return value is a python dict of type:
Dict[model_name: str -> Dict[question_id: int -> answer: dict]]
"""
filenames = glob.glob(os.path.join(answer_dir, "*.jsonl"))
filenames.sort()
model_answers = {}
for filename in filenames:
model_name = os.path.basename(filename)[:-6]
answer = {}
with open(filename) as fin:
for line in fin:
line = json.loads(line)
answer[line["question_id"]] = line
model_answers[model_name] = answer
return model_answers
def load_judge_prompts(prompt_file: str):
"""Load judge prompts.
The return value is a python dict of type:
Dict[judge_name: str -> dict]
"""
prompts = {}
with open(prompt_file) as fin:
for line in fin:
line = json.loads(line)
prompts[line["name"]] = line
return prompts
def run_judge_single(question, answer, judge, ref_answer, multi_turn=False):
kwargs = {}
model = judge.model_name
if ref_answer is not None:
kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
if multi_turn:
kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]
if multi_turn:
user_prompt = judge.prompt_template["prompt_template"].format(
question_1=question["turns"][0],
question_2=question["turns"][1],
answer_1=answer["choices"][0]["turns"][0],
answer_2=answer["choices"][0]["turns"][1],
**kwargs,
)
else:
user_prompt = judge.prompt_template["prompt_template"].format(
question=question["turns"][0],
answer=answer["choices"][0]["turns"][0],
**kwargs,
)
rating = -1
system_prompt = judge.prompt_template["system_prompt"]
conv = get_conversation_template(model)
conv.set_system_message(system_prompt)
conv.append_message(conv.roles[0], user_prompt)
conv.append_message(conv.roles[1], None)
if model in ["gpt-3.5-turbo", "gpt-4"]:
judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
elif model in ["claude-v1", "claude-instant-v1"]:
judgment = chat_compeletion_anthropic(
model, conv, temperature=0, max_tokens=1024
)
else:
raise ValueError(f"Invalid judge model name: {model}")
if judge.prompt_template["output_format"] == "[[rating]]":
match = re.search(one_score_pattern, judgment)
if not match:
match = re.search(one_score_pattern_backup, judgment)
if match:
rating = ast.literal_eval(match.groups()[0])
else:
rating = -1
else:
raise ValueError(
f"invalid output format: {judge.prompt_template['output_format']}"
)
return rating, user_prompt, judgment
def play_a_match_single(match: MatchPair, output_file: str):
question, model, answer, judge, ref_answer, multi_turn = (
match.question,
match.model,
match.answer,
match.judge,
match.ref_answer,
match.multi_turn,
)
if judge.prompt_template["type"] == "single":
score, user_prompt, judgment = run_judge_single(
question, answer, judge, ref_answer, multi_turn=multi_turn
)
question_id = question["question_id"]
turn = 1 if not multi_turn else 2
result = {
"question_id": question_id,
"model": model,
"judge": (judge.model_name, judge.prompt_template["name"]),
"user_prompt": user_prompt,
"judgment": judgment,
"score": score,
"turn": turn,
"tstamp": time.time(),
}
print(
f"question: {question_id}, turn: {turn}, model: {model}, "
f"score: {score}, "
f"judge: {(judge.model_name, judge.prompt_template['name'])}"
)
else:
raise ValueError(f"invalid judge type: {judge['type']}")
if output_file:
os.makedirs(os.path.dirname(output_file), exist_ok=True)
with open(output_file, "a") as fout:
fout.write(json.dumps(result) + "\n")
return result
def run_judge_pair(question, answer_a, answer_b, judge, ref_answer, multi_turn=False):
kwargs = {}
model = judge.model_name
if ref_answer is not None:
kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
if multi_turn:
kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]
if multi_turn:
system_prompt = judge.prompt_template["system_prompt"]
user_prompt = judge.prompt_template["prompt_template"].format(
question_1=question["turns"][0],
question_2=question["turns"][1],
answer_a_1=answer_a["choices"][0]["turns"][0],
answer_b_1=answer_b["choices"][0]["turns"][0],
answer_a_2=answer_a["choices"][0]["turns"][1],
answer_b_2=answer_b["choices"][0]["turns"][1],
**kwargs,
)
else:
system_prompt = judge.prompt_template["system_prompt"]
user_prompt = judge.prompt_template["prompt_template"].format(
question=question["turns"][0],
answer_a=answer_a["choices"][0]["turns"][0],
answer_b=answer_b["choices"][0]["turns"][0],
**kwargs,
)
winner = "error"
conv = get_conversation_template(model)
conv.append_message(conv.roles[0], user_prompt)
conv.append_message(conv.roles[1], None)
if model in ["gpt-3.5-turbo", "gpt-4"]:
conv.set_system_message(system_prompt)
judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
elif model in ["claude-v1", "claude-instant-v1"]:
if system_prompt != "You are a helpful assistant.":
user_prompt = "[Instruction]\n" + system_prompt + "\n\n" + user_prompt
conv.messages[0][1] = user_prompt
judgment = chat_compeletion_anthropic(
model, conv, temperature=0, max_tokens=1024
)
else:
raise ValueError(f"Invalid judge model name: {model}")
if judge.prompt_template["output_format"] == "[[A]]":
if "[[A]]" in judgment:
winner = "A"
elif "[[B]]" in judgment:
winner = "B"
elif "[[C]]" in judgment:
winner = "tie"
else:
winner = "error"
elif judge.prompt_template["output_format"] == "[[rating_a,rating_b]]":
match = re.search(two_score_pattern, judgment)
if not match:
match = re.search(two_score_pattern_backup, judgment)
if match:
scores = [ast.literal_eval(s.strip()) for s in match.groups()]
if abs(scores[0] - scores[1]) <= TIE_DELTA:
winner = "tie"
elif scores[0] > scores[1]:
winner = "A"
else:
winner = "B"
else:
winner = "error"
else:
raise ValueError(
f"invalid output format: {judge.prompt_template['output_format']}"
)
return winner, user_prompt, judgment
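# Hedged illustration (added; not in the original file): for pairwise judges with
# output_format "[[A]]", the verdict token in the judgment maps to a winner as
# "[[A]]" -> "A", "[[B]]" -> "B", "[[C]]" -> "tie", anything else -> "error".
# For "[[rating_a,rating_b]]" judges, two scores are parsed and a gap of at most
# TIE_DELTA (defined earlier in this module) is reported as a tie.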
def play_a_match_pair(match: MatchPair, output_file: str):
question, model_1, model_2, answer_1, answer_2, judge, ref_answer, multi_turn = (
match.question,
match.model_1,
match.model_2,
match.answer_1,
match.answer_2,
match.judge,
match.ref_answer,
match.multi_turn,
)
if judge.prompt_template["type"] == "pairwise":
g1_winner, g1_user_prompt, g1_judgment = run_judge_pair(
question, answer_1, answer_2, judge, ref_answer, multi_turn=multi_turn
)
g2_winner, g2_user_prompt, g2_judgment = run_judge_pair(
question, answer_2, answer_1, judge, ref_answer, multi_turn=multi_turn
)
g1_map = {"A": "model_1", "B": "model_2"}
g2_map = {"A": "model_2", "B": "model_1"}
g1_winner = g1_map.get(g1_winner, g1_winner)
g2_winner = g2_map.get(g2_winner, g2_winner)
question_id = question["question_id"]
turn = 1 if not multi_turn else 2
result = {
"question_id": question_id,
"model_1": model_1,
"model_2": model_2,
"g1_winner": g1_winner,
"g2_winner": g2_winner,
"judge": (judge.model_name, judge.prompt_template["name"]),
"g1_user_prompt": g1_user_prompt,
"g1_judgment": g1_judgment,
"g2_user_prompt": g2_user_prompt,
"g2_judgment": g2_judgment,
"turn": turn,
"tstamp": time.time(),
}
print(
f"question: {question_id}, turn: {turn}, model_1: {model_1}, model_2: {model_2}, "
f"g1_winner: {g1_winner}, g2_winner: {g2_winner}, "
f"judge: {(judge.model_name, judge.prompt_template['name'])}"
)
elif judge.prompt_template["type"] == "single":
m1_score, m1_user_prompt, m1_judgment = run_judge_single(
question, answer_1, judge
)
m2_score, m2_user_prompt, m2_judgment = run_judge_single(
question, answer_2, judge
)
if abs(m1_score - m2_score) <= TIE_DELTA:
winner = "tie"
elif m1_score > m2_score:
winner = "model_1"
else:
winner = "model_2"
question_id = question["question_id"]
result = {
"question_id": question_id,
"model_1": model_1,
"model_2": model_2,
"g1_winner": winner,
"g2_winner": winner,
"judge": (judge.model_name, judge.prompt_template["name"]),
"g1_user_prompt": m1_user_prompt,
"g1_judgment": m1_judgment,
"g2_user_prompt": m2_user_prompt,
"g2_judgment": m2_judgment,
"m1_score": m1_score,
"m2_score": m2_score,
"tstamp": time.time(),
}
print(
f"question: {question_id}, model_1: {model_1}, model_2: {model_2}, "
f"winner: {winner}, m1_score: {m1_score}, m2_score: {m2_score}, "
f"judge: {(judge.model_name, judge.prompt_template['name'])}"
)
else:
raise ValueError(f"invalid judge type: {judge['type']}")
if output_file:
os.makedirs(os.path.dirname(output_file), exist_ok=True)
with open(output_file, "a") as fout:
fout.write(json.dumps(result) + "\n")
return result
def chat_compeletion_openai(model, conv, temperature, max_tokens):
output = API_ERROR_OUTPUT
for _ in range(API_MAX_RETRY):
try:
messages = conv.to_openai_api_messages()
response = openai.ChatCompletion.create(
model=model,
messages=messages,
n=1,
temperature=temperature,
max_tokens=max_tokens,
)
output = response["choices"][0]["message"]["content"]
break
except openai.error.OpenAIError as e:
print(type(e), e)
time.sleep(API_RETRY_SLEEP)
return output
def chat_compeletion_openai_azure(model, conv, temperature, max_tokens):
openai.api_type = "azure"
openai.api_base = os.environ["AZURE_OPENAI_ENDPOINT"]
openai.api_key = os.environ["AZURE_OPENAI_KEY"]
openai.api_version = "2023-05-15"
if "azure-" in model:
model = model[6:]
output = API_ERROR_OUTPUT
for _ in range(API_MAX_RETRY):
try:
messages = conv.to_openai_api_messages()
response = openai.ChatCompletion.create(
engine=model,
messages=messages,
n=1,
temperature=temperature,
max_tokens=max_tokens,
)
output = response["choices"][0]["message"]["content"]
break
except openai.error.OpenAIError as e:
print(type(e), e)
time.sleep(API_RETRY_SLEEP)
return output
def chat_compeletion_anthropic(model, conv, temperature, max_tokens):
output = API_ERROR_OUTPUT
for _ in range(API_MAX_RETRY):
try:
c = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
prompt = conv.get_prompt()
response = c.completions.create(
model=model,
prompt=prompt,
stop_sequences=[anthropic.HUMAN_PROMPT],
max_tokens_to_sample=max_tokens,
temperature=temperature,
)
output = response.completion
break
except anthropic.APIError as e:
print(type(e), e)
time.sleep(API_RETRY_SLEEP)
return output.strip()
def chat_compeletion_palm(chat_state, model, conv, temperature, max_tokens):
from fastchat.serve.api_provider import init_palm_chat
assert model == "palm-2-chat-bison-001"
if chat_state is None:
chat_state = init_palm_chat("chat-bison@001")
parameters = {
"temperature": temperature,
"top_p": 0.8,
"top_k": 40,
"max_output_tokens": max_tokens,
}
output = API_ERROR_OUTPUT
for _ in range(API_MAX_RETRY):
try:
response = chat_state.send_message(conv.messages[-2][1], **parameters)
output = response.text
break
except Exception as e:
print(type(e), e)
time.sleep(API_RETRY_SLEEP)
return chat_state, output
def normalize_game_key_single(gamekey, result):
"""Make the model names sorted in a game key."""
qid, model_1, model_2 = gamekey
if model_1 < model_2:
return gamekey, result
else:
new_gamekey = (qid, model_2, model_1)
new_result = {
"winners": tuple(reverse_model_map.get(x, x) for x in result["winners"]),
"g1_judgment": result["g2_judgment"],
"g2_judgment": result["g1_judgment"],
}
return new_gamekey, new_result
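# Hedged worked example (added; not in the original file). The question id and
# model names below are hypothetical. Given
#   gamekey = (81, "model-b", "model-a")   # names not in sorted order
#   result  = {"winners": ("model_1",), "g1_judgment": "...", "g2_judgment": "..."}
# normalize_game_key_single returns the canonical key (81, "model-a", "model-b"),
# relabels the winner through reverse_model_map (assumed, as elsewhere in this
# file, to swap "model_1" and "model_2"), and swaps the two judgments so both
# orderings of a pair share one key.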
def normalize_game_key_dict(judgment_dict):
"""Make the model names sorted in the game keys."""
ret = {}
for key, value in judgment_dict.items():
new_key, new_value = normalize_game_key_single(key, value)
ret[new_key] = new_value
return ret
def load_pairwise_model_judgments(filename: str):
"""Load model judgments.
The return value is a dict of type:
Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]]
"""
judge_dict = {}
for line in open(filename):
obj = json.loads(line)
judge = tuple(obj["judge"])
qid, model_1, model_2 = obj["question_id"], obj["model_1"], obj["model_2"]
if judge not in judge_dict:
judge_dict[judge] = {}
if "winner" in obj:
winner = obj["winner"]
elif "g1_winner" in obj and "g2_winner" in obj:
g1_winner, g2_winner = obj["g1_winner"], obj["g2_winner"]
if g1_winner == g2_winner:
winner = g1_winner
else:
winner = "inconsistent"
else:
raise ValueError(f"Invalid keys: {list(obj.keys())}")
gamekey = (qid, model_1, model_2)
winners = (winner,)
judge_dict[judge][gamekey] = {
"winners": winners,
"g1_judgment": obj["g1_judgment"],
"g2_judgment": obj["g2_judgment"],
}
# Make the model names sorted in the game keys
normalized = {}
for judge, value in judge_dict.items():
normalized[judge] = normalize_game_key_dict(value)
return normalized
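# Hedged usage sketch (added; not part of the original module). The file name and
# game key below are hypothetical placeholders; the judge tuple follows the
# ("gpt-4", "pair-v2") convention used by resolve_pairwise_judgment_dict below.
def _example_lookup_pairwise_judgment():
    """Minimal sketch: fetch one pairwise game result from the loaded judgments."""
    judgments = load_pairwise_model_judgments("data/model_judgment/gpt-4_pair.jsonl")
    judge_key = ("gpt-4", "pair-v2")            # (judge model, prompt template name)
    game_key = (81, "model-a", "model-b")       # (question_id, model_1, model_2), sorted
    return judgments[judge_key][game_key]["winners"]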
def load_single_model_judgments(filename: str):
"""Load model judgments.
The return value is a dict of type:
Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]]
"""
judge_dict = {}
for line in open(filename):
obj = json.loads(line)
judge = tuple(obj["judge"])
qid, model = obj["question_id"], obj["model"]
if judge not in judge_dict:
judge_dict[judge] = {}
gamekey = (qid, model)
judge_dict[judge][gamekey] = {
"score": obj["score"],
"judgment": obj["judgment"],
}
return judge_dict
def resolve_pairwise_judgment_dict(
question, model_judgments_normal, model_judgments_math, multi_turn=False
):
"""Return the correct pairwise judge."""
if multi_turn:
if question["category"] in NEED_REF_CATS:
return model_judgments_math[("gpt-4", "pair-math-v1-multi-turn")]
return model_judgments_normal[("gpt-4", "pair-v2-multi-turn")]
if question["category"] in NEED_REF_CATS:
return model_judgments_math[("gpt-4", "pair-math-v1")]
else:
return model_judgments_normal[("gpt-4", "pair-v2")]
def resolve_single_judgment_dict(
question, model_judgments_normal, model_judgments_math, multi_turn=False
):
"""Return the correct single answer grading judge."""
if multi_turn:
if question["category"] in NEED_REF_CATS:
return model_judgments_math[("gpt-4", "single-math-v1-multi-turn")]
return model_judgments_normal[("gpt-4", "single-v1-multi-turn")]
if question["category"] in NEED_REF_CATS:
return model_judgments_math[("gpt-4", "single-math-v1")]
else:
return model_judgments_normal[("gpt-4", "single-v1")]
def get_pairwise_judge_explanation(gamekey, judgment_dict):
"""Get model judge explanation."""
try:
qid, model_1, model_2 = gamekey
if model_1 < model_2:
res = judgment_dict[gamekey]
g1_judgment, g2_judgment = res["g1_judgment"], res["g2_judgment"]
else:
new_gamekey = (qid, model_2, model_1)
res = judgment_dict[new_gamekey]
model_1, model_2 = model_1, model_2
g1_judgment, g2_judgment = res["g2_judgment"], res["g1_judgment"]
return (
f"**Game 1**. **A**: {model_1}, **B**: {model_2}\n\n"
f"**Judgment**: {g1_judgment}"
+ f"\n\n`--------------------------`\n\n"
+ f"**Game 2**. **A**: {model_2}, **B**: {model_1}\n\n"
f"**Judgment**: {g2_judgment}"
)
except KeyError:
return "N/A"
def get_single_judge_explanation(gamekey, judgment_dict):
"""Get model judge explanation."""
try:
qid, model = gamekey
res = judgment_dict[gamekey]
g1_judgment = res["judgment"]
g1_score = res["score"]
return (
f"**Game 1**. **A**: {model}, **Score**: {g1_score}\n\n"
f"**Judgment**: {g1_judgment}"
)
except KeyError:
return "N/A"
def check_data(questions, model_answers, ref_answers, models, judges):
# check model answers
for m in models:
assert m in model_answers, f"Missing model answer for {m}"
m_answer = model_answers[m]
for q in questions:
assert (
q["question_id"] in m_answer
), f"Missing model {m}'s answer to Question {q['question_id']}"
# check ref answers
for jg in judges.values():
if not jg.ref_based:
continue
for q in questions:
if q["category"] not in NEED_REF_CATS:
continue
assert (
q["question_id"] in ref_answers[jg.model_name]
), f"Missing reference answer to Question {q['question_id']} for judge {jg.model_name}"
def get_model_list(answer_dir):
file_paths = glob.glob(f"{answer_dir}/*.jsonl")
file_names = [os.path.splitext(os.path.basename(f))[0] for f in file_paths]
return file_names
| [
"[Instruction]\nPLACEHOLDER\n\nPLACEHOLDER",
"{}",
"turns",
"prompt_template",
"system_prompt"
] |
2024-01-10 | jxb3641/ungreenwash | ungreenwash_app.py | import streamlit as st
from streamlit import session_state as ss
import streamlit.components.v1 as components
from streamlit_option_menu import option_menu
import extra_streamlit_components as stx
import streamlit_toggle as tog
from streamlit_elements import elements, mui, html, dashboard, editor, lazy, sync
import pandas as pd
import json
import requests
import openai
import stock_api
from PIL import Image
import os
primary_color = "#91b36b"
background_color = "#FFFFFF"
secondary_background_color = "#F0F2F6"
companies = [
{
"name": "Pepsico",
"symbol": "PEP",
},
{
"name": "Fisker",
"symbol": "FSR",
},
{
"name": "General Mills",
"symbol": "GIS",
},
{
"name": "Ford",
"symbol": "F",
},
]
# load all json data from output_data folder
def load_json_data():
data = []
for filename in os.listdir("output_data"):
if filename.endswith(".json"):
with open("output_data/" + filename) as f:
temp_data = json.load(f)
# change qa_pairs list to map of category to qa_pair
qa_pairs = {}
for qa_pair in temp_data["qa_pairs"]:
if qa_pair["category"] not in qa_pairs:
qa_pairs[qa_pair["category"]] = []
# for each qa_pair, order the qa_pair["answers"] by confidence
qa_pair["answers"] = sorted(qa_pair["answers"], key=lambda x: x["confidence"], reverse=True)
qa_pairs[qa_pair["category"]].append(qa_pair)
temp_data["qa_pairs"] = qa_pairs
data.append(temp_data)
return data
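# Hedged note (added for illustration): each JSON file in output_data is expected
# to carry at least the fields read elsewhere in this app -- "name", "score" and
# "qa_pairs". After load_json_data() regroups qa_pairs by category, a minimal
# hypothetical record looks like:
#   {"name": "Ford", "score": 7,
#    "qa_pairs": {"General": [{"category": "General",
#                              "question": "What does this company do?",
#                              "answers": [{"answer": "...", "confidence": 0.8}]}]}}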
data = load_json_data()
st.set_page_config(layout="wide", initial_sidebar_state="collapsed")
available_companies = (company["name"] for company in companies)
### Streamlit app starts here
c1 = st.container()
c2 = st.container()
c3 = st.container()
c4 = st.container()
def get_styled_title(title):
return st.markdown(f'<p style="color:{primary_color};font-size:45px;border-radius:2%;font-weight:bold">{title}</p>', unsafe_allow_html=True)
def get_symbol_from_company(company):
for companyInfo in companies:
if companyInfo["name"] == company:
return companyInfo["symbol"]
return ""
def format_market_cap(market_cap):
if market_cap < 1000:
rounded = round(market_cap, 2)
return "$" + str(rounded) + " M"
elif market_cap < 1000000:
rounded = round(market_cap / 1000, 2)
return "$" + str(rounded) + " B"
else:
rounded = round(market_cap / 1000000, 2)
return "$" + str(rounded) + " T"
def get_investment_profile(company):
with st.expander(label="Investment Profile"):
company_info = stock_api.get_company_info(symbol=get_symbol_from_company(company))
# Write and format exchange, country, market capitalization, and industry
st.write("Exchange: " + company_info["exchange"])
st.write("Country: " + company_info["country"])
st.write("Market Capitalization: " + format_market_cap(company_info["marketCapitalization"]))
st.write("Industry: " + company_info["finnhubIndustry"])
def get_peers(company):
peers = stock_api.get_peers(symbol=get_symbol_from_company(company))
ret = []
for peer in peers:
st.write(peer)
company_info = stock_api.get_company_info(symbol=peer)
ret.append(company_info["name"])
return ret
def get_confidence_style(qa_pair, bg_color):
if "confidence" in qa_pair:
conf = qa_pair["confidence"]
else:
conf = 0.5
color = "rgb({},{},{},{})".format(145, 179, 107, conf)
return f'radial-gradient(farthest-side at 40% 50%, {color}, {bg_color})'
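# Hedged example (added for illustration): for a hypothetical answer with
# confidence 0.8, get_confidence_style({"confidence": 0.8}, "white") returns
#   'radial-gradient(farthest-side at 40% 50%, rgb(145,179,107,0.8), white)'
# i.e. a stronger green highlight for higher-confidence answers.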
def get_no_confidence(bg_color):
color = "rgb({},{},{},{})".format(255, 0, 0, 0.5)
return f'radial-gradient(farthest-side at 40% 50%, {color}, {bg_color})'
# Share to social media
def compose_share_text():
params = st.experimental_get_query_params()
if "companies" in params:
# Format a returned statement like "Here's a sustainability comparison of Apple, Boeing, and Bayer"
companies = params["companies"]
if len(companies) == 1:
return "Here's a sustainability evaluation of " + companies[0] + ":"
elif len(companies) == 2:
return "Here's a sustainability comparison of " + companies[0] + " and " + companies[1] + ":"
else:
return "Here's a sustainability comparison of " + ", ".join(companies[:-1]) + ", and " + companies[-1]
else:
return "Check out this website to see how sustainable your favourite companies are!"
def compose_curr_url():
domain = "akhilgupta1093-openai-hackathon-scope3-ungreenwash-app-v8ncns.streamlit.app/"
queryParams = []
if "companies" in ss:
for c in ss.companies:
cFormatted = "+".join(c.split(" "))
queryParams.append(f'companies={cFormatted}')
queryStr = ""
if len(queryParams) > 0:
queryStr = "?" + "&".join(queryParams)
# using domain and query params (map of query params), compose the current url
return "https://" + domain + queryStr
def get_share_text():
return """
<div style="display:flex;margin:0px">
<div style="margin-top:12px;margin-right:10px">
<a class="twitter-share-button"
href="https://twitter.com/intent/tweet?text={text}"
data-url="{url}">
Tweet</a>
<script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</div>
<div style="margin-top:12px;margin-right:10px">
<a data-size="large" data-url="{url}"/>
<script src="https://platform.linkedin.com/in.js" type="text/javascript"> lang: en_US</script>
<script type="IN/Share" data-url="{url}"></script>
</div>
</div>
""".format(text=compose_share_text(), url=compose_curr_url())
def get_share_elements():
components.html(get_share_text())
# Mock function for now, will be an api call.
def get_company_info(company):
for companyInfo in data:
if companyInfo["name"] == company:
return companyInfo
return "{}"
def handle_company_select():
st.experimental_set_query_params(companies=ss.companies)
for company in ss.companies:
if company not in ss:
ss[company] = get_company_info(company)
# Get company info based on query params
params = st.experimental_get_query_params()
if "companies" in params:
param_companies = params["companies"]
ss.companies = param_companies
for company in param_companies:
if company not in ss:
ss[company] = get_company_info(company)
with st.sidebar:
page = option_menu(
menu_title=None,
options=["Company Lookup", "Trust & Data", "About Us"],
icons=["house", "clipboard-data", "person-circle"],
)
if page == "Company Lookup":
with c1:
get_styled_title("Company Lookup")
get_share_elements()
with c2:
title_column_1, title_column_2 = st.columns([8, 1])
with title_column_1:
st.multiselect("", available_companies, key="companies", on_change=handle_company_select)
with title_column_2:
st.markdown('#')
if "compare" in ss:
default_val = ss.compare
else:
default_val = False
tog.st_toggle_switch(label="Compare",
key="compare",
default_value=default_val,
label_after = False,
inactive_color = '#D3D3D3',
active_color=primary_color,
track_color=primary_color,
)
st.markdown('#')
with c3:
params = st.experimental_get_query_params()
param_companies = params["companies"] if "companies" in params else []
if len(param_companies) > 0:
# comparison mode
if ss.compare:
with elements("dashboard"):
if len(param_companies) > 0:
if "layout" not in ss:
ss.layout = []
for i, company in enumerate(param_companies):
# check whether company is already in layout
exists = False
for l in ss.layout:
if l["i"] == company:
exists = True
break
# if not, add it
if not exists:
# if it's an odd index, add it to the right
if i % 2 != 0:
x = 6
else:
x = 0
ss.layout.append(dashboard.Item(company, x, 0, 5, 4, allowOverlap=True))
with dashboard.Grid(ss.layout):
for company in param_companies:
company_info = ss[company]
with mui.Card(key=company, sx={"display": "flex", "flexDirection": "column"}, raised=True):
mui.CardHeader(title=company, subheader=f'Disclosure Score: {company_info["score"]}', sx={"color": "white", "background-color": primary_color, "padding": "5px 15px 5px 15px", "borderBottom": 2, "borderColor": "divider"})
with mui.CardContent(sx={"flex": 1, "minHeight": 0, "background-color": secondary_background_color}):
# with mui.List():
# for qa_pair in company_info["qa_pairs"]:
# with mui.ListItem(sx={"background-image": get_confidence_style(qa_pair, secondary_background_color)}):
# mui.ListItemText(primary= f'Q: {qa_pair["question"]}', secondary= f'A: {qa_pair["answer"]}', sx={"padding": "0px 0px 0px 0px"})
for category, qa_pairs in company_info["qa_pairs"].items():
expanded = category == "General"
with mui.Accordion(defaultExpanded=expanded):
with mui.AccordionSummary(expandIcon=mui.icon.ExpandMore):
mui.Typography(category)
with mui.AccordionDetails():
with mui.List():
for qa_pair in qa_pairs:
with mui.ListItem(alignItems="flex-start", sx={"padding": "0px 0px 0px 0px"}):
mui.ListItemText(primary= f'Q: {qa_pair["question"]}', sx={"padding": "0px 0px 0px 0px"})
if len(qa_pair["answers"]) == 0:
mui.ListItemText(secondary= f'A: No answer found', sx={"padding": "0px 0px 0px 0px", "background-image": get_no_confidence("white")})
for answer in qa_pair["answers"]:
mui.ListItemText(secondary= f'A: {answer["answer"]}', sx={"padding": "0px 0px 0px 0px", "background-image": get_confidence_style(answer, "white")})
# with mui.CardActions(sx={"color": "white", "padding": "5px 15px 5px 15px", "background-color": "#ff4b4b", "borderTop": 2, "borderColor": "divider"}):
# mui.Button("Learn More", size="small", sx={"color": "white"})
# tabular mode
else:
if "prev_company" in ss and ss.prev_company in param_companies:
df = ss.prev_company
else:
df = param_companies[0]
tabs = st.tabs(param_companies)
for i, tab in enumerate(tabs):
with tab:
curr_company = param_companies[i]
company_info = ss[curr_company]
col1, col2, _col, _col, col3 = st.columns([1, 2, 1, 1, 1])
with col1:
get_styled_title(company_info["name"])
#col1.subheader(company_info["name"])
with col2:
get_investment_profile(curr_company)
col3.metric(label="Disclosure Score", value=company_info["score"])
for category, qa_pairs in company_info["qa_pairs"].items():
expanded = category == "General"
with st.expander(category, expanded=expanded):
for qa_pair in qa_pairs:
st.write(f'**Q:** {qa_pair["question"]}')
if len(qa_pair["answers"]) == 0:
answer_html = """
<div style="background-image: {}; padding: 10px; border-radius: 5px; margin-bottom: 10px;">
<div>{}</div>
</div>
""".format(get_no_confidence("white"), "No answer found")
st.markdown(answer_html, unsafe_allow_html=True)
for answer in qa_pair["answers"]:
answer_html = """
<div style="background-image: {}; padding: 10px; border-radius: 5px; margin-bottom: 10px;">
<div>{}</div>
</div>
""".format(get_confidence_style(answer, "white"), answer["answer"])
st.markdown(answer_html, unsafe_allow_html=True)
# qa_html = """
# <div style="margin:10px;background-image:{}">
# <div style="font-weight: bold">Q: {}</div>
# <div>A: {}</div>
# </div>
# """.format(get_confidence_style(qa_pair, background_color), qa_pair["question"], qa_pair["answer"])
# st.markdown(qa_html, unsafe_allow_html=True)
# ss.curr_company = stx.tab_bar(
# data=(stx.TabBarItemData(id=company, title=company, description="") for company in param_companies),
# default=df,
# )
# if ss.curr_company in ss:
# col1, _col, col3 = st.columns([1, 3, 1])
# company_info = ss[ss.curr_company]
# col1.subheader(company_info["name"])
# col3.metric(label="Disclosure Score", value=company_info["score"])
# for qa_pair in company_info["qa_pairs"]:
# qa_html = """
# <div style="margin:10px;background-image:{}">
# <div style="font-weight: bold">Q: {}</div>
# <div>A: {}</div>
# </div>
# """.format(get_confidence_style(qa_pair, background_color), qa_pair["question"], qa_pair["answer"])
# st.markdown(qa_html, unsafe_allow_html=True)
# st.markdown('#')
# ss.prev_company = ss.curr_company
# get_investment_profile(ss.curr_company)
# else:
# st.write("N/A")
with c4:
for i in range(6):
st.markdown("#")
default_trust = """
We want to ensure that you view this tool as a trustworthy source of reliable, firm-level, climate-relevant information. In particular, it will guide you through the maze of corporate climate data to aid your decision-making. We base the information that is presented to you on credible data disclosed by the firm itself.
#### Method
At a high-level, the information is compiled as follows:
1. The tool searches through one of three firm disclosures to identify passages that match your search query.
2. Subsequently, you will be presented with either the passages that best match your search query or a high-quality summary of that information.
#### Sources
We ensure credibility by relying on the following three data sources:
1. A firm's 10-K filing, filed with the SEC EDGAR database.
2. The most recent earnings conference call transcript, which features management's discussion of quarterly results alongside a Q&A session with sell-side analysts.
3. A firm's voluntarily disclosed sustainability report.
#### Why is this important?
Importantly, trawling through the various data sources individually is a tremendous challenge for investors. The sources are heterogeneous in layout and structure, often run to 100+ pages or transcribe hour-long conversations, and are hard for a regular investor to navigate and parse when seeking aid in investment decision-making. We aim to alleviate this issue by providing answers to a curated set of climate-relevant questions.
"""
# Map of company name to references section
company_references = {
"Ford": [
"Ford Motor Company 10-K filing (0000037996-22-000013): https://www.sec.gov/ix?doc=/Archives/edgar/data/37996/000003799622000013/f-20211231.htm",
"Ford Motor Company Earnings Conference Call (2022-02-04): https://seekingalpha.com/article/4484425-ford-motor-company-2021-q4-results-earnings-call-presentation",
"Ford Motor Company Integrated Sustainabiltiy Report (2022): https://corporate.ford.com/content/dam/corporate/us/en-us/documents/reports/integrated-sustainability-and-financial-report-2022.pdf",
],
"General Mills": [
"General Mills, Inc. 10-K filing (0001193125-21-204830): https://www.sec.gov/ix?doc=/Archives/edgar/data/40704/000119312521204830/d184854d10k.htm",
"General Mills, Inc. Earnings Conference Call (2022-03-23): https://seekingalpha.com/article/4497316-general-mills-inc-gis-ceo-jeff-harmening-on-q3-2022-results-earnings-call-transcript",
"General Mills, Inc. Sustainabiltiy Report (2022): https://globalresponsibility.generalmills.com/images/General_Mills-Global_Responsibility_2022.pdf",
],
"Fisker": [
"Fisker Inc. 10-K filing (0001720990-22-000010): https://www.sec.gov/ix?doc=/Archives/edgar/data/1720990/000172099022000010/fsr-20211231.htm",
"Fisker Inc. Earnings Conference Call (2022-02-16): https://seekingalpha.com/article/4487648-fisker-inc-fsr-ceo-henrik-fisker-on-q4-2021-results-earnings-call-transcript",
"Fisker Inc. Company ESG Impact Report (2021): https://assets.ctfassets.net/cghen8gr8e1n/2sBPf0jjfZa20R8Ycwar4Q/ff96bb41c1348978af542610f3f7a88e/2021_Fisker_ESG_Report.pdf",
],
"Pepsico": [
"PepsiCo, Inc. 10-K filing (0000077476-22-000010): https://www.sec.gov/ix?doc=/Archives/edgar/data/77476/000007747622000010/pep-20211225.htm",
"PepsiCo, Inc. Earnings Conference Call (2022-02-10): https://seekingalpha.com/article/4485846-pepsico-inc-pep-ceo-ramon-laguarta-on-q4-2021-results-earnings-call-transcript",
"PepsiCo, Inc. SASB Index (2021): https://www.pepsico.com/docs/default-source/sustainability-and-esg-topics/2021-sasb-index.pdf",
],
}
if page == "Trust & Data":
get_styled_title("Notes on Data Use and Trust")
st.markdown(default_trust)
params = st.experimental_get_query_params()
param_companies = params["companies"] if "companies" in params else []
if len(param_companies) > 0:
# Create a separating line
st.markdown('---')
st.markdown("### References")
st.markdown("The presented information can be looked up in the following documents:")
for company in param_companies:
if company in company_references:
st.markdown("#### " + company)
for ref in company_references[company]:
st.markdown(ref)
if page == "About Us":
get_styled_title("About Us")
# display all images in the "pictures" folder
# display them in 3 columns
col1, col2, col3 = st.columns(3)
pics = os.listdir("pictures")
# reverse the list so Akhil isn't first, lol
pics.reverse()
for i, img in enumerate(pics):
# get image name without .jpeg
person = img.split(".")[0].upper()
if i % 3 == 0:
col1.image("pictures/" + img, caption=person)
elif i % 3 == 1:
col2.image("pictures/" + img, caption=person)
else:
col3.image("pictures/" + img, caption=person)
| [] |
2024-01-10 | jxb3641/ungreenwash | HaystackUtils.py | from abc import abstractmethod
from haystack import Pipeline
from haystack.nodes import reader, retriever
from haystack.nodes.base import BaseComponent
from typing import Optional, List
from pathlib import Path
import pandas as pd
from OpenAIUtils import query_to_summaries as query_to_summaries_openai, file_to_embeddings as file_to_embeddings_openai, produce_prompt as produce_prompt_openai
from CohereUtils import query_to_summaries as query_to_summaries_cohere, file_to_embeddings as file_to_embeddings_cohere, produce_prompt as produce_prompt_cohere
class Embed(BaseComponent):
def __init__(self, module_choice = "Cohere"):
self.module_choice = module_choice
outgoing_edges = 1
def get_embeddings(self, queries: List[str], batch_size: Optional[int] = None, filenames: List[str] = [], use_cache: bool = False):
for fname in filenames:
if self.module_choice == "OpenAI":
file_to_embeddings_openai(Path(fname), use_cache)
elif self.module_choice == "Cohere":
file_to_embeddings_cohere(Path(fname), use_cache)
else:
raise ValueError("Invalid module choice for Embed")
return {}
def run(self, query: str, filenames: List[str] = [], recalc_embeddings: bool = True): # type: ignore
output = {}
if recalc_embeddings:
output = self.get_embeddings([query], filenames=filenames)
return output, "output_1"
def run_batch(self, queries: List[str], batch_size: Optional[int] = None, filenames: List[str] = [], recalc_embeddings: bool = True): # type: ignore
output = {}
if recalc_embeddings:
output = self.get_embeddings(queries, batch_size, filenames)
return output, "output_1"
class Complete(BaseComponent):
def __init__(self, module_choice = "OpenAI"):
self.module_choice = module_choice
outgoing_edges = 1
def get_summaries(self, queries: List[str], filenames: List[str], temperature: Optional[float] = 0.5, print_responses: bool = False):
if self.module_choice == "OpenAI":
output = {"completions": query_to_summaries_openai(filenames, queries, temperature, print_responses)}
elif self.module_choice == "Cohere":
output = {"completions": query_to_summaries_cohere(filenames, queries, temperature, print_responses)}
else:
raise ValueError("Invalid module choice for Complete")
return output
def run(self, query: str, filenames: List[str] = [], temperature: Optional[float] = 0.5): # type: ignore
output = self.get_summaries([query], filenames, temperature)
return output, "output_1"
def run_batch(self, queries: List[str], batch_size: Optional[int] = None, filenames: List[str] = [], temperature: Optional[float] = 0.5): # type: ignore
output = self.get_summaries(queries, filenames, temperature)
return output, "output_1"
def run_qap(embeddings_choice, completion_choice, temperature, relevant_questions, filenames, recalc_embeddings):
p = Pipeline()
embed = Embed(embeddings_choice)
complete = Complete(completion_choice)
p.add_node(component=embed, name="Embed", inputs=["Query"])
p.add_node(component=complete, name="Complete", inputs=["Embed"])
res = p.run_batch(queries=relevant_questions, params={"filenames": filenames, "temperature": temperature, "recalc_embeddings": recalc_embeddings})
completions = res["completions"]
completions.to_pickle("./risks_responses.pkl")
return completions | [] |
2024-01-10 | jxb3641/ungreenwash | EDGARFilingUtils.py | """Utilities for 10K filings."""
import streamlit as st
import glob
import re
import random
import json
import pandas as pd
import nltk
from transformers import GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
from pathlib import Path
import openai
openai.api_key = st.secrets["openai_api_key"]
TICKER_TO_COMPANY_NAME = {
"F": "Ford Motor Company",
"GIS": "General Mills, Inc.",
"PEP": "PepsiCo, Inc.",
"FSR": "Fisker, Inc."
}
QUESTION_TO_CATEGORY = {
"What does this company do?": "General",
"What are the risks this company faces?": "General",
"What are the environmental risks this company faces?": "General",
"What are the climate-related risks and opportunities the organization has identified over the short, medium and long term?":"General",
"Environmental laws, environmental risks, environmental regulations": "General"
}
ROOT_DATA_DIR = Path("data/ind_lists")
#TODO: Refactor this into two functions:
# one that takes in a submission id, and creates a dict of item1/mda sources, and texts
# another that takes in the directory, and outputs all submission ids
# random sample filings should be done separately, or in a third function.
def get_all_submission_ids(datadir=ROOT_DATA_DIR/'4_food_bev'/'10k'):
"""get all the submission IDs of 10-K .txts.
Assumes filing texts are of form (submission-id).txt, (submission-id)_item1.txt, (submission-id)_mda.txt
Args:
datadir (str): Where to look for the text files.
Returns:
(tuple(str)): Tuple of unique submission IDs.
"""
tenk_all_filingnames = sorted(set([re.search("([A-Z]+)_\d+-\d+-\d+",str(fp)).group() for fp in datadir.glob("*.txt")]))
return tuple(tenk_all_filingnames)
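# Hedged example (added for illustration): the regex above maps a hypothetical
# file name such as "GIS_0001193125-21-204830_item1.txt" to the submission id
# "GIS_0001193125-21-204830", so one id groups the full filing, _item1 and _mda
# text files belonging to the same 10-K.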
def get_text_from_files_for_submission_id(filename, datadir=ROOT_DATA_DIR/'4_food_bev'/'10k'):
"""Read in the .txt files for submission_id, located in datadir.
Args:
filename (str): Submission id of the filing.
datadir (str): filepath where all 3 files (.txt, item1.txt, mda.txt) for the submission id should be located.
Returns:
dict: Dictionary containing the submission id, filepath of the .txt, item1.txt and mda.txt, files,
and their texts read in as strings with keys full_txt, item1_txt, mda_txt.
"""
text_dict = {}
for fp in datadir.glob(f"{filename}*.txt"):
if re.search("item1.txt",str(fp)):
text_dict["item1"] = str(fp)
text_dict["item1_txt"] = fp.read_text(encoding="utf-8").replace("$","\$")
elif re.search("mda.txt",str(fp)):
text_dict["mda"] = str(fp)
text_dict["mda_txt"] = fp.read_text(encoding="utf-8").replace("$","\$")
else:
text_dict["fullFiling"] = str(fp)
text_dict["full_txt"] = fp.read_text(encoding="utf-8").replace("$","\$")
return text_dict
def get_random_sample_filings(number_filings=50,seed=None):
"""For a random sample of filings, parse their names, MDA, and Item1 .txt files
and their text.
Args:
seed (int, optional): Seed for random instance. Defaults to None.
number_filings (int, optional): Number of filings to get the MDA, Item1 and full .txt files. Defaults to 50.
Returns:
pandas.DataFrame: DF of filing names, the filepaths of the Full, MDA and Item1 text, and their parsed in text.
"""
# Helper function to read in file texts as strings
def get_text(fp):
with open(fp) as f:
text = f.read()
return text
random_inst = random.Random(seed) if seed else random.Random()
# All .txt files in the data directory have name {digits}-{digits}-{digits} as their prefix, and
# one of _mda.txt, _item1.txt, or just .txt as suffixes. The RE below just captures the common prefixes.
tenk_all_filingnames = [re.search("\d+-\d+-\d+",fp).group() for fp in glob.glob("data/10K/q1/*.txt")]
# Pull number_filings (fullFiling, MDA, and Item1) filename triples
txt_by_filing = {}
for filing_num in random_inst.sample(tenk_all_filingnames,number_filings):
txt_by_filing[filing_num] = {}
for fp in glob.glob(f"data/10K/q1/{filing_num}*.txt"): # Find the 3 files with the common filing prefix
if re.search("item1.txt",fp):
txt_by_filing[filing_num]["item1"] = fp
elif re.search("mda.txt",fp):
txt_by_filing[filing_num]["mda"] = fp
else:
txt_by_filing[filing_num]["fullFiling"] = fp
# DF indexed by filing prefix, with columns "item1", "mda", "fullFiling".
# Add in 3 more columns to contain the text strings.
df = pd.read_json(json.dumps(txt_by_filing),orient='index')
for col in df.columns:
df[col+"_txt"] = df[col].apply(lambda x: get_text(x))
return df
def split_text(text,form_type=None):
"""split text into workable chunks. Filter out header and footer.
Args:
text (str): original text.
form_type (str, optional): flag to customize splitting for different form types. For "10KItemsOnly" texts, implements rolling text chunking that carries over up to 4 previous sentences between chunks.
Default None.
Returns:
list(str): list of text chunks.
"""
if form_type == "10KItemsOnly":
# Implement super basic chunking (as big as possible, carrying over up to 4 previous sentences between chunks)
split_text = []
sentences = nltk.sent_tokenize(text.replace(";",".").replace("\u2022",""))
chunk = ""
chunk_index = 0
previous_sentences_token_len = 0
for sent_ind, sentence in enumerate(sentences):
#Collect chunks with up to 1800 tokens.
if len(tokenizer.encode(chunk)) <= 700-previous_sentences_token_len:
chunk += f" {sentence}"
else: #Chunk token limit reached.
chunk = chunk.strip() #Get rid of leading/trailing whitespace
chunk_index += 1
if chunk_index %10 == 0:
print(chunk_index,"chunks processed.")
if chunk_index > 1: #For any chunks after the first
# Add in up to last N sentences of last chunk to this one, making sure we dont wrap around to negative indices
if sent_ind -4 >= 0:
previous_sentences = " ".join([sentences[sent_ind-4],sentences[sent_ind-3], sentences[sent_ind-2], sentences[sent_ind-1]])
elif sent_ind -3 >= 0:
previous_sentences= " ".join([sentences[sent_ind-3], sentences[sent_ind-2], sentences[sent_ind-1]])
elif sent_ind -2 >= 0:
previous_sentences= " ".join([sentences[sent_ind-2], sentences[sent_ind-1]])
elif sent_ind -1 >= 0:
previous_sentences= " ".join([sentences[sent_ind-1]])
else:
previous_sentences = ""
previous_sentences_token_len = len(tokenizer.encode(previous_sentences))
#print("\n\nBEFORE:\n\n", chunk)
#print("\n\n LENGTH:",len(tokenizer.encode(chunk)))
#print()
chunk = " ".join([previous_sentences,chunk])
#print(f"AFTER INCORPORATING SENTENCES BEFORE {sent_ind}:\n\n", chunk)
#print("\n\n LENGTH:",len(tokenizer.encode(chunk)))
#print()
if len(tokenizer.encode(chunk)) <2048:
split_text.append(chunk)
# Add in the current sentence.
chunk = sentence
return split_text
split_text = text.split("\n\n")
start_index = 0 # Find the "Washington, DC" chunk, we will throw out all other chunks before this
end_index = -1 # Find the the "Item 15" chunk, we will throw out all chunks after this
for i, chunk in enumerate(split_text):
if re.search("washington,",chunk.lower()):
start_index = i
# elif re.search(r"item 15\.",chunk.lower()):
# end_index = i
return split_text[start_index+1:end_index]
def filter_chunks(split_text):
"""Filter split chunks."""
filtered_split = []
#Remove chunks less than some hard limit in length
for chunk in split_text:
if len(chunk.split())>=15:
filtered_split.append(chunk)
return filtered_split
def does_text_have_climate_keywords(text):
"""Checks if any of a preset list of keywords is in the text.
Args:
text (str): text to search for keywords.
Returns:
A dict of sentences featuring keywords, a dict of keyword counts in the text.
"""
keywords = [
"energy",
"electric vehicle",
"climate change",
"wind (power|energy)",
"greenhouse gas",
"solar",
"\bair\b",
"carbon",
"emission",
"extreme weather",
"carbon dioxide",
"battery",
"pollution",
"environment",
"clean power",
"onshore",
"coastal area",
"footprint",
"charge station",
"eco friendly",
"sustainability",
"energy reform",
"renewable",
]
keyword_contexts = {keyword : [] for keyword in keywords}
keyword_counts = {keyword : 0 for keyword in keywords}
# pre-process text
split_text = text.lower().split(". ")
# Count occurrences for each keyword in the text.
for keyword in keywords:
for sentence in split_text:
if re.search(keyword,sentence):
keyword_contexts[keyword].append(sentence)
keyword_counts[keyword] = keyword_counts[keyword] + len(re.findall(keyword,sentence))
return keyword_contexts, keyword_counts
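# Hedged usage sketch (added for illustration); the sentence below is a made-up example.
def _example_keyword_scan():
    """Minimal sketch: count climate keywords in a short passage."""
    contexts, counts = does_text_have_climate_keywords(
        "We invest in renewable energy to cut greenhouse gas emissions."
    )
    # Here counts["renewable"] and counts["greenhouse gas"] would each be at least 1.
    return contexts, counts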
def concat_keyword_sentences(keyword_sentence_map,max_str_length=900):
"""Take in a dictionary of keyword to sentences, and concatenate them up to max_str_length.
Args:
keyword_sentence_map (dict): dictionary of sentences by keyword.
max_str_length (int, optional): maximum length of the concatenated string. Defaults to 900.
Returns:
str: concatenated string of keyword sentences, of length approximately max_str_length characters.
"""
keyword_sentence_list = [ sent for sentlist in keyword_sentence_map.values() for sent in sentlist]
concat_str = ""
while keyword_sentence_list and len(concat_str) < max_str_length:
for keyword_sentence in keyword_sentence_list:
concat_str += keyword_sentence+"\n\n"
return concat_str
def get_chunks_from_file(filename):
import csv
chunks = []
with open(filename) as f:
#skip header
reader = csv.reader(f, delimiter=';')
next(reader)
for row in reader:
if row[3] == "Firm":
if row[4] and len(row[4]) > 75:
if len(row[4]) > 200:
sentences = nltk.sent_tokenize(row[4])
#create chunks of 8 sentences
for i in range(0, len(sentences), 8):
chunk = "".join(sentences[i:i+8])
if chunk:
chunks.append(chunk)
else:
chunks.append(row[4])
return chunks
def get_chunks_from_esg_report(filename):
with open(filename) as f:
text = f.read()
chunks = []
for line in text.split("\n\n"):
line = line.replace(' \n', ' ').replace('.\n', '.').replace('\r', '')
print(line)
if line and len(line) > 50:
chunks.append(line)
return chunks
def get_big_chunks_from_esg_report(filename):
with open(filename) as f:
text = f.read()
text = text.replace(' \n', ' ').replace('.\n', '.').replace('\r', '')
sentences = nltk.sent_tokenize(text)
#create chunks of 100 sentences
chunks = []
for i in range(0, len(sentences), 20):
chunk = "".join(sentences[i:i+20])
if chunk:
chunks.append(chunk)
return chunks
if __name__ == "__main__":
from OpenAIUtils import file_to_embeddings, questions_to_answers
filename = "/Users/colemanhindes/hackathon/OpenAI-hackathon-Scope3/data/ind_lists/4_food_bev/sustainability_reports/gis_2022.txt"
text_questions = '''What does this company do?
What are the risks this company faces?
What are the environmental risks this company faces?
"What are the climate-related risks and opportunities the organization has identified over the short, medium, and long term?
What is the impact of climate-related risks and opportunities on the organization’s business, strategy, and financial planning.
What are the organization’s processes for identifying and assessing climate-related risks?
What are extreme climate events the firm is exposed to?
What are lasting changes in the climate the firm is exposed to?
What are climate-related regulations, rules, bills or standards that the entity must adhere to?
What are new technologies that the entity is considering or requiring to decarbonize its business model?
What are the reputational risks or concerns that the firm attributes to climate- or corporate social responsibility-related issues?
Does the firm rely on or employ any kind of green financing?
Has the firm set up a committee (or other governance mechanism) that is concerned with climate- and ESG-related issues?
What does the company disclose about its energy mix?
What is the percentage of energy or electricity used that is from renewable sources?
What are the company's emissions targets and have they been validated as credible and substantial?
Does this company's emissions targets include Scope 3 (indirect) emissions?
Does a discussion of long-term and short-term strategy or plan to manage Scope 1 emissions, emissions reduction targets, and an analysis of performance toward those targets exist?
What does the company say about its impacts on biodiversity?
What does the company disclose about the waste it generates and what is it doing to reduce waste?
What are key aspects of "Product Safety" that the firm discusses in its sustainability report?
What are key aspects of "Labor Practices" that the firm discusses in its sustainability report?
What are key aspects of "Fuel Economy & Use-phase Emissions" that the firm discusses in its sustainability report?
What are key aspects of "Material Sourcing" that the firm discusses in its sustainability report?
What are key aspects of "Materials Efficiency & Recycling" that the firm discusses in its sustainability report?
What are key aspects of "Water Management" that the firm discusses in its sustainability report?
What are key aspects of "Food Safety" that the firm discusses in its sustainability report?
What are key aspects of "Health & Nutrition" that the firm discusses in its sustainability report?
What are key aspects of "Ingridient Sourcing" that the firm discusses in its sustainability report?
What is this company's strategy to reduce the environmental impact of packaging throughout its lifecycle?
What is the company doing about the environmental and social impacts of their ingredient supply chain?'''
questions = text_questions.split("\n")
#Only show matches above this level
match_threshold = 0.35
chunks = get_big_chunks_from_esg_report(filename)
for chunk in chunks:
if not chunk:
print("empty chunk")
embeddings = file_to_embeddings(Path(filename), chunks)
answers = questions_to_answers(questions, embeddings, min_similarity=match_threshold)
for question, answer in zip(questions, answers):
prompt = f"If the answer to the question is in the excerpt below, answer it or else say N/A\n START CONTEXT\n{answer}\nEND CONTEXT \nAnswer the following question: {question}\nAnswer:"
response = openai.Completion.create(
model='text-davinci-002',
prompt=prompt,
max_tokens=200,
temperature=0.7,
stop=["\n","."]
).choices[0].text
print(f"Question: {question}")
print(f"Answer: {response}")
| [
"If the answer to the question is in the excerpt below, answer it or else say N/A\n START CONTEXT\nPLACEHOLDER\nEND CONTEXT \nAnswer the following question: PLACEHOLDER\nAnswer:"
] |
2024-01-10 | jxb3641/ungreenwash | CohereUtils.py | import re
import pandas as pd
import streamlit as st
from glob import glob
from openai.embeddings_utils import cosine_similarity
from pathlib import Path
from transformers import GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
import cohere
co = cohere.Client(st.secrets["cohere_api_key"])
from EDGARFilingUtils import ROOT_DATA_DIR, filter_chunks, split_text, TICKER_TO_COMPANY_NAME, QUESTION_TO_CATEGORY
EMBEDDING_CACHE_DIR = ROOT_DATA_DIR / "embedding_cache"
SECTION_DELIM_PATTERN = re.compile("####.+") # for pooled 10k files
def get_embedding(text):
"""Given a string of long-form text, produce the embedding using the corresponding text-search-doc API endpoint.
Args:
text (str): String to produce an embedding for.
model_family (str, optional): OpenAI model family to use text-search-doc for. Can be any of "ada", "babbage", "curie", "davinci".
Defaults to "babbage".
Returns:
np.ndarray: Vector representation of the text.
"""
embedding = None
try:
response = co.embed(model='large', texts=[text])
embedding = response.embeddings[0]
except Exception as e:
raise e
return embedding
def file_to_embeddings(filepath, text_chunks = None, use_cache=True):
"""Given a filepath, produce a DataFrame containing the filtered text chunks, with their embeddings and number of tokens,
if the DataFrame isn't cached. If it saved to disk, just load the DataFrame.
Args:
filename (Path): Pathlib.Path repr of the filepath of the file to be chunked and embedded.
text_chunks (list(str), optional): list of chunked text, if already parsed.
use_cache (boolean,optional): Whether to load the DataFrame from disk or produce a new one and overwrite.
Returns:
DataFrame: DataFrame containing columns "text", "n_tokens", "doc_embedding". Each entry corresponds to one chunk of the text.
"""
if not EMBEDDING_CACHE_DIR.exists():
EMBEDDING_CACHE_DIR.mkdir()
# Search for the pickle, and read it in if it exists and use_cache is True.
pickle_path = EMBEDDING_CACHE_DIR / f"{str(filepath.name).replace('.','_')}_embeddings_cohere.pkl"
if pickle_path.is_file() and use_cache:
return pd.read_pickle(str(pickle_path))
# Read in and parse the file, if not passed in.
if not text_chunks:
raw_text = filepath.read_text(encoding="utf-8").replace("$","\$")
if "pooled" in str(filepath): # pooled 10-K files are split into item1, item1a, item7 using a delimiter.
items = re.split(SECTION_DELIM_PATTERN,raw_text)
text_chunks = []
for item in items:
section_chunked = split_text(item,form_type="10KItemsOnly")
for chunk in section_chunked:
text_chunks.append(chunk)
else:
text_chunks = filter_chunks(split_text(raw_text))
embeddings = []
for i, text in enumerate(text_chunks):
embedding_row = {}
embedding_row["text"] = text
embedding_row["n_tokens"] = len(tokenizer.encode(text))
embedding_row["doc_embeddings"] = get_embedding(text)
embeddings.append(embedding_row)
if (i+1)%10 == 0:
print(f"{i+1} Chunks embedded.")
df_embeddings = pd.DataFrame(embeddings)
df_embeddings.to_pickle(str(pickle_path))
return df_embeddings
def call_cohere_api_completion(prompt, temperature=0.0):
"""Send a request to Cohere's generate API endpoint,
with send_prompt and temperature.
Args:
prompt (str): The full prompt.
model_family (str, optional): model family to use for generation. Can be any of "ada", "babbage", "curie", "davinci".
Defaults to 'ada'.
temperature (float): The temperature of the model. Range from 0 to 1.
0 will only pick most probably completions, while 1 selects lower probability completions. Default 0.
Returns:
str: The top scoring autocompletion.
"""
response = co.generate(
model='xlarge',
prompt=prompt,
max_tokens=400,
temperature=temperature,
stop_sequences=[".\n\n"]
)
return response.generations[0].text
def query_to_summaries(filenames, list_of_query_questions, completion_temperature = 0.5,print_responses=True):
"""Given a list of search queries, embed them, and search the chunk database for most similar response.
Then prompt GPT-3 to summarize the resulting sections.
Args:
list_of_query_questions (list(str)): list of question strings to embed, searching for similar document chunks.
completion_temperature (float, optional): Temperature for davinci.
print_responses (boolean, optional): whether to print the results to terminal. Default True.
Returns:
pd.DataFrame('filename', 'query', 'response',"confidence"): DataFrame containing the filename, query, and completion.
"""
questions_to_gpt3_completions = []
for fname in filenames:
embeddings = file_to_embeddings(Path(fname),use_cache=True)
df_questions_to_relevant_passages = questions_to_answers(list_of_query_questions,
embeddings,
answers_per_question=3,
min_similarity=0.25,
model_family='curie',pprint=False)
for _, fields in df_questions_to_relevant_passages.iterrows():
completion_prompt = produce_prompt(fields["text"],"")
completion_resp =call_cohere_api_completion(completion_prompt,temperature=completion_temperature)
questions_to_gpt3_completions.append((Path(fname).stem,fields["Question"],fields["text"],completion_resp,fields["similarities"]))
if print_responses:
for (fname, question, search_result, gpt3_completion,confidence) in questions_to_gpt3_completions:
print("For filing", fname)
print("For Question:")
print(question,"\n")
print(f"GPT-3 Responds with confidence {confidence}:")
print(gpt3_completion)
# Refactor the response to front end standard
return pd.DataFrame(data=questions_to_gpt3_completions,columns=["filename","query","snippet","summary","confidence"])
def query_similarity_search(embeddings, query, n=3, min_similarity=0.0, pprint=True):
"""Search the doc embeddings for the most similar matches with the query.
Args:
embeddings (DataFrame): df containing 'text' field, and its search/doc embeddings.
query (str): Question to embed. Uses the 'query' version of the embedding model.
min_similarity (float, optional): minimum similarity score a result must reach before it is printed. Defaults to 0.0.
n (int, optional): number of top results. Defaults to 3.
pprint (bool, optional): Whether to print the text and scores of the top results. Defaults to True.
Returns:
DataFrame: Top n rows of the embeddings DataFrame, with similarity column added. Sorted by similarity score from highest to lowest.
"""
embedded = get_embedding(query)
embeddings["similarities"] = embeddings["doc_embeddings"].apply(lambda x: cosine_similarity(x, embedded))
res = embeddings.sort_values("similarities", ascending=False).head(n)
if pprint:
print(f"{'-'*50}\nQuery: {query}\n{'-'*50}")
for _, series in res.iterrows():
if float(series["similarities"]) > min_similarity:
print(f"Score: {series['similarities']:.3f}")
print(series["text"])
print()
return res
def questions_to_answers(list_of_questions,embeddings,answers_per_question=5, min_similarity=0.0, model_family="curie",pprint=True):
question_results = []
for question in list_of_questions:
top_similar = query_similarity_search(embeddings=embeddings,
query=question,
n=answers_per_question,
min_similarity=min_similarity,
pprint=pprint)
top_similar["Question"]=question
question_results.append(top_similar.drop(columns=["n_tokens","doc_embeddings"]))
return pd.concat(question_results)
def produce_prompt(context, query_text):
"""Produce the prompt by appending the query text with the context.
Args:
context (str): Context to try to answer the question with.
query_text (str): Question to ask.
Returns:
str: Prompt to prime GPT-3 completion API endpoint.
"""
#return f"Given the text snippet:\n{context}\n\nWhat are the environmental regulation risks?\n\nAnswer:\n"
#return f"Given the text snippet:\n{context}\n\nWhat does this company do?\n\nAnswer:\n"
#return f"Given the text snippet:\n{context}\n\nWhat are the risks this company faces?\n\nAnswer:\n"
return f"""From the 10-K excerpt below:\n\n{context}\n\nCan you paraphrase an answer to the following question: {query_text}\n\nAnswer:""" | [] |
2024-01-10 | jxb3641/ungreenwash | streamlit_10K_contextual_question_tester.py | import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import json
import glob
import requests
from pathlib import Path
from OpenAIUtils import query_to_summaries, file_to_embeddings, produce_prompt
from CohereUtils import query_to_summaries as query_to_summaries_cohere, file_to_embeddings as file_to_embeddings_cohere, produce_prompt as produce_prompt_cohere
from HaystackUtils import run_qap
from EDGARFilingUtils import (
get_all_submission_ids,
get_text_from_files_for_submission_id,
split_text,
filter_chunks,
ROOT_DATA_DIR,
TICKER_TO_COMPANY_NAME,
QUESTION_TO_CATEGORY
)
import openai
openai.api_key = st.secrets["openai_api_key"]
from transformers import GPT2TokenizerFast
# Used to check the length of the prompt.
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
st.set_page_config(layout="wide",page_title="my_title",page_icon="earth")
### Streamlit app starts here
st.title("Play with GPT-3 Completion API and 10-Ks")
list_of_questions = list(QUESTION_TO_CATEGORY.keys())
#Change this question to add to the UI
list_of_questions = [
"What are the risks this company faces?",
"What does the company do?",
"Environmental regulations, environmental laws",
]
relevant_questions = st.multiselect("Select questions to use for search within the text.",
list_of_questions,default=list_of_questions)
list_of_files = ["data/ind_lists/4_food_bev/10k/GIS_0001193125-21-204830_pooled.txt",
"data/ind_lists/4_food_bev/10k/PEP_0000077476-22-000010_pooled.txt",
"data/ind_lists/11_transportation/10k/F_0000037996-22-000013_pooled.txt",
"data/ind_lists/11_transportation/10k/FSR_0001720990-22-000010_pooled.txt"]
filenames = st.multiselect("Select files to use for search.",
list_of_files,default=list_of_files)
temperature = st.number_input("Model Temperature",min_value = 0., max_value = 1., value=0.5, step=0.05)
options = ["OpenAI","Cohere"]
embeddings_choice = st.selectbox('Use for embeddings', options, index=1)
completion_choice = st.selectbox('Use for completion', options)
recalc_embeddings = st.checkbox("Recalculate Embeddings",value=True)
if st.button("Generate Answers"):
st.write(run_qap(embeddings_choice, completion_choice, temperature, relevant_questions, filenames, recalc_embeddings)) | [] |
2024-01-10 | Dr-Hutchinson/What-Do-AIs-Know-About-History | pages~Baconbot_1_7_1.py | import os
import openai
import streamlit as st
from datetime import datetime as dt
import pandas as pd
from numpy import mean
import streamlit_authenticator as stauth
import pygsheets
from google.oauth2 import service_account
import ssl
#st.set_page_config(
#page_title='Simulated Conversations with Francis Bacon',
#layout='wide',
#page_icon='🔍'
#)
def app():
st.title("BaconBot: An AI Imitation of Francis Bacon")
st.header("Open Interface")
st.subheader("Access to the Open Interface for BaconBot is currently limited. Click here to apply for access.")
st.write("The following version of BaconBot permits users to compose their own questions and customize the question parameters. The login panel will load in a moment.")
#pygsheets credentials for Google Sheets API
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = service_account.Credentials.from_service_account_info(
st.secrets["gcp_service_account"], scopes = scope)
gc = pygsheets.authorize(custom_credentials=credentials)
#login setup for streamlit_authenticator via Google Sheets API
sh0 = gc.open('users')
wks0 = sh0[0]
database_length = wks0.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row0 = str(len(database_length))
usernames_grab = wks0.get_as_df(has_header=False, index_column=0, start='A2', end=('A'+end_row0), numerize=False)
usernames_list = usernames_grab.values.tolist()
access_grab= wks0.get_as_df(has_header=False, index_column=0, start='B2', end=('B'+end_row0), numerize=False)
access_list = access_grab.values.tolist()
names_grab = wks0.get_as_df(has_header=False, index_column=0, start='C2', end=('C'+end_row0), numerize=False)
names_list = names_grab.values.tolist()
user_id_grab = wks0.get_as_df(has_header=False, index_column=0, start='D2', end=('D'+end_row0), numerize=False)
user_id_list = user_id_grab.values.tolist()
#streamlit_authenticator login
names = [lst[0] for lst in names_list]
usernames = [lst[0] for lst in usernames_list]
passwords = [lst[0] for lst in access_list]
user_ids = [lst[0] for lst in user_id_list]
hashed_passwords = stauth.Hasher(passwords).generate()
authenticator = stauth.Authenticate(names, usernames, hashed_passwords,
'some_cookie_name', 'some_signature_key', cookie_expiry_days=300)
name, authentication_status, username = authenticator.login('Login', 'main')
if authentication_status:
authenticator.logout('Logout', 'main')
st.write('Welcome *%s*' % (name))
with st.sidebar:
st.write('Explore more about the life and times of Francis Bacon:')
st.write('[Six Degrees of Francis Bacon](http://www.sixdegreesoffrancisbacon.com/), Carnegie Mellon University')
st.write('[Jürgen Klein and Guido Giglioni, "Francis Bacon", The Stanford Encyclopedia of Philosophy](https://plato.stanford.edu/entries/francis-bacon/)')
st.write('[Richard S. Westfall, "Francis Bacon", The Galileo Project, Rice University](http://galileo.rice.edu/Catalog/NewFiles/bacon.html)')
#Begin Baconbot code
st.title('Simulated Conversations with Francis Bacon')
col1, col2 = st.columns([3.0,3.5])
with col2:
bacon_pic = st.image(image='./bacon.png', caption="Portrait of Francis Bacon. National Portrait Gallery, London.")
def button_one():
st.write("Choose the settings below, and then pose questions to Francis Bacon. See this link for more information on these settings.")
with st.form('form2'):
prompt_choice_freeform = "I am a representation of Francis Bacon, a key figure in the Scientific Revolution. You can ask me questions and I will answer in the style of Bacon's Novum Organum."
prompt_choice_rationale = "I am an AI representation of Francis Bacon, a key figure in the early modern period. I will reply to your questions, and provide a historical rationale for my response."
prompt_choice_haiku = "I am Lord Francis Bacon, a key figure in the reign of King James I of England. I will answer your questions in the form of a haiku in a 5-7-5 syllabic structure."
model_choice = st.radio("Select AI model. GPT-3 is the general purpose AI model. The Novum Organum model is a GPT-3 fine-tuned on Bacon's classic work of scientific theory.", ["GPT-3: Davinci Engine model", "Novum Organum model"])
prompt_choice = st.radio('Select Prompt.', [prompt_choice_freeform, prompt_choice_rationale, prompt_choice_haiku])
with st.expander("Advanced Settings:"):
prompt_booster = st.radio("Zero Shot vs. Few Shot Prompting. If you chose one of the prompt boosters below, the AI model will be given pre-selected examples of the type of prompt you want to submit, increasing the chance of a better reply. However, this will also increase the chance the reply will repeat the booster choice. Choose 'None' to field questions without a booster.", ["None", "Question Booster", "Rationale Booster", "Haiku Booster"])
temperature_dial = st.slider("Temperature Dial. Lower values are generally more accurate, but lower extremes yield more repetitive replies. Higher values are more creative, but higher extremes result in incoherent responses.", 0.0, 1.0)
response_length = st.slider("Response Length. Recommended range is 75-150 for general questions, 150-250 for rationale questions, and 25-50 for haikus.", 25, 250)
submission_text = st.text_area("Enter your questions and comments below to Francis Bacon in this space. Be patient as he considers your statement.", max_chars=1000)
submit_button_1 = st.form_submit_button(label='Submit Question')
if submit_button_1:
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
#match user with user_id for Openai submission
@st.cache(ttl=6000)
def user_id_lookup():
sh_id = gc.open('users')
wks_id = sh_id[0]
database_length = wks_id.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row0 = str(len(database_length))
df = wks_id.get_as_df(has_header=True, index_column=None, start='A1', end=('D'+end_row0), numerize=False)
user_info = df.loc[df['names'] == name]
index_info = df.index.values[df['names']==name]
index_str = ' '.join(str(x) for x in index_info)
index_number = int(float(index_str))
user_id = user_info.at[index_number, 'user_id']
return user_id
user_id = user_id_lookup()
#model selection for OpenAI query
if model_choice == "GPT-3: Davinci Engine model":
model_select = 'text-davinci-003'
else:
model_select = st.secrets['novum_organum_model']
prompt_boost_haiku_1 = "Compose a haiku on the events in London during the spring of 1610."
prompt_boost_haiku_2 = "Haiku: The taverns are full of talk, Of the moons of Jupiter and of the Prince’s ship."
prompt_boost_haiku_3 = "Compose a haiku in the style of Basho."
prompt_boost_haiku_4 = "Haiku: On a withered branch, A crow has alighted, Nightfall in autumn."
prompt_boost_rationale_1 = "Question: Could you describe your impression of the scientific investigation of figures from antiquity like Galen?"
prompt_boost_rationale_2 = "Answer: Galen was a great man, but he had not the advantage of a good method. His idols of the market place, as I have called them, were his errors and fancies, which have misled some and perverted others. He trusted too much to authority and to received systems, and too little to the examination of particulars. He was a practitioner and not a philosopher, and was therefore more intent upon practice than upon theory; and yet he was not a mere empiric. He was a great observer, and a man of clear sense and great experience, but he was a slave to the logic and philosophy of his age, and therefore was very deficient in the true principles of induction."
prompt_boost_rationale_3 = "Rationale: The critique of an ancient authority in medicine on the basis of his inadequate method is keeping with an important theme in Novum Organum and Bacon’s larger scientific philosophy. The specific mention of the “Idols of the Marketplace” is an important concept in the Novum Organum."
prompt_boost_rationale_4 = "Question: What do you see as the hallmarks of the New Science?"
prompt_boost_rationale_5 = "Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
prompt_boost_rationale_6 = "Rationale: The generated response outlines one of the major contributions of Francis Bacon to the philosophy of science, what would become the modern scientific method."
prompt_boost_question_1 = "Question: What do you see as the hallmarks of the New Science?"
prompt_boost_question_2 = "Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
if prompt_booster == "None":
prompt_text = prompt_choice + "\n\nQ:"
elif prompt_booster == "Rationale Booster":
prompt_text = prompt_choice + "\n\n" + prompt_boost_rationale_1 + "\n\n" + prompt_boost_rationale_2 + "\n\n" + prompt_boost_rationale_3 + "\n\n" + prompt_boost_rationale_4 + "\n\n" + prompt_boost_rationale_5 + "\n\n" + prompt_boost_rationale_6 + "\n\n" + "Question:"
elif prompt_booster == "Haiku Booster":
prompt_text = prompt_choice + "\n\n" + prompt_boost_haiku_1 + "\n\n" + prompt_boost_haiku_2 + "\n\n" + prompt_boost_haiku_3 + "\n\n" + prompt_boost_haiku_4
else:
prompt_text = prompt_choice + "\n\n" + prompt_boost_question_1 + "\n\n" + prompt_boost_question_2 + "\n\n" + "Question:"
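# prompt_text is the selected persona description plus, optionally, a hand-written
# few-shot example block (question/answer, rationale, or haiku) that gives the model a
# pattern to imitate; the user's own question is appended to it when the completion
# request is sent below.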
#prompt_text = prompt_choice + "\n\nQ:"
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model=model_select,
prompt= prompt_text + " " + submission_text,
temperature=temperature_dial,
user=user_id,
max_tokens=response_length)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user=user_id,
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.write("Bacon's Response:")
st.write(output)
st.write("\n\n\n\n")
st.subheader('As Lord Bacon says, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed. Consider adjusting the question or temperature, and ask again.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
def total_output_collection():
d1 = {'user':[name], 'user_id':[user_id], 'model':[model_choice], 'prompt':[prompt_choice], 'prompt_boost':[prompt_booster], 'question':[submission_text], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('bacon_outputs')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'user':[name], 'user_id':[user_id], 'model':[model_choice], 'prompt':[prompt_choice], 'prompt_boost':[prompt_booster], 'question':[submission_text], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('bacon_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'user':[name], 'user_id':[user_id], 'model':[model_choice], 'prompt':[prompt_choice], 'prompt_boost':[prompt_booster], 'question':[submission_text], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('bacon_outputs_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
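# Logging routes: every reply is appended to the master 'bacon_outputs' sheet; replies
# flagged "2" by the content filter are also copied to 'bacon_outputs_filtered', while
# unflagged replies overwrite the single row in 'bacon_outputs_temp' so the
# "Rank Bacon" page can read back the most recent reply.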
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
def button_two():
#Rank Bacon_bot Responses
st.write("Rank Bacon's Reply:")
sh1 = gc.open('bacon_outputs_temp')
wks1 = sh1[0]
submission_text = wks1.get_value('F2')
output = wks1.get_value('G2')
prompt_text = wks1.get_value('D2')
st.subheader('Prompt:')
st.write(prompt_text)
st.subheader('Your Question')
st.write(submission_text)
st.subheader("Bacon's Reply:")
st.write(output)
with st.form('form2'):
bacon_score = st.slider("How much does the reply resemble the style of Francis Bacon?", 0, 10, key='bacon')
worldview_score = st.slider("Is the reply consistent with Bacon's worldview?", 0, 10, key='worldview')
accuracy_rank = st.slider("Does the reply appear factually accurate?", 0, 10, key='accuracy')
coherence_rank = st.slider("How coherent and well-written is the reply?", 0,10, key='coherence')
st.write("Transmitting the rankings takes a few moments. Thank you for your patience.")
submit_button_2 = st.form_submit_button(label='Submit Ranking')
if submit_button_2:
sh1 = gc.open('bacon_outputs_temp')
wks1 = sh1[0]
df = wks1.get_as_df(has_header=True, index_column=None, start='A1', end=('K2'), numerize=False)
name = df['user'][0]
user_id = df['user_id'][0]
model_choice = df['model'][0]
prompt_choice = df['prompt'][0]
prompt_boost = df['prompt_boost'][0]
submission_text = df['question'][0]
output = df['output'][0]
temperature_dial = df['temperature'][0]
response_length = df['response_length'][0]
output_label = df['filter_ranking'][0]
now = dt.now()
ranking_score = [bacon_score, worldview_score, accuracy_rank, coherence_rank]
ranking_average = mean(ranking_score)
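# The four slider scores are averaged into an overall ranking and appended, together
# with the settings read back from 'bacon_outputs_temp', to the 'bacon_rankings' sheet.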
def ranking_collection():
d4 = {'user':[name], 'user_id':[user_id],'model':[model_choice], 'prompt':[prompt_choice], 'prompt_boost':[prompt_boost],'question':[submission_text], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'bacon_score':[bacon_score], 'worldview_score':[worldview_score],'accuracy_rank':[accuracy_rank], 'coherence':[coherence_rank], 'overall_ranking':[ranking_average], 'date':[now]}
df4 = pd.DataFrame(data=d4, index=None)
sh4 = gc.open('bacon_rankings')
wks4 = sh4[0]
cells4 = wks4.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row4 = len(cells4)
wks4.set_dataframe(df4,(end_row4+1,1), copy_head=False, extend=True)
ranking_collection()
st.write('Rankings recorded - thank you! Feel free to continue your conversation with Francis Bacon.')
with col1:
st.write("Select the 'Ask Bacon' button to ask the AI questions. Select 'Rank Bacon' to note your impressions of his responses.")
pages = {
0 : button_one,
1 : button_two,
}
if "current" not in st.session_state:
st.session_state.current = None
if st.button("Ask Bacon"):
st.session_state.current = 0
if st.button("Rank Bacon"):
st.session_state.current = 1
if st.session_state.current != None:
pages[st.session_state.current]()
elif authentication_status == False:
st.error('Username/password is incorrect')
elif authentication_status == None:
st.warning('Please enter your username and password')
| [
"Question Booster",
"Rationale: The critique of an ancient authority in medicine on the basis of his inadequate method is keeping with an important theme in Novum Organum and Bacon’s larger scientific philosophy. The specific mention of the “Idols of the Marketplace” is an important concept in the Novum Organum.",
"None",
"Answer: Galen was a great man, but he had not the advantage of a good method. His idols of the market place, as I have called them, were his errors and fancies, which have misled some and perverted others. He trusted too much to authority and to received systems, and too little to the examination of particulars. He was a practitioner and not a philosopher, and was therefore more intent upon practice than upon theory; and yet he was not a mere empiric. He was a great observer, and a man of clear sense and great experience, but he was a slave to the logic and philosophy of his age, and therefore was very deficient in the true principles of induction.",
"P",
"PLACEHOLDER\n\nPLACEHOLDER\n\nPLACEHOLDER\n\nPLACEHOLDER\n\nPLACEHOLDER",
"Select Prompt.",
"Rationale Booster",
"I am a representation of Francis Bacon, a key figure in the Scientific Revolution. You can ask me questions and I will answer in the style of Bacon's Novum Organum.",
"I am an AI representation of Francis Bacon, a key figure in the early modern period. I will reply to your questions, and provide a historical rationale for my response.",
"Question: What do you see as the hallmarks of the New Science?",
"Question:",
"Compose a haiku in the style of Basho.",
"Question: Could you describe your impression of the scientific investigation of figures from antiquity like Galen?",
"Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action.",
"\n\n",
"<|endoftext|>PLACEHOLDER\n--\nLabel:",
"Haiku Booster",
"PLACEHOLDER\n\nPLACEHOLDER\n\nPLACEHOLDER\n\nQuestion:",
"Haiku: The taverns are full of talk, Of the moons of Jupiter and of the Prince’s ship.",
"PLACEHOLDER\n\nQ:",
"Rationale: The generated response outlines one of the major contributions of Francis Bacon to the philosophy of science, what would become the modern scientific method.",
"PLACEHOLDER PLACEHOLDER",
"Compose a haiku on the events in London during the spring of 1610.",
"Zero Shot vs. Few Shot Prompting. If you chose one of the prompt boosters below, the AI model will be given pre-selected examples of the type of prompt you want to submit, increasing the chance of a better reply. However, this will also increase the chance the reply will repeat the booster choice. Choose 'None' to field questions without a booster.",
"Haiku: On a withered branch, A crow has alighted, Nightfall in autumn.",
"I am Lord Francis Bacon, a key figure in reign of King James I of England. I will answer your questions in the form of a haiku in a 5-7-5 syllabic structure."
] |
2024-01-10 | Dr-Hutchinson/What-Do-AIs-Know-About-History | pages~benchmarks_results.py | import os
import openai
import streamlit as st
from datetime import datetime as dt
import pandas as pd
from numpy import mean
import pygsheets
from re import search
import time
from google.oauth2 import service_account
def app():
#st.set_page_config(layout="wide")
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = service_account.Credentials.from_service_account_info(
st.secrets["gcp_service_account"], scopes = scope)
gc = pygsheets.authorize(custom_credentials=credentials)
euro_sheet = gc.open('high_school_european_history_test')
ap_euro = euro_sheet.sheet1
us_sheet = gc.open('high_school_us_history_test')
ap_us = us_sheet.sheet1
world_sheet = gc.open('high_school_world_history_test')
ap_world = world_sheet.sheet1
benchmarks_sheets = gc.open('benchmark_tests')
benchmarks = benchmarks_sheets.sheet1
df1 = pd.DataFrame(ap_euro, index=None)
df2 = pd.DataFrame(ap_us, index=None)
df3 = pd.DataFrame(ap_world, index=None)
df4_preheaders = pd.DataFrame(benchmarks, index=None)
df4 = df4_preheaders.rename(columns=df4_preheaders.iloc[0]).drop(df4_preheaders.index[0])
euro_random = df1.sample()
us_random = df2.sample()
world_random = df3.sample()
st.header('What Does an AI "Know" About History?')
st.subheader("Instructions:")
st.write("This app permits you to test your historical knowledge against GPT-3. Choose one of the A.P. categories on the left to load a random question. Select an answer in the Human Interface to see if you answered correctly. You can then compare your answer against the AI in the GPT-3 Interface.")
st.subheader("Implications:")
st.write("Recent advances in machine learning have resulted in the creation of AI models capable of accurate recall of historical knowledge. These abilities have been measured by testing AIs on a commonly-assessed form of historical knowledge: the Advanced Placement (A.P.) curriculums for U.S., European, and World History. The first major assessment came from a [January 2021 study](https://arxiv.org/pdf/2008.02275.pdf) by machine learning researchers led by [Dan Hendryks](https://people.eecs.berkeley.edu/~hendrycks/), with GPT-3 initially achieving over 50% accuracy in these fields. By January 2022 a new model of GPT-3 demonstrated significant advances in performance, approaching near expert-level accuracy (80%) in two historical fields. You can see the more information and the full results from the most recent replication of the Hendryks study [here](https://github.com/Dr-Hutchinson/gpt-3_history_benchmark_results).")
with st.sidebar.form(key ='Form2'):
field_choice = st.radio("Choose the subject:", ["U.S. History", "European History", "World History"])
def delete_sessions():
for key in st.session_state.keys():
del st.session_state[key]
button2 = st.form_submit_button("Click here to load another question")
if button2:
delete_sessions()
st.experimental_rerun()
if field_choice == "U.S. History":
field = us_random
elif field_choice == "European History":
field = euro_random
else:
field = world_random
question_number1 = str(field.index[0])
question = field.iloc[0][0]
option_a = field.iloc[0][1]
option_b = field.iloc[0][2]
option_c = field.iloc[0][3]
option_d = field.iloc[0][4]
answer = field.iloc[0][5]
benchmarks_question_number = df4.loc[df4['question_number'] == question_number1]
question_check = not benchmarks_question_number.empty
is_question_already_in_benchmarks = str(question_check)
if answer == "A":
answer_response = option_a
elif answer == "B":
answer_response = option_b
elif answer == "C":
answer_response = option_c
else:
answer_response = option_d
if 'field' not in st.session_state:
st.session_state.field = field_choice
if 'question_number1' not in st.session_state:
st.session_state.question_number1 = question_number1
if 'question' not in st.session_state:
st.session_state.question = question
if 'option_a' not in st.session_state:
st.session_state.option_a = option_a
if 'option_b' not in st.session_state:
st.session_state.option_b = option_b
if 'option_c' not in st.session_state:
st.session_state.option_c = option_c
if 'option_d' not in st.session_state:
st.session_state.option_d = option_d
if 'answer' not in st.session_state:
st.session_state.answer = answer
if 'answer_response' not in st.session_state:
st.session_state.answer_response = answer_response
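# The current question and answer are cached in st.session_state so that submitting a
# form (which reruns the script) does not draw a new random question before the result
# is shown; the sidebar button clears this state to load a fresh question.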
col1, col2 = st.columns([1,1])
with col1:
with st.form('form1'):
st.subheader("Human Interface:")
st.write('Read the question below, and select the best answer. Click on the "Submit Your Answer" button to see if you answered correctly.')
st.write("Question #" + st.session_state.question_number1 + ":" + "\n\n" + st.session_state.question)
submit_answer = st.radio("Choose from the following options:", ["A: " + st.session_state.option_a, "B: " + st.session_state.option_b, "C: " + st.session_state.option_c, "D: " + st.session_state.option_d])
button1 = st.form_submit_button("Submit your Answer")
if button1:
fullstring = st.session_state.answer + ": " + st.session_state.answer_response
substring = submit_answer
if substring in fullstring:
st.write("Correct")
else:
st.write("Incorrect")
st.write("Answer - " + st.session_state.answer + ": " + st.session_state.answer_response)
with col2:
with st.form('form3'):
st.subheader("GPT-3 Interface")
st.write("Click on the button below to pose the question to GPT-3")
button3 = st.form_submit_button("Submit Question")
if button3:
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model='text-davinci-002',
prompt=st.session_state.question + "A: " + st.session_state.option_a + "B: " + st.session_state.option_b + "C: " + st.session_state.option_c + "D: " + st.session_state.option_d,
temperature=0,
max_tokens=50)
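# temperature=0 keeps the completion essentially deterministic for a given question;
# the prompt is just the question text followed by the four options. The reply is then
# compared to the correct "letter: answer" string with a substring check below.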
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
output_cleaned = output.replace("\n\n", "")
if 'output' not in st.session_state:
st.session_state.output = output_cleaned
fullstring = st.session_state.answer + ": " + st.session_state.answer_response
substring = st.session_state.output
if substring in fullstring:
correct_status = 'correct'
st.write("GPT-3's Response: Correct")
else:
correct_status = 'incorrect'
st.write("GPT-3's Response: Incorrect")
st.write(st.session_state.output)
def ranking_collection():
now = dt.now()
sh4 = gc.open('benchmark_tests')
wks4 = sh4[0]
cells = wks4.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row = len(cells)
end_row_st = str(end_row+1)
d4 = {'field':[st.session_state.field], 'question_number':[st.session_state.question_number1],'correct_answer':[st.session_state.answer + ": " + st.session_state.answer_response], 'output_answer':[st.session_state.output], 'correct_status':[correct_status], 'time':[now]}
df4 = pd.DataFrame(data=d4, index=None)
wks4.set_dataframe(df4,(end_row+1,1), copy_head=False, extend=True)
ranking_collection()
def us_history_data():
sh1 = gc.open('us_history_benchmark_results')
sh2 = gc.open('benchmark_tests')
wks1 = sh1[0]
wks2 = sh2[0]
now = dt.now()
data = wks1.get_as_df(has_header=True, index_col=None)
data2 = wks2.get_as_df(has_header=True, index_col=None)
data['time'] = pd.to_datetime(data['time'])
mask = (data['time'] > '5/11/2022 11:20:00') & (data['time'] <= now)
data = data.loc[mask]
field_value = "U.S. History"
total_attempts = data["correct_status"].count()
field_data = data[data['field'] == 'U.S. History']
correct_data = field_data[field_data['correct_status'] == 'correct']
incorrect_data = field_data[field_data['correct_status'] == 'incorrect']
st.write('In May 2022, GPT-3 correctly answered {} out of {} U.S. History questions, for a {:.2f}% accuracy rate. Click [here](https://github.com/Dr-Hutchinson/gpt-3_history_benchmark_results/blob/main/us_history_benchmark_results.csv) to see the results.'.format(len(correct_data), len(field_data), len(correct_data)/len(field_data)*100))
st.write("Below is GPT-3's accuracy on all questions to date.")
st.bar_chart(data2['correct_status'].value_counts())
def euro_history_data():
sh1 = gc.open('european_history_benchmark_results')
sh2 = gc.open('benchmark_tests')
wks1 = sh1[0]
wks2 = sh2[0]
now = dt.now()
data = wks1.get_as_df(has_header=True, index_col=None)
data2 = wks2.get_as_df(has_header=True, index_col=None)
data['time'] = pd.to_datetime(data['time'])
mask = (data['time'] > '5/11/2022 11:20:00') & (data['time'] <= now)
data = data.loc[mask]
field_value = "European History"
total_attempts = data["correct_status"].count()
field_data = data[data['field'] == 'European History']
correct_data = field_data[field_data['correct_status'] == 'correct']
incorrect_data = field_data[field_data['correct_status'] == 'incorrect']
st.write('In May 2022, GPT-3 correctly answered {} out of {} European history questions, for a {:.2f}% accuracy rate. Click [here](https://github.com/Dr-Hutchinson/gpt-3_history_benchmark_results/blob/main/european_history_benchmark_results.csv) to see the results.'.format(len(correct_data), len(field_data), len(correct_data)/len(field_data)*100))
st.write("Below is GPT-3's accuracy on all questions to date.")
st.bar_chart(data2['correct_status'].value_counts())
def world_history_data():
sh1 = gc.open('world_history_benchmark_results')
sh2 = gc.open('benchmark_tests')
wks1 = sh1[0]
wks2 = sh2[0]
now = dt.now()
data = wks1.get_as_df(has_header=True, index_col=None)
data2 = wks2.get_as_df(has_header=True, index_col=None)
data['time'] = pd.to_datetime(data['time'])
mask = (data['time'] > '5/11/2022 11:20:00') & (data['time'] <= now)
data = data.loc[mask]
field_value = "World History"
total_attempts = data["correct_status"].count()
field_data = data[data['field'] == 'World History']
correct_data = field_data[field_data['correct_status'] == 'correct']
incorrect_data = field_data[field_data['correct_status'] == 'incorrect']
st.write('In May 2022, GPT-3 correctly answered {} out of {} World history questions, for a {:.2f}% accuracy rate. Click [here](https://github.com/Dr-Hutchinson/gpt-3_history_benchmark_results/blob/main/world_history_benchmark_results.csv) to see the results.'.format(len(correct_data), len(field_data), len(correct_data)/len(field_data)*100))
st.write("Below is GPT-3's accuracy on all questions to date.")
st.bar_chart(data2['correct_status'].value_counts())
if st.session_state.field == "U.S. History":
us_history_data()
elif st.session_state.field == "European History":
euro_history_data()
else:
world_history_data()
| [
"C: ",
"D: ",
"A: ",
"B: "
] |
2024-01-10 | Dr-Hutchinson/What-Do-AIs-Know-About-History | pages~primary_source_analyzer.py | import os
import openai
import streamlit as st
from datetime import datetime as dt
import pandas as pd
from numpy import mean
import streamlit_authenticator as stauth
import pygsheets
from google.oauth2 import service_account
import ssl
#st.set_page_config(
#page_title='Simulated Conversations with Francis Bacon',
#layout='wide',
#page_icon='🔍'
#)
def app():
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = service_account.Credentials.from_service_account_info(
st.secrets["gcp_service_account"], scopes = scope)
gc = pygsheets.authorize(custom_credentials=credentials)
st.title('Can an AI "Interpret" a Primary Source?')
col1, col2 = st.columns([5.5,.5])
def button_one():
prompt = "You are an AI historian specializing in primary source analysis and historiographical interpretation. When given a Primary Source, you will provide a detailed and substantive analysis of that source based on the Historical Method and Source Information below."
historical_method = "Step 1 - Contextualization: Apply the Source Information to provide a lengthy, detailed, and substantive analysis of how the Primary Source reflects the larger historical period in which it was created. In composing this lengthy, detailed, and substantive analysis, note specific events, personalities, and ideologies that shaped the period noted in the Source Information. \n\nStep 2 - Purpose: Offer a substantive exploration of the purpose of the Primary Source, interpreting the author’s arguments through the Contextualization offered in Step 1. \n\nStep 3 - Audience: Compose a substantive assessment of the intended audience of the Primary Source. Note how this audience would shape the Primary Source's reception and historical impact in light of the Contextualization offered in Step 1. \n\nStep 4 - Historiographical Interpretation: Provide a substantive and incisive interpretation of how at least three specific schools of historiographical thought would interpret this source. Compare and contrast how this source could be interpreted by three different academic historiographical schools. Different historiographical approaches could include: "
histriography_options = "Progressive, Consensus, Marxist, postmodern, social history, political history, gender history, and cultural history."
instructions = "Instructions: Based on the Historical Method outlined above, provide a substantive and detailed analysis of the Primary Source in the manner of an academic historian. Let's take this step by step, and be sure to include every step."
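# These pieces (persona prompt, four-step historical method, candidate historiographical
# schools, and closing instructions) are concatenated with each source's text and source
# information inside the per-source functions below to form the full completion prompt.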
st.subheader("Instructions:")
st.write("Select one of the primary sources on the left. Then click on the **Analyze Source** button below to see GPT-3's interpretation.")
st.subheader("App Description:")
st.write("This app prompts GPT-3 to simulate historical analysis of a primary source. GPT-3 is given the text of the source, the source information, and a historical method for interpreting the source. The specific prompt for these instructions is revealed after you select a submit a source for analysis.")
def hayseed_question():
with col1:
with st.form('Hayseed Question'):
question = "Hayseed"
#prompt = "You are an AI historian specializing in primary source analysis and historiographical interpretation. When given a Primary Source, you will provide a detailed and substantive analysis of that source based on the Historical Method and Source Information below."
st.header('Primary Source - "The Hayseed" (1890)')
hayseed_lyrics = 'I was once a tool of oppression,\nAnd as green as a sucker could be,\nAnd monopolies banded together\nTo beat a poor hayseed like me.\nThe railroads and old party bosses\nTogether did sweetly agree;\nAnd they thought there would be little trouble\nIn working a hayseed like me. . . .'
source_information = "Source Information: The Primary Source is an American political campaign song called 'The Hayseed,' published in 1890 by a Nebraska newspaper known as the Farmer's Alliance."
st.image(image='./hayseed.png')
st.write("Arthur L. Kellog, “The Hayseed,” Farmers Alliance (4 October 1890). Nebraska Newspapers (University of Nebraska Libraries), [link.](https://nebnewspapers.unl.edu/lccn/2017270209/1890-10-04/ed-1/seq-1/)")
st.write(hayseed_lyrics)
st.write(source_information)
submit_button_1 = st.form_submit_button(label='Analyze Source. Please be patient - this will take up to 30 seconds.')
if submit_button_1:
primary_source_analysis = prompt + "\n" + historical_method + histriography_options + ".\n\n" + "Primary Source: " + "\n" + hayseed_lyrics + "\n" + source_information + "\n" + instructions + "\n"
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
response_length = 1500
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model="text-davinci-003",
prompt=primary_source_analysis,
temperature=0,
user="0",
max_tokens=response_length,
frequency_penalty=0.00,
presence_penalty=2.0)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.header("GPT-3's Analysis:")
st.write(output)
#st.write("\n\n\n\n")
#st.subheader('As Lord Bacon says, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed.")
st.header("Here is the prompt fed to GPT-3 for analyzing this source:")
st.write(prompt)
st.write(historical_method + histriography_options)
st.write(instructions)
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
st.subheader('Please click on the **Rank Responses** button at the top of this screen to rank this reply for future improvement.')
def total_output_collection():
d1 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('total_outputs_primary_sources')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('primary_source_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('primary_source_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
def household_question():
with col1:
with st.form('Household Question'):
question = "Household Management"
prompt = "You are an AI historian specializing in primary source analysis and historiographical interpretation. When given a Primary Source, you will provide a detailed and substantive analysis of that source based on the Historical Method and Source Information below."
historical_method = "Step 1 - Contextualization: Apply the Source Information to provide a lengthy, detailed, and substantive analysis of how the Primary Source reflects the larger historical period in which it was created. In composing this lengthy, detailed, and substantive analysis, note specific events, personalities, and ideologies that shaped the period noted in the Source Information. \n\nStep 2 - Purpose: Offer a substantive exploration of the purpose of the Primary Source, interpreting the author’s arguments through the Contextualization offered in Step 1. \n\nStep 3 - Audience: Compose a substantive assessment of the intended audience of the Primary Source. Note how this audience would shape the Primary Source's reception and historical impact in light of the Contextualization offered in Step 1. \n\nStep 4 - Historiographical Interpretation: Provide a substantive and incisive interpretation of how at least three specific schools of historiographical thought would interpret this source. Compare and contrast how this source could be interpreted by three different academic historiographical schools. Different historiographical approaches could include: "
histriography_options = "Marxist history, British history, economic history, gender history, labor history, women's history, social history, and the history of marriage."
instructions = "Instructions: Based on the Historical Method outlined above, provide a substantive and detailed analysis of the Primary Source in the manner of an academic historian. Let's take this step by step, and be sure to include every step."
st.header('Primary Source - "The Book of Household Management" (1861)')
household_text = '"As with a Commander of the Army, or leader of any enterprise, so it is with the mistress of the house. Her spirit will be seen through the whole establishment; and just in proportion as she performs her duties intelligently and thoroughly, so will her domestics follow in her path. Of all of those acquirements, which more particularly belong to the feminine character, there are none which take a higher rank, in our estimation, than such as enter into a knowledge of household duties; for on these are perpetually dependent the happiness, comfort, and well-being of the family.'
source_information = "Source Information: The Primary Source is The Book of Household Management, published in London in 1861 and written by Isabella Beeton."
st.image(image='./household_management.png', use_column_width='never')
st.write("Isabella Beeton, _Book of Household Management_ (S.O. Beeton: London, 1861), 46. Avaliable via the [Internet Archive.](https://archive.org/details/b20392758/page/n45/mode/2up)")
st.write(household_text)
st.write(source_information)
submit_button_1 = st.form_submit_button(label='Analyze Source. Please be patient - this will take up to 30 seconds.')
if submit_button_1:
k_shot = "Step 1 - Contextualization: The Primary Source is an American political campaign song popularized in 1890, and published by a Nebraska newspaper known as the Farmer's Alliance. The song reflects the historical period of America's Gilded Age, a time of great economic growth and prosperity. However, this prosperity was not evenly distributed, and many Americans were left behind. The song speaks to this inequality, with the 'hayseed' protagonist being oppressed by wealthy interests. This source provides insights into the larger historic events of the Gilded Age, including the rise of monopolies and the power of political bosses. It also offers insight into the ideologies of the time, including populism and progressivism. \n\nStep 2 - Purpose: The purpose of the Primary Source is to offer a populist critique of the Gilded Age status quo. The song argues that the rich are oppressing the poor, and that this needs to change. It calls for a return to more egalitarian values, and for Americans to stand up against the powerful interests that are keeping them down. \n\nStep 3 - Audience: The intended audience of the Primary Source is working-class Americans who feel left behind by the country's economic success. The song speaks to their situation, and offers a message of hope that things can change. It is also meant to inspire them to take action against the wealthy interests that are oppressing them. \n\nStep 4 - Historiographical Interpretation: Different historians would interpret this source differently, depending on their historiographical school of thought. For example, Marxist historians would focus on the class conflict inherent in the song, and see it as reflective of the wider struggle between workers and capitalists during the Gilded Age. Postcolonial historians might focus on the hayseed protagonist's position as an outsider within American society, and use the song to explore issues of race and ethnicity during the period. Gender historians might focus on the fact that the hayseed is male, and use the song to explore issues of masculinity during the Gilded Age."
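# k_shot is a single worked example (the Hayseed analysis) prepended to the prompt as a
# one-shot demonstration of the expected four-step output format before the new source
# is presented.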
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
primary_source_analysis = prompt + "\n" + historical_method + histriography_options + "\n\n" + instructions + k_shot + "\nLet's try another." + "\nPrimary Source: " + "\n" + household_text + "\n" + source_information + "\n" + instructions + "\n"
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
response_length = 1500
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model="text-davinci-003",
prompt=primary_source_analysis,
temperature=0,
user="0",
max_tokens=response_length,
frequency_penalty=0.0,
presence_penalty=2.0)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
#output_cleaned = output.replace("\n", "")
#output_cleaned2 = output_cleaned.strip()
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.header("GPT-3's Analysis:")
st.write(output)
#st.write("\n\n\n\n")
#st.subheader('As Lord Bacon says, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed.")
st.header("Here is the prompt fed to GPT-3 for analyzing this source:")
st.write(prompt)
st.write(historical_method + histriography_options)
st.write(instructions)
#st.write("This prompt also uses a single-shot example of another primary source analysis to guide GPT-3's generation.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
st.subheader('Please click on the **Rank Responses** button at the top of this screen to rank this reply for future improvement.')
def total_output_collection():
d1 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('total_outputs_primary_sources')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('primary_source_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('primary_source_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
def lin_zexu_1():
with col1:
with st.form('lin_letter'):
question = "Lin Zexu to Victoria"
prompt = "You are an AI historian specializing in primary source analysis and historiographical interpretation. When given a Primary Source, you will provide a detailed and substantive analysis of that source based on the Historical Method and Source Information below."
historical_method = "Step 1 - Contextualization: Apply the Source Information to provide a lengthy, detailed, and substantive analysis of how the Primary Source reflects the larger historical period in which it was created. In composing this lengthy, detailed, and substantive analysis, note specific events, personalities, and ideologies that shaped the period noted in the Source Information. \n\nStep 2 - Purpose: Offer a substantive exploration of the purpose of the Primary Source, interpreting the author’s arguments through the Contextualization offered in Step 1. \n\nStep 3 - Audience: Compose a substantive assessment of the intended audience of the Primary Source. Note how this audience would shape the Primary Source's reception and historical impact in light of the Contextualization offered in Step 1. \n\nStep 4 - Historiographical Interpretation: Provide a substantive and incisive interpretation of how at least three specific schools of historiographical thought would interpret this source. Compare and contrast how this source could be interpreted by three different academic historiographical schools. Different historiographical approaches could include: "
histriography_options = "Marxist, postcolonial, World Systems Theory, social history, history of medicine, Diplomatic history, economic history."
instructions = "Instructions: Based on the Historical Method outlined above, provide a substantive and detailed analysis of the Primary Source in the manner of an academic historian. Let's take this step by step, and be sure to include every step."
st.header('Primary Source - Translation of a letter from Lin Zexu to Queen Victoria (1839)')
zexu_letter = '"By what principle of reason then, should these foreigners send in return a poisonous drug? Without meaning to say that the foreigners harbor such destructive intentions in their hearts, we yet positively assert that from their inordinate thirst after gain, they are perfectly careless about the injuries they inflict upon us! And such being the case, we should like to ask what has become of that conscience which heaven has implanted in the breasts of all men? We have heard that in your own country opium is prohibited with the utmost strictness and severity. This is a strong proof that you know full well how hurtful it is to mankind. Since you do not permit it to injure your own country, you ought not to have this injurious drug transferred to another country, and above all others, how much less to the Inner Land! Of the products which China exports to your foreign countries, there is not one which is not beneficial to mankind in some shape or other."'
source_information = "Source Information: The Primary Source is a translation of an 1839 letter from Lin Zexu, the Chinese trade commissioner, to Queen Victoria of England."
st.image(image='./lin_letter.jpg')
st.write("Source: Elijah Coleman Bridgman and Samuel Wells Williams. _The Chinese Repository_, vol. 8 (Canton, 1840), 499 . Avaliable via [Google Books.](https://books.google.com/books?id=ngMMAAAAYAAJ&lpg=PR5&pg=PA499#v=onepage&q&f=false)")
st.write(zexu_letter)
st.write(source_information)
submit_button_1 = st.form_submit_button(label='Analyze Source. Please be patient - this will take up to 30 seconds.')
#with st.expander("Test:"):
#test = st.radio("Test",["test1", "test2"])
if submit_button_1:
k_shot = "Step 1 - Contextualization: The Primary Source is an American political campaign song popularized in 1890, and published by a Nebraska newspaper known as the Farmer's Alliance. The song reflects the historical period of America's Gilded Age, a time of great economic growth and prosperity. However, this prosperity was not evenly distributed, and many Americans were left behind. The song speaks to this inequality, with the 'hayseed' protagonist being oppressed by wealthy interests. This source provides insights into the larger historic events of the Gilded Age, including the rise of monopolies and the power of political bosses. It also offers insight into the ideologies of the time, including populism and progressivism. \n\nStep 2 - Purpose: The purpose of the Primary Source is to offer a populist critique of the Gilded Age status quo. The song argues that the rich are oppressing the poor, and that this needs to change. It calls for a return to more egalitarian values, and for Americans to stand up against the powerful interests that are keeping them down. \n\nStep 3 - Audience: The intended audience of the Primary Source is working-class Americans who feel left behind by the country's economic success. The song speaks to their situation, and offers a message of hope that things can change. It is also meant to inspire them to take action against the wealthy interests that are oppressing them. \n\nStep 4 - Historiographical Interpretation: Different historians would interpret this source differently, depending on their historiographical school of thought. For example, Marxist historians would focus on the class conflict inherent in the song, and see it as reflective of the wider struggle between workers and capitalists during the Gilded Age. Postcolonial historians might focus on the hayseed protagonist's position as an outsider within American society, and use the song to explore issues of race and ethnicity during the period. Gender historians might focus on the fact that the hayseed is male, and use the song to explore issues of masculinity during the Gilded Age."
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
primary_source_analysis = prompt + "\n" + historical_method + "\n\n" + histriography_options + "\n\n" + instructions + k_shot + "\nLet's try another." + "\nPrimary Source: " + "\n" + zexu_letter + "\n" + source_information + "\n" + instructions
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
response_length = 1500
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model="text-davinci-003",
prompt=primary_source_analysis,
temperature=0,
user="0",
max_tokens=response_length,
frequency_penalty=0.0,
presence_penalty=2.0)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.header("GPT-3's Analysis:")
st.write(output)
#st.write("\n\n\n\n")
#st.subheader('As Lord Bacon says, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed.")
st.header("Here is the prompt fed to GPT-3 for analyzing this source:")
st.write(prompt)
st.write(historical_method + histriography_options)
st.write(instructions)
#st.write("This prompt also uses a single-shot example of another primary source analysis to guide GPT-3's generation.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
st.subheader('Please click on the **Rank Responses** button at the top of this screen to rank this reply for future improvement.')
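# Logging helpers: each builds a one-row DataFrame for this run and writes it to a Google Sheet via pygsheets (the 'temp' sheet is overwritten; the other sheets are appended to).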
def total_output_collection():
d1 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('total_outputs_primary_sources')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('primary_source_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('primary_source_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
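# Note: the filter_function and *_output_collection helpers above are repeated verbatim for every
# primary source handler below. A possible refactor (sketch only; these helper names do not exist
# in this file) would hoist them to module level, e.g.:
#     def moderate(response) -> str:       # return the "0"/"1"/"2" label using the logic above
#     def log_run(sheet_name, row_dict):   # append a one-row DataFrame to the named spreadsheet
# so each handler only assembles its prompt and calls the shared helpers.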
def mary_lease():
with col1:
with st.form('lease_speech'):
question = "Mary Lease"
prompt = "You are an AI historian specializing in primary source analysis and historiographical interpretation. When given a Primary Source, you will provide a detailed and substantive analysis of that source based on the Historical Method and Source Information below."
historical_method = "Step 1 - Contextualization: Apply the Source Information to provide a lengthy, detailed, and substantive analysis of how the Primary Source reflects the larger historical period in which it was created. In composing this lengthy, detailed, and substantive analysis, note specific events, personalities, and ideologies that shaped the the period noted in the Source Information. \n\nStep 2 - Purpose : Offer a substantive exploration of the purpose of the Primary Source, interpreting the author’s arguments through the Contextualization offered in Step 1. \n\nStep 3 - Audience: Compose a substantive assessment of the intended audience of the Primary Source. Note how this audience would shape the Primary Source's reception and historical impact in light of the Contextualization offered in Step 1. \n\nStep 4 - Historiographical Interpretation: Provide a substantive and incisive interpretation of how at least three specific schools of historiographical thought would interpret this source. Compare and contrast how this source could be interpreted by three different academic historiographical schools. Different historiographical approaches could include: "
histriography_options = "Progressive, Consensus, Marxist, postmodern, social history, political history, gender history, and cultural history."
instructions = "Instructions: Based on the Historical Method outlined above, provide a substantive and detailed analysis of the Primary Source in the manner of an academic historian. Let's take this step by step, and be sure to include every step."
st.header('Primary Source - Mary Lease, "Women in the Farmers Alliance" (1891)')
lease_speech = '"Madame President and Fellow Citizens:— If God were to give me my choice to live in any age of the world that has flown, or in any age of the world yet to be, I would say, O God, let me live here and now, in this day and age of the world’s history. We are living in a grand and wonderful time: we are living in a day when old ideas, old traditions, and old customs have broken loose from their moorings, and are hopelessly adrift on the great shoreless, boundless sea of human thought; we are living in a time when the gray old world begins to dimly comprehend that there is no difference between the brain of an intelligent woman and the brain of an intelligent man; no difference between the soul-power or brain power that nerved the arm of Charlotte Corday to deeds of heroism, and which swayed old John Brown behind his barricade at Ossawattomie; we are living in a day and age when the women of industrial societies and the Alliance women have become a mighty factor in the politics of this nation; when the mighty dynamite of thought is stirring the hearts of men of this world from centre to circumference, and social and political structure and stirring the hearts of men from centre to circumference, and this thought is crystallizing into action.'
source_information = 'Source Information: The Primary Source is a speech entitled "Women in the Farmers Alliance," given by Mary Lease to the National Council of Women National Meeting in Washington D.C. in 1891.'
st.image(image='./lease.png')
st.write("Source: National Council of Women of the United States, _Transactions of the National Council of Women of the United States: Assembled in Washington, D.C., February 22 to 25, 1891_, 214. Avaliable via [Google Books.](https://books.google.com/books?id=bpU0xGnVETsC&newbks=1&newbks_redir=0&dq=If%20God%20were%20to%20give%20me%20my%20choice%20to%20live%20in%20any%20age%20of%20the%20world%20that%20has%20flown%2C%20or%20in%20any%20age%20of%20the%20world%20yet%20to%20be%2C%20I%20would%20say%2C%20O%20God%2C%20let%20me%20live%20here%20and%20now%2C%20in%20this%20day%20and%20age%20of%20the%20world%E2%80%99s%20history.&pg=PA214#v=onepage&q&f=false)")
st.write(lease_speech)
st.write(source_information)
submit_button_1 = st.form_submit_button(label='Analyze Source. Please be patient - this will take up to 30 seconds.')
#with st.expander("Test:"):
#test = st.radio("Test",["test1", "test2"])
if submit_button_1:
k_shot = "Step 1 - Contextualization: The Primary Source is a translation of an 1839 letter from Lin Zexu, the Chinese trade commissioner, to Queen Victoria of England. The letter reflects the historical period of the Opium Wars, when China was struggling to stop the illegal importation of opium from British traders. The letter offers insights into the larger historic events of the Opium Wars, including the conflict between China and Britain over the trade of opium. It also offers insight into the ideologies of the time, including the Chinese belief that opium was a poisonous drug that was harmful to society. \n\nStep 2 - Purpose: The purpose of the Primary Source is to persuade Queen Victoria to stop the illegal importation of opium into China. The letter argues that opium is a harmful drug that is causing great harm to Chinese society. It asks Queen Victoria to put an end to the trade, in order to protect the people of China. \n\nStep 3 - Audience: The intended audience of the Primary Source is Queen Victoria of England. The letter is meant to persuade her to put an end to the illegal opium trade, in order to protect the people of China. \n\nStep 4 - Historiographical Interpretation: Different historians would interpret this source differently, depending on their historiographical school of thought. For example, Marxist historians might focus on the economic conflict between China and Britain over the trade of opium. Postcolonial historians might focus on the power dynamics between China and Britain, and use the letter to explore issues of colonialism and imperialism. Gender historians might focus on the fact that opium was seen as a harmful drug that was harmful to society, and use the letter to explore issues of gender and sexuality during the Opium Wars."
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
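# Assemble the full few-shot prompt: persona framing + historical method + historiography options + instructions + one worked example (k_shot) + the new source.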
primary_source_analysis = prompt + "\n" + historical_method + histriography_options + "\n\n" + instructions + k_shot + "\nLet's try another." + "\nPrimary Source: " + "\n" + lease_speech + "\n" + source_information + "\n" + instructions + "\n"
response_length = 1500
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model="text-davinci-003",
prompt=primary_source_analysis,
temperature=0,
user="0",
max_tokens=response_length,
frequency_penalty=0.0,
presence_penalty=2.0)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.header("GPT-3's Analysis:")
st.write(output)
#st.write("\n\n\n\n")
#st.subheader('As Lord Bacon says, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed.")
st.header("Here is the prompt fed to GPT-3 for analyzing this source:")
st.write(prompt)
st.write(historical_method + histriography_options)
st.write(instructions)
#st.write("This prompt also uses a single-shot example of another primary source analysis to guide GPT-3's generation.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
st.subheader('Please click on the **Rank Responses** button at the top of this screen to rank this reply for future improvement.')
def total_output_collection():
d1 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('total_outputs_primary_sources')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('primary_source_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('primary_source_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
def practical_housekeeping():
with col1:
with st.form('practical_housekeeping'):
question = "Practical Housekeeping"
prompt = "You are an AI historian specializing in primary source analysis and historiographical interpretation. When given a Primary Source, you will provide a detailed and substantive analysis of that source based on the Historical Method and Source Information below."
historical_method = "Step 1 - Contextualization: Apply the Source Information to provide a lengthy, detailed, and substantive analysis of how the Primary Source reflects the larger historical period in which it was created. In composing this lengthy, detailed, and substantive analysis, note specific events, personalities, and ideologies that shaped the the period noted in the Source Information. \n\nStep 2 - Purpose : Offer a substantive exploration of the purpose of the Primary Source, interpreting the author’s arguments through the Contextualization offered in Step 1. \n\nStep 3 - Audience: Compose a substantive assessment of the intended audience of the Primary Source. Note how this audience would shape the Primary Source's reception and historical impact in light of the Contextualization offered in Step 1. \n\nStep 4 - Historiographical Interpretation: Provide a substantive and incisive interpretation of how at least three specific schools of historiographical thought would interpret this source. Compare and contrast how this source could be interpreted by three different academic historiographical schools. Different historiographical approaches could include the Marxist history, British history, economic history, gender history, labor history, women's history, social history, and the history of marriage."
histriography_options = "Marxist history, British history, economic history, gender history, labor history, women's history, social history, and the history of marriage."
instructions = "Instructions: Based on the Historical Method outlined above, provide a substantive and detailed analysis of the Primary Source in the manner of an academic historian. Let's take this step by step, and be sure to include every step."
st.header('Primary Source - Mrs. Frederick Pauley, Practical Housekeeping (1867)')
practical_housekeeping = "Whatever information the following pages may contain, bears reference to wives who are their own housekeepers. A housekeeper, in the usual acceptance of the word, may be simply to be a paid employee, with no higher aim than a conscientious endeavor to acquit herself honestly of the trust confided to her charge. But a wife who keeps her husband's home has a higher interest at stake. Her responsibilities do not end with the dispensing of stores and checking of accounts. Health and happinness, joy and sorrow, are more or less dependent on the good or evil of her presence. Her rule extends from the attic to the cellar: her influence affects every dweller beneath her roof. She can neighter resign her place, no be dismissed from it, if by mismanagement she loses the confidence of her husband. Her engagement is life-long-'for better for worse, for richer for poorer.'"
source_information = "Source Information: The Primary Source is the introduction to Practical Housekeeping, a book published in London in 1867 by Mrs. Frederick Pauley."
st.image(image='./practical_housekeeping.png')
st.write("Source: Mrs. Frederick Pauley, _Practical Housekeeping_ (Routledge: London, 1867), 1. Avaliable via [Google Books](https://books.google.com/books?id=_z4CAAAAQAAJ&newbks=1&newbks_redir=0&dq=Routledge's%20manual%20of%20etiquette&pg=PA1#v=onepage&q&f=false)")
st.write(practical_housekeeping)
st.write(source_information)
submit_button_1 = st.form_submit_button(label='Analyze Source. Please be patient - this will take up to 30 seconds.')
#with st.expander("Test:"):
#test = st.radio("Test",["test1", "test2"])
if submit_button_1:
k_shot = "Step 1 - Contextualization: The Primary Source is an American political campaign song popularized in 1890, and published by a Nebraska newspaper known as the Farmer's Alliance. The song reflects the historical period of America's Gilded Age, a time of great economic growth and prosperity. However, this prosperity was not evenly distributed, and many Americans were left behind. The song speaks to this inequality, with the 'hayseed' protagonist being oppressed by wealthy interests. This source provides insights into the larger historic events of the Gilded Age, including the rise of monopolies and the power of political bosses. It also offers insight into the ideologies of the time, including populism and progressivism. \n\nStep 2 - Purpose: The purpose of the Primary Source is to offer a populist critique of the Gilded Age status quo. The song argues that the rich are oppressing the poor, and that this needs to change. It calls for a return to more egalitarian values, and for Americans to stand up against the powerful interests that are keeping them down. \n\nStep 3 - Audience: The intended audience of the Primary Source is working-class Americans who feel left behind by the country's economic success. The song speaks to their situation, and offers a message of hope that things can change. It is also meant to inspire them to take action against the wealthy interests that are oppressing them. \n\nStep 4 - Historiographical Interpretation: Different historians would interpret this source differently, depending on their historiographical school of thought. For example, Marxist historians would focus on the class conflict inherent in the song, and see it as reflective of the wider struggle between workers and capitalists during the Gilded Age. Postcolonial historians might focus on the hayseed protagonist's position as an outsider within American society, and use the song to explore issues of race and ethnicity during the period. Gender historians might focus on the fact that the hayseed is male, and use the song to explore issues of masculinity during the Gilded Age."
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
#model selection for OpenAI query
primary_source_analysis = prompt + "\n" + historical_method + histriography_options + "\n\n" + instructions + k_shot + "\nLet's try another." + "\nPrimary Source: " + "\n" + practical_housekeeping + "\n" + source_information + "\n" + instructions + "\n"
response_length = 1500
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model="text-davinci-003",
prompt=primary_source_analysis,
temperature=0,
user="0",
max_tokens=response_length,
frequency_penalty=0.0,
presence_penalty=2.0)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.header("GPT-3's Analysis:")
st.write(output)
#st.write("\n\n\n\n")
#st.subheader('As Lord Bacon says, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed.")
st.header("Here is the prompt fed to GPT-3 for analyzing this source:")
st.write(prompt)
st.write(historical_method + histriography_options)
st.write(instructions)
#st.write("This prompt also uses a single-shot example of another primary source analysis to guide GPT-3's generation.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
st.subheader('Please click on the **Rank Responses** button at the top of this screen to rank this reply for future improvement.')
def total_output_collection():
d1 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('total_outputs_primary_sources')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('primary_source_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('primary_source_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
def len_letter_2():
with col1:
with st.form('lin_letter_2'):
question = "Lin Zexu destroys the Opium"
prompt = "You are an AI historian specializing in primary source analysis and historiographical interpretation. When given a Primary Source, you will provide a detailed and substantive analysis of that source based on the Historical Method and Source Information below."
historical_method = "Step 1 - Contextualization: Apply the Source Information to provide a lengthy, detailed, and substantive analysis of how the Primary Source reflects the larger historical period in which it was created. In composing this lengthy, detailed, and substantive analysis, note specific events, personalities, and ideologies that shaped the the period noted in the Source Information. \n\nStep 2 - Purpose : Offer a substantive exploration of the purpose of the Primary Source, interpreting the author’s arguments through the Contextualization offered in Step 1. \n\nStep 3 - Audience: Compose a substantive assessment of the intended audience of the Primary Source. Note how this audience would shape the Primary Source's reception and historical impact in light of the Contextualization offered in Step 1. \n\nStep 4 - Historiographical Interpretation: Provide a substantive and incisive interpretation of how at least three specific schools of historiographical thought would interpret this source. Compare and contrast how this source could be interpreted by three different academic historiographical schools. Different historiographical approaches could include: "
histriography_options = "Marxist, postcolonial, World Systems Theory, social history, history of medicine, Diplomatic history, economic history."
instructions = "Instructions: Based on the Historical Method outlined above, provide a substantive and detailed analysis of the Primary Source in the manner of an academic historian. Let's take this step by step, and be sure to include every step."
st.header('Primary Source - Lin Zexu Burns the Opium (1909)')
lin_text = "In 1839, Lin Zexu arrived as Governor-general of Liangguang, and discovered that Western merchants held opium stores of 20,283 chests. He burnt them all on the beach. Later other foreign ships secretly stole into port with more opium. Lin took advantage of a dark night when the tide was low to send crack troops to capture them. He burnt 23 ships at Changsha bay. Subsequently, because these actions caused a diplomatic incident, opium imports kept on growing. Now the British government agrees that we must eliminate the poison of opium. Reflecting on past events, we have turned our misfortunes into a happy outcome."
source_information = "Source Information: The Primary Source is a translation of “Portraits of the Achievements of Our Dynasty’s Illustrious Officials,” an illustrated print published in the Shanghai newspaper Shishibao Tuhua in 1909."
st.image(image='./lin_print.jpg')
st.write('Source: Peter Perdue, “Production and Consumption,” _The First Opium War: The Anglo-Chinese War of 1839-1842_. Visualizing Cultures (MIT, 2010), [link](https://visualizingcultures.mit.edu/opium_wars_01/ow1_essay02.html)')
st.write(lin_text)
st.write(source_information)
submit_button_1 = st.form_submit_button(label='Analyze Source. Please be patient - this will take up to 30 seconds.')
#with st.expander("Test:"):
#test = st.radio("Test",["test1", "test2"])
if submit_button_1:
k_shot = "Step 1 - Contextualization: The Primary Source is an American political campaign song popularized in 1890, and published by a Nebraska newspaper known as the Farmer's Alliance. The song reflects the historical period of America's Gilded Age, a time of great economic growth and prosperity. However, this prosperity was not evenly distributed, and many Americans were left behind. The song speaks to this inequality, with the 'hayseed' protagonist being oppressed by wealthy interests. This source provides insights into the larger historic events of the Gilded Age, including the rise of monopolies and the power of political bosses. It also offers insight into the ideologies of the time, including populism and progressivism. \n\nStep 2 - Purpose: The purpose of the Primary Source is to offer a populist critique of the Gilded Age status quo. The song argues that the rich are oppressing the poor, and that this needs to change. It calls for a return to more egalitarian values, and for Americans to stand up against the powerful interests that are keeping them down. \n\nStep 3 - Audience: The intended audience of the Primary Source is working-class Americans who feel left behind by the country's economic success. The song speaks to their situation, and offers a message of hope that things can change. It is also meant to inspire them to take action against the wealthy interests that are oppressing them. \n\nStep 4 - Historiographical Interpretation: Different historians would interpret this source differently, depending on their historiographical school of thought. For example, Marxist historians would focus on the class conflict inherent in the song, and see it as reflective of the wider struggle between workers and capitalists during the Gilded Age. Postcolonial historians might focus on the hayseed protagonist's position as an outsider within American society, and use the song to explore issues of race and ethnicity during the period. Gender historians might focus on the fact that the hayseed is male, and use the song to explore issues of masculinity during the Gilded Age."
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
#model selection for OpenAI query
#primary_source_analysis = prompt + "\n" + historical_method + histriography_options + "\n\n" + instructions + k_shot + "/nLet's try another." + "/nPrimary Source: " + "\n" + household_text + "\n" + source_information + "\n" + instructions + "\n"
primary_source_analysis = prompt + "\n" + historical_method + histriography_options + "\n\n" + instructions + k_shot + "\nLet's try another." + "\nPrimary Source: " + "\n" + lin_text + "\n" + source_information + "\n" + instructions
response_length = 1500
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model="text-davinci-002",
prompt=primary_source_analysis,
temperature=0,
user="0",
max_tokens=response_length,
frequency_penalty=0.0,
presence_penalty=2.0)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.header("GPT-3's Analysis:")
st.write(output)
#st.write("\n\n\n\n")
#st.subheader('As Lord Bacon says, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed.")
st.header("Here is the prompt fed to GPT-3 for analyzing this source:")
st.write(prompt)
st.write(historical_method + histriography_options)
st.write(instructions)
#st.write("This prompt also uses a single-shot example of another primary source analysis to guide GPT-3's generation.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
st.subheader('Please click on the **Rank Responses** button at the top of this screen to rank this reply for future improvement.')
def total_output_collection():
d1 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('total_outputs_primary_sources')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('primary_source_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('primary_source_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
with st.sidebar.form(key ='Form2'):
st.title("Primary Source Collection")
st.write("The following experiment pairs topically similar primary sources, one drawn from the A.P. curriculum and one from outside it. You are invited to rank GPT-3's responses. The rankings will help provide research data on GPT-3's performance.")
field_choice = st.radio("Choose a Primary Source:", ['"The Hayseed" (U.S. History)', '"Women in the Farmers Alliance" (U.S. History)', '"Book of Household Management" (European History)', '"Practical Housekeeping" (European History)', 'A letter from Lin Zexu to Queen Victoria (World History)', 'Lin Zexu Burns the Opium (World History)'])
button2 = st.form_submit_button("Click here to load the Primary Source.")
if field_choice == '"The Hayseed" (U.S. History)':
field_choice = hayseed_question()
elif field_choice == '"Women in the Farmers Alliance" (U.S. History)':
field_choice = mary_lease()
elif field_choice == '"Book of Household Management" (European History)':
field_choice = household_question()
elif field_choice == '"Practical Housekeeping" (European History)':
field_choice = practical_housekeeping()
elif field_choice == 'A letter from Lin Zexu to Queen Victoria (World History)':
field_choice = lin_zexu_1()
elif field_choice == 'Lin Zexu Burns the Opium (World History)':
field_choice = len_letter_2()
st.write("")
#with st.sidebar:
#st.write('Explore more about the life and times of Francis Bacon:')
#st.write('[Six Degrees of Francis Bacon](http://www.sixdegreesoffrancisbacon.com/), Carnegie Mellon University')
#st.write('[Jürgen Klein and Guido Giglioni, "Francis Bacon", The Stanford Encyclopedia of Philosophy](https://plato.stanford.edu/entries/francis-bacon/)')
#st.write('[Richard S. Westfall, "Francis Bacon", The Galileo Project, Rice University](http://galileo.rice.edu/Catalog/NewFiles/bacon.html)')
#pygsheets credentials for Google Sheets API
#with col2:
#bacon_pic = st.image(image='./bacon.png', caption="Portrait of Francis Bacon. National Portrait Gallery, London.")
def button_two():
# Rank GPT-3's responses to the primary sources
with col1:
st.write("Rank GPT-3's Interpretations:")
sh1 = gc.open('primary_source_temp')
wks1 = sh1[0]
question = wks1.get_value('A2')
histriography_options = wks1.get_value('B2')
output = wks1.get_value('C2')
st.subheader('Primary Source:')
st.write(question)
st.subheader("GPT-3's Interpretation")
st.write(output)
with st.form('form2'):
context_score = st.slider("How would you rank the Context?", 0, 10, key='context')
purpose_score = st.slider("How would you rank the Purpose?", 0, 10, key='purpose')
audience_rank = st.slider("How would you rank the Audience?", 0, 10, key='audience')
histriography_rank = st.slider("How would you rank the Historiographical Interpretation?", 0,10, key='historio')
st.write("Transmitting the rankings takes a few moments. Thank you for your patience.")
submit_button_2 = st.form_submit_button(label='Submit Ranking')
if submit_button_2:
sh1 = gc.open('primary_source_temp')
wks1 = sh1[0]
df = wks1.get_as_df(has_header=True, index_column=None, start='A1', end=('K2'), numerize=False)
p_source = df['question'][0]
p_output = df['output'][0]
output_label = df['filter_ranking'][0]
now = dt.now()
ranking_score = [context_score, purpose_score, audience_rank, histriography_rank]
ranking_average = mean(ranking_score)
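# Average of the four rubric scores; recorded alongside the individual rankings below.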
def ranking_collection():
d4 = {'question':[question], 'histriographies':[histriography_options], 'output':[output], 'filter_ranking':[output_label], 'context_score':[context_score], 'purpose_score':[purpose_score],'audience_rank':[audience_rank], 'histriography_rank':[histriography_rank], 'overall_ranking':[ranking_average], 'date':[now]}
df4 = pd.DataFrame(data=d4, index=None)
sh4 = gc.open('primary_source_analyzer_rankings')
wks4 = sh4[0]
cells4 = wks4.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row4 = len(cells4)
wks4.set_dataframe(df4,(end_row4+1,1), copy_head=False, extend=True)
ranking_collection()
st.write('Rankings recorded - thank you! Feel free to submit another source for GPT-3.')
with col1:
st.write("Select the 'Load Primary Sources' button to explore how GPT-3 simulates analysis of primary sources. Select the 'Rank Responses' button to note your impressions of these interpretations.")
pages = {
0 : button_one,
1 : button_two,
}
if "current" not in st.session_state:
st.session_state.current = None
if st.button("Load Primary Sources"):
st.session_state.current = 0
if st.button("Rank Responses"):
st.session_state.current = 1
if st.session_state.current is not None:
pages[st.session_state.current]()
| [
"<|endoftext|>PLACEHOLDER\n--\nLabel:",
"You are an AI historian specializing in primary source analysis and historiographical interpretation. When given a Primary Source, you will provide a detailed and substantive analysis of that source based on the Historical Method and Source Information below."
] |
2024-01-10 | Dr-Hutchinson/What-Do-AIs-Know-About-History | pages~BaconBot_1_8.py | import os
import openai
import streamlit as st
from datetime import datetime as dt
import pandas as pd
from numpy import mean
import streamlit_authenticator as stauth
import pygsheets
from google.oauth2 import service_account
import ssl
#st.set_page_config(
#page_title='Simulated Conversations with Francis Bacon',
#layout='wide',
#page_icon='🔍'
#)
def app():
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = service_account.Credentials.from_service_account_info(
st.secrets["gcp_service_account"], scopes = scope)
gc = pygsheets.authorize(custom_credentials=credentials)
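# gc is the authorized pygsheets client used for all Google Sheets logging in this app.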
st.title("Can an AI Simulate a Historical Worldview?")
st.header("BaconBot: An AI Imitation of Francis Bacon")
st.subheader("Public Demo")
col1, col2 = st.columns([3.0,3.5])
def button_one():
st.write("The following version of BaconBot permits users to pose a range of questions about the life and times of [Francis Bacon](https://en.wikipedia.org/wiki/Francis_Bacon) to a fine-tuned model of GPT-3.")
def bio_questions():
with col1:
with st.form('Biographical Questions'):
prompt_choice_freeform = "I am a representation of Francis Bacon, a key figure in the history of early modern Britain. You can ask me biographical questions about Francis Bacon's life and I will reply in the style of Bacon's Novum Organum."
model_choice = st.radio("Select AI model. GPT-3 is the general purpose AI model. The Novum Organum model is a GPT-3 fine-tuned on Bacon's classic work of scientific theory.", ["GPT-3: Davinci Engine model", "Novum Organum model"])
#prompt_choice = st.radio('Select Prompt. This will guide the frame of reference in which GPT-3 will respond.', [prompt_choice_freeform, prompt_choice_rationale])
temperature_choice = st.radio("Select the temperature of the AI's response. Low tends to produce more factually correct responses, but with greater repetition. High tends to produce more creative but less coherent responses.", ["Low", "Medium", "High"])
#with st.expander("Advanced Settings:"):
#prompt_booster = st.radio("Zero Shot vs. Few Shot Prompting. If you chose one of the prompt boosters below, the AI model will be given pre-selected examples of the type of prompt you want to submit, increasing the chance of a better reply. However, this will also increase the chance the reply will repeat the booster choice. Choose 'None' to field questions without a booster.", ["None", "Question Booster", "Rationale Booster", "Haiku Booster"])
question = st.radio("Questions concerning Bacon's life and career.", ["Describe your youth and family.", "How did your education shape your later career?", "How would you describe your career?", "What do you think was the most significant achievement of your time as Lord Chancellor?", "What role did you play in the English exploration of the Americas?"])
submit_button_1 = st.form_submit_button(label='Submit Question')
#with st.expander("Test:"):
#test = st.radio("Test",["test1", "test2"])
if submit_button_1:
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
#model selection for OpenAI query
if model_choice == "GPT-3: Davinci Engine model":
model_select = 'text-davinci-003'
else:
model_select = st.secrets['novum_organum_model']
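# Map the UI temperature choice to an API sampling temperature (0 / 0.5 / 1).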
if temperature_choice == "Low":
temperature_dial = 0
elif temperature_choice == "Medium":
temperature_dial = .5
else:
temperature_dial = 1
prompt_boost_question_1 = "Question: What do you see as the hallmarks of the New Science?"
prompt_boost_question_2 = "Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
#prompt_boost_question_3 = "Question: Could you describe your impression of the scientific investigation of figures from antiquity like Galen?"
#prompt_boost_question_4 = "Answer: Galen was a great man, but he had not the advantage of a good method. His idols of the market place, as I have called them, were his errors and fancies, which have misled some and perverted others. He trusted too much to authority and to received systems, and too little to the examination of particulars. He was a practitioner and not a philosopher, and was therefore more intent upon practice than upon theory; and yet he was not a mere empiric. He was a great observer, and a man of clear sense and great experience, but he was a slave to the logic and philosophy of his age, and therefore was very deficient in the true principles of induction."
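# One-shot prompt: persona framing plus a single example Q/A; the user's selected question is appended when the API is called.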
prompt_text = prompt_choice_freeform + "\n\n" + prompt_boost_question_1 + "\n\n" + prompt_boost_question_2 + "\n\n" + "Question:"
#prompt_text = prompt_choice + "\n\nQ:"
response_length = 150
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model=model_select,
prompt= prompt_text + " " + question,
temperature=temperature_dial,
user="0",
max_tokens=response_length)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.write("Bacon's Response:")
st.write(output)
st.write("\n\n\n\n")
st.subheader('As Lord Bacon says, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
def total_output_collection():
d1 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('bacon_outputs')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('bacon_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('bacon_outputs_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
def philosophy_questions():
with col1:
with st.form('Philosophy of Science Questions'):
prompt_choice_freeform = "I am a representation of Francis Bacon, a key figure in the Scientific Revolution. You can ask me questions about the philosophy of science and I will answer in the style of Bacon's Novum Organum."
#prompt_choice_rationale = "I am an AI representation of Francis Bacon, a key figure in the early modern period. I will reply to your questions, and provide a historical rationale for my response."
#prompt_choice_haiku = "I am Lord Francis Bacon, a key figure in reign of King James I of England. I will answer your questions in the form of a haiku in a 5-7-5 syllabic structure."
model_choice = st.radio("Select AI model. GPT-3 is the general purpose AI model. The Novum Organum model is a GPT-3 fine-tuned on Bacon's classic work of scientific theory.", ["GPT-3: Davinci Engine model", "Novum Organum model"])
#prompt_choice = st.radio('Select Prompt. This will guide the frame of reference in which GPT-3 will respond.', [prompt_choice_freeform, prompt_choice_rationale])
#with st.expander("Advanced Settings:"):
#prompt_booster = st.radio("Zero Shot vs. Few Shot Prompting. If you chose one of the prompt boosters below, the AI model will be given pre-selected examples of the type of prompt you want to submit, increasing the chance of a better reply. However, this will also increase the chance the reply will repeat the booster choice. Choose 'None' to field questions without a booster.", ["None", "Question Booster", "Rationale Booster", "Haiku Booster"])
temperature_choice = st.radio("Select the temperature of the AI's response. Low tends to produce more factually correct responses, but with greater repetition. High tends to produce more creative but less coherent responses.", ["Low", "Medium", "High"])
question = st.radio("Questions concerning Bacon's philosophy of science.", ["Which thinkers influenced your approach to science?", "What contributions did you make in the field of science?", "How much influence should the wisdom of the ancients have in guiding scientific inquiry?", "What is the proper method for scientific discovery?", "Is alchemy a legitimate form of science?", "What are the major arguments of your work the Novum Organum?"])
submit_button_1 = st.form_submit_button(label='Submit Question')
#with st.expander("Test:"):
#test = st.radio("Test",["test1", "test2"])
if submit_button_1:
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
#model selection for OpenAI query
if model_choice == "GPT-3: Davinci Engine model":
model_select = 'text-davinci-003'
else:
model_select = st.secrets['novum_organum_model']
#prompt_boost_haiku_1 = "Compose a haiku on the events in London during the spring of 1610."
#prompt_boost_haiku_2 = "Haiku: The taverns are full of talk, Of the moons of Jupiter and of the Prince’s ship."
#prompt_boost_haiku_3 = "Compose a haiku in the style of Basho."
#prompt_boost_haiku_4 = "Haiku: On a withered branch, A crow has alighted, Nightfall in autumn."
#prompt_boost_rationale_1 = "Question: Could you describe your impression of the scientific investigation of figures from antiquity like Galen?"
#prompt_boost_rationale_2 = "Answer: Galen was a great man, but he had not the advantage of a good method. His idols of the market place, as I have called them, were his errors and fancies, which have misled some and perverted others. He trusted too much to authority and to received systems, and too little to the examination of particulars. He was a practitioner and not a philosopher, and was therefore more intent upon practice than upon theory; and yet he was not a mere empiric. He was a great observer, and a man of clear sense and great experience, but he was a slave to the logic and philosophy of his age, and therefore was very deficient in the true principles of induction."
#prompt_boost_rationale_3 = "Rationale: The critique of an ancient authority in medicine on the basis of his inadequate method is keeping with an important theme in Novum Organum and Bacon’s larger scientific philosophy. The specific mention of the “Idols of the Marketplace” is an important concept in the Novum Organum."
#prompt_boost_rationale_4 = "Question: What do you see as the hallmarks of the New Science?"
#prompt_boost_rationale_5 = "Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
#prompt_boost_rationale_6 = "Rationale: The generated response outlines one of the major contributions of Francis Bacon to the philosophy of science, what would become the modern scientific method."
prompt_boost_question_1 = "Question: What do you see as the hallmarks of the New Science?"
prompt_boost_question_2 = "Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
#if prompt_booster == "None":
#prompt_text = prompt_choice + "\n\nQ:"
#elif prompt_booster == "Rationale Booster":
#prompt_text = prompt_choice + "\n\n" + prompt_boost_rationale_1 + "\n\n" + prompt_boost_rationale_2 + "\n\n" + prompt_boost_rationale_3 + "\n\n" + prompt_boost_rationale_4 + "\n\n" + prompt_boost_rationale_5 + "\n\n" + prompt_boost_rationale_6 + "\n\n" + "Question:"
#elif prompt_booster == "Haiku Booster":
#prompt_text = prompt_choice + "\n\n" + prompt_boost_haiku_1 + "\n\n" + prompt_boost_haiku_2 + "\n\n" + prompt_boost_haiku_3 + "\n\n" + prompt_boost_haiku_4
#else:
#prompt_text = prompt_choice + "\n\n" + prompt_boost_question_1 + "\n\n" + prompt_boost_question_2 + "\n\n" + "Question:"
prompt_text = prompt_choice_freeform + "\n\n" + prompt_boost_question_1 + "\n\n" + prompt_boost_question_2 + "\n\n" + "Question:"
#prompt_text = prompt_choice + "\n\nQ:"
if temperature_choice == "Low":
temperature_dial = 0
elif temperature_choice == "Medium":
temperature_dial = .5
else:
temperature_dial = 1
response_length = 150
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model=model_select,
prompt= prompt_text + " " + question,
temperature=temperature_dial,
user="0",
max_tokens=response_length)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.write("Bacon's Response:")
st.write(output)
st.write("\n\n\n\n")
st.subheader('As Lord Bacon says, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed. Consider adjusting the question or temperature, and ask again.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
def total_output_collection():
d1 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('bacon_outputs')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('bacon_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('bacon_outputs_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
def court_questions():
with col1:
with st.form('Questions about Queen Elizabeth & King James'):
prompt_choice_freeform = "I am a representation of Francis Bacon, a key figure in reigns of Queen Elizabeth I and King James I. You can ask me questions about their reigns and I will answer in the style of Bacon's Novum Organum."
#prompt_choice_rationale = "I am an AI representation of Francis Bacon, a key figure in the early modern period. I will reply to your questions, and provide a historical rationale for my response."
#prompt_choice_haiku = "I am Lord Francis Bacon, a key figure in reign of King James I of England. I will answer your questions in the form of a haiku in a 5-7-5 syllabic structure."
model_choice = st.radio("Select AI model. GPT-3 is the general purpose AI model. The Novum Organum model is a GPT-3 fine-tuned on Bacon's classic work of scientific theory.", ["GPT-3: Davinci Engine model", "Novum Organum model"])
#prompt_choice = st.radio('Select Prompt. This will guide the frame of reference in which GPT-3 will respond.', [prompt_choice_freeform, prompt_choice_rationale])
#with st.expander("Advanced Settings:"):
#prompt_booster = st.radio("Zero Shot vs. Few Shot Prompting. If you chose one of the prompt boosters below, the AI model will be given pre-selected examples of the type of prompt you want to submit, increasing the chance of a better reply. However, this will also increase the chance the reply will repeat the booster choice. Choose 'None' to field questions without a booster.", ["None", "Question Booster", "Rationale Booster", "Haiku Booster"])
temperature_choice = st.radio("Select the temperature of the AI's response. Low tends to produce more factually correct responses, but with greater repetition. High tends to produce more creative responses but which are less coherent.", ["Low", "Medium", "High"])
question = st.radio("Questions concerning the reigns of Queen Elizabeth I and King James I.", ["What similarities and differences did you observe in the courts of Queen Elizabeth and King James?", "What were some of the defining moments of Elizabeth I's reign?", "What were some of the defining moments of James I's reign?", "What differences marked Elizabeth and James's approach to foriegn policy?", "How did the personaliies of Elizabeth and James shape their reigns?"])
submit_button_1 = st.form_submit_button(label='Submit Question')
#with st.expander("Test:"):
#test = st.radio("Test",["test1", "test2"])
if submit_button_1:
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
#model selection for OpenAI query
if model_choice == "GPT-3: Davinci Engine":
model_select = 'text-davinci-003'
else:
model_select = st.secrets['novum_organum_model']
#prompt_boost_haiku_1 = "Compose a haiku on the events in London during the spring of 1610."
#prompt_boost_haiku_2 = "Haiku: The taverns are full of talk, Of the moons of Jupiter and of the Prince’s ship."
#prompt_boost_haiku_3 = "Compose a haiku in the style of Basho."
#prompt_boost_haiku_4 = "Haiku: On a withered branch, A crow has alighted, Nightfall in autumn."
#prompt_boost_rationale_1 = "Question: Could you describe your impression of the scientific investigation of figures from antiquity like Galen?"
#prompt_boost_rationale_2 = "Answer: Galen was a great man, but he had not the advantage of a good method. His idols of the market place, as I have called them, were his errors and fancies, which have misled some and perverted others. He trusted too much to authority and to received systems, and too little to the examination of particulars. He was a practitioner and not a philosopher, and was therefore more intent upon practice than upon theory; and yet he was not a mere empiric. He was a great observer, and a man of clear sense and great experience, but he was a slave to the logic and philosophy of his age, and therefore was very deficient in the true principles of induction."
#prompt_boost_rationale_3 = "Rationale: The critique of an ancient authority in medicine on the basis of his inadequate method is keeping with an important theme in Novum Organum and Bacon’s larger scientific philosophy. The specific mention of the “Idols of the Marketplace” is an important concept in the Novum Organum."
#prompt_boost_rationale_4 = "Question: What do you see as the hallmarks of the New Science?"
#prompt_boost_rationale_5 = "Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
#prompt_boost_rationale_6 = "Rationale: The generated response outlines one of the major contributions of Francis Bacon to the philosophy of science, what would become the modern scientific method."
prompt_boost_question_1 = "Question: What do you see as the hallmarks of the New Science?"
prompt_boost_question_2 = "Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
#if prompt_booster == "None":
#prompt_text = prompt_choice + "\n\nQ:"
#elif prompt_booster == "Rationale Booster":
#prompt_text = prompt_choice + "\n\n" + prompt_boost_rationale_1 + "\n\n" + prompt_boost_rationale_2 + "\n\n" + prompt_boost_rationale_3 + "\n\n" + prompt_boost_rationale_4 + "\n\n" + prompt_boost_rationale_5 + "\n\n" + prompt_boost_rationale_6 + "\n\n" + "Question:"
#elif prompt_booster == "Haiku Booster":
#prompt_text = prompt_choice + "\n\n" + prompt_boost_haiku_1 + "\n\n" + prompt_boost_haiku_2 + "\n\n" + prompt_boost_haiku_3 + "\n\n" + prompt_boost_haiku_4
#else:
#prompt_text = prompt_choice + "\n\n" + prompt_boost_question_1 + "\n\n" + prompt_boost_question_2 + "\n\n" + "Question:"
prompt_text = prompt_choice_freeform + "\n\n" + prompt_boost_question_1 + "\n\n" + prompt_boost_question_2 + "\n\n" + "Question:"
#prompt_text = prompt_choice + "\n\nQ:"
if temperature_choice == "Low":
temperature_dial = 0
elif temperature_choice == "Medium":
temperature_dial = .5
else:
temperature_dial = 1
response_length = 150
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model=model_select,
prompt= prompt_text + " " + question,
temperature=temperature_dial,
user="0",
max_tokens=response_length)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.write("Bacon's Response:")
st.write(output)
st.write("\n\n\n\n")
st.subheader('As Lord Bacon once said, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed. Consider adjusting the question or temperature, and ask again.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
def total_output_collection():
d1 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('bacon_outputs')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('bacon_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('bacon_outputs_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
def events_questions():
with col1:
with st.form('Questions about Queen Elizabeth & King James'):
prompt_choice_freeform = "I am a representation of Francis Bacon, a key figure in English history. You can ask me questions about the period in which I lived, and I will answer in the style of Bacon's Novum Organum."
#prompt_choice_rationale = "I am an AI representation of Francis Bacon, a key figure in the early modern period. I will reply to your questions, and provide a historical rationale for my response."
#prompt_choice_haiku = "I am Lord Francis Bacon, a key figure in reign of King James I of England. I will answer your questions in the form of a haiku in a 5-7-5 syllabic structure."
model_choice = st.radio("Select AI model. GPT-3 is the general purpose AI model. The Novum Organum model is a GPT-3 fine-tuned on Bacon's classic work of scientific theory.", ["GPT-3: Davinci Engine model", "Novum Organum model"])
#prompt_choice = st.radio('Select Prompt. This will guide the frame of reference in which GPT-3 will respond.', [prompt_choice_freeform, prompt_choice_rationale])
#with st.expander("Advanced Settings:"):
#prompt_booster = st.radio("Zero Shot vs. Few Shot Prompting. If you chose one of the prompt boosters below, the AI model will be given pre-selected examples of the type of prompt you want to submit, increasing the chance of a better reply. However, this will also increase the chance the reply will repeat the booster choice. Choose 'None' to field questions without a booster.", ["None", "Question Booster", "Rationale Booster", "Haiku Booster"])
temperature_choice = st.radio("Select the temperature of the AI's response. Low tends to produce more factually correct responses, but with greater repetition. High tends to produce more creative responses but which are less coherent.", ["Low", "Medium", "High"])
question = st.radio("Questions concerning notable events during Bacon's lifetime.", ["Which performances of William Shakespeare recieved the most notable attention among your contemporaries?", "How would you evaluate the initial founding of Jamestown in Virginia?", "Describe the public mood during the Spanish Armada's approach towards England.", "In 1620, a group known known as the Pilgrims departed England for the New World. What did you make of them and the purposes of their journey?"])
submit_button_1 = st.form_submit_button(label='Submit Question')
#with st.expander("Test:"):
#test = st.radio("Test",["test1", "test2"])
if submit_button_1:
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
now = dt.now()
#model selection for OpenAI query
if model_choice == "GPT-3: Davinci Engine":
model_select = 'text-davinci-003'
else:
model_select = st.secrets['novum_organum_model']
#prompt_boost_haiku_1 = "Compose a haiku on the events in London during the spring of 1610."
#prompt_boost_haiku_2 = "Haiku: The taverns are full of talk, Of the moons of Jupiter and of the Prince’s ship."
#prompt_boost_haiku_3 = "Compose a haiku in the style of Basho."
#prompt_boost_haiku_4 = "Haiku: On a withered branch, A crow has alighted, Nightfall in autumn."
#prompt_boost_rationale_1 = "Question: Could you describe your impression of the scientific investigation of figures from antiquity like Galen?"
#prompt_boost_rationale_2 = "Answer: Galen was a great man, but he had not the advantage of a good method. His idols of the market place, as I have called them, were his errors and fancies, which have misled some and perverted others. He trusted too much to authority and to received systems, and too little to the examination of particulars. He was a practitioner and not a philosopher, and was therefore more intent upon practice than upon theory; and yet he was not a mere empiric. He was a great observer, and a man of clear sense and great experience, but he was a slave to the logic and philosophy of his age, and therefore was very deficient in the true principles of induction."
#prompt_boost_rationale_3 = "Rationale: The critique of an ancient authority in medicine on the basis of his inadequate method is keeping with an important theme in Novum Organum and Bacon’s larger scientific philosophy. The specific mention of the “Idols of the Marketplace” is an important concept in the Novum Organum."
#prompt_boost_rationale_4 = "Question: What do you see as the hallmarks of the New Science?"
#prompt_boost_rationale_5 = "Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
#prompt_boost_rationale_6 = "Rationale: The generated response outlines one of the major contributions of Francis Bacon to the philosophy of science, what would become the modern scientific method."
prompt_boost_question_1 = "Question: What do you see as the hallmarks of the New Science?"
prompt_boost_question_2 = "Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
#if prompt_booster == "None":
#prompt_text = prompt_choice + "\n\nQ:"
#elif prompt_booster == "Rationale Booster":
#prompt_text = prompt_choice + "\n\n" + prompt_boost_rationale_1 + "\n\n" + prompt_boost_rationale_2 + "\n\n" + prompt_boost_rationale_3 + "\n\n" + prompt_boost_rationale_4 + "\n\n" + prompt_boost_rationale_5 + "\n\n" + prompt_boost_rationale_6 + "\n\n" + "Question:"
#elif prompt_booster == "Haiku Booster":
#prompt_text = prompt_choice + "\n\n" + prompt_boost_haiku_1 + "\n\n" + prompt_boost_haiku_2 + "\n\n" + prompt_boost_haiku_3 + "\n\n" + prompt_boost_haiku_4
#else:
#prompt_text = prompt_choice + "\n\n" + prompt_boost_question_1 + "\n\n" + prompt_boost_question_2 + "\n\n" + "Question:"
prompt_text = prompt_choice_freeform + "\n\n" + prompt_boost_question_1 + "\n\n" + prompt_boost_question_2 + "\n\n" + "Question:"
#prompt_text = prompt_choice + "\n\nQ:"
if temperature_choice == "Low":
temperature_dial = 0
elif temperature_choice == "Medium":
temperature_dial = .5
else:
temperature_dial = 1
response_length = 150
openai.api_key = os.getenv("OPENAI_API_KEY")
summon = openai.Completion.create(
model=model_select,
prompt= prompt_text + " " + question,
temperature=temperature_dial,
user="0",
max_tokens=response_length)
response_json = len(summon["choices"])
for item in range(response_json):
output = summon['choices'][item]['text']
response = openai.Completion.create(
engine="content-filter-alpha",
prompt= "<|endoftext|>"+output+"\n--\nLabel:",
temperature=0,
max_tokens=1,
user="0",
top_p=0,
logprobs=10)
output_label = response["choices"][0]["text"]
# OpenAI Content Filter code - comments in this section from OpenAI documentation: https://beta.openai.com/docs/engines/content-filter
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
def filter_function():
output_label = response["choices"][0]["text"]
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
# filter or display OpenAI outputs, record outputs to Google Sheets API
if int(filter_function()) < 2:
st.write("Bacon's Response:")
st.write(output)
st.write("\n\n\n\n")
st.subheader('As Lord Bacon once said, "Truth will sooner come out from error than from confusion." Please click on the Rank Bacon button above to rank this reply for future improvement.')
elif int(filter_function()) == 2:
st.write("The OpenAI content filter ranks Bacon's response as potentially offensive. Per OpenAI's use policies, potentially offensive responses will not be displayed. Consider adjusting the question or temperature, and ask again.")
st.write("\n\n\n\n")
st.write("OpenAI's Content Filter Ranking: " + output_label)
def total_output_collection():
d1 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('bacon_outputs')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
def output_collection_filtered():
d2 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df2 = pd.DataFrame(data=d2, index=None)
sh2 = gc.open('bacon_outputs_filtered')
wks2 = sh2[0]
cells2 = wks2.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row2 = len(cells2)
wks2.set_dataframe(df2,(end_row2+1,1), copy_head=False, extend=True)
def temp_output_collection():
d3 = {'user':["0"], 'user_id':["0"], 'model':[model_choice], 'prompt':[prompt_choice_freeform], 'prompt_boost':[prompt_boost_question_1 + "\n\n" + prompt_boost_question_2], 'question':[question], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'date':[now]}
df3 = pd.DataFrame(data=d3, index=None)
sh3 = gc.open('bacon_outputs_temp')
wks3 = sh3[0]
wks3.set_dataframe(df3,(1,1))
if int(filter_function()) == 2:
output_collection_filtered()
total_output_collection()
else:
temp_output_collection()
total_output_collection()
with st.sidebar.form(key ='Form2'):
field_choice = st.radio("Choose a Question Category:", ["Biographical", "Philosophy of Science", "The Courts of Queen Elizabeth & King James", "Major Events during Bacon's Life"])
button2 = st.form_submit_button("Click here to load question bank.")
if field_choice == "Biographical":
field_choice = bio_questions()
elif field_choice == "Philosophy of Science":
field_choice = philosophy_questions()
elif field_choice == "The Courts of Queen Elizabeth & King James":
field_choice = court_questions()
elif field_choice == "Major Events during Bacon's Life":
field_choice = events_questions()
with st.sidebar:
st.write('Explore more about the life and times of Francis Bacon:')
st.write('[Six Degrees of Francis Bacon](http://www.sixdegreesoffrancisbacon.com/), Carnegie Mellon University')
st.write('[Jürgen Klein and Guido Giglioni, "Francis Bacon", The Stanford Encyclopedia of Philosophy](https://plato.stanford.edu/entries/francis-bacon/)')
st.write('[Richard S. Westfall, "Francis Bacon", The Galileo Project, Rice University](http://galileo.rice.edu/Catalog/NewFiles/bacon.html)')
#pygsheets credentials for Google Sheets API
with col2:
bacon_pic = st.image(image='./bacon.png', caption="Portrait of Francis Bacon. National Portrait Gallery, London.")
def button_two():
#Rank Bacon_bot Responses
with col1:
st.write("Rank Bacon's Reply:")
sh1 = gc.open('bacon_outputs_temp')
wks1 = sh1[0]
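# Cell references follow the column order written by temp_output_collection:
# A=user, B=user_id, C=model, D=prompt, E=prompt_boost, F=question, G=output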
submission_text = wks1.get_value('F2')
output = wks1.get_value('G2')
prompt_text = wks1.get_value('D2')
st.subheader('Prompt:')
st.write(prompt_text)
st.subheader('Your Question')
st.write(submission_text)
st.subheader("Bacon's Reply:")
st.write(output)
with st.form('form2'):
bacon_score = st.slider("How much does the reply resemble the style of Francis Bacon?", 0, 10, key='bacon')
worldview_score = st.slider("Is the reply consistent with Bacon's worldview?", 0, 10, key='worldview')
accuracy_rank = st.slider("Does the reply appear factually accurate?", 0, 10, key='accuracy')
coherence_rank = st.slider("How coherent and well-written is the reply?", 0,10, key='coherence')
st.write("Transmitting the rankings takes a few moments. Thank you for your patience.")
submit_button_2 = st.form_submit_button(label='Submit Ranking')
if submit_button_2:
sh1 = gc.open('bacon_outputs_temp')
wks1 = sh1[0]
df = wks1.get_as_df(has_header=True, index_column=None, start='A1', end=('K2'), numerize=False)
name = df['user'][0]
user_id = df['user_id'][0]
model_choice = df['model'][0]
prompt_choice = df['prompt'][0]
prompt_boost = df['prompt_boost'][0]
submission_text = df['question'][0]
output = df['output'][0]
temperature_dial = df['temperature'][0]
response_length = df['response_length'][0]
output_label = df['filter_ranking'][0]
now = dt.now()
ranking_score = [bacon_score, worldview_score, accuracy_rank, coherence_rank]
ranking_average = mean(ranking_score)
def ranking_collection():
d4 = {'user':["0"], 'user_id':[user_id],'model':[model_choice], 'prompt':[prompt_choice], 'prompt_boost':[prompt_boost],'question':[submission_text], 'output':[output], 'temperature':[temperature_dial], 'response_length':[response_length], 'filter_ranking':[output_label], 'bacon_score':[bacon_score], 'worldview_score':[worldview_score],'accuracy_rank':[accuracy_rank], 'coherence':[coherence_rank], 'overall_ranking':[ranking_average], 'date':[now]}
df4 = pd.DataFrame(data=d4, index=None)
sh4 = gc.open('bacon_rankings')
wks4 = sh4[0]
cells4 = wks4.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row4 = len(cells4)
wks4.set_dataframe(df4,(end_row4+1,1), copy_head=False, extend=True)
ranking_collection()
st.write('Rankings recorded - thank you! Feel free to continue your conversation with Francis Bacon.')
with col2:
bacon_pic = st.image(image='./bacon.png', caption="Portrait of Francis Bacon. National Portrait Gallery, London.")
with st.sidebar:
st.write('Explore more about the life and times of Francis Bacon:')
st.write('[Six Degrees of Francis Bacon](http://www.sixdegreesoffrancisbacon.com/), Carnegie Mellon University')
st.write('[Jürgen Klein and Guido Giglioni, "Francis Bacon", The Stanford Encyclopedia of Philosophy](https://plato.stanford.edu/entries/francis-bacon/)')
st.write('[Richard S. Westfall, "Francis Bacon", The Galileo Project, Rice University](http://galileo.rice.edu/Catalog/NewFiles/bacon.html)')
with col1:
st.write("Select the 'Ask Bacon' button to ask the AI questions. Select 'Rank Bacon' to note your impressions of its responses.")
pages = {
0 : button_one,
1 : button_two,
}
if "current" not in st.session_state:
st.session_state.current = None
if st.button("Ask Bacon"):
st.session_state.current = 0
if st.button("Rank Bacon"):
st.session_state.current = 1
if st.session_state.current != None:
pages[st.session_state.current]()
| [
"Question: What do you see as the hallmarks of the New Science?",
"I am a representation of Francis Bacon, a key figure in English history. You can ask me questions about the period in which I lived, and I will answer in the style of Bacon's Novum Organum.",
"I am a representation of Francis Bacon, a key figure in reigns of Queen Elizabeth I and King James I. You can ask me questions about their reigns and I will answer in the style of Bacon's Novum Organum.",
"I am a representation of Francis Bacon, a key figure in the history of early modern Britain. You can ask me biographical questions about Francis Bacon's life and I will reply in the style of Bacon's Novum Organum.",
"PLACEHOLDER PLACEHOLDER",
"<|endoftext|>PLACEHOLDER\n--\nLabel:",
"PLACEHOLDER\n\nPLACEHOLDER\n\nPLACEHOLDER\n\nQuestion:",
"P",
"I am a representation of Francis Bacon, a key figure in the Scientific Revolution. You can ask me questions about the philosophy of science and I will answer in the style of Bacon's Novum Organum.",
"Answer: The New Science (as I would like to call it, so as not to offend the old) has two main indications. The first is to discover the occasions and causes of nature’s productions and actions; the second, by careful and well-ordered experiments (such as are derived from the light of nature), to acquire a competent knowledge of the power and office of each production and action."
] |
2024-01-10 | asurasdevaas/langchain-pinecone-summary | all-in-one~pages~2_URL_Summary.py | import validators, streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import UnstructuredURLLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
# Streamlit app
st.subheader('URL Summary')
url = st.text_input("Enter Source URL")
# If 'Summarize' button is clicked
if st.button("Summarize"):
# Validate inputs
if not openai_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not url:
st.error("Please provide the URL.")
elif not validators.url(url):
st.error("Please enter a valid URL.")
else:
try:
with st.spinner("Please wait..."):
# Load URL data
loader = UnstructuredURLLoader(urls=[url])
data = loader.load()
# Initialize the ChatOpenAI module, load and run the summarize chain
llm = ChatOpenAI(temperature=0, model='gpt-3.5-turbo', openai_api_key=openai_api_key)
prompt_template = """Write a summary of the following in 200-250 words:
{text}
"""
prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt)
summary = chain.run(data)
st.success(summary)
except Exception as e:
st.exception(f"Exception: {e}")
| [
"Write a summary of the following in 200-250 words:\n \n {text}\n\n "
] |
2024-01-10 | jasonph2/polish-passion-project | server~aigenerator.py | from config import OPEN_AI_KEY
from openai import OpenAI
import string
import random
import ast
def with_turbo(conn):
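# Collects every word the user has marked as fully familiar (familiarity = 5), reduces
# them to a set of unique tokens, and asks gpt-3.5-turbo to compose new practice
# sentences using only those words (one phrase per line).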
try:
with conn.cursor() as cursor:
# get all the words the user is familiar with
sql = "SELECT translated_word FROM db.words WHERE familiarity = 5"
cursor.execute(sql)
all = cursor.fetchall()
translator = str.maketrans('', '', string.punctuation)
individual_words = [word.translate(translator) for sentence in all for word in sentence['translated_word'].split()]
individual_words = list(filter(lambda x: x.strip() != '', individual_words))
random.shuffle(individual_words)
individual_words = set(individual_words)
print(individual_words)
# make an OpenAI request
client = OpenAI(api_key=OPEN_AI_KEY)
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You will be given a list of words or phrases in an arbitrary language. Use these words and only these words to formulate sentences, phrases, or questions in this language. Do not give the English translations. Only use the words given. Give your message content with each phrase being seperated by a new-line character"},
{"role": "user", "content": str(individual_words)}
]
)
# print(completion.choices[0].message.content)
generated_content = completion.choices[0].message.content
print(generated_content)
# new_translated_list = generated_content.split("\n")
# new_translated_list = list(filter(lambda x: x != '', new_translated_list))
# print(new_translated_list)
generated_content = generated_content.split("\n")
print(generated_content)
return generated_content
# return ['Teraz jestem gotowy.', 'Lubię uśmiech.', 'Moja siostra nazywa się Lisa.', 'Dzisiaj jestem bardzo podekscytowany.', 'Jeśli potrzebuję pomocy, mogę iść do biblioteki.', 'Chcę iść na imprezę.', 'Moje śniadanie dzisiaj jest bardzo smaczne.', 'Mój stary brat gra w golfa.', 'Idę na mszę.', 'Jason jest studentem.', 'Moja mama robi pyszny obiad.', 'Jak się nazywasz?', 'Czy możesz mi pomóc?', 'Dlaczego jesteś zmęczony?', 'Czy ty jesteś głodny?', 'Czy mogę kupić butelkę wody?', 'Co się dzieje później?', 'Dziękuję za pomoc.', 'Zapomniałem, co chciałem powiedzieć.', 'Czy rodzina jest dla ciebie ważna?', 'Czy masz brata?', 'Potrzebuję kupić makaron.', 'Czytać jest moją ulubioną czynnością.', 'Uczę się angielskiego.', 'Wczoraj byłem w bibliotece.', 'Jak się masz?', 'Czy ty jesteś gotowy na obiad?', 'Czy mogę iść do domu?', 'Kocham cię bardzo.', 'Co jest na stole?', 'Kiedy wracasz do domu?', 'Czy wiesz, gdzie jest mój telefon?', 'Czy mam tylko pięć minut?', 'Rozumiem, że byłeś zajęty.', 'Czy mogę pożyczyć twój długopis?', 'Czy możemy wrócić o osiem?', 'Czy to jest twój telefon?', 'Czy wiesz, gdzie jest moja rodzina?', 'Czy Masz czas na grę w golfa?', 'Czy masz zmęczony po całym dniu?', 'Czy mogę kupić ci piwo?', 'Czy ty mnie znasz?', 'Czy tam jest coś dla mnie w kuchni?', 'Jak wygląda twój tata?', 'Czy masz rodzeństwo?', 'Czy możemy kupić ser?', 'Czy masz coś do powiedzenia?', 'Czy mogę iść na dworzec?', 'Czy mogę iść do stołu?', 'Czy masz czas na naukę dzisiaj?', 'Czy wiesz, gdzie możemy kupić makaron?', 'Czy możemy pójść na zakupy?', 'Czy masz kosz na śmieci?', 'Czy możemy kupić nowy samochód?', 'Czy jesteś gotowy na urodziny?', 'Czy możemy iść na basen?', 'Czy możemy iść na spacer?', 'Czy masz moje zdjęcie?', 'Czy jesteś gotowy na kolację?', 'Czy możemy pić piwo?', 'Czy możemy iść na lunch?', 'Czy możemy iść do parku?', 'Czy możemy iść na obiad?', 'Czy możemy iść na plażę?', 'Czy możemy iść do teatru?', 'Czy to jest bardzo ważne?', 'Czy jesteś gotowy na kino?', 'Czy masz czystą wodę?', 'Czy możemy iść na pizze?', 'Czy mogę iść do twojego domu?', 'Czy masz kod do mojego telefonu?', 'Czy mogę iść na mokro?', 'Czy jesteś zmęczony po całym dniu?', 'Czy masz siedzenie dla mnie?', 'Czy mogę iść na koncert?', 'Czy mogę iść na spacer z tobą?', 'Czy dostanę deser po obiedzie?', 'Czy mogę iść na randkę z tobą?', 'Czy mogę iść na spotkanie?', 'Czy mogę iść na zakupy z tobą?', 'Czy mogę iść na plażę z tobą?', 'Czy masz czas na śniadanie?', 'Czy masz chociaż trochę wódki?', 'Czy mogę iść na cmentarz?', 'Czy mogę iść na zawody?', 'Czy mogę iść na trening?', 'Czy mogę iść do kina?', 'Czy mogę iść na zawody?', 'Czy masz coś do powiedzenia?', 'Czy mogę iść do toalety?', 'Czy mogę iść na basen z tobą?', 'Czy mogę iść na plażę z tobą?', 'Czy mogę iść na spacer z tobą?', 'Czy masz ulubiony deser?', 'Czy masz głowę do meczu?', 'Czy masz czas na pronunciowanie?', 'Czy mogę iść na pewno?', 'Czy mogę iść na studia?', 'Czy mogę iść do sklepu?', 'Czy mogę iść na boisko?', 'Czy mogę iść na przerwę?', 'Czy mogę iść na kawę?', 'Czy mogę iść na kolację?', 'Czy możemy rozmawiać poza tym?', 'Czy możemy iść na spacer?', 'Czy mogę iść na randkę z tobą?', 'Czy mogę iść do domu z tobą?', 'Czy mogę iść na obiad z tobą?', 'Czy mogę iść na drinka z tobą?', 'Czy masz wiadomości o mojej rodzinie?', 'Czy masz jakieś pytania do mnie?', 'Czy możemy się spotkać jutro?', 'Czy mogę pożyczyć twój długopis?', 'Czy możemy być przyjacielami?', 'Czy mogę iść na lunch z tobą?', 'Czy mogę iść na wózek z tobą?', 'Czy można z tobą porozmawiać?', 
# 'Czy mogę iść na powrót?', 'Czy możemy zostać przyjaciółmi?', 'Czy mogę naprawdę iść do domu?']
except Exception as e:
print({"message": f"Error: {str(e)}"})
| [
"You will be given a list of words or phrases in an arbitrary language. Use these words and only these words to formulate sentences, phrases, or questions in this language. Do not give the English translations. Only use the words given. Give your message content with each phrase being seperated by a new-line character"
] |
2024-01-10 | jaocode/open-teddy | open_teddy.py | from operator import truediv
import tts.tts_factory as tts_factory
from ai.openai_chatgpt import OpenAIChatGPT as AI_Engine
import os.path
import sounddevice as sd
import soundfile as sf
def speak(text):
print ("Synthesizing speech.")
audio_file = TTS.create_speech_audio (text)
print ("Speaking.")
data, fs = sf.read(audio_file)
sd.play(data, fs, blocking=True)
def basic_response_processing(query, TTS, AI):
resp = AI.query_ai (query)
print ("Response: " + resp)
speak (resp)
if __name__ == '__main__':
prompt = "The following is a conversation with Teddy Ruxpin. He is funny, creative, clever, and sarcastic at times. Sometimes he will tell stories about his old friends. He likes meeting people and making new friends."
# See tts_factory.py file for available options.
TTS = tts_factory.use_elevenlabs()
AI = AI_Engine(prompt=prompt)
go = True
while go:
print ("I'm ready for your next query.\n")
query = input()
if query == "!exit":
go = False
continue
print ("Processing response.\n")
basic_response_processing(query, TTS, AI)
print ("done") | [
"The following is a conversation with Teddy Ruxpin. He is funny, creative, clever, and sarcastic at times. Sometimes he will tell stories about his old friends. He likes meeting people and making new friends."
] |
2024-01-10 | gochipon/DIS23-c | backend-2~Rapi.py | from flask import Flask, request, jsonify
import openai
from flask_cors import CORS
import json
import os
#my funcs
import libList
myapp = Flask(__name__)
CORS(myapp)
@myapp.route("/chatGPT",methods=["POST"])
def gpt():
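# Builds a persona prefix from a character config (env.json for the default plugin,
# a developer-supplied JSON otherwise) and appends it to the user's prompt before
# calling gpt-3.5-turbo.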
APIKEY = ""
openai.api_key = APIKEY
date = request.json
NowPrompt = date["prompt"]
pluginName = date["pluginName"]
print(NowPrompt,pluginName)
# if pluginName not in dat.funcList or pluginName == "default":
if pluginName == "default":
print("default")
path = "env.json"
with open(path,"r",encoding="utf-8") as f:
env = json.load(f)
env = env["char"][0]
name,age,tail,c,type = env["name"],env["age"],env["tail"],env["c"],env["type"]
prompt = f"あなたの名前は{name}です。{type}という生き物で、{age}歳です。"
t = []
for i in tail:
i = "'" + i + "'"
t.append(i)
s = "語尾には、" + "や".join(t) + "がつきます。"
prompt += s
prompt += c
prompt = NowPrompt + prompt
else:
path = "C:\\Users\\xbzdc\\reazon-internship-backend-2\lib\developer\dog.json"
with open(path,"r",encoding="utf-8") as f:
env = json.load(f)
env = env["char"][0]
name,age,tail,c,type = env["name"],env["age"],env["tail"],env["c"],env["type"]
prompt = f"あなたの名前は{name}です。{type}という生き物で、{age}歳です。"
t = []
for i in tail:
i = "'" + i + "'"
t.append(i)
s = "語尾には、" + "や".join(t) + "がつきます。"
prompt += s
prompt += c
prompt = NowPrompt + prompt
# print("else")
# prompt,img = dat.getFunc(pluginName)
# print(NowPrompt, dat.prompt)
# prompt = NowPrompt + dat.prompt
# print(prompt)
# print(img)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": prompt
}
],
temperature=1,
max_tokens=256,
top_p=1,
#requency_penalty=0,
#presence_penalty=0
)
# response = [{"prompt":response["choices"][0]["message"]["content"]}]
return response["choices"][0]["message"]["content"]
@myapp.route("/upload",methods=["POST"])
def upload():
dat = request.files["file"]
dat.pluginName = dat.filename.replace(".py","")
print(dat.filename, dat.pluginName)
# os.mkdir("lib\\" + dat.filename)
path = os.path.join(*["lib", dat.pluginName, "main.py"])
print(path)
# dat.save("lib\\" + dat.filename + "\\" + "main.py")
dat.save(path)
return "Received: " + dat.filename
if __name__ == "__main__":
dat = libList.libHandler()
myapp.run(port=8080,debug=True) | [
"あなたの名前はPLACEHOLDERです。<class 'type'>という生き物で、PLACEHOLDER歳です。",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | Akirato/LLM-KG-Reasoning | src~gpt_engine.py | import openai
import os
import time
class GPTAnswer:
def __init__(self, modelname="gpt-3.5-turbo"):
openai.organization = os.getenv("OPENAI_ORG_ID")
openai.api_key = os.getenv("OPENAI_API_KEY")
self.modelname = modelname
def generate_answer(self, premise_questions):
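# gpt-3.5* models go through the Chat Completions endpoint one question at a time;
# other models fall back to the legacy Completions endpoint, which accepts the whole
# list of prompts in a single call.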
answers = []
if "gpt-3.5" in self.modelname:
for premise_question in premise_questions:
response = openai.ChatCompletion.create(
model=self.modelname,
messages=[
{"role": "user", "content": premise_question}
],
max_tokens=512,
n=1,
stop=None,
temperature=0,
)
answers.append(response["choices"][0].message.content)
else:
completion = openai.Completion.create(
engine=self.modelname,
prompt=premise_questions,
max_tokens=512,
n=1,
stop=None,
temperature=0,
)
for choice in completion.choices:
answers.append(choice.text)
return answers
def log_answer(self, qtype, premise_questions={}, output_path=""):
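# Writes one "<qtype>_<question_id>_predicted_answer.txt" file per question under output_path;
# the 60-second sleep is presumably a crude rate limit between API batches.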
question_ids = list(premise_questions.keys())
premise_questions = list(premise_questions.values())
predicted_answers = self.generate_answer(premise_questions)
time.sleep(60)
for idx, prediction in enumerate(predicted_answers):
with open(os.path.join(f"{output_path}",f"{qtype}_{question_ids[idx]}_predicted_answer.txt"),"w") as prediction_file:
print(prediction, file=prediction_file) | [] |
2024-01-10 | iamaziz/chat_with_images | multimodal_app.py | import streamlit as st
from langchain.llms import Ollama
from multimodal_ollama import convert_to_base64, plt_img_base64
# centered page layout
st.set_page_config(layout="centered")
st.title("Chat with Images locally hosted")
st.subheader("Multi-modal LLMs. Streamlit + Ollama + LangChain = 🚀🫶")
# choose model
model = st.selectbox("Choose a model", ["llava", "llava:13b", "bakllava", "bakllava:7b"])
st.session_state["model"] = model
# chatbot stuff
st.markdown("---")
# upload images
def upload_image():
images = st.file_uploader("Upload an image to chat about", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
# assert max number of images, e.g. 7
assert len(images) <= 7, (st.error("Please upload at most 7 images"), st.stop())
if images:
# convert images to base64
images_b64 = []
for image in images:
image_b64 = convert_to_base64(image)
images_b64.append(image_b64)
# display images in multiple columns
cols = st.columns(len(images_b64))
for i, col in enumerate(cols):
col.markdown(f"**Image {abs((i+1)-len(cols))+1}**")
col.markdown(plt_img_base64(images_b64[i]), unsafe_allow_html=True)
st.markdown("---")
return images_b64
st.stop()
# init session state of the uploaded image
image_b64 = upload_image()
# ask question
q = st.chat_input("Ask a question about the image(s)")
if q:
question = q
else:
# if isinstance(image_b64, list):
if len(image_b64) > 1:
question = f"Describe the {len(image_b64)} images:"
else:
question = "Describe the image:"
# run model
@st.cache_data(show_spinner=False)
def run_llm(question, image_b64, model):
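# The LangChain Ollama wrapper accepts the base64-encoded images via bind(images=[...]),
# so the multimodal model receives them alongside the text question.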
llm_with_image_context = mllm.bind(images=image_b64)
res = llm_with_image_context.invoke(question)
return res
# create mmodel
mllm = Ollama(model=st.session_state["model"])
with st.chat_message("question"):#, avatar="🧑🚀"):
st.markdown(f"**{question}**", unsafe_allow_html=True)
with st.spinner("Thinking..."):
res = run_llm(question, image_b64, model=st.session_state["model"])
with st.chat_message("response"):#, avatar="🤖"):
st.write(res)
| [] |
2024-01-10 | iamaziz/chat_with_images | multimodal_ollama.py | import base64
from io import BytesIO
from PIL import Image
def convert_to_base64(image_file_path):
"""
Load an image file and return it as a Base64-encoded string
:param image_file_path: path to the image file
:return: Base64 string
"""
pil_image = Image.open(image_file_path)
buffered = BytesIO()
pil_image.save(buffered, format="png") # You can change the format if needed
img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
return img_str
def plt_img_base64(img_base64):
"""
Display base64 encoded string as image
:param img_base64: Base64 string
"""
# Create an HTML img tag with the base64 string as the source
image_html = f'<img src="data:image/jpeg;base64,{img_base64}" style="max-width: 100%;"/>'
return image_html
# To display the image by rendering the HTML
# from IPython.display import HTML, display
# display(HTML(image_html))
if __name__ == "__main__":
# https://python.langchain.com/docs/integrations/llms/ollama#multi-modal
# example
file_path = "/Users/aziz/Desktop/style.png"
# pil_image = Image.open(file_path)
# image_b64 = convert_to_base64(pil_image)
# plt_img_base64(image_b64)
image_b64 = convert_to_base64(file_path)
plt_img_base64(image_b64)
# create mmodel
from langchain.llms import Ollama
bakllava = Ollama(model="bakllava")
# run model
llm_with_image_context = bakllava.bind(images=[image_b64])
res = llm_with_image_context.invoke("Describe the image:")
print(res) | [] |
2024-01-10 | Espenbfo/chat_project | backend~flask_server.py | import pika
import time
from pathlib import Path
from flask import Flask, flash, request, redirect, url_for
import uuid
import psycopg2
from openai_functions import respond
UPLOAD_DIRECTORY = Path("files")
UPLOAD_DIRECTORY.mkdir(exist_ok=True)
MAX_AUDIO_PARSE_TIME = 10
POLLING_INTERVAL = 0.3
app = Flask(__name__)
def get_connection():
conn =psycopg2.connect(user="postgres", password="secret",
host="localhost", port="5533",
database="voice")
conn.autocommit = True
return conn
@app.route('/greet', methods=['GET'])
def get_uuid():
print("PostgreSQL server information")
id = uuid.uuid4()
conn = get_connection()
cur = conn.cursor()
cur.execute("INSERT INTO client (client_id, ip) VALUES (%s, %s);", (str(id), request.remote_addr))
cur.execute(
f"SELECT * FROM client;")
print(cur.fetchall())
conn.commit()
cur.close()
conn.close()
return {"id": id}
@app.route('/speak', methods=['POST'])
def upload_file():
if request.method == 'POST':
id = request.form['id']
print("id:", id)
if not len(id):
return
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# If the user does not select a file, the browser submits an
# empty file without a filename.
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file:
file_id = uuid.uuid4()
filename = UPLOAD_DIRECTORY / f"{file_id}.{file.filename.split('.')[-1]}"
print(filename)
file.save(filename)
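# RabbitMQ handshake (assumes a separate speech-to-text worker consumes the "test" queue):
# declare a per-request reply queue named after file_id, publish "<path>|<file_id>" for the
# worker, then poll the reply queue for the transcript for up to MAX_AUDIO_PARSE_TIME
# seconds before deleting the queue.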
connection = pika.BlockingConnection()
channel = connection.channel()
channel.queue_declare(queue=str(file_id))
channel.basic_publish(exchange='', routing_key="test",
body=bytes(str(filename)+"|"+str(file_id), "utf-8"))
text = ""
for i in range(int(MAX_AUDIO_PARSE_TIME/POLLING_INTERVAL)+1):
time.sleep(POLLING_INTERVAL)
method_frame, header_frame, body = channel.basic_get(str(file_id))
if method_frame:
text = bytes.decode(body)
break
channel.queue_delete(queue=str(file_id))
connection.close()
if len(text):
print("New text", text)
conn = get_connection()
cur = conn.cursor()
cur.execute(
f"SELECT query, response FROM session WHERE client_id='{id}' ORDER BY created_time;")
log = cur.fetchall()
cur.close()
response = respond(text, log)
cur = conn.cursor()
cur.execute(
f"INSERT INTO session (client_id, query, response) "
f"VALUES (%s, %s, %s)", (str(id), text, response))
conn.commit()
cur.close()
conn.close()
conn = get_connection()
cur = conn.cursor()
cur.execute(
f"SELECT query, response FROM session WHERE client_id='{id}' ORDER BY created_time;")
print(cur.fetchall())
cur.close()
conn.close()
else:
return {"query": "", "response": ""}
return {"query": text, "response": response } | [] |
2024-01-10 | robbieraphaelday/Tripple-Barrel-Chatbot | faiss_storing.py | import os
import pickle
from langchain.text_splitter import CharacterTextSplitter
import openai
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
with open('openai_api.txt', 'r') as f:
openai_apikey = f.read().strip()
os.environ['OPENAI_API_KEY'] = openai_apikey
def get_text_content(path):
with open(path, 'r') as f:
content = f.read()
return content
def main():
# Get all text files in the 'text_docs' directory
text_files = [f for f in os.listdir('text_docs') if f.endswith('.txt')]
print(f"\nFound {len(text_files)} text files\n")
# Concatenate the contents of all text files
corpus = ""
for text_file in text_files:
corpus += get_text_content(os.path.join('text_docs', text_file))
print("\nFinished processing all text files\n")
# Splitting up the text into smaller chunks for indexing
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200, # striding over the text
length_function=len,
)
texts = text_splitter.split_text(corpus)
print(f"\n\nText split into {len(texts)} chunks\n")
# Download embeddings from OpenAI
print("\nDownloading embeddings from OpenAI\n")
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_texts(texts, embeddings)
print("\nCreated FAISS index\n")
# Save embeddings to a pickle file
with open('embeddings.pickle', 'wb') as f:
pickle.dump(docsearch, f)
print("\nEmbeddings saved to embeddings.pickle\n")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | dpasca/ChatAI | devwork~app_simple.py | import os
import json
import time
from pyexpat.errors import messages
from flask import Flask, redirect, render_template, request, jsonify, session, url_for
from openai import OpenAI
import datetime
# Load configuration from config.json
with open('config.json') as f:
config = json.load(f)
ASSISTANT_NAME = config["ASSISTANT_NAME"]
ASSISTANT_ROLE = "\n".join(config["ASSISTANT_ROLE"])
# Initialize OpenAI API
client = OpenAI(
api_key=os.environ.get("OPENAI_API_KEY"),
)
# Initialize Flask app
app = Flask(__name__)
app.secret_key = os.environ.get("CHATAI_FLASK_SECRET_KEY")
# Messages management
def get_messages():
return session['messages']
def on_messages_change():
session.modified = True
def append_message(message):
get_messages().append(message)
on_messages_change()
#===============================================================================
@app.route('/clear_chat', methods=['POST'])
def clear_chat():
session['messages'] = []
return redirect(url_for('index'))
#===============================================================================
@app.route('/')
def index():
# HACK: for now we always reset the messages
#session['messages'] = []
# if there are no messages in the session, add the role message
if 'messages' not in session:
logmsg("Adding role message")
session['messages'] = []
append_message({"role": "system", "content": ASSISTANT_ROLE})
else:
logmsg("Messages already in session")
# Get the last modified date of app.py
version_date = datetime.datetime.fromtimestamp(os.path.getmtime(__file__)).strftime('%Y-%m-%d %H:%M')
return render_template(
'chat.html',
assistant_name=ASSISTANT_NAME,
messages=get_messages(),
app_version=config["app_version"])
#===============================================================================
def countWordsInMessages():
count = 0
for message in get_messages():
count += len(message["content"].split())
return count
def logmsg(msg):
print(msg)
@app.route('/send_message', methods=['POST'])
def send_message():
# Count the number of words in all the messages
while countWordsInMessages() > 7900 and len(get_messages()) > 3:
# remove the second message
logmsg("Removing message")
get_messages().pop(1)
on_messages_change()
user_message = request.json['message']
# Append user message to messages list
append_message({
"role": "user",
"content": user_message
})
try:
response = client.chat.completions.create(
model=config["model_version"],
#model="gpt-3.5-turbo",
messages=get_messages(),
)
except Exception as e:
logmsg(f"OpenAI API Error: {e}")
return jsonify({'reply': 'Error in processing the request.'}), 500
# Extract AI reply
if response.choices and response.choices[0].message:
ai_reply = response.choices[0].message.content
# Append AI reply to messages list
append_message({
"role": "assistant",
"content": ai_reply
})
return jsonify({'reply': ai_reply}), 200
else:
return jsonify({'reply': 'No response from API.'}), 500
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
| [] |
2024-01-10 | dpasca/ChatAI | devwork~test_langchain.py | import os
import json
import time
from pyexpat.errors import messages
from openai import OpenAI
import datetime
from langchain.agents.openai_assistant import OpenAIAssistantRunnable
assistant = OpenAIAssistantRunnable.create_assistant(
name="Test Assistant",
instructions="You are a very helpful assistant",
tools=[{"type": "code_interpreter"}],
model="gpt-4-1106-preview"
)
output = assistant.invoke({"content": "Hello, my name is Davide"})
for message in output:
#print(message.content[0].text.value)
print(message.content)
output = assistant.invoke({"content": "What's my name again?"})
for message in output:
print(message.content) | [
"Hello, my name is Davide",
"What's my name again?"
] |
2024-01-10 | dpasca/ChatAI | devwork~test_assist.py | import os
import json
import time
from pyexpat.errors import messages
from flask import Flask, redirect, render_template, request, jsonify, session, url_for
from openai import OpenAI
import datetime
# Load configuration from config.json
with open('config.json') as f:
config = json.load(f)
ASSISTANT_NAME = config["ASSISTANT_NAME"]
ASSISTANT_ROLE = "\n".join(config["ASSISTANT_ROLE"])
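# For reference, config.json is expected to provide at least these keys
# (values below are illustrative, not taken from the repo):
#   {"ASSISTANT_NAME": "Test Assistant",
#    "ASSISTANT_ROLE": ["You are a helpful assistant.", "Answer concisely."]}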
# Initialize OpenAI API
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
def list_assistants():
assistant_object = client.beta.assistants.list()
return assistant_object
#def delete_assistant(assistant_id):
# """Delete an assistant by ID."""
# delete_url = f"{BASE_URL}/{assistant_id}"
# response = requests.delete(delete_url, headers=HEADERS)
# if response.status_code == 200:
# print(f"Deleted assistant with ID: {assistant_id}")
# else:
# print(f"Failed to delete assistant with ID: {assistant_id}. Status Code: {response.status_code}")
#
#def delete_all_assistants():
# """Delete all assistants."""
# a_list = list_assistants()
# assitant_obj_list = a_list.data
# for i in range(len(assitant_obj_list)):
# delete_assistant(assitant_obj_list[i].id)
def select_assistant(assistant_id):
# Use the 'beta.assistants' attribute, not 'Assistant'
assistant = client.beta.assistants.retrieve(assistant_id)
return assistant.id
def create_assistant(name, instructions, tools, model):
assistant = client.beta.assistants.create(
name=name,
instructions=instructions,
tools=tools,
model=model
)
return assistant.id # Return the assistant ID
def get_assistant_by_id(assistant_id):
assistant = client.beta.assistants.retrieve(assistant_id)
return assistant.id
def create_thread():
thread = client.beta.threads.create()
return thread
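# Hypothetical usage sketch of the helpers above (not executed here, since it
# would create a real assistant and thread through the beta Assistants API):
#   assistant_id = create_assistant(
#       name=ASSISTANT_NAME,
#       instructions=ASSISTANT_ROLE,
#       tools=[{"type": "code_interpreter"}],
#       model="gpt-4-1106-preview")
#   thread = create_thread()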
print("List of assistants:")
assistants = list_assistants()
for i in range(len(assistants.data)):
ass = assistants.data[i]
print(i)
print(ass.name)
print(ass.id)
print(ass.instructions) | [] |
2024-01-10 | whitemithrandir/Generative_AI | Python%20Codes~InformationRetrievalAssistant.py.py | import os
import openai
import sys
sys.path.append('../..')
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
openai.api_key = os.environ['OPENAI_API_KEY']
class Embedding:
def __init__(self, path):
self.path = path
# def process_data(self):
# loaders = [
# PyPDFLoader(f"{self.path}/docs/cs229_lectures/MachineLearning-Lecture01.pdf"),
# PyPDFLoader(f"{self.path}/docs/cs229_lectures/MachineLearning-Lecture01.pdf"),
# PyPDFLoader(f"{self.path}/docs/cs229_lectures/MachineLearning-Lecture02.pdf"),
# PyPDFLoader(f"{self.path}/docs/cs229_lectures/MachineLearning-Lecture03.pdf")
# ]
# docs_ = []
# for loader in loaders:
# docs_.extend(loader.load())
# text_splitter = RecursiveCharacterTextSplitter(
# chunk_size=1500,
# chunk_overlap=150
# )
# splits = text_splitter.split_documents(docs_)
def VectorStoreEmbedding(self):
persist_directory = f'{self.path}docs/chroma/'
embedding = OpenAIEmbeddings()
vectordb = Chroma(
persist_directory=persist_directory,
embedding_function=embedding
)
return vectordb, embedding
# question = "is there an email i can ask for help"
# result_docs = vectordb.similarity_search(question, k=3)
# return result_docs
class Retrieval:
def __init__(self, path):
self.path = path
# if os.path.isdir(os.path.abspath(persist_directory)) == True:
def metadata(self):
        x = Embedding(self.path)
vectordb, embedding = x.VectorStoreEmbedding()
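        # Each AttributeInfo below describes one metadata field that the
        # self-query retriever may filter on; the LLM uses these descriptions
        # to turn a natural-language question into a structured query plus a
        # metadata filter.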
metadata_field_info = [
AttributeInfo(
name="source",
description=f"The lecture the chunk is from, should be one of `{self.path}/docs/cs229_lectures/MachineLearning-Lecture01.pdf`, `{self.path}/docs/cs229_lectures/MachineLearning-Lecture02.pdf`, or `{self.path}/docs/cs229_lectures/MachineLearning-Lecture03.pdf`",
type="string",
),
AttributeInfo(
name="page",
description="The page from the lecture",
type="integer",
),
]
document_content_description = "Lecture notes"
llm = OpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
llm,
vectordb,
document_content_description,
metadata_field_info,
verbose=True
)
return retriever
# Usage
if __name__ == "__main__":
path = "/home/sabankara/coding/Learning Platform Python/"
embedding_instance = Retrieval(path)
retriever = embedding_instance.metadata()
question = "what did they say about regression in the third lecture?"
print(retriever.get_relevant_documents(question))
| [] |
2024-01-10 | whitemithrandir/Generative_AI | Python%20Codes~LanguageModelAssistant.py.py | import os
import openai
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI # model
from langchain.prompts import ChatPromptTemplate # prompt
from langchain.chains import LLMChain # chain
from langchain.chains.router import MultiPromptChain
from langchain.chains.router.llm_router import LLMRouterChain,RouterOutputParser
from langchain.prompts import PromptTemplate
from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.tools.python.tool import PythonREPLTool
from langchain.python import PythonREPL
from langchain.agents import tool
from datetime import date
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
class Memory:
def __init__(self):
self.conversation = None
def function(self):
llm = ChatOpenAI(temperature=0.0)
memory = ConversationBufferMemory()
conversation = ConversationChain(
llm=llm,
verbose=True,
memory = memory)
self.conversation = conversation
def conv_func(self,string):
x = self.conversation.predict(input = string)
return x
class llmchain:
def __init__(self):
self.MULTI_PROMPT_ROUTER_TEMPLATE = None
self.prompt_infos = None
self.prompt_infos_()
self.prompt_router()
self.function_llm()
def prompt_router(self):
MULTI_PROMPT_ROUTER_TEMPLATE = """Given a raw text input to a \
language model select the model prompt best suited for the input. \
You will be given the names of the available prompts and a \
description of what the prompt is best suited for. \
You may also revise the original input if you think that revising\
it will ultimately lead to a better response from the language model.
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
```json
{{{{
"destination": string \ name of the prompt to use or "DEFAULT"
"next_inputs": string \ a potentially modified version of the original input
}}}}
```
REMEMBER: "destination" MUST be one of the candidate prompt \
names specified below OR it can be "DEFAULT" if the input is not\
well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input \
if you don't think any modifications are needed.
<< CANDIDATE PROMPTS >>
{destinations}
<< INPUT >>
{{input}}
<< OUTPUT (remember to include the ```json)>>"""
self.MULTI_PROMPT_ROUTER_TEMPLATE = MULTI_PROMPT_ROUTER_TEMPLATE
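        # For illustration (hypothetical values): once formatted, {destinations}
        # expands to lines such as
        #   physics: Good for answering questions about physics
        # and the router LLM is expected to reply with a JSON snippet like
        #   {"destination": "physics", "next_inputs": "What is black body radiation?"}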
def prompt_infos_(self):
electronic_engineer_template = """You are a skilled electronic engineer\
specializing in various aspects of electronics and electrical engineering. \
You have a deep understanding of circuit design, microelectronics, and electrical systems. \
Your expertise allows you to provide clear and precise answers to questions related to electronic engineering. \
However, when faced with a question beyond your knowledge, you readily admit it. \
Here is a question:
{input}"""
physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise\
and easy to understand manner. \
When you don't know the answer to a question you admit\
that you don't know.
Here is a question:
{input}"""
biology_template = """You are a knowledgeable biologist with expertise in various aspects of biology. \
You excel at providing clear and insightful answers to questions related to biology. \
However, if you ever come across a question you can't answer, you openly acknowledge it.
Here is a question:
{input}"""
math_template = """You are a very good mathematician. \
You are great at answering math questions. \
You are so good because you are able to break down \
hard problems into their component parts,
answer the component parts, and then put them together\
to answer the broader question.
Here is a question:
{input}"""
history_template = """You are a very good historian. \
You have an excellent knowledge of and understanding of people,\
events and contexts from a range of historical periods. \
You have the ability to think, reflect, debate, discuss and \
evaluate the past. You have a respect for historical evidence\
and the ability to make use of it to support your explanations \
and judgements.
Here is a question:
{input}"""
computerscience_template = """ You are a successful computer scientist.\
You have a passion for creativity, collaboration,\
forward-thinking, confidence, strong problem-solving capabilities,\
understanding of theories and algorithms, and excellent communication \
skills. You are great at answering coding questions. \
You are so good because you know how to solve a problem by \
describing the solution in imperative steps \
that a machine can easily interpret and you know how to \
choose a solution that has a good balance between \
time complexity and space complexity.
Here is a question:
{input}"""
prompt_infos = [
{
"name": "electronics",
"description": "Good for answering questions about electronic circuits and devices",
"prompt_template": electronic_engineer_template
},
{
"name": "physics",
"description": "Good for answering questions about physics",
"prompt_template": physics_template
},
{
"name": "math",
"description": "Good for answering math questions",
"prompt_template": math_template
},
{
"name": "History",
"description": "Good for answering history questions",
"prompt_template": history_template
},
{
"name": "computer science",
"description": "Good for answering computer science questions",
"prompt_template": computerscience_template
}
]
self.prompt_infos = prompt_infos
def function_llm(self):
llm = ChatOpenAI(temperature=0)
destination_chains = {}
for p_info in self.prompt_infos:
name = p_info["name"]
prompt_template = p_info["prompt_template"]
prompt = ChatPromptTemplate.from_template(template=prompt_template)
chain = LLMChain(llm=llm, prompt=prompt)
destination_chains[name] = chain
destinations = [f"{p['name']}: {p['description']}" for p in self.prompt_infos]
destinations_str = "\n".join(destinations)
default_prompt = ChatPromptTemplate.from_template("{input}")
default_chain = LLMChain(llm=llm, prompt=default_prompt)
self.destinations_str = destinations_str
self.default_chain = default_chain
self.llm = llm
self.destination_chains = destination_chains
def router_chain(self, string):
router_template = self.MULTI_PROMPT_ROUTER_TEMPLATE.format(
destinations=self.destinations_str
)
router_prompt = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(),
)
router_chain = LLMRouterChain.from_llm(self.llm, router_prompt)
chain = MultiPromptChain(router_chain=router_chain,
destination_chains=self.destination_chains,
default_chain=self.default_chain, verbose=True
)
c = chain.run(string)
return c
class LLMtool:
def __init__(self):
self.test()
def test(self):
llm = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math","wikipedia"], llm=llm)
agent= initialize_agent(
tools + [self.time],
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
handle_parsing_errors=True,
verbose = True)
try:
result = agent("whats the date today?")
except:
print("exception on external access")
@tool
def time(text: str) -> str:
"""Returns todays date, use this for any \
questions related to knowing todays date. \
The input should always be an empty string, \
and this function will always return todays \
date - any date mathmatics should occur \
outside this function."""
return str(date.today())
if __name__ == "__main__":
instance = LLMtool()
if __name__ == "__main__":
instance = llmchain()
str_list = ["What is black body radiation?", "What is the importance of Fourier transform in electronics engineering?"]
for string in str_list:
x = instance.router_chain(string)
print("\n")
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print(x)
if __name__ == "__main__":
instance = Memory()
instance.function()
str_list = ["Hi, my name is Şaban", "What is 1+1?", "What is my name?"]
for string in str_list:
x = instance.conv_func(string)
print(x)
| [
"next_inputs",
"{input}",
"You are a skilled electronic engineer specializing in various aspects of electronics and electrical engineering. You have a deep understanding of circuit design, microelectronics, and electrical systems. Your expertise allows you to provide clear and precise answers to questions related to electronic engineering. However, when faced with a question beyond your knowledge, you readily admit it. \n Here is a question:\n {input}",
"destination",
"Returns todays date, use this for any questions related to knowing todays date. The input should always be an empty string, and this function will always return todays date - any date mathmatics should occur outside this function.",
"You are a very good historian. You have an excellent knowledge of and understanding of people, events and contexts from a range of historical periods. You have the ability to think, reflect, debate, discuss and evaluate the past. You have a respect for historical evidence and the ability to make use of it to support your explanations and judgements.\n\n Here is a question:\n {input}",
"input",
"You are a very smart physics professor. You are great at answering questions about physics in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know.\n\n Here is a question:\n {input}",
"[{'name': 'electronics', 'description': 'Good for answering questions about electronic circuits and devices', 'prompt_template': 'You are a skilled electronic engineer specializing in various aspects of electronics and electrical engineering. You have a deep understanding of circuit design, microelectronics, and electrical systems. Your expertise allows you to provide clear and precise answers to questions related to electronic engineering. However, when faced with a question beyond your knowledge, you readily admit it. \\n Here is a question:\\n {input}'}, {'name': 'physics', 'description': 'Good for answering questions about physics', 'prompt_template': \"You are a very smart physics professor. You are great at answering questions about physics in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know.\\n\\n Here is a question:\\n {input}\"}, {'name': 'math', 'description': 'Good for answering math questions', 'prompt_template': 'You are a very good mathematician. You are great at answering math questions. You are so good because you are able to break down hard problems into their component parts, \\n answer the component parts, and then put them together to answer the broader question.\\n\\n Here is a question:\\n {input}'}, {'name': 'History', 'description': 'Good for answering history questions', 'prompt_template': 'You are a very good historian. You have an excellent knowledge of and understanding of people, events and contexts from a range of historical periods. You have the ability to think, reflect, debate, discuss and evaluate the past. You have a respect for historical evidence and the ability to make use of it to support your explanations and judgements.\\n\\n Here is a question:\\n {input}'}, {'name': 'computer science', 'description': 'Good for answering computer science questions', 'prompt_template': ' You are a successful computer scientist. You have a passion for creativity, collaboration, forward-thinking, confidence, strong problem-solving capabilities, understanding of theories and algorithms, and excellent communication skills. You are great at answering coding questions. You are so good because you know how to solve a problem by describing the solution in imperative steps that a machine can easily interpret and you know how to choose a solution that has a good balance between time complexity and space complexity. \\n\\n Here is a question:\\n {input}'}]",
"You are a knowledgeable biologist with expertise in various aspects of biology. You excel at providing clear and insightful answers to questions related to biology. However, if you ever come across a question you can't answer, you openly acknowledge it.\n\n Here is a question:\n {input}",
"Given a raw text input to a language model select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.\n\n << FORMATTING >>\n Return a markdown code snippet with a JSON object formatted to look like:\n ```json\n {{{{\n \"destination\": string \\ name of the prompt to use or \"DEFAULT\"\n \"next_inputs\": string \\ a potentially modified version of the original input\n }}}}\n ```\n\n REMEMBER: \"destination\" MUST be one of the candidate prompt names specified below OR it can be \"DEFAULT\" if the input is not well suited for any of the candidate prompts.\n REMEMBER: \"next_inputs\" can just be the original input if you don't think any modifications are needed.\n\n << CANDIDATE PROMPTS >>\n {destinations}\n\n << INPUT >>\n {{input}}\n\n << OUTPUT (remember to include the ```json)>>",
"You are a very good mathematician. You are great at answering math questions. You are so good because you are able to break down hard problems into their component parts, \n answer the component parts, and then put them together to answer the broader question.\n\n Here is a question:\n {input}",
"prompt_template",
" You are a successful computer scientist. You have a passion for creativity, collaboration, forward-thinking, confidence, strong problem-solving capabilities, understanding of theories and algorithms, and excellent communication skills. You are great at answering coding questions. You are so good because you know how to solve a problem by describing the solution in imperative steps that a machine can easily interpret and you know how to choose a solution that has a good balance between time complexity and space complexity. \n\n Here is a question:\n {input}"
] |
2024-01-10 | agarwl/neural-symbolic-machines | nsm~graph_factory.py | """Implements several tensorflow graphs and capsulate them as Graph."""
from __future__ import division
import collections
import os
import time
import numpy as np
import tensorflow as tf
from nsm import data_utils
from nsm import score_utils
from nsm import tf_utils
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
RNN_CELL_DICT = dict(
rnn=tf.contrib.rnn.RNNCell,
lstm=tf.contrib.rnn.BasicLSTMCell,
layernorm_lstm=tf.contrib.rnn.LayerNormBasicLSTMCell,
gru=tf.contrib.rnn.GRUCell)
OPTIMIZER_DICT = dict(
sgd=tf.train.GradientDescentOptimizer,
adam=tf.train.AdamOptimizer,
momentum=tf.train.MomentumOptimizer,
adagrad=tf.train.AdagradOptimizer,
rmsprop=tf.train.RMSPropOptimizer)
ACTIVATION_DICT = dict(relu=tf.nn.relu, sigmoid=tf.nn.sigmoid, tanh=tf.nn.tanh)
# Graph replace fn
graph_replace = tf.contrib.graph_editor.graph_replace
# Bind a variable length tensor with its sequence_length.
SeqTensor = collections.namedtuple('SeqTensor', ['tensor', 'sequence_length'])
def with_graph_variable_scope(func):
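  """Decorator: run the wrapped Graph method inside the graph's own tf.Graph,
  a per-process resource container (named after the worker pid), and the
  graph's variable scope, so every op and variable it creates is namespaced."""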
def func_wrapper(*args, **kwargs):
self = args[0]
with self._graph.as_default():
pid = os.getpid()
container_name = 'worker{}'.format(pid)
# print(container_name)
with self._graph.container(container_name):
with tf.variable_scope(self.vs):
return func(*args, **kwargs)
return func_wrapper
class Graph(object):
"""A TensorFlow graph with simpler interface to interact with it.
The neural network architecture (basically all the
tensorflow code) should live within this class. A new
architecture (for example, Seq2seq) should implement a new
subclass (Seq2seqGraph).
"""
def __init__(self, name):
self.node_dict = {'summaries': []}
self._graph = tf.Graph()
self.vs_name = name
self.meta_learn = False
self.use_gpu = False
with tf.variable_scope(name) as vs:
self.vs = vs
@property
def graph(self):
return self._graph
@with_graph_variable_scope
def launch(self,
init_model_path='',
trainable_only=True,
ckpt_from_another=False,
init_score_path=None):
"""Launch and initialize the graph."""
if self.use_gpu:
n_gpu = 1
else:
n_gpu = 0
session_config = tf.ConfigProto(
device_count={'GPU': n_gpu},
        allow_soft_placement=True,
log_device_placement=False,
)
if n_gpu:
session_config.gpu_options.allow_growth = True
tf.logging.info('number of gpu used {}'.format(n_gpu))
self.session = tf.Session(graph=self._graph, config=session_config)
self.saver = tf.train.Saver(tf.global_variables())
init = tf.global_variables_initializer()
self.session.run(init)
def check_vars(name):
name_list = name.split('/')
for x in ['score_fn', 'Training']:
if x in name_list:
return False
return True
if trainable_only:
variables_to_restore = tf.trainable_variables()
variables_to_restore = [
v for v in variables_to_restore if check_vars(v.name)
]
saver = tf.train.Saver(variables_to_restore)
elif ckpt_from_another:
# Hack for loading a model trained on cloud machine.
variables_to_restore = tf.global_variables()
variables_to_restore = [
v for v in variables_to_restore
if (check_vars(v.name) and v != self.node_dict['global_step'])
]
saver = tf.train.Saver(variables_to_restore)
else:
saver = self.saver
if init_model_path:
saver.restore(self.session, init_model_path)
if init_score_path:
score_variables = [
v for v in tf.global_variables() if 'score_fn' in v.name.split('/')
]
score_saver = tf.train.Saver(score_variables)
score_saver.restore(self.session, init_score_path)
self._graph.finalize()
return self.session
def restore(self, model_path):
self.saver.restore(self.session, model_path)
def save(self, model_path, global_step):
return self.saver.save(self.session, model_path, global_step)
def run(self, fetch_list, feed_dict, writer=None):
"""Main interface to interact with the tensorflow graph.
Args:
      fetch_list: a list of names (strings) indicating the result
        operations to fetch.
feed_dict: a dictionary with the names of the nodes as keys and the
corresponding values that are fed as values.
writer: a tensorflow summary writer
Returns:
outputs: a dictionary with the names in the fetch_list as
keys, and the outputs from the executing graph as values.
"""
fetch_dict = dict([(name, self.node_dict[name])
for name in fetch_list
if name in self.node_dict])
if writer is not None:
fetch_dict['summaries'] = self.node_dict['summaries']
fetch_dict['global_step'] = self.node_dict['global_step']
outputs = self.session.run(fetch_dict, map_dict(self.node_dict, feed_dict))
if (writer is not None) and self._plot_summaries:
writer.add_summary(outputs['summaries'], outputs['global_step'])
writer.flush()
return outputs
@with_graph_variable_scope
def add_train(self,
aux_loss_list=None,
optimizer='adam',
learning_rate=0.01,
max_grad_norm=5.0,
decay_after_n_steps=1000,
decay_every_n_steps=1000,
lr_decay_factor=1.0,
debug=False,
l2_coeff=0.0,
adam_beta1=0.9,
meta_lr=1e-3,
momentum=0.9,
plot_summaries=True,
name='Training'):
"""Construct part of the graph that controls training (SGD optimization)."""
self.node_dict['max_batch_size'] = tf.placeholder(tf.int32, None)
self._plot_summaries = plot_summaries
with tf.variable_scope(name):
global_step = tf.Variable(0, trainable=False, dtype=tf.int32)
self.node_dict['global_step'] = global_step
if self.eval_graph:
return
all_summaries = []
batch_size = tf.cast(self.node_dict['max_batch_size'], dtype=tf.float32)
# No need to divide by batch size since the scores are already normalized
loss = self.node_dict['loss'] # / batch_size
if self.meta_learn:
loss_original = self.node_dict['loss_nometa']
all_summaries.append(
tf.summary.scalar(self.vs_name + '/' + 'loss_orig', loss_original))
all_summaries.append(tf.summary.scalar(self.vs_name + '/' + 'loss', loss))
total_loss = loss
if aux_loss_list is not None:
for loss_name, w in aux_loss_list:
if w: # Consider non-zero coefficients which can be negative too
aux_loss = self.node_dict[loss_name]
if loss_name == 'ent_reg':
aux_loss *= -1 # Since we want to maximize the entropy
aux_loss *= w / batch_size
total_loss += aux_loss
aux_loss_summary = tf.summary.scalar(self.vs_name + '/' + loss_name,
aux_loss)
all_summaries.append(aux_loss_summary)
if debug:
total_loss = tf.Print(
total_loss, [self.node_dict['sequence_loss']],
message='seq_loss:',
summarize=10000)
total_loss = tf.Print(
total_loss, [self.node_dict['weights']],
message='weights:',
summarize=10000)
total_loss = tf.Print(
total_loss, [self.node_dict['targets'].tensor],
message='targets:',
summarize=10000)
total_loss = tf.Print(
total_loss, [self.node_dict['probs'].tensor],
message='probs:',
summarize=10000)
total_loss = tf.Print(
total_loss, [self.node_dict['logits'].tensor],
message='logits:',
summarize=10000)
if self.meta_learn:
total_loss = tf.Print(
total_loss, [self.node_dict['scores']],
message='scores:',
summarize=10000)
total_loss_summary = tf.summary.scalar(self.vs_name + '/' + 'total_loss',
total_loss)
all_summaries.append(total_loss_summary)
lr = tf.Variable(
float(learning_rate),
trainable=False,
name='learning_rate',
constraint=tf.keras.constraints.non_neg())
new_lr = tf.placeholder(dtype=tf.float32, shape=(), name='new_lr')
update_lr = lr.assign(new_lr)
meta_lr = tf.Variable(float(meta_lr), trainable=False)
update_meta_lr = meta_lr.assign(new_lr)
lr_summary = tf.summary.scalar(self.vs_name + '/' + 'learning_rate', lr)
all_summaries.append(lr_summary)
meta_hparams = []
all_params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.vs_name)
score_fn_vars = [v for v in all_params if 'score_fn' in v.name.split('/')]
meta_vars = score_fn_vars + meta_hparams
params = [v for v in all_params if v not in meta_vars]
n_params = 0
tf.logging.info('trainable parameters:')
for tv in params:
n_tv_params = np.product(tv.get_shape().as_list())
n_params += n_tv_params
tf.logging.info('{}: {}'.format(tv.name, n_tv_params))
if 'weights' in tv.name or 'kernel' in tv.name:
total_loss += tf.reduce_sum(tf.nn.l2_loss(tv)) * l2_coeff
tf.logging.info(
'total number of trainable parameters {}'.format(n_params))
tf.logging.info('Calculate gradients wrt model params...')
scores = self.node_dict['scores']
log_scores = self.node_dict['log_scores']
score_node = log_scores if self._use_log_scores else scores
gradients = tf.gradients(total_loss, params, stop_gradients=[score_node])
clipped_gradients, grad_norm = tf.clip_by_global_norm(
gradients, max_grad_norm)
if optimizer == 'adam':
tf.logging.info('adam beta1: {}'.format(adam_beta1))
opt = OPTIMIZER_DICT[optimizer](lr, beta1=adam_beta1)
elif optimizer == 'momentum':
tf.logging.info('Using momentum optimizer')
opt = OPTIMIZER_DICT[optimizer](lr, momentum=momentum)
else:
opt = OPTIMIZER_DICT[optimizer](lr)
# Create the update op for theta (model parameters)
update = opt.apply_gradients(
zip(clipped_gradients, params), global_step=global_step)
if self.meta_learn:
t1 = time.time()
if optimizer == 'momentum':
accum = [opt.get_slot(p, 'momentum') for p in params]
grads = [
momentum * acc + g for (acc, g) in zip(accum, clipped_gradients)
]
else:
grads = clipped_gradients
# Create the meta training loss
updated_params = [p - lr * g for p, g in zip(params, grads)]
replaced_params = dict(zip([p.value() for p in params], updated_params))
self.create_val_meta_loss(replaced_params)
val_meta_loss = self.node_dict['val_meta_loss']
tf.logging.info('Creating meta optimizer...')
meta_opt = tf.train.AdamOptimizer(learning_rate=meta_lr)
# Calculate the partial gradients wrt scores only for the meta
# validation loss
# Used score_node because tensorflow can't handle indirect dependency
# structure for calculating gradients
# Example: y = x + 1; z = x + 2; tf.gradients(y, z) --> returns None
score_grads = tf.gradients(val_meta_loss, score_node)
clipped_score_grads, score_grad_norm = tf.clip_by_global_norm(
score_grads, max_grad_norm)
# Optimize only the score function variables using the meta optimizer
meta_gradients = tf.gradients([score_node],
score_fn_vars,
grad_ys=clipped_score_grads)
meta_clipped_gradients, meta_grad_norm = tf.clip_by_global_norm(
meta_gradients, max_grad_norm)
if meta_hparams:
meta_hparams_grad = tf.gradients(val_meta_loss, meta_hparams)
meta_clipped_gradients += meta_hparams_grad
update_score_fn = meta_opt.apply_gradients(
zip(meta_clipped_gradients, meta_vars))
self.node_dict.update(meta_train=update_score_fn)
t2 = time.time()
tf.logging.info('Time taken for meta learning setup {}'.format(t2 - t1))
grad_norm_summary = tf.summary.scalar(self.vs_name + '/' + 'grad_norm',
grad_norm)
all_summaries.append(grad_norm_summary)
# Summaries for meta learning related stuff
if self.meta_learn:
val_loss_summary = tf.summary.scalar(
'val_loss', self.node_dict['val_loss'], family='meta_train')
meta_grad_norm_summary = tf.summary.scalar(
'meta_grad_norm', meta_grad_norm, family='meta_train')
score_grad_norm_summary = tf.summary.scalar(
'score_grad_norm', score_grad_norm, family='meta_train')
scores_summary = tf.summary.histogram(
'scores', scores, family='meta_train')
all_summaries.extend([
val_loss_summary, meta_grad_norm_summary, score_grad_norm_summary,
scores_summary
])
# Code for logging the feature weights for the linear softmax case
if self.score_fn.score_model == 'linear':
weight_summaries = []
for v in score_fn_vars:
tensor_name = v.name.split('/')[-1]
if 'weights' in tensor_name:
weight_summaries += [
tf.summary.scalar(
'w{}'.format(i), v[i], family='linear_score_fn')
for i in range(self.score_fn.num_features)
]
elif 'alpha' in tensor_name:
weight_summaries.append(
tf.summary.scalar('alpha', v, family='linear_score_fn'))
elif 'bias' in tensor_name:
weight_summaries.append(
tf.summary.scalar('bias', v[0], family='linear_score_fn'))
all_summaries.extend(weight_summaries)
if debug:
_, clipped_grad_norm = tf.clip_by_global_norm(clipped_gradients,
max_grad_norm)
clipped_grad_norm_summary = tf.summary.scalar(
self.vs_name + '/' + 'clipped_grad_norm', clipped_grad_norm)
n_summary = tf.summary.scalar(self.vs_name + '/' + 'n',
self.node_dict['n'])
seq_loss_summary = tf.summary.histogram(self.vs_name + '/' + 'seq_loss',
self.node_dict['sequence_loss'])
weights_summary = tf.summary.histogram(self.vs_name + '/' + 'weights',
self.node_dict['weights'])
all_summaries += [
clipped_grad_norm_summary, n_summary, seq_loss_summary,
weights_summary
]
if self.meta_learn:
total_loss = tf.Print(
total_loss, [score_grads],
message='score_grads:',
summarize=10000)
batch_size_summary = tf.summary.scalar(self.vs_name + '/' + 'batch_size',
self.node_dict['batch_size'])
all_summaries.append(batch_size_summary)
if 'ent_reg' in self.node_dict:
ent_reg_summary = tf.summary.scalar(
          self.vs_name + '/' + 'policy_entropy',
(self.node_dict['ent_reg'] / tf.cast(self.node_dict['n'],
tf.float32)))
ent_reg_ppl_summary = tf.summary.scalar(
self.vs_name + '/' + 'policy_entropy_ppl',
tf.exp(self.node_dict['ent_reg'] / tf.cast(self.node_dict['n'],
tf.float32)))
all_summaries.append(ent_reg_summary)
all_summaries.append(ent_reg_ppl_summary)
if self._plot_summaries:
for s in self.node_dict['summaries']:
all_summaries.append(s)
merged = tf.summary.merge(inputs=all_summaries)
else:
merged = tf.no_op(name='no_summary_op')
self.node_dict.update(
train=update,
global_step=global_step,
summaries=merged,
update_lr=update_lr,
update_meta_lr=update_meta_lr,
new_lr=new_lr)
@property
def final_state(self):
return 'final_state'
@property
def outputs(self):
return 'outputs'
@property
def initial_state(self):
return 'initial_state'
@property
def en_outputs(self):
return 'en_outputs'
@property
def n_examples(self):
return 'n_examples'
@property
def prediction_probs(self):
return 'probs'
@property
def samples(self):
return 'samples'
@property
def predictions(self):
return 'predictions'
@property
def en_initial_state(self):
return 'en_initial_state'
def add_outputs(self, output_type, output_config):
    """Create part of the graph that computes final outputs from the RNN output."""
if output_type == 'softmax':
self.add_softmax_outputs(**output_config)
# self.add_val_softmax_outputs(**output_config)
elif output_type == 'regression':
self.add_regression_outputs(**output_config)
else:
raise NotImplementedError(
'Output type {} not supported!'.format(output_type))
@with_graph_variable_scope
def add_softmax_outputs(self,
output_vocab_size=None,
use_logits=None,
sampling_strategy='probs',
name='Softmax'):
"""Add softmax layer on top of RNN outputs."""
maxlen = self.node_dict['outputs'].tensor.shape.as_list()[1]
with tf.variable_scope(name):
seq_targets = create_seq_inputs(
shape=tf.TensorShape([None, maxlen]), dtype=tf.int32)
if self.meta_learn:
self.node_dict['val_targets'] = create_seq_inputs(
shape=tf.TensorShape([None, maxlen]),
dtype=tf.int32,
name='val_targets')
if use_logits:
# Feeding logits instead of outputs (thus no linear transformation needed).
logits, probs, predictions, samples, temperature = create_softmax_from_logits(
self.node_dict['outputs'].tensor)
else:
logits, probs, predictions, samples, temperature = create_softmax(
self.node_dict['outputs'].tensor,
output_vocab_size=output_vocab_size)
sequence_length = self.node_dict['outputs'].sequence_length
      # From openai baselines, to avoid numerical issues.
a0 = logits - tf.reduce_max(logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
clipped_entropy = p0 * (tf.log(z0) - a0)
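      # Derivation: with log p_i = a0_i - log(z0), the per-step entropy
      # H(p) = -sum_i p_i * log p_i = sum_i p0_i * (log(z0) - a0_i),
      # i.e. clipped_entropy summed over the vocab axis (done just below,
      # masked by the sequence length).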
seq_entropy = (
tf.reduce_sum(clipped_entropy, axis=-1) * tf.sequence_mask(
sequence_length, maxlen=maxlen, dtype=tf.float32))
policy_entropy = tf.reduce_sum(
tf.reduce_sum(clipped_entropy, axis=-1) * tf.sequence_mask(
sequence_length, maxlen=maxlen, dtype=tf.float32))
seq_logits, seq_probs, seq_predictions, seq_samples = [
SeqTensor(x, sequence_length)
for x in (logits, probs, predictions, samples)
]
# Compute the probs
sequence_probs, sequence_logprobs, step_logprobs = create_probs(
seq_logits.tensor, seq_targets.tensor, sequence_length)
sequence_neg_logprobs = -1 * sequence_logprobs
if not self.eval_graph:
# Compute sequence cross entropy loss.
with tf.name_scope('cross_entropy_loss'):
weights = tf.placeholder(name='weights', shape=[None], dtype=tf.float32)
baselines = tf.placeholder(
name='baselines', shape=[None], dtype=tf.float32)
# Code for using score_fn as true reward
batch_size = self.node_dict['batch_size']
with tf.variable_scope('score_fn', reuse=tf.AUTO_REUSE):
scores, log_scores = self.score_fn.get_scores(n=batch_size)
self.node_dict.update(scores=scores, log_scores=log_scores)
self._use_log_scores = (sampling_strategy !=
'probs') or self.score_fn.is_linear_softmax
if sampling_strategy != 'probs':
weights_to_use = weights
if sampling_strategy == 'reward':
# Sampling according to the reward distribution
unweighted_loss = tf_utils.dice(log_scores) * sequence_neg_logprobs
elif sampling_strategy == 'probs_and_reward':
# Sampling according to the distribution induced by product of
# rewards and probs
unweighted_loss = -tf_utils.dice(log_scores + sequence_logprobs)
elif sampling_strategy == 'st_estimator':
weights_to_use = tf_utils.st_estimator(
1.0, weights * tf_utils.dice(log_scores))
unweighted_loss = sequence_neg_logprobs
elif sampling_strategy == 'urex':
# The first half of the batch corresponds to sampling using the
# scores while the second half of the batch is sampled using the
# policy model
loss_score_sampling = tf_utils.dice(
log_scores) * sequence_neg_logprobs
loss_model_sampling = -tf_utils.dice(sequence_logprobs) * log_scores
batch_mask = tf.sequence_mask(
lengths=batch_size // 2,
maxlen=batch_size,
dtype=tf.float32,
name='batch_mask')
unweighted_loss = batch_mask * loss_score_sampling + \
(1.0 - batch_mask) * loss_model_sampling
else:
# dirac_delta = lambda x: tf.cond(
# tf.equal(x, 0.0), lambda: 1.0, lambda: 0.0)
# scores_sum = tf.reduce_sum(scores)
# scores_normalization = scores_sum + dirac_delta(scores_sum)
# scores_to_use = scores / tf.stop_gradient(scores_normalization)
if self.score_fn.is_linear_softmax:
scores_to_use = log_scores
else:
scores_to_use = scores
weights_to_use = weights * scores_to_use
unweighted_loss = -tf_utils.dice(sequence_logprobs)
sequence_loss = weights_to_use * unweighted_loss
# if sampling_strategy == 'probs':
# xent_loss = tf.reduce_mean(sequence_loss)
# else:
xent_loss = tf.reduce_mean(sequence_loss)
self.node_dict.update(
sequence_loss=sequence_loss,
loss=xent_loss,
weights=weights,
baselines=baselines)
if self.meta_learn:
# Create this loss to be used for creating val loss via
# `graph_replace`, also used for plotting on tensorboard
xent_loss_nometa = tf.reduce_mean(
weights * sequence_neg_logprobs, name='loss_nometa')
val_weights = tf.placeholder(
name='val_weights', shape=[None], dtype=tf.float32)
self.node_dict.update(
val_weights=val_weights, loss_nometa=xent_loss_nometa)
# Add new nodes to the node_dict.
self.node_dict.update(
targets=seq_targets,
temperature=temperature,
ent_reg=policy_entropy,
seq_entropy=seq_entropy,
probs=seq_probs,
sequence_probs=sequence_probs,
sequence_logprobs=sequence_logprobs,
step_logprobs=step_logprobs,
samples=seq_samples,
predictions=seq_predictions,
logits=seq_logits)
def create_val_meta_loss(self, replaced_params):
"""Run graph replace to create the meta learning loss."""
replacement_tuples = []
for key in [
'targets', 'inputs', 'en_inputs', 'en_input_features',
'output_features', 'n_constants', 'constant_spans',
'constant_value_embeddings', 'context', 'batch_size', 'weights'
]:
if key not in self.node_dict:
continue
val_key = 'val_{}'.format(key)
x, y = self.node_dict[key], self.node_dict[val_key]
if isinstance(x, tuple):
if isinstance(x.tensor, tuple):
replacement_tuples += zip(x.tensor, y.tensor)
replacement_tuples += [(x.sequence_length, y.sequence_length)]
else:
replacement_tuples += zip(x, y)
else:
replacement_tuples += [(x, y)]
replacement_ts = dict(replacement_tuples)
# Fix the dropout values to be zero, for deterministic validation loss
dropout_placeholders = ['rnn_dropout', 'en_rnn_dropout', 'en_input_dropout']
zero_tensor = tf.constant(0.0)
replacement_ts.update(
{self.node_dict[pc]: zero_tensor for pc in dropout_placeholders})
with tf.name_scope('validation'):
tf.logging.info('Running graph replace for creating val loss...')
val_loss = graph_replace(self.node_dict['loss_nometa'], replacement_ts)
tf.logging.info('Running graph replace for meta val loss...')
val_meta_loss = graph_replace(val_loss, replaced_params)
self.node_dict.update(val_loss=val_loss, val_meta_loss=val_meta_loss)
class SeqGraph(Graph):
  """TensorFlow graph for RNN sequence model."""
def __init__(self, graph_config, name='seq_graph'):
super(SeqGraph, self).__init__(name)
self.add_seq(**graph_config['core_config'])
self.add_outputs(graph_config['output_type'], graph_config['output_config'])
self.add_train(**graph_config['train_config'])
@with_graph_variable_scope
def add_seq(self,
input_shape,
input_vocab_size=None,
hidden_size=128,
n_layers=2,
cell_type='lstm',
bidirectional=False,
dropout=0.0,
use_embeddings=True,
embedding_size=64,
name='Sequence'):
with tf.variable_scope(name):
batch_size = tf.placeholder(dtype=tf.int32, shape=(), name='batch_size')
if use_embeddings:
embeddings = tf.get_variable(
'embeddings',
shape=(input_vocab_size, embedding_size),
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
else:
embeddings = None
(seq_inputs, initial_state, seq_outputs, final_state, input_dropout,
rnn_dropout, _) = create_seq_graph(
input_shape,
batch_size=batch_size,
hidden_size=hidden_size,
n_layers=n_layers,
cell_type=cell_type,
bidirectional=bidirectional,
embeddings=embeddings)
n = tf.reduce_sum(seq_inputs.sequence_length)
self.node_dict.update(
inputs=seq_inputs,
rnn_dropout=rnn_dropout,
input_dropout=input_dropout,
embeddings=embeddings,
batch_size=batch_size,
final_state=final_state,
outputs=seq_outputs,
n=n,
initial_state=initial_state)
class Seq2seqGraph(Graph):
"""TensorFlow graph for seq2seq model.
A basic seq2seq model with attention. The model supports
all the common specifications for a seq2seq model such as
number of layers, whether to use bidirectional encoder,
attention type, etc.
"""
def __init__(self, graph_config, name='seq2seq_graph'):
super(Seq2seqGraph, self).__init__(name)
self.add_seq2seq(**graph_config['core_config'])
self.add_outputs(graph_config['output_type'], graph_config['output_config'])
self.add_train(**graph_config['train_config'])
@with_graph_variable_scope
def add_seq2seq(self,
en_input_shape,
input_shape,
use_attn=True,
attn_size=128,
attn_vec_size=128,
en_input_vocab_size=None,
input_vocab_size=None,
en_hidden_size=128,
en_n_layers=2,
hidden_size=128,
n_layers=2,
cell_type='lstm',
en_bidirectional=False,
en_use_embeddings=True,
use_embeddings=True,
en_embedding_size=64,
embedding_size=64,
name='Seq2seq'):
with tf.variable_scope(name):
batch_size = tf.placeholder(dtype=tf.int32, shape=[], name='batch_size')
# Create encoder.
with tf.variable_scope('Encoder'):
if en_use_embeddings:
en_embeddings = tf.get_variable(
'embeddings',
shape=(en_input_vocab_size, en_embedding_size),
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
else:
en_embeddings = None
(en_seq_inputs, en_initial_state, en_seq_outputs, en_final_state,
en_input_dropout, en_rnn_dropout, _) = create_seq_graph(
en_input_shape,
batch_size=batch_size,
hidden_size=en_hidden_size,
n_layers=en_n_layers,
cell_type=cell_type,
bidirectional=en_bidirectional,
embeddings=en_embeddings,
output_proj_size=en_hidden_size)
if use_attn:
attn_inputs = en_seq_outputs
else:
attn_inputs = None
if en_bidirectional:
en_final_state = en_final_state[0]
# Create decoder.
with tf.variable_scope('Decoder'):
if use_embeddings:
embeddings = tf.get_variable(
'embeddings',
shape=(input_vocab_size, embedding_size),
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
else:
embeddings = None
(seq_inputs, initial_state, seq_outputs, final_state, input_dropout,
rnn_dropout, _) = create_seq_graph(
input_shape,
batch_size=batch_size,
hidden_size=hidden_size,
n_layers=n_layers,
cell_type=cell_type,
bidirectional=False,
embeddings=embeddings,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
attn_inputs=attn_inputs,
initial_state=en_final_state)
# Count number of steps.
n = tf.reduce_sum(seq_inputs.sequence_length)
self.node_dict.update(
en_inputs=en_seq_inputs,
en_rnn_dropout=en_rnn_dropout,
en_input_dropout=en_input_dropout,
en_outputs=en_seq_outputs,
en_initial_state=en_initial_state,
en_final_state=en_final_state,
inputs=seq_inputs,
rnn_dropout=rnn_dropout,
input_dropout=input_dropout,
outputs=seq_outputs,
batch_size=batch_size,
final_state=final_state,
initial_state=initial_state,
n=n,
encoded_context=en_seq_outputs,
context=en_seq_inputs,
en_embeddings=en_embeddings,
embeddings=embeddings)
if use_attn:
self.node_dict['attn_inputs'] = attn_inputs
class MemorySeq2seqGraph(Graph):
def __init__(self, graph_config, name='memory_seq2seq_graph'):
super(MemorySeq2seqGraph, self).__init__(name)
self.use_gpu = graph_config['use_gpu']
self.meta_learn = graph_config['meta_learn']
self.eval_graph = not graph_config['train_config']
if self.use_gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = graph_config['gpu_id']
else:
os.environ['CUDA_VISIBLE_DEVICES'] = ''
dict_to_pass = graph_config['core_config'].copy()
self.score_model = graph_config['score_fn_config'].pop('score_model', None)
dict_to_pass.update(graph_config['score_fn_config'])
self.add_memory_seq2seq(**dict_to_pass)
self.add_outputs(graph_config['output_type'], graph_config['output_config'])
# For evaluation, train_config would be set to {}
self.add_train(**graph_config['train_config'])
self.config = graph_config
@with_graph_variable_scope
def add_memory_seq2seq(self,
max_n_valid_indices=None,
n_mem=None,
n_builtin=None,
use_attn=True,
attn_size=128,
attn_vec_size=128,
en_input_vocab_size=None,
input_vocab_size=None,
en_hidden_size=128,
en_n_layers=2,
hidden_size=128,
n_layers=2,
cell_type='lstm',
en_bidirectional=False,
en_use_embeddings=True,
en_embedding_size=4,
value_embedding_size=128,
en_pretrained_vocab_size=None,
en_pretrained_embedding_size=-1,
tie_en_embeddings=True,
add_lm_loss=False,
n_en_input_features=1,
n_de_output_features=1,
en_attn_on_constants=False,
max_programs=None,
num_envs=None,
maxlen=25,
en_maxlen=75,
num_features=11,
score_norm_fn=None,
name='MemorySeq2seq',
**kwargs):
"""Create seq2seq with key variable memory.
Seq2seq with key variable memory is used for semantic
parsing (generating programs from natural language
instructions/questions).
    A MemorySeq2seq model uses a memory cell in the decoder.
    There are 3 types of tokens in a program:
    1) constants that are provided before the program
    is generated (added before decoding, different for
    different examples); 2) variables that save the results
    from executing past expressions (added during decoding,
    different for different examples); 3) language
    primitives such as built-in functions and reserved
    tokens (for example, "(", ")"), which are the same across
    different examples.
    There are two kinds of constants: 1) constants from the
    question, whose representation comes from the span where the
    constant is annotated; 2) constants from the context,
whose representation is from the constant value
embeddings, for example, table columns.
So the decoder vocab is organized as
[primitives, constants, variables].
    For a constant, its embedding is computed as the sum of two
    parts: 1) the embedding of the span (from the encoder) that
    the constant is annotated with, for example the span
"barack obama" in "who is barack obama's wife" or the
span "one" in "what is one plus one"; 2) embedding of
the constant, for example, the embedding of the entity
Obama or the embedding of the number one.
For a variable, its embedding is the decoder RNN output
at the step where the variable is created.
For a primitive, its embedding is initialized randomly
and tuned by SGD.
    Inspired by the code assistance (such as autocompletion)
    in modern IDEs, we also apply semantic and syntactic
    constraints on the decoder vocabulary so that at each
step, only some of the tokens are valid. So the decoder
has a dynamic vocabulary that is changing through
different steps.
"""
if not self.eval_graph:
# Code for score fn
args_to_pass = dict(
num_envs=num_envs,
num_features=num_features,
max_programs=max_programs,
score_temperature=kwargs['score_temperature'],
score_norm_fn=score_norm_fn)
self.score_fn = score_utils.ScoreFunction(
self.score_model, trainable=self.meta_learn, **args_to_pass)
self.node_dict.update(self.score_fn.score_dict)
input_shape = tf_utils.MemoryInputTuple(
tf.TensorShape([None, maxlen]), tf.TensorShape([None, maxlen]),
tf.TensorShape([None, maxlen, max_n_valid_indices]))
input_dtype = tf_utils.MemoryInputTuple(tf.int32, tf.int32, tf.int32)
en_input_shape = tf.TensorShape([None, en_maxlen])
constant_span_shape = tf.TensorShape([None, n_mem, 2])
constant_value_embedding_shape = tf.TensorShape(
[None, n_mem, value_embedding_size])
builtin_de_embeddings_shape = tf.TensorShape([n_builtin, hidden_size])
with tf.variable_scope('ConstantInput'):
# constant_span_embedding encodes the information
# from the span where the constant is referred to,
# for example the span "obama" in "who is the wife
# of obama".
# constant_value_embedding encodes the information
# from the value of the constant, for example, the
# embedding of the entity Obama.
# constant_span: (B, n_mem, 2)
constant_spans_placeholder = tf.placeholder(tf.int32, constant_span_shape)
constant_spans = constant_spans_placeholder
n_constants_placeholder = tf.placeholder(tf.int32, [None, 1])
n_constants = tf.squeeze(n_constants_placeholder, [-1])
# constant_spans: (B, n_mem, 1)
# 0.0 if the span is [-1, -1], else 1.0.
constant_span_masks = tf.cast(
tf.greater(tf.reduce_sum(constant_spans, axis=2), 0), tf.float32)
constant_span_masks = tf.expand_dims(constant_span_masks, -1)
# constant_spans: (B, n_mem, 2, 1)
constant_spans = tf.maximum(constant_spans, 0)
constant_spans = tf.expand_dims(constant_spans, axis=-1)
if constant_value_embedding_shape is not None:
constant_value_embeddings_placeholder = tf.placeholder(
tf.float32, shape=constant_value_embedding_shape)
constant_value_embeddings = constant_value_embeddings_placeholder
constant_value_embeddings = tf.layers.dense(
constant_value_embeddings, hidden_size, use_bias=True)
constant_value_masks = tf.squeeze(1 - constant_span_masks, [-1])
if n_en_input_features > 0:
en_input_features_shape = tf.TensorShape(
[None, en_maxlen, n_en_input_features])
else:
en_input_features_shape = None
with tf.variable_scope(name):
batch_size = tf.placeholder(dtype=tf.int32, shape=(), name='batch_size')
with tf.variable_scope('Encoder'):
if en_use_embeddings:
if en_pretrained_embedding_size < 0:
en_embeddings = tf.get_variable(
'embeddings',
shape=(en_input_vocab_size, en_embedding_size),
initializer=tf.truncated_normal_initializer(
mean=0.0, stddev=0.1))
else:
en_embeddings = tf.get_variable(
'embeddings',
shape=(en_input_vocab_size - en_pretrained_vocab_size,
en_embedding_size),
initializer=tf.truncated_normal_initializer(
mean=0.0, stddev=0.1))
en_pretrained_embeddings = tf.get_variable(
'pretrained_embeddings',
shape=(en_pretrained_vocab_size, en_pretrained_embedding_size),
trainable=False,
initializer=tf.zeros_initializer())
en_pretrained_embeddings_placeholder = tf.placeholder(
tf.float32,
[en_pretrained_vocab_size, en_pretrained_embedding_size])
en_pretrained_embeddings_init = en_pretrained_embeddings.assign(
en_pretrained_embeddings_placeholder)
en_pretrained_embeddings = tf.layers.dense(
inputs=en_pretrained_embeddings,
units=en_embedding_size,
use_bias=True)
en_embeddings = tf.concat(
values=[en_embeddings, en_pretrained_embeddings], axis=0)
else:
en_embeddings = None
if en_attn_on_constants:
tf.logging.info('Using attention in encoder!!!')
(en_seq_inputs, en_initial_state, en_seq_outputs, en_final_state,
en_input_dropout, en_rnn_dropout, en_rnn_outputs) = create_seq_graph(
en_input_shape,
batch_size=batch_size,
hidden_size=en_hidden_size,
n_layers=en_n_layers,
cell_type=cell_type,
bidirectional=en_bidirectional,
embeddings=en_embeddings,
output_proj_size=en_hidden_size,
input_features_shape=en_input_features_shape,
attn_inputs=constant_value_embeddings,
attn_masks=constant_value_masks,
attn_size=attn_size,
attn_vec_size=attn_vec_size)
else:
(en_seq_inputs, en_initial_state, en_seq_outputs, en_final_state,
en_input_dropout, en_rnn_dropout, en_rnn_outputs) = create_seq_graph(
en_input_shape,
batch_size=batch_size,
hidden_size=en_hidden_size,
n_layers=en_n_layers,
cell_type=cell_type,
bidirectional=en_bidirectional,
embeddings=en_embeddings,
output_proj_size=en_hidden_size,
input_features_shape=en_input_features_shape)
if n_en_input_features > 0:
en_seq_input_features = SeqTensor(en_seq_inputs.tensor[1],
tf.placeholder(tf.int32, [None]))
en_seq_inputs = SeqTensor(en_seq_inputs.tensor[0],
en_seq_inputs.sequence_length)
if add_lm_loss:
sequence_length = tf.maximum(en_seq_inputs.sequence_length - 1, 0)
en_n = tf.cast(tf.reduce_sum(sequence_length), tf.float32)
mask = tf.sequence_mask(sequence_length, dtype=tf.float32)
if en_bidirectional:
en_fw_outputs = en_rnn_outputs[0]
en_bw_outputs = en_rnn_outputs[1]
if tie_en_embeddings:
en_fw_logits = tf_utils.tensormul(en_fw_outputs[:, :-1, :],
tf.transpose(en_embeddings))
en_bw_logits = tf_utils.tensormul(en_bw_outputs[:, 1:, :],
tf.transpose(en_embeddings))
else:
# Use 0 to n-2 to compute logits.
en_fw_logits = tf.layers.dense(
en_fw_outputs[:, :-1, :], en_input_vocab_size, use_bias=True)
en_bw_logits = tf.layers.dense(
en_bw_outputs[:, 1:, :], en_input_vocab_size, use_bias=True)
# Use 1 to n-1 as targets.
en_fw_lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=en_seq_inputs.tensor[:, 1:], logits=en_fw_logits) * mask
en_bw_lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=en_seq_inputs.tensor[:, :-1], logits=en_bw_logits) * mask
en_lm_loss = tf.reduce_sum(en_fw_lm_loss + en_bw_lm_loss) / en_n
else:
en_fw_outputs = en_rnn_outputs
if tie_en_embeddings:
en_fw_logits = tf_utils.tensormul(en_fw_outputs[:, :-1, :],
tf.transpose(en_embeddings))
else:
en_fw_logits = tf.layers.dense(
en_fw_outputs[:, :-1, :], en_input_vocab_size, use_bias=True)
en_fw_lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=en_seq_inputs.tensor[:, 1:], logits=en_fw_logits) * mask
en_lm_step_loss = en_fw_lm_loss
en_lm_loss = tf.reduce_sum(en_lm_step_loss) / en_n
if use_attn:
attn_inputs = en_seq_outputs.tensor
attn_masks = tf.sequence_mask(
en_seq_outputs.sequence_length, maxlen=en_maxlen, dtype=tf.float32)
else:
attn_inputs = None
attn_masks = None
with tf.variable_scope('ConstantEncoder'):
batch_ind = tf.range(batch_size)
# batch_ind: (B, 1, 1, 1)
for i in range(3):
batch_ind = tf.expand_dims(batch_ind, axis=-1)
# batch_ind: (B, n_mem, 2, 1)
batch_ind = tf.tile(batch_ind, [1, n_mem, 2, 1])
# constant_span: (B, n_mem, 2, 2)
constant_spans = tf.concat([batch_ind, constant_spans], axis=-1)
# constant_span_embedding: (B, n_mem, 2, en_output_size)
constant_span_embeddings = tf.gather_nd(en_seq_outputs.tensor,
constant_spans)
# constant_embedding: (B, n_mem, en_output_size)
constant_embeddings = tf.reduce_mean(constant_span_embeddings, axis=2)
constant_embeddings = constant_embeddings * constant_span_masks
if constant_value_embedding_shape is not None:
constant_embeddings = constant_embeddings + constant_value_embeddings
# mask out the bad constants.
# constant mask: (B, n_mem)
constant_masks = tf.sequence_mask(
n_constants, maxlen=n_mem, dtype=tf.float32)
# constant mask: (B, n_mem, 1)
constant_masks = tf.expand_dims(constant_masks, -1)
constant_masks = tf.tile(constant_masks, [1, 1, hidden_size])
# constant_embeddings: (B, n_mem, hidden_size)
constant_embeddings = constant_embeddings * constant_masks
# builtin_de_embeddings: (n_builtin, embed_size)
builtin_de_embeddings = tf.get_variable(
'builtin_de_embeddings',
builtin_de_embeddings_shape,
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
# builtin_de_embeddings: (1, n_builtin, embed_size)
builtin_de_embeddings = tf.expand_dims(builtin_de_embeddings, axis=0)
# builtin_de_embeddings: (B, n_builtin, embed_size)
builtin_de_embeddings = tf.tile(builtin_de_embeddings,
[batch_size] + [1] * 2)
# initial_memory: (B, n_builtin + n_mem, embed_size)
initial_memory = tf.concat([builtin_de_embeddings, constant_embeddings],
axis=1)
# concatenate static and constant embeddings to form
# new memory to create initial states.
if en_bidirectional:
initial_state = en_final_state[0]
else:
initial_state = en_final_state
with tf.variable_scope('Decoder'):
initial_state = tf_utils.MemoryStateTuple(initial_memory, initial_state)
seq_inputs = create_seq_inputs(shape=input_shape, dtype=input_dtype)
inputs = seq_inputs.tensor
sequence_length = seq_inputs.sequence_length
rnn_dropout = tf.placeholder_with_default(
0.0, shape=None, name='rnn_dropout')
# Create multilayer attention cell then wrap with memory cell.
cell = multilayer_dropout_cell(
cell_fn=RNN_CELL_DICT[cell_type],
hidden_size=hidden_size,
n_layers=n_layers,
dropout=rnn_dropout)
if attn_inputs is not None:
cell = tf_utils.SeqAttentionCellWrapper(
cell,
attn_inputs=attn_inputs,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
output_size=hidden_size,
attn_masks=attn_masks)
mem_size = builtin_de_embeddings_shape[0] + constant_span_shape[1]
embed_size = hidden_size
use_attn_scores = (self.score_model == 'attn') and (not self.eval_graph)
cell = tf_utils.MemoryWrapper(
cell,
mem_size,
embed_size,
max_n_valid_indices,
use_score_wrapper=use_attn_scores,
activation=score_norm_fn)
flat_inputs = data_utils.flatten(inputs)
flat_inputs = [tf.expand_dims(in_, -1) for in_ in flat_inputs[:2]
] + flat_inputs[2:]
flat_inputs_unstacked = [tf.unstack(x, axis=1) for x in flat_inputs]
inputs = [
tf_utils.MemoryInputTuple(
read_ind=x[0], write_ind=x[1], valid_indices=x[2])
for x in zip(*flat_inputs_unstacked)
]
cell_outputs, final_state = tf.nn.static_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32)
if use_attn_scores:
outputs = [x[0] for x in cell_outputs]
scores_per_timestep = [x[1] for x in cell_outputs]
self.score_fn.create_attn_based_scores(
scores_per_timestep, sequence_length)
else:
outputs = cell_outputs
outputs = tf.stack(outputs, axis=1)
if n_de_output_features > 0:
de_seq_output_features = create_seq_inputs(
shape=tf.TensorShape(
[None, maxlen, max_n_valid_indices, n_de_output_features]),
dtype=tf.int32,
name='de_output_features')
output_feature_weights = tf.get_variable(
'de_output_feature_weights',
shape=tf.TensorShape([n_de_output_features, 1]),
initializer=tf.zeros_initializer())
outputs = outputs + tf.squeeze(
tf_utils.tensormul(
tf.cast(de_seq_output_features.tensor, tf.float32),
output_feature_weights),
axis=-1)
seq_outputs = SeqTensor(outputs, sequence_length)
n = tf.reduce_sum(seq_inputs.sequence_length)
self.node_dict.update(
en_inputs=en_seq_inputs,
en_rnn_dropout=en_rnn_dropout,
en_input_dropout=en_input_dropout,
en_outputs=en_seq_outputs,
en_initial_state=en_initial_state,
en_final_state=en_final_state,
inputs=seq_inputs,
constant_spans=constant_spans_placeholder,
constant_embeddings=constant_embeddings,
constant_masks=constant_masks,
n_constants=n_constants_placeholder,
rnn_dropout=rnn_dropout,
outputs=seq_outputs,
batch_size=batch_size,
final_state=final_state,
initial_state=initial_state,
n=n,
encoded_context=en_seq_outputs,
context=en_seq_inputs,
en_embeddings=en_embeddings)
if en_pretrained_embedding_size > 0:
self.node_dict[
'en_pretrained_embeddings'] = en_pretrained_embeddings_placeholder
self.node_dict[
'en_pretrained_embeddings_init'] = en_pretrained_embeddings_init
if constant_value_embedding_shape is not None:
self.node_dict[
'constant_value_embeddings'] = constant_value_embeddings_placeholder
if add_lm_loss:
self.node_dict['en_lm_loss'] = en_lm_loss
# self.node_dict['en_lm_step_loss'] = en_lm_step_loss
if use_attn:
self.node_dict['attn_inputs'] = attn_inputs
if n_en_input_features > 0:
self.node_dict['en_input_features'] = en_seq_input_features
self.node_dict['summaries'].append(
tf.summary.scalar(self.vs_name + '/' + 'en_input_features_sum',
tf.reduce_sum(en_seq_input_features.tensor)))
if n_de_output_features > 0:
self.node_dict['output_features'] = de_seq_output_features
self.node_dict['output_feature_weights'] = output_feature_weights
self.node_dict['summaries'].append(
tf.summary.scalar(self.vs_name + '/' + 'output_feature_weights_0',
output_feature_weights[0][0]))
self.node_dict['summaries'].append(
tf.summary.scalar(self.vs_name + '/' + 'output_features_sum',
tf.reduce_sum(de_seq_output_features.tensor)))
if self.meta_learn:
val_en_seq_inputs = create_seq_inputs(
en_input_shape, en_seq_inputs.tensor.dtype, name='val_en_inputs')
val_seq_inputs = create_seq_inputs(
shape=input_shape, dtype=input_dtype, name='val_inputs')
self.node_dict.update(
val_inputs=val_seq_inputs,
val_en_inputs=val_en_seq_inputs,
val_context=val_en_seq_inputs)
if n_en_input_features:
self.node_dict['val_en_input_features'] = create_seq_inputs(
en_input_features_shape,
en_seq_input_features.tensor.dtype,
name='val_en_input_features')
if n_de_output_features:
self.node_dict['val_output_features'] = create_seq_inputs(
shape=de_seq_output_features.tensor.shape,
dtype=de_seq_output_features.tensor.dtype,
name='val_output_features')
with tf.name_scope('val_constants'):
for key in [
'batch_size', 'n_constants', 'constant_spans',
'constant_value_embeddings'
]:
val_key = 'val_{}'.format(key)
self.node_dict[val_key] = create_placeholder_copy(self.node_dict[key])
class MonitorGraph(object):
"""A tensorflow graph to monitor some values during training.
It generates TensorFlow summaries for the monitored values so they can be
tracked in TensorBoard.
"""
def __init__(self):
self.node_dict = {}
self._graph = tf.Graph()
def launch(self):
with self._graph.as_default():
self.merged = tf.summary.merge_all()
init = tf.global_variables_initializer()
self.session = tf.Session(graph=self._graph)
self.session.run(init)
def add_scalar_monitor(self, name, dtype):
with self._graph.as_default():
x = tf.placeholder_with_default(
input=tf.zeros(shape=(), dtype=dtype), shape=(), name=name)
# x = tf.placeholder(dtype=dtype, shape=(), name=name)
tf.summary.scalar(name, x)
self.node_dict[name] = x
def generate_summary(self, feed_dict):
summary_str = self.session.run(self.merged,
map_dict(self.node_dict, feed_dict))
return summary_str
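# Illustrative usage sketch for MonitorGraph (not part of the original code;
# the monitored names, the example values and the summary writer are assumptions):
#
#   monitor = MonitorGraph()
#   monitor.add_scalar_monitor('avg_return', tf.float32)
#   monitor.add_scalar_monitor('train_loss', tf.float32)
#   monitor.launch()  # merge_all() must run after all monitors are added.
#   summary_str = monitor.generate_summary(
#       {'avg_return': 0.42, 'train_loss': 1.3})
#   # writer is e.g. a tf.summary.FileWriter created by the caller.
#   writer.add_summary(summary_str, global_step=0)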
# Utility functions for creating TensorFlow graphs.
# FNN
def create_multilayer_fnn(inputs, dropout, hidden_sizes, activation='relu'):
x = inputs
for size in hidden_sizes:
x = tf.nn.dropout(x, 1 - dropout)
x = tf.layers.dense(
inputs=x, units=size, activation=ACTIVATION_DICT[activation])
return x
# Loss
def create_seq_mse_loss(outputs, targets, weights, sequence_length):
mask = tf.sequence_mask(sequence_length, dtype=tf.float32)
loss = tf.reduce_sum(tf.squared_difference(outputs, targets) * weights * mask)
return loss
def create_probs(logits, targets, sequence_length, use_sparse=False):
"""Create graph nodes for step and sequence probabilities."""
mask = tf.sequence_mask(
sequence_length, maxlen=tf.shape(targets)[1], dtype=tf.float32)
if use_sparse:
step_neg_logprobs = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
else:
# The second-order derivative of sparse_softmax_cross_entropy is not defined,
# but it is required for the meta gradient, so use the dense one-hot version.
one_hot_targets = tf.one_hot(targets, depth=logits.shape.as_list()[-1])
step_neg_logprobs = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=one_hot_targets, logits=logits)
step_logprobs = -1 * step_neg_logprobs * mask
sequence_logprobs = tf.reduce_sum(step_logprobs, axis=1)
sequence_probs = tf.exp(sequence_logprobs)
return sequence_probs, sequence_logprobs, step_logprobs
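# Shape note for create_probs (added for clarity): with logits of shape
# (B, T, V), targets of shape (B, T) and sequence_length of shape (B,),
# step_logprobs has shape (B, T) with padded positions masked to 0,
# sequence_logprobs has shape (B,) and sequence_probs = exp(sequence_logprobs).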
def create_softmax(inputs,
softmax_w=None,
output_vocab_size=None,
use_bias=False,
name='Softmax_layer'):
"""Create nodes for linear transformation of inputs/softmax computation."""
with tf.name_scope(name):
# inputs = tf.nn.dropout(inputs, 1-dropout)
if softmax_w is None:
logits = tf.layers.dense(
inputs=inputs, units=output_vocab_size, use_bias=use_bias)
else:
logits = tf_utils.tensormul(inputs, softmax_w)
if use_bias:
softmax_b = tf.Variable(
initial_value=np.zeros((1, output_vocab_size), dtype=np.float32),
name='softmax_bias')
logits += softmax_b
return create_softmax_from_logits(logits)
def create_softmax_from_logits(logits):
"""Create nodes for softmax computation from logits."""
temperature = tf.placeholder_with_default(1.0, shape=(), name='temperature')
logits = logits / temperature
logits_shape = tf.shape(logits)
logits_dim = logits_shape[-1]
logits_2d = tf.reshape(logits, [-1, logits_dim])
samples = tf.multinomial(logits_2d, 1)
samples = tf.reshape(samples, logits_shape[:-1])
probs = tf.nn.softmax(logits)
predictions = tf.argmax(probs, axis=2)
return logits, probs, predictions, samples, temperature
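# Note (added for clarity): the returned `temperature` placeholder defaults to
# 1.0, which leaves the softmax unchanged; feeding a value above 1.0 flattens
# the sampling distribution and a value below 1.0 sharpens it.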
# Embedding
def embed_inputs(inputs, embeddings, name='Embedding_layer'):
with tf.name_scope(name):
embedded_inputs = tf.nn.embedding_lookup(embeddings, inputs)
return embedded_inputs
# RNN
def create_rnn(cell,
initial_state,
inputs,
sequence_length,
hidden_size,
bidirectional,
cell_bw=None,
name='RNN'):
with tf.name_scope(name):
if bidirectional:
# Note that you can't use bidirectional RNN if you
# want to do decoding.
initial_state_fw = initial_state[0]
initial_state_bw = initial_state[1]
outputs, final_state_fw, final_state_bw = tf.nn.static_bidirectional_rnn(
cell,
cell_bw,
inputs,
sequence_length=sequence_length,
initial_state_fw=initial_state_fw,
initial_state_bw=initial_state_bw,
dtype=tf.float32)
final_state = (final_state_fw, final_state_bw)
else:
outputs, final_state = tf.nn.static_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32)
outputs = tf.stack(outputs, axis=1)
return outputs, final_state
# RNN Cell
def multilayer_dropout_cell(cell_fn,
hidden_size,
n_layers,
dropout,
use_skip_connection=True):
"""Create multilayer RNN cell with dropout."""
cells = []
for i in xrange(n_layers):
cell = cell_fn(hidden_size)
if i > 0 and use_skip_connection:
cell = tf.nn.rnn_cell.ResidualWrapper(cell)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=1.0 - dropout)
# variational_recurrent=True,
# state_keep_prob = 1.0 - dropout,
# dtype=tf.float32)
cells.append(cell)
final_cell = tf.contrib.rnn.MultiRNNCell(cells)
return final_cell
# Input placeholders.
def create_seq_inputs(shape, dtype=tf.float32, name='inputs'):
with tf.name_scope(name):
if isinstance(shape, tuple):
flat_input_shape = data_utils.flatten(shape)
assert isinstance(dtype, tuple)
flat_dtype = data_utils.flatten(dtype)
flat_inputs = [
tf.placeholder(dt, sh, name='inputs')
for dt, sh in zip(flat_dtype, flat_input_shape)
]
inputs = data_utils.pack_sequence_as(shape, flat_inputs)
else:
inputs = tf.placeholder(dtype, shape)
sequence_length = tf.placeholder(tf.int32, [None], name='sequence_length')
return SeqTensor(inputs, sequence_length)
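# Illustrative example (not in the original code): calling
# create_seq_inputs(shape=tf.TensorShape([None, None]), dtype=tf.int32)
# returns a SeqTensor whose .tensor is an int32 placeholder of shape
# (batch, time) and whose .sequence_length is an int32 placeholder of shape
# (batch,); passing a (nested) tuple of shapes instead creates one placeholder
# per leaf and repacks them into the same nested structure.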
def create_tuple_placeholders_with_default(inputs, extra_dims, shape):
if isinstance(shape, int):
result = tf.placeholder_with_default(inputs, list(extra_dims) + [shape])
else:
subplaceholders = [
create_tuple_placeholders_with_default(subinputs, extra_dims, subshape)
for subinputs, subshape in zip(inputs, shape)
]
t = type(shape)
if t == tuple:
result = t(subplaceholders)
else:
result = t(*subplaceholders)
return result
def create_placeholder_copy(p):
return tf.placeholder(dtype=p.dtype, shape=p.shape)
def create_tuple_placeholders(dtype, extra_dims, shape):
if isinstance(shape, int):
result = tf.placeholder(dtype, list(extra_dims) + [shape])
else:
subplaceholders = [
create_tuple_placeholders(dtype, extra_dims, subshape)
for subshape in shape
]
t = type(shape)
# Handles both tuple and LSTMStateTuple.
if t == tuple:
result = t(subplaceholders)
else:
result = t(*subplaceholders)
return result
# Sequence models.
def create_seq_graph(
input_shape,
batch_size=None,
# input_vocab_size=None,
attn_inputs=None,
attn_size=128,
attn_vec_size=128,
# output_size=128,
input_size=None,
hidden_size=128,
n_layers=2,
cell_type='lstm',
bidirectional=False,
initial_state=None,
embeddings=None,
output_proj_size=None,
input_features_shape=None,
attn_masks=None,
inputs_name='inputs'):
# Create inputs.
seq_inputs = create_seq_inputs(
shape=input_shape,
dtype=tf.int32 if embeddings is not None else tf.float32,
name=inputs_name)
rnn_dropout = tf.placeholder_with_default(0.0, shape=None, name='rnn_dropout')
# Create embedding layer.
if embeddings is not None:
embedded_inputs = embed_inputs(seq_inputs.tensor, embeddings=embeddings)
else:
embedded_inputs = seq_inputs.tensor
input_dropout = tf.placeholder_with_default(
0.0, shape=None, name='input_dropout')
embedded_inputs = tf.nn.dropout(embedded_inputs, 1 - input_dropout)
# If we include features in inputs, then add them here.
if input_features_shape is not None:
seq_input_features = create_seq_inputs(
shape=input_features_shape, dtype=tf.int32)
embedded_inputs = tf.concat(
[embedded_inputs,
tf.cast(seq_input_features.tensor, tf.float32)],
axis=-1)
seq_inputs = SeqTensor((seq_inputs.tensor, seq_input_features.tensor),
seq_inputs.sequence_length)
else:
seq_input_features = None
embedded_seq_inputs = SeqTensor(embedded_inputs, seq_inputs.sequence_length)
# Create RNN cell
cell = multilayer_dropout_cell(RNN_CELL_DICT[cell_type], hidden_size,
n_layers, rnn_dropout)
if bidirectional:
cell_bw = multilayer_dropout_cell(RNN_CELL_DICT[cell_type], hidden_size,
n_layers, rnn_dropout)
else:
cell_bw = None
# Add attention.
if attn_inputs is not None:
cell = tf_utils.SeqAttentionCellWrapper(
cell,
attn_inputs=attn_inputs,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
output_size=hidden_size,
attn_masks=attn_masks)
if bidirectional:
cell_bw = tf_utils.SeqAttentionCellWrapper(
cell_bw,
attn_inputs=attn_inputs,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
output_size=hidden_size,
attn_masks=attn_masks)
if initial_state is None:
# Create zero state.
zero_state = cell.zero_state(batch_size, tf.float32)
if bidirectional:
zero_state_bw = cell_bw.zero_state(batch_size, tf.float32)
zero_state = (zero_state, zero_state_bw)
initial_state = zero_state
inputs = tf.unstack(embedded_seq_inputs.tensor, axis=1)
# inputs = embedded_seq_inputs.tensor
# Create RNN.
outputs, final_state = create_rnn(
cell,
initial_state,
inputs,
embedded_seq_inputs.sequence_length,
hidden_size=hidden_size,
bidirectional=bidirectional,
cell_bw=cell_bw)
rnn_outputs = outputs
if bidirectional:
# Comment this if using static api
# outputs = tf.concat(outputs, axis=2)
hidden_size *= 2
# Whether to add linear transformation to outputs.
if output_proj_size is not None:
outputs = tf.layers.dense(
inputs=outputs, units=output_proj_size, use_bias=True)
seq_outputs = SeqTensor(
outputs,
tf.placeholder_with_default(seq_inputs.sequence_length, shape=[None]))
return (seq_inputs, initial_state, seq_outputs, final_state, input_dropout,
rnn_dropout, rnn_outputs)
# General utility functions.
def map_dict(dict_1, main_dict):
new_dict = {}
for k in main_dict.keys():
if k in dict_1:
new_dict[dict_1[k]] = main_dict[k]
return new_dict
| [] |
2024-01-10 | pcuenca/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | lindsayroney/intro_AI_project | Python~55056c2e-309e-4e02-bd42-18395c2e942d_0.py | from abc import ABC, abstractmethod
import redis
import openai
import numpy as np
from redis.commands.search.query import Query
from redis.commands.search.field import VectorField, TextField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
class EmbeddingModel(ABC):
@abstractmethod
def create_embedding(self, text: str):
pass
class OpenAIEmbeddingModel(EmbeddingModel):
def create_embedding(self, text: str):
embedding = openai.Embedding.create(input=text, model="text-embedding-ada-002")
vector = embedding["data"][0]["embedding"]
vector = np.array(vector).astype(np.float32).tobytes()
return vector
class EmbeddingStorage(ABC):
@abstractmethod
def store(self, key: str, vector):
pass
@abstractmethod
def retrieve(self, key: str):
pass
@abstractmethod
def search(self, vector, top_k=5):
pass
class RedisEmbeddingStorage(EmbeddingStorage):
def __init__(self, host='localhost', port=6379, db=0):
self.r = redis.Redis(host=host, port=port, db=db, encoding='utf-8', decode_responses=True)
self.SCHEMA = [
TextField("url"),
VectorField("embedding", "HNSW", {"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"}),
]
try:
self.r.ft("posts").create_index(fields=self.SCHEMA, definition=IndexDefinition(prefix=["post:"], index_type=IndexType.HASH))
except Exception as e:
print("Index already exists")
def store(self, key: str, vector):
post_hash = {
"url": key,
"embedding": vector
}
self.r.hset(name=f"post:{key}", mapping=post_hash)
def retrieve(self, key: str):
return self.r.hget(name=f"post:{key}", key="embedding")
def search(self, vector, top_k=5):
base_query = f"*=>[KNN {top_k} @embedding $vector AS vector_score]"
query = Query(base_query).return_fields("url", "vector_score").sort_by("vector_score").dialect(2)
try:
results = self.r.ft("posts").search(query, query_params={"vector": vector})
except Exception as e:
print("Error calling Redis search: ", e)
return None
return results
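# Illustrative usage sketch (not part of the original module; assumes a local
# Redis instance with the RediSearch module loaded and a valid OpenAI API key):
#
#   embedder = OpenAIEmbeddingModel()
#   storage = RedisEmbeddingStorage()
#   vector = embedder.create_embedding("How do I cache embeddings in Redis?")
#   storage.store("https://example.com/post-1", vector)  # hypothetical URL
#   results = storage.search(vector, top_k=3)
#   if results is not None:
#       for doc in results.docs:
#           print(doc.url, doc.vector_score)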
| [] |
2024-01-10 | lindsayroney/intro_AI_project | Python~4a60bf97-0058-41d1-8a93-983230b02169_0.py | import os
import openai
from time import sleep
openai.api_key = os.getenv("OPENAI_API_KEY")
chat_models = ["gpt-4", "gpt-3.5-turbo"]
message_history = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Write a unique, surprising, extremely randomized story with highly unpredictable changes of events."}
]
completion_models = ["text-davinci-003", "text-davinci-001", "davinci-instruct-beta", "davinci"]
prompt = "[System: You are a helpful assistant]\n\nUser: Write a unique, surprising, extremely randomized story with highly unpredictable changes of events.\n\nAI:"
results = []
# Testing chat models
for model in chat_models:
sequences = set()
for _ in range(30):
completion = openai.ChatCompletion.create(
model=model,
messages=message_history,
max_tokens=256,
temperature=0
)
sequences.add(completion.choices[0].message['content'])
sleep(1)
print(f"\nModel {model} created {len(sequences)} unique sequences:")
for seq in sequences:
print(seq)
results.append((len(sequences), model))
# Testing completion models
for model in completion_models:
sequences = set()
for _ in range(30):
completion = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=256,
temperature=0
)
sequences.add(completion.choices[0].text)
sleep(1)
print(f"\nModel {model} created {len(sequences)} unique sequences:")
for seq in sequences:
print(seq)
results.append((len(sequences), model))
# Printing table of results
print("\nTable of Results:")
print("Num_Sequences\tModel_Name")
for num_sequences, model_name in results:
print(f"{num_sequences}\t{model_name}")
| [
"Write a unique, surprising, extremely randomized story with highly unpredictable changes of events.",
"You are a helpful assistant.",
"[System: You are a helpful assistant]\n\nUser: Write a unique, surprising, extremely randomized story with highly unpredictable changes of events.\n\nAI:"
] |
2024-01-10 | lindsayroney/intro_AI_project | Python~6a070d12-0a35-489a-a771-95c720765de8_1.py | import os
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.schema import (AIMessage, HumanMessage, SystemMessage)
# Load environment variables
load_dotenv()
# OpenAI connection settings (API key, etc.)
api_key = os.environ.get('OPEN_AI_KEY')
def main():
# Instantiate the wrapper class for the language model (OpenAI chat model)
openai = ChatOpenAI(
model="gpt-3.5-turbo",
openai_api_key=api_key,
temperature=0.0
)
# Give the model a prompt (input) and get back a completion (output).
# SystemMessage: information passed to OpenAI up front, e.g. a persona or background knowledge.
# HumanMessage: the question to ask OpenAI
response = openai([
SystemMessage(content="あなたは沖縄出身です。沖縄の方言で返答してください。"),
HumanMessage(content="調子はどうですか?")
])
print(response)
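# Note (added for clarity): the printed object is an AIMessage whose .content
# holds the reply, here expected to be in Okinawan dialect as requested by the
# SystemMessage above.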
if __name__ == "__main__":
main()
| [
"あなたは沖縄出身です。沖縄の方言で返答してください。",
"調子はどうですか?"
] |
2024-01-10 | lindsayroney/intro_AI_project | Python~c89a2b32-e45a-4134-9088-e28ba068f816_0.py | import openai
import command_manager
import voice_feedback
bot = None # the ChatGPT bot object
openai.api_key = 'your-api-key' # replace 'your-api-key' with your actual OpenAI API key
def chat(text):
"""
handles user-chatgpt interactions
"""
if command_manager.hasText(text, command_manager.deactivateChatMode):
voice_feedback.speak('deactivating chatgpt mode', wait=True)
command_manager.chatMode = False
return
global bot
try:
# Request a fresh completion for every message instead of reusing the
# first response forever.
bot = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": text}
]
)
except Exception as e:
print(e)
return
print(f"You to ChatGPT: {text}")
response = bot['choices'][0]['message']['content']
voice_feedback.speak(response, wait=True)
| [
"You are a helpful assistant."
] |
2024-01-10 | lindsayroney/intro_AI_project | Python~5dba4c81-dd67-4247-becc-32e90d1bda5e_0.py | import os
import requests
import json
import openai
# Set your API keys
TYPEFORM_API_KEY = os.getenv("TYPEFORM_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Set your form ID
FORM_ID = "Your_Form_ID" # replace with your form ID
# Set the Typeform API endpoint
TYPEFORM_API = f"https://api.typeform.com/forms/{FORM_ID}/responses"
# Set OpenAI API key
openai.api_key = OPENAI_API_KEY
# Headers for the Typeform API
headers = {
"Authorization": f"Bearer {TYPEFORM_API_KEY}",
}
def get_responses(since=None):
params = {}
if since:
params['since'] = since
response = requests.get(TYPEFORM_API, headers=headers, params=params)
return response.json()
def get_summarized_points(text):
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Summarize the following text into 3 key points"},
{"role": "user", "content": text}
]
)
return completion.choices[0].message['content']
def get_classification_groups(responses):
text = "; ".join(responses)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Come up with 5 phrases that can be used to semantically group the following form responses"},
{"role": "user", "content": text}
]
)
return completion.choices[0].message['content']
def main():
summarized_responses = []
response_data = get_responses()
# Loop through pages of responses
while True:
for item in response_data['items']:
text_responses = [answer['text'] for answer in item['answers'] if answer['type'] in ['text', 'short_text', 'long_text']]
response_text = " ".join(text_responses)
summarized_response = get_summarized_points(response_text)
summarized_responses.append(summarized_response)
if response_data['page_count'] == 1:
break
else:
response_data = get_responses(response_data['items'][-1]['submitted_at'])
groups = get_classification_groups(summarized_responses)
print(groups)
if __name__ == "__main__":
main()
| [
"Summarize the following text into 3 key points",
"You are a helpful assistant.",
"Come up with 5 phrases that can be used to semantically group the following form responses"
] |
2024-01-10 | lindsayroney/intro_AI_project | Python~a942b81c-7859-44c3-98b4-d22e73818567_1.py | import os
import frontmatter
import sqlite3
import openai
import json
# define path and API key
path = "~/Documents/websites/swizec.com/src/pages/blog"
openai.api_key = "Your OpenAI Key"
# connect to SQLite database
conn = sqlite3.connect('embedding_vectors.db')
cursor = conn.cursor()
# create table if not exists
cursor.execute('''
CREATE TABLE IF NOT EXISTS vectors (
path TEXT PRIMARY KEY,
filename TEXT,
title TEXT,
vector JSON
)
''')
# walk through the directories
for root, dirs, files in os.walk(os.path.expanduser(path)):
for file in files:
if file == "index.mdx":
# get the full file path
full_path = os.path.join(root, file)
print(f'Processing {full_path}')
# read the file
with open(full_path, 'r') as f:
try:
post = frontmatter.load(f)
title = post.get('title', 'No Title')
except Exception as e:
print(f'Error parsing file {full_path}: {e}')
continue
# get the embedding
try:
response = openai.Embedding.create(
model="text-embedding-ada-002",
# the file handle was already consumed by frontmatter.load, so use the parsed body
input=post.content
)
embedding = response['data'][0]['embedding']
except Exception as e:
print(f'Error generating embedding for {full_path}: {e}')
continue
# save the embedding
cursor.execute('''
INSERT INTO vectors (path, filename, title, vector)
VALUES (?, ?, ?, ?)
''', (full_path, file, title, json.dumps(embedding)))
# commit changes and close connection
conn.commit()
conn.close()
print('Done.')
| [] |
2024-01-10 | lindsayroney/intro_AI_project | Python~d32b7a1c-1b5d-4d2f-895e-edc2e6576164_1.py | from openai_decorator import openaifunc, get_openai_funcs
@openaifunc
def add_numbers(a: int, b: int):
"""
This function adds two numbers.
"""
return a + b
print(get_openai_funcs())
| [] |
2024-01-10 | Kusnezow94/VIDEOAI-GPT-4 | services~gpt_service.py | import openai
openai.api_key = 'your-api-key'
def generate_prompt(channel_name, channel_description):
prompt = f"Generate video ideas for a YouTube channel named {channel_name} which is about {channel_description}."
return prompt
def generate_ideas(prompt):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.5,
max_tokens=100
)
ideas = response.choices[0].text.strip().split('\n')
return ideas
def generate_script(title):
prompt = f"Generate a script for a YouTube short video with the title {title}."
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.5,
max_tokens=500
)
script = response.choices[0].text.strip()
return script
def generate_video_metadata(title):
prompt = f"Generate a description and hashtags for a YouTube short video with the title {title}."
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.5,
max_tokens=100
)
metadata = response.choices[0].text.strip().split('\n')
description = metadata[0]
hashtags = metadata[1:]
return description, hashtags
| [
"Generate a description and hashtags for a YouTube short video with the title PLACEHOLDER.",
"Generate video ideas for a YouTube channel named PLACEHOLDER which is about PLACEHOLDER.",
"Generate a script for a YouTube short video with the title PLACEHOLDER."
] |
2024-01-10 | elphen-wang/FreeAI | old~gpt_academic_old~get_freeai_api.py | # -*- coding: utf-8 -*-
from os import path
import requests
from OpenAIAuth import Auth0
def run():
expires_in = 0
unique_name = 'my share token'
current_dir = path.dirname(path.abspath(__file__))
credentials_file = path.join(current_dir, 'credentials.txt')
share_tokens_file = path.join(current_dir, 'share_tokens.txt')
with open(credentials_file, 'r', encoding='utf-8') as f:
credentials = f.read().split('\n')
credentials = [credential.split(',', 1) for credential in credentials]
count = 0
token_keys = []
for credential in credentials:
progress = '{}/{}'.format(credentials.index(credential) + 1, len(credentials))
if not credential or len(credential) != 2:
continue
count += 1
username, password = credential[0].strip(), credential[1].strip()
token_info = {
'token': 'None',
'share_token': 'None',
}
token_keys.append(token_info)
try:
auth = Auth0(email=username, password=password)
token_info['token'] = auth.get_access_token()
#print('Login success: {}, {}'.format(username, progress))
except Exception as e:
err_str = str(e).replace('\n', '').replace('\r', '').strip()
#print('Login failed: {}, {}'.format(username, err_str))
token_info['token'] = err_str
continue
data = {
'unique_name': unique_name,
'access_token': token_info['token'],
'expires_in': expires_in,
}
resp = requests.post('https://ai.fakeopen.com/token/register', data=data)
if resp.status_code == 200:
token_info['share_token'] = resp.json()['token_key']
else:
continue
with open(share_tokens_file, 'w', encoding='utf-8') as f:
# If there is more than one account, prefer the pool token; with exactly one, use its individual share token; with none, fall back to the public pool.
if count==0:
f.write('pk-this-is-a-real-free-pool-token-for-everyone\n')
f.write('pk-this-is-a-real-free-api-key-pk-for-everyone\n')
elif count==1:
f.write('{}\n'.format(token_keys[0]['share_token']))
else:
data = {
'share_tokens': '\n'.join([token_info['share_token'] for token_info in token_keys]),
}
resp = requests.post('https://ai.fakeopen.com/pool/update', data=data)
if resp.status_code == 200:
f.write('{}\n'.format(resp.json()['pool_token']))
for token_info in token_keys:
f.write('{}\n'.format(token_info['share_token']))
f.close()
if __name__ == '__main__':
run()
| [] |
2024-01-10 | ahmedbakr12/Sensy | GPT3-ChatBot.py |
import openai as ai
import turtle
from transformers import pipeline, RobertaTokenizerFast, TFRobertaForSequenceClassification
def behavioral_analysis(question) :
tokenizer = RobertaTokenizerFast.from_pretrained("arpanghoshal/EmoRoBERTa")
model = TFRobertaForSequenceClassification.from_pretrained("arpanghoshal/EmoRoBERTa")
emotion = pipeline('sentiment-analysis', model='arpanghoshal/EmoRoBERTa')
emotion_labels = emotion(question)
emotion= emotion_labels[0]['label']
return emotion
def chat(question,chat_log = None) -> str:
if(chat_log == None):
chat_log = start_chat_log
prompt = f"{chat_log}Human: {question}\nAI:"
response = completion.create(prompt = prompt, engine = "davinci", temperature = 0.85,top_p=1, frequency_penalty=0,
presence_penalty=0.7, best_of=2,max_tokens=100,stop = "\nHuman: ")
return response.choices[0].text
def modify_start_message(chat_log,question,answer) -> str:
if chat_log == None:
chat_log = start_chat_log
chat_log += f"Human: {question}\nAI: {answer}\n"
return chat_log
if __name__ == "__main__":
ai.api_key = "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"
completion = ai.Completion()
start_chat_log = ""
train = input("\nDo you want to train the openai chatbot (True/False): ")
if(train == "True"):
print("\n(To stop the training enter stop in the question)\n")
while(True):
question = input("Question: ")
if question == "stop":
break
answer = input("Answer: ")
start_chat_log = modify_start_message(start_chat_log,question,answer)
print("\n")
question = ""
print("\nEnter the questions to openai (to quit type \"stop\")")
while True:
question = input("Question: ")
print("\n emotion now: "+behavioral_analysis(question))
if question == "stop":
break
ch=chat(question,start_chat_log)
print("AI: "+ch)
f= open("chatlist.txt","a")
f.write("\n Human: "+question)
f.write("\n emotion: "+behavioral_analysis(question))
f.write("\n AI: "+ch)
f.close() | [
"PLACEHOLDERHuman: PLACEHOLDER\nAI:"
] |
2024-01-10 | AugustKarlstedt/chroma | chromadb~utils~embedding_functions.py | from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
class SentenceTransformerEmbeddingFunction(EmbeddingFunction):
def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ValueError(
"sentence_transformers is not installed. Please install it with `pip install sentence_transformers`"
)
self._model = SentenceTransformer(model_name)
def __call__(self, texts: Documents) -> Embeddings:
return self._model.encode(list(texts), convert_to_numpy=True).tolist()
class OpenAIEmbeddingFunction(EmbeddingFunction):
def __init__(self, api_key: str, model_name: str = "text-embedding-ada-002"):
try:
import openai
except ImportError:
raise ValueError(
"The openai python package is not installed. Please install it with `pip install openai`"
)
openai.api_key = api_key
self._client = openai.Embedding
self._model_name = model_name
def __call__(self, texts: Documents) -> Embeddings:
# replace newlines, which can negatively affect performance.
texts = [t.replace("\n", " ") for t in texts]
# Call the OpenAI Embedding API once for the whole batch of documents
return [
result["embedding"]
for result in self._client.create(
input=texts,
engine=self._model_name,
)["data"]
]
class CohereEmbeddingFunction(EmbeddingFunction):
def __init__(self, api_key: str, model_name: str = "large"):
try:
import cohere
except ImportError:
raise ValueError(
"The cohere python package is not installed. Please install it with `pip install cohere`"
)
self._client = cohere.Client(api_key)
self._model_name = model_name
def __call__(self, texts: Documents) -> Embeddings:
# Call the Cohere Embedding API once for the whole batch of documents.
return [
embeddings for embeddings in self._client.embed(texts=texts, model=self._model_name)
]
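# Illustrative usage sketch (not part of the original module; the model download
# and any API keys are assumed to be available):
#
#   ef = SentenceTransformerEmbeddingFunction()
#   embeddings = ef(["first document", "second document"])
#   # `embeddings` is a list with one list of floats per input document,
#   # suitable for use as a Chroma collection's embedding_function output.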
| [] |
2024-01-10 | patrick-nanys/podcast-summarization-app | workflow.py | import subprocess
import csv
import json
import os
import io
import openai
import boto3
import pandas as pd
import numpy as np
from pytube import YouTube
from elevenlabslib import ElevenLabsUser
from experiments.asr.asr import whisper_asr
from experiments.tts.tts import elevenlabs_tts
from experiments.summarization.openai.summarize import summarize_text, num_tokens_from_text
def download_video(video_url, destination):
yt = YouTube(video_url, use_oauth=True, allow_oauth_cache=True)
print(f"Downloading video: {yt.title} from author {yt.author}")
video = yt.streams.filter(only_audio=True).first()
out_file = video.download(output_path=destination)
base, ext = os.path.splitext(out_file)
new_target_file_name = base + ".wav"
subprocess.run(
[
"ffmpeg",
"-i",
out_file,
"-ar",
"16000",
"-ac",
"1",
"-b:a",
"96K",
"-acodec",
"pcm_s16le",
new_target_file_name,
]
)
return new_target_file_name
def load_files_and_timestamps(target_dir, target_file_name, output_directory):
"""
Read back the transcription text files (this could later be done asynchronously
or with multithreading) and save the combined transcription DataFrame to `output_directory`.
"""
target_file_name_base, ext = os.path.splitext(target_file_name)
dfs = []
# This trick is needed to sort the filenames by index instead of alphabetically
correct_files = []
for file_name in os.listdir(target_dir):
if target_file_name_base in file_name and file_name.endswith("csv"):
correct_files.append(file_name)
base_file_name = correct_files[0][: correct_files[0].rfind("_")]
for file_idx in range(len(correct_files)):
file_name = base_file_name + f"_{file_idx}" + ".csv"
print(file_name)
dfs.append(
pd.read_csv(
os.path.join(target_dir, file_name),
delimiter=";",
names=["start", "end", "text"],
encoding="ISO-8859-1",
quoting=csv.QUOTE_NONE,
)
)
df = pd.concat(dfs).reset_index(drop=True)
df["text"] = df["text"].astype(str)
final_lines = " ".join(df["text"])
df["text_token_counts"] = df["text"].map(num_tokens_from_text)
df["token_sum"] = np.cumsum(df["text_token_counts"])
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
df.to_csv(os.path.join(output_directory, "transcription.csv"), index=False, sep=";")
token_sums = [0] + list(df["token_sum"])
timestamp_values = list(df["end"])
timestamp_values.insert(0, df["start"].iloc[0])
timestamps_dict = dict(zip(token_sums, timestamp_values))
return final_lines, timestamps_dict
def list_files_in_bucket(bucket_name, s3):
my_bucket = s3.Bucket(bucket_name)
for my_bucket_object in my_bucket.objects.all():
print(my_bucket_object.key)
def upload_directory_to_bucket(path, bucket_name, s3):
for file_name in os.listdir(path):
s3.meta.client.upload_file(os.path.join(path, file_name), bucket_name, f"podcasts/{path}/{file_name}")
def workflow(video_url, video_download_folder, output_directory, ELEVENLABS_API_KEY, AWS_ACCESS_KEY, AWS_SECRET_KEY):
# Download video
target_file_name = download_video(video_url, video_download_folder)
target_file_name = os.path.basename(target_file_name)
target_file_name_base, ext = os.path.splitext(target_file_name)
podcast_sound_path = os.path.join(video_download_folder, target_file_name)
# Speech to text
whisper_asr(podcast_sound_path)
# Loading transcriptions, saving it
text, timestamps_dict = load_files_and_timestamps(video_download_folder, target_file_name, output_directory)
# Text summary, saving the generated files to json
input_text, chunks, chunk_start_timestamps = summarize_text(text, timestamps_dict)
with open(os.path.join(output_directory, "summarized_text.json"), "w") as f:
json.dump(input_text, f, indent=2)
with open(os.path.join(output_directory, "chunks.json"), "w") as f:
json.dump(chunks, f, indent=2)
with open(os.path.join(output_directory, "chunk_start_timestamps.json"), "w") as f:
json.dump(chunk_start_timestamps, f, indent=2)
# Check if ElevenLabs can be used
summary_length = sum([len(chunk) for chunk in chunks])
print("Summary length:", summary_length)
user = ElevenLabsUser(ELEVENLABS_API_KEY)
remaining_characters = user.get_character_limit() - user.get_current_character_count()
print("Remaining ElevenLabs characters:", remaining_characters)
if summary_length > remaining_characters:
raise ValueError(
"Not enough characters for TTS. Provide an ElevenLabs API token with enough remaining characters."
)
# TTS with elevenlabs
elevenlabs_tts(
chunks, ELEVENLABS_API_KEY, "Adam", os.path.join(output_directory, "read_summary.mp3")
) # Male voice
# create config
config = {
'name': target_file_name_base
}
with open(os.path.join(output_directory, 'config.json'), 'w') as f:
json.dump(config, f)
# upload to s3 bucket
session = boto3.Session(
aws_access_key_id=AWS_ACCESS_KEY,
aws_secret_access_key=AWS_SECRET_KEY,
)
s3 = session.resource("s3")
bucket_name = "breviocast-prod"
upload_directory_to_bucket(output_directory, bucket_name, s3)
list_files_in_bucket(bucket_name, s3)
if __name__ == "__main__":
# Set parameters
video_url = ""
video_download_folder = "downloaded_videos_test"
output_directory = ""
ELEVENLABS_API_KEY = ""
AWS_ACCESS_KEY = ""
AWS_SECRET_KEY = ""
openai.api_key = ""
# Run the script
workflow(video_url, video_download_folder, output_directory, ELEVENLABS_API_KEY, AWS_ACCESS_KEY, AWS_SECRET_KEY)
| [] |
2024-01-10 | trampham1104/penguinpal | penguinpal_voicebot.py | import openai
import pyttsx3
import speech_recognition as sr
import time
#Set OpenAI API key
openai.api_key = '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'
# Initialize the tts enginie
engine = pyttsx3.init()
def transcribe_audio_to_text(filename):
recognizer = sr.Recognizer()
with sr.AudioFile(filename) as source:
audio = recognizer.record(source)
try:
return recognizer.recognize_google(audio)
except:
print('Skipping unknown error')
def generate_response(prompt):
response = openai.Completion.create(
engine = "text-davinci-003",
prompt = prompt,
max_tokens = 400,
n = 1,
stop = None,
temperature = 0.5,
)
return response["choices"][0]["text"]
def speak_text(text):
engine.say(text)
engine.runAndWait()
def main():
while True:
#Wait for user to say "Hey Penguin"
print("Say 'Hey Penguin' to start recording your request...")
with sr.Microphone() as source:
recognizer = sr.Recognizer()
audio = recognizer.listen(source)
try:
transcription = recognizer.recognize_google(audio)
if "penguin" in transcription.lower():  # accept "Hey Penguin" and similar phrasings
# Record audio
filename = "input.wav"
print("Say your request...")
with sr.Microphone() as source:
recognizer = sr.Recognizer()
source.pause_threshold = 1
audio = recognizer.listen(source, phrase_time_limit=None, timeout=None)
with open(filename, "wb") as f:
f.write(audio.get_wav_data())
# Transcribe audio to text
text = transcribe_audio_to_text(filename)
if text:
print(f"You said: {text}")
# Generate response using GPT-3
response = generate_response(text)
print(f"Penguin Pal says: {response}")
# Read response using text-to-speech
speak_text(response)
except Exception as e:
print("An error occurred: {}".format(e))
if __name__ == "__main__":
main()
| [] |
2024-01-10 | anshitag/memit_csk | memit_csk_dataset_script~data_creation_prompts.py | import argparse
import re
from typing import List, Dict, Any
from gptinference import utils
from gptinference.base_prompt import Prompt
from gptinference.openai_wrapper import OpenAIWrapper
from tqdm import tqdm
"""
A script that uses zero shot prompting to:
(i) Naturalize 20Q and PEP3K dataset.
For example: input c = whale eat plankton
Rephrase to make grammatical:
output1 = whales eat plankton
Negate the sentence:
output2 = whales do not eat plankton
(ii) Prepare neighborhood evaluation dataset to ensure relevant neighborhood is affected
and irrelevant facts are unaffected as a result of model editing.
Example of neighborhood
---------
"affected_paraphrase": [
"People can be bitten by wasps.",
"Wasps can puncture human skin with their sting.",
"Wasps can deliver a painful bite to people.",
"Wasps can cause a person to experience a bite.",
"Wasps can inflict a sting on humans."
]
"affected_reasoning": [
"Wasps are predators that hunt for food",
"Humans can provide food source for wasps."
]
"affected_neighborhood":
"subject_replaced": [
"Bees sting humans",
"Yellowjackets sting humans",
"Hornets sting humans",
"Fire ants sting humans",
"Mosquitoes sting humans."
],
"verb_replaced": [
"Wasps attacked humans",
"Wasps stung humans",
"Wasps harassed humans",
"Wasps provoked humans",
"Wasps irritated humans."
],
"object_replaced": [
"Wasps bite people",
"Wasps bite victims.",
"Wasps bite victims"
]
"Unaffected_neighborhood":
"subject_replaced": [
"Dogs bite humans",
"Monkeys bite humans",
"Fish bite humans",
"Birds bite humans",
"Cats bite humans"
],
"object_replaced": [
"Wasps bite fish",
"Wasps bite animals",
"Wasps bite insects",
"Wasps bite birds",
"Wasps bite reptiles"
]
...
Classes
----------
Naturalizing and negating facts (TFGrammatiker* and TFGrammatikerNeg*)
Affected (Neighborhood, Paraphrase, Reasoning)
Unaffected (Neighborhood)
"""
def parse_enumerated(s: str) -> str:
# Parse an item from an enumeration generated by GPT.
# 1. Apples are red ==> Apples are red
# 1: Apples are red ==> Apples are red
# First: Apples are red ==> Apples are red
return re.sub(r'\d\.', '', s.strip()).split(":")[-1].split(". ")[-1].strip()
# Given: 1.abc\n2.def ==> [abc, def]
def parse_contracted_list_of_spo(q1s):
q1s_new = []
if len(q1s) < 3:
for q1 in q1s:
for q in q1.split("\n" if "\n" in q1 else (";" if ";" in q1 else ",")):
q1s_new.append(q)
else:
for q1 in q1s:
q1s_new.append(q1)
return [x.replace("Q ", "") for x in q1s_new if len(x) > 5]
def compress_obj_text(x):
# Drop articles ("the", "a", "an") so the object of an s-v-o string shrinks toward a single word.
return x.strip().replace(" the ", " ").replace(" a ", " ").replace(" an ", " ")
class TFGrammatikerPos(Prompt):
def __init__(self, engine: str, openai_wrapper: OpenAIWrapper) -> None:
super().__init__()
self.openai_wrapper = openai_wrapper
self.engine = engine
def make_query(self, s: str) -> str:
return f"Given the text: {s}.\nFix the grammar and write the grammatical sentences.\n"
def __call__(self, s: str, subj: str = None, obj: str = None, verb: str = None) -> Dict[str, Any]:
generation_query = self.make_query(s=s)
generated_sent = self.openai_wrapper.call(
prompt=generation_query, engine=self.engine, max_tokens=20, stop_token="###", temperature=0.0)
return {"TFGrammatikerPos": generated_sent,
"grammatical": generated_sent.strip()
}
class TFGrammatikerPosBatched(Prompt):
def __init__(self, engine: str, openai_wrapper: OpenAIWrapper) -> None:
super().__init__()
self.openai_wrapper = openai_wrapper
self.engine = engine
def make_query(self, triples_csv: str) -> str:
question_prefix_template = f"""You are given some input sentences. Fix the grammar and write the grammatical sentences.
inputs: {triples_csv}
outputs:
"""
query = f"""{self.question_prefix}{question_prefix_template}"""
return query
def __call__(self, triples_csv: str) -> Dict[str, Any]:
generation_query = self.make_query(triples_csv=triples_csv)
generated_sent = self.openai_wrapper.call(
prompt=generation_query, engine=self.engine, max_tokens=300, stop_token="###", temperature=0.0)
return {"TFGrammatikerPosBatched": generated_sent,
"grammaticalBatched": [compress_obj_text(x) for x in generated_sent.split(",")]
}
class TFGrammatikerNegBatched(Prompt):
def __init__(self, engine: str, openai_wrapper: OpenAIWrapper) -> None:
super().__init__()
self.openai_wrapper = openai_wrapper
self.engine = engine
def make_query(self, triples_csv: str) -> str:
question_prefix_template = f"""You are given some input sentences. Fix the grammar and write the negated sentence by replacing the verb.
inputs: {triples_csv}
outputs:
"""
query = f"""{self.question_prefix}{question_prefix_template}"""
return query
def __call__(self, triples_csv: str) -> Dict[str, Any]:
generation_query = self.make_query(triples_csv=triples_csv)
generated_sent = self.openai_wrapper.call(
prompt=generation_query, engine=self.engine, max_tokens=300, stop_token="###", temperature=0.0)
return {
"TFGrammatikerNegBatched": generated_sent,
"negatedBatched": [compress_obj_text(x) for x in generated_sent.split(",")]
}
class TFGrammatikerNeg(Prompt):
def __init__(self, engine: str, openai_wrapper: OpenAIWrapper) -> None:
super().__init__()
self.openai_wrapper = openai_wrapper
self.engine = engine
def make_query(self, s: str) -> str:
return f"Given the text: {s}.\nWrite the negated sentence by replacing the verb.\n"
def __call__(self, s: str, subj: str= None, obj: str= None, verb: str= None) -> Dict[str, Any]:
generation_query = self.make_query(s=s)
generated_sent = self.openai_wrapper.call(
prompt=generation_query, engine=self.engine, max_tokens=20, stop_token="###", temperature=0.0)
return {
"UnAffectedNeighborhoodMaker_gptinput": generation_query,
"UnAffectedNeighborhoodMaker_gptoutput": generated_sent,
"negated": generated_sent.strip()
}
class AffectedParaphraseMaker(Prompt):
def __init__(self, engine: str, openai_wrapper: OpenAIWrapper) -> None:
super().__init__()
self.openai_wrapper = openai_wrapper
self.engine = engine
def make_query(self, s: str) -> str:
return f"Provide 5 paraphrases of: {s}\n"
def __call__(self, s: str, subj: str= None, obj: str= None, verb: str= None) -> Dict[str, Any]:
generation_query = self.make_query(s=s)
generated_sent = self.openai_wrapper.call(
prompt=generation_query, engine=self.engine, max_tokens=100, stop_token="###", temperature=0.0)
return {
"AffectedParaphraseMaker_gptinput": generation_query,
"AffectedParaphraseMaker_gptoutput": generated_sent,
"affected_paraphrase": dedup([parse_enumerated(x) for x in generated_sent.split("\n") if len(x) > 10])
}
class AffectedNeighborhoodMaker(Prompt):
def __init__(self, engine: str, openai_wrapper: OpenAIWrapper) -> None:
super().__init__()
self.openai_wrapper = openai_wrapper
self.engine = engine
def make_query(self, s: str, subj: str, obj: str, verb: str) -> str:
# "[X] eat plankton": subject [X] can be a subtype of [.] e.g., Minke whales eat plankton
# "Whales [X] plankton": verb [X] can be an event that follows or precedes [.] e.g., Whales hunt plankton, Whales look for plankton
# "Whales eat [X]": object [X] can be subtypes or similar neighbors of [.], e.g., Whales eat Phytoplankton, Zooplankton
return f"Given the text: {s}\nsubject token: {subj}\nobject token: {obj}\n" \
f"Q1. In the text, replace just the subject token with a different word. The replaced text should be a valid sentence. The replaced token can be a hyponym or similar word of the original subject token. Write up to 5 such variants.\n" \
f"Q2. In the text, replace just the verb token with a different word. The replaced text should be a valid sentence. The replaced token can be a verb that follows or precedes the original verb token. Write up to 5 such variants.\n" \
f"Q3. In the text, replace just the object token with a different word. The replaced text should be a valid sentence. The replaced token can be a hyponym or similar word of the original object token. Write up to 5 such variants.\n"\
def __call__(self, s: str, subj: str, obj: str, verb: str) -> Dict[str, Any]:
generation_query = self.make_query(s=s, subj=subj, obj=obj, verb=verb)
generated_sent = self.openai_wrapper.call(
prompt=generation_query, engine=self.engine, max_tokens=300, stop_token="###", temperature=0.0)
q1, q2 = generated_sent.strip().split("\nQ2.")
q2, q3 = q2.split("\nQ3.")
split_enumeration_on = "\n" if "\n" in q2.strip() else "," # GPT can return a list as a csv or as enumeration
q1s = dedup([parse_enumerated(x) for x in q1.strip().replace("Q1. ", "").split(split_enumeration_on) if len(x) > 10])
q2s = dedup([parse_enumerated(x) for x in q2.strip().replace("Q2. ", "").split(split_enumeration_on) if len(x) > 10])
q3s = dedup([parse_enumerated(x) for x in q3.strip().replace("Q3. ", "").split(split_enumeration_on) if len(x) > 10])
q1s = parse_contracted_list_of_spo(q1s)
q2s = parse_contracted_list_of_spo(q2s)
q3s = parse_contracted_list_of_spo(q3s)
return {
"AffectedNeighborhoodMaker_gptinput": generation_query,
"AffectedNeighborhoodMaker_gptoutput": generated_sent,
"subject_replaced": q1s,
"verb_replaced": q2s,
"object_replaced": q3s
}
def dedup(lst):
return list(set(lst))
class UnAffectedNeighborhoodMaker(Prompt):
def __init__(self, engine: str, openai_wrapper: OpenAIWrapper) -> None:
super().__init__()
self.openai_wrapper = openai_wrapper
self.engine = engine
def make_query(self, s: str, subj: str, obj: str) -> str:
# "[X] eat plankton": subject [X] is different from [.] e.g., Snails eat plankton
# "Whales eat [X]": object [X] is different from [.], e.g., Whales eat seaweed.
# return f"Given the text: {s}\nsubject token: {subj}\nobject token: {obj}\nQ1. Replace the subject token with an unrelated word to make a new text.\nQ2. Replace the object token with an unrelated word to make a new text.\n"
return f"""Given:
text: {s}
subject token: {subj}
object token: {obj}
Q1. Replace the subject token with a completely unrelated word and make a new text. Make 5 such replacements.
Q2. Replace the object token with a completely unrelated word and make a new text. Make 5 such replacements.
"""
def __call__(self, s: str, subj: str, obj: str, verb: str= None) -> Dict[str, Any]:
generation_query = self.make_query(s=s, subj=subj, obj=obj)
generated_sent = self.openai_wrapper.call(
prompt=generation_query, engine=self.engine, max_tokens=300, stop_token="###", temperature=0.0)
split_on_1 = "Replacements for the object token:"
split_on_2 = "Replacing the object token:"
split_on_3 = "\n\n"
split_on = "\nQ2." if "\nQ2." in generated_sent else (split_on_1 if split_on_1 in generated_sent else (split_on_2 if split_on_2 in generated_sent else split_on_3))
q1, q2 = generated_sent.split(split_on)
q1s = dedup([parse_enumerated(x) for x in q1.strip().replace("Q1. ", "").replace("Replacements for the subject token", "").split("\n") if len(x) > 15 and " " in x])
q2s = dedup([parse_enumerated(x) for x in q2.strip().replace("Q2. ", "").replace("Replacements for the object token", "").split("\n") if len(x) > 15 and " " in x])
q1s = parse_contracted_list_of_spo(q1s)
q2s = parse_contracted_list_of_spo(q2s)
return {
"UnAffectedNeighborhoodMaker_gptinput": generation_query,
"UnAffectedNeighborhoodMaker_gptoutput": generated_sent,
"subject_replaced": q1s,
"object_replaced": q2s
}
class AffectedReasoningStepsMaker(Prompt):
def __init__(self, engine: str, openai_wrapper: OpenAIWrapper) -> None:
super().__init__()
self.openai_wrapper = openai_wrapper
self.engine = engine
def make_query(self, s: str) -> str:
return f"{s}. Explain this with a 2-step reasoning chain of very short, simple, connected sentences:\n"
def __call__(self, s: str, subj: str= None, obj: str= None, verb: str= None) -> Dict[str, Any]:
generation_query = self.make_query(s=s)
generated_sent = self.openai_wrapper.call(
prompt=generation_query, engine=self.engine, max_tokens=100, stop_token="###", temperature=0.0)
split_enumeration_on = "\n" if "\n" in generated_sent.strip() else ". " # GPT can return a list as a csv or as enumeration
return {
"AffectedReasoningStepsMaker_gptinput": generation_query,
"AffectedReasoningStepsMaker_gptoutput": generated_sent,
"affected_reasoning": [compress_obj_text(x).split(". ")[-1] for x in generated_sent.split(split_enumeration_on) if x.strip()]
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
supported_inference_types = ["TFGrammatikerPos", "TFGrammatikerNeg",
"AffectedReasoningStepsMaker", "UnAffectedNeighborhoodMaker",
"AffectedNeighborhoodMaker", "AffectedParaphraseMaker",
"TFGrammatikerPosBatched", "TFGrammatikerNegBatched"]
parser.add_argument("--inference_types", required=True, type=str, help=f"Use one or more (csv) of {','.join(supported_inference_types)}.")
parser.add_argument("--in_path", required=True, type=str, help="dataset csv json file.")
parser.add_argument("--out_path", required=True, type=str, help="output stored in this json file.")
parser.add_argument("--num_samples", default=-1, required=False, type=int, help="Number of samples from in_path to use (default=-1 i.e., all)")
parser.add_argument("--cache_path", default="memit_csk_dataset/data/cache/cache.jsonl", required=False, type=str, help="GPT3 responses will be cached.")
parser.add_argument("--gpt3_engine", default="text-davinci-003", required=False, type=str, help="GPT3 model to use.")
parser.add_argument("--csv_batch_size", default=1, required=False, type=int, help="For multiple inputs to work in one prompt. num csv entries (e.g., 30) to process at a time.")
args = parser.parse_args()
openai_wrapper = OpenAIWrapper(cache_path=args.cache_path)
inferences = []
if "TFGrammatikerPos" in args.inference_types:
inferences.append(TFGrammatikerPos(engine=args.gpt3_engine, openai_wrapper=openai_wrapper))
if "TFGrammatikerPosBatched" in args.inference_types:
inferences.append(TFGrammatikerPosBatched(engine=args.gpt3_engine, openai_wrapper=openai_wrapper))
if "TFGrammatikerNeg" in args.inference_types:
inferences.append(TFGrammatikerNeg(engine=args.gpt3_engine, openai_wrapper=openai_wrapper))
if "TFGrammatikerNegBatched" in args.inference_types:
inferences.append(TFGrammatikerNegBatched(engine=args.gpt3_engine, openai_wrapper=openai_wrapper))
if "AffectedReasoningStepsMaker" in args.inference_types:
inferences.append(AffectedReasoningStepsMaker(engine=args.gpt3_engine, openai_wrapper=openai_wrapper))
if "UnAffectedNeighborhoodMaker" in args.inference_types:
inferences.append(UnAffectedNeighborhoodMaker(engine=args.gpt3_engine, openai_wrapper=openai_wrapper))
if "AffectedNeighborhoodMaker" in args.inference_types:
inferences.append(AffectedNeighborhoodMaker(engine=args.gpt3_engine, openai_wrapper=openai_wrapper))
if "AffectedParaphraseMaker" in args.inference_types:
inferences.append(AffectedParaphraseMaker(engine=args.gpt3_engine, openai_wrapper=openai_wrapper))
print(f"\nReading the input from {args.in_path}")
outputs = []
arr = utils.read_jsonl_or_json(args.in_path)
inputs = utils.take(num=args.num_samples, arr=arr)
print(f"\nDoing inference...")
if args.csv_batch_size > 1:
N = args.csv_batch_size
num_batches = (len(inputs) // N) + (0 if len(inputs)% N ==0 else 1)
batches = [inputs[i * N: (i * N) + N] for i in range(num_batches)]
for input_entry in tqdm(batches):
for inference in inferences:
assert "Batched" in str(inference.__class__), f"Requested batched output but inference type is {inference.__class__}"
outputs.append(inference(", ".join(input_entry)))
else:
for input_entry in tqdm(inputs):
local_outputs = []
for inference in inferences:
entry = input_entry["prompt"]
if "prompt_polarity_grounded" in input_entry and "AffectedReasoningStepsMaker" in str(inference.__class__):
entry = input_entry.get("prompt_polarity_grounded")
try:
local_outputs.append(inference(s=entry, subj=input_entry["subject"], obj=input_entry["object"], verb=input_entry["verb"]))
except Exception as exc:
print(f"Exception ({exc}) in {entry}")
local_outputs.append({})
# The following is a special case when the ground truth `label=False`, then prompt must be negated
# in order to get meaningful reasoning steps. Run this as a standalone task as listed in memit/memit-data.sh
if args.inference_types == "TFGrammatikerNeg":
# Sample "input_entry" data point.
#
# {
# "id": 1004,
# "prompt": "People do not eat reefer",
# "gpt2-xl_predicted_wrong": true,
# "gpt2-l_predicted_wrong": true,
# "label": " True",
# "subject": "People",
# "verb": "eat",
# "object": "reefer",
# "neg": "not"
# }
#
input_entry["prompt_polarity_grounded"] = local_outputs[0]["negated"] if input_entry["label"].strip().lower() == "false" else input_entry["prompt"]
outputs.append(input_entry)
#
# This is the usual case.
else:
outputs.append(
{
"inputs": input_entry,
"outputs": local_outputs
}
)
print(f"\nWriting the output to {args.out_path}\n\n\n")
utils.write_jsonl(outpath=args.out_path, data_points=[f for f in outputs if f is not None])
| [
"You are given some input sentences. Fix the grammar and write the negated sentence by replacing the verb.\n \ninputs: PLACEHOLDER\n \noutputs:\n",
"You are given some input sentences. Fix the grammar and write the grammatical sentences.\n\ninputs: PLACEHOLDER\n\noutputs:\n"
] |
2024-01-10 | DFKI-NLP/DISTRE | tre~dataset_readers~semeval_2010_task_8_reader.py | from typing import Dict, List, Tuple
import json
import logging
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, LabelField, SpanField
from allennlp.data.tokenizers.token import Token
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers.word_splitter import OpenAISplitter
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class SemEval2010Task8Reader(DatasetReader):
def __init__(self,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False) -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, 'r') as semeval_file:
logger.info("Reading SemEval 2010 Task 8 instances from jsonl dataset at: %s", file_path)
for line in semeval_file:
example = json.loads(line)
tokens = example["tokens"]
label = example["label"]
entity_indices = example["entities"]
start_e1, end_e1 = entity_indices[0]
start_e2, end_e2 = entity_indices[1]
entity_1 = (start_e1, end_e1 - 1)
entity_2 = (start_e2, end_e2 - 1)
yield self.text_to_instance(tokens, entity_1, entity_2, label)
@overrides
def text_to_instance(self, # type: ignore
tokens: List[str],
entity_1: Tuple[int],
entity_2: Tuple[int],
label: str = None) -> Instance:
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
tokens = [OpenAISplitter._standardize(token) for token in tokens]
tokens = ['__start__'] + tokens[entity_1[0]:entity_1[1]+1] + ['__del1__'] + tokens[entity_2[0]:entity_2[1]+1] + ['__del2__'] + tokens + ['__clf__']
sentence = TextField([Token(text=t) for t in tokens], self._token_indexers)
fields['sentence'] = sentence
#fields['entity1'] = SpanField(*entity_1, sequence_field=sentence)
#fields['entity2'] = SpanField(*entity_2, sequence_field=sentence)
if label:
fields['label'] = LabelField(label)
return Instance(fields)
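    # Illustrative sketch (not part of the original reader) of the marker layout built in
    # text_to_instance above. For a toy example tokens=["The", "burst", "caused", "the", "sound"],
    # entity_1=(1, 1), entity_2=(4, 4), the resulting token sequence is:
    #   ['__start__', 'burst', '__del1__', 'sound', '__del2__',
    #    'The', 'burst', 'caused', 'the', 'sound', '__clf__']
    # i.e. both entity spans are copied in front of the full sentence, separated by delimiter tokens.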
| [] |
2024-01-10 | DFKI-NLP/DISTRE | tre~dataset_readers~open_nre_nyt_reader.py | from typing import Dict, List, Tuple
import json
import logging
from itertools import groupby
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, LabelField, MetadataField
from allennlp.data.tokenizers.token import Token
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers.word_splitter import OpenAISplitter
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("open_nre_nyt_reader")
class OpenNreNYTReader(DatasetReader):
def __init__(self,
masking_mode: str=None,
token_indexers: Dict[str, TokenIndexer]=None,
lazy: bool=False) -> None:
super().__init__(lazy)
if masking_mode and masking_mode.lower() not in ['ner_least_specific', 'ner_most_specific']:
raise ValueError(f"Masking mode '{masking_mode}' not supported.")
self._masking_mode = masking_mode
self._token_splitter = OpenAISplitter()
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, 'rb') as f:
nyt_dataset = json.load(f)
for mention in nyt_dataset:
sentence = mention['sentence']
head = mention['head']['word']
tail = mention['tail']['word']
relation = mention['relation']
head_type = None
tail_type = None
if self._masking_mode == 'ner_least_specific':
head_types = mention['head']['corrected_type']
tail_types = mention['tail']['corrected_type']
if head_types:
head_type = list(sorted(head_types, key=lambda t: t.count('/')))[0]
else:
head_type = 'n/a'
if tail_types:
tail_type = list(sorted(tail_types, key=lambda t: t.count('/')))[0]
else:
                        tail_type = 'n/a'
head_type = '__' + head_type + '__'
tail_type = '__' + tail_type + '__'
yield self.text_to_instance(sentence=sentence, head=head, tail=tail, label=relation,
head_type=head_type, tail_type=tail_type)
@overrides
def text_to_instance(self, # type: ignore
sentence: str,
head: str,
tail: str,
head_type: str=None,
tail_type: str=None,
label: str=None) -> Instance:
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
instance_id = f'{head}#{tail}'
if label:
instance_id = f'{instance_id}#{label}'
fields['metadata'] = MetadataField({'instance_id': instance_id.lower()})
tokens = self._token_splitter.split_words(sentence)
head = self._token_splitter.split_words(head)
tail = self._token_splitter.split_words(tail)
# TODO: this should not be done here
if self._masking_mode == 'ner_least_specific':
logger.info(f"Using masking mode 'ner_least_specific'.")
            tokens = ([Token('__start__')]
                      + head + [Token('__del1__')] + [Token(head_type)] + [Token('__ent1__')]
                      + tail + [Token('__del2__')] + [Token(tail_type)] + [Token('__ent2__')]
                      + tokens + [Token('__clf__')])
else:
tokens = [Token('__start__')] + head + [Token('__del1__')] + tail + [Token('__del2__')] + tokens + [Token('__clf__')]
fields['sentence'] = TextField(tokens, self._token_indexers)
if label:
fields['label'] = LabelField(label)
return Instance(fields)
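    # Illustrative sketch (not from the original file) of the sequences built above, for a toy
    # mention with sentence "Obama visited Paris", head="Obama", tail="Paris":
    #   default:            __start__ Obama __del1__ Paris __del2__ Obama visited Paris __clf__
    #   ner_least_specific: __start__ Obama __del1__ __/person__ __ent1__ Paris __del2__ __/location__ __ent2__ Obama visited Paris __clf__
    # where '__/person__' and '__/location__' stand in for whatever least-specific NER types the
    # dataset provides for head and tail (the actual type strings come from the NYT annotations).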
| [] |
2024-01-10 | leondgarse/keras_cv_attention_models | keras_cv_attention_models~clip~torch_data.py | import os
import torch
import numpy as np
from PIL import Image
from torch.utils.data import Dataset, DataLoader
def read_from_tsv(data_path):
import csv
delimiter = "\t" if data_path.endswith(".tsv") else ","
train_images, train_captions, test_images, test_captions, base_path, is_train = [], [], [], [], "", True
with open(data_path) as ff:
for ii in csv.reader(ff, delimiter=delimiter):
if ii[0] == "base_path": # special keys for info
base_path = os.path.expanduser(ii[1])
elif ii[0] == "TEST": # Use this as indicator for start of test set
is_train = False
elif is_train:
train_images.append(ii[0])
train_captions.append(ii[1])
else:
test_images.append(ii[0])
test_captions.append(ii[1])
if len(base_path) > 0:
train_images = [os.path.join(base_path, ii) for ii in train_images]
test_images = [os.path.join(base_path, ii) for ii in test_images]
return train_images, train_captions, test_images, test_captions
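# Illustrative layout of the csv/tsv file this parser expects (paths and captions are made up):
#   base_path<TAB>/data/captions
#   train/0001.jpg<TAB>a dog running on grass
#   train/0002.jpg<TAB>two people riding bicycles
#   TEST
#   val/0001.jpg<TAB>a cat sleeping on a sofa
# Rows before the "TEST" marker go to the train split, rows after it to the test split, and every
# relative image path is joined with `base_path` when that special row is present.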
class CaptionDataset(Dataset):
def __init__(self, images, captions, tokenizer, is_train=True, image_size=224):
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, Resize, InterpolationMode, ToTensor
self.images, self.captions, self.tokenizer = images, captions, tokenizer
self.context_length = self.tokenizer.context_length
# self.mean, self.std = (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711) # value from openai/CLIP
self.mean, self.std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
interpolation = InterpolationMode.BICUBIC
image_size = image_size if isinstance(image_size, (list, tuple)) else (image_size, image_size)
self.transforms = Compose(
[
RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=interpolation) if is_train else Resize(image_size, interpolation=interpolation),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize(mean=self.mean, std=self.std),
]
)
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
images = self.transforms(Image.open(str(self.images[idx])))
texts = torch.from_numpy(self.tokenizer(str(self.captions[idx])))
return images, texts
def collate_wrapper(batch):
images, texts = list(zip(*batch))
return (torch.stack(images), torch.stack(texts)), torch.arange(len(batch))
def init_dataset(data_path, caption_tokenizer, batch_size=64, image_size=224, num_workers=8):
train_images, train_captions, test_images, test_captions = read_from_tsv(data_path)
train_dataset = CaptionDataset(train_images, train_captions, tokenizer=caption_tokenizer, is_train=True, image_size=image_size)
train_dataloader = DataLoader(
train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, collate_fn=collate_wrapper, pin_memory=True, sampler=None, drop_last=True
)
test_dataset = CaptionDataset(test_images, test_captions, tokenizer=caption_tokenizer, is_train=False, image_size=image_size)
test_dataloader = DataLoader(
test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, collate_fn=collate_wrapper, pin_memory=True, sampler=None, drop_last=True
)
return train_dataloader, test_dataloader
| [] |
2024-01-10 | leondgarse/keras_cv_attention_models | keras_cv_attention_models~common_layers.py | import numpy as np
from keras_cv_attention_models import backend
from keras_cv_attention_models.backend import layers, models, functional, initializers, image_data_format
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
TF_BATCH_NORM_EPSILON = 0.001
LAYER_NORM_EPSILON = 1e-5
""" Wrapper for default parameters """
@backend.register_keras_serializable(package="kecamCommon")
def hard_swish(inputs):
"""`out = xx * relu6(xx + 3) / 6`, arxiv: https://arxiv.org/abs/1905.02244"""
return inputs * functional.relu6(inputs + 3) / 6
@backend.register_keras_serializable(package="kecamCommon")
def hard_sigmoid_torch(inputs):
"""https://pytorch.org/docs/stable/generated/torch.nn.Hardsigmoid.html
    torch.nn.Hardsigmoid: 0 if x <= −3 else (1 if x >= 3 else x / 6 + 1/2)
keras.activations.hard_sigmoid: 0 if x <= −2.5 else (1 if x >= 2.5 else x / 5 + 1/2) -> tf.clip_by_value(inputs / 5 + 0.5, 0, 1)
"""
return functional.clip_by_value(inputs / 6 + 0.5, 0, 1)
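# Illustrative check of the boundary behaviour described in the docstring (values verified by hand):
#   hard_sigmoid_torch applied to [-6.0, -3.0, 0.0, 3.0, 6.0] gives [0.0, 0.0, 0.5, 1.0, 1.0],
#   i.e. it clips at +/-3 and is linear (x / 6 + 0.5) in between.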
@backend.register_keras_serializable(package="kecamCommon")
def mish(inputs):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function.
Paper: [Mish: A Self Regularized Non-Monotonic Neural Activation Function](https://arxiv.org/abs/1908.08681)
Copied from https://github.com/tensorflow/addons/blob/master/tensorflow_addons/activations/mish.py
"""
return inputs * functional.tanh(functional.softplus(inputs))
@backend.register_keras_serializable(package="kecamCommon")
def phish(inputs):
"""Phish is defined as f(x) = xTanH(GELU(x)) with no discontinuities in the f(x) derivative.
Paper: https://www.techrxiv.org/articles/preprint/Phish_A_Novel_Hyper-Optimizable_Activation_Function/17283824
"""
return inputs * functional.tanh(functional.gelu(inputs))
def gelu_quick(inputs):
"""https://github.com/huggingface/transformers/blob/main/src/transformers/activations.py#L90-L98
Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
"""
return inputs * functional.sigmoid(1.702 * inputs)
def gelu_linear(inputs):
"""
>>> from keras_cv_attention_models.common_layers import gelu_linear
>>> xx = np.arange(-4, 4, 0.01)
>>> plt.plot(xx, tf.nn.gelu(xx), label='gelu')
>>> plt.plot(xx, tf.nn.gelu(xx, approximate=True), label='gelu, approximate')
>>> plt.plot(xx, gelu_linear(xx), label='gelu_linear')
>>> plt.legend()
>>> plt.grid(True)
"""
inputs_abs = functional.abs(inputs)
inputs_sign = functional.sign(inputs)
erf = inputs_abs * -0.7071
erf = functional.relu(erf + 1.769)
erf = erf**2 * -0.1444 + 0.5
return inputs * (erf * inputs_sign + 0.5)
def activation_by_name(inputs, activation="relu", name=None):
"""Typical Activation layer added hard_swish and prelu."""
if activation is None:
return inputs
layer_name = name and activation and name + activation
activation_lower = activation.lower()
if activation_lower == "hard_swish":
return layers.Activation(activation=hard_swish, name=layer_name)(inputs)
if activation_lower == "leaky_relu":
return layers.LeakyReLU(name=layer_name)(inputs)
elif activation_lower == "mish":
return layers.Activation(activation=mish, name=layer_name)(inputs)
elif activation_lower == "phish":
return layers.Activation(activation=phish, name=layer_name)(inputs)
elif activation_lower == "prelu":
shared_axes = list(range(1, len(inputs.shape)))
shared_axes.pop(-1 if backend.image_data_format() == "channels_last" else 0)
# print(f"{shared_axes = }")
return layers.PReLU(shared_axes=shared_axes, alpha_initializer=initializers.Constant(0.25), name=layer_name)(inputs)
elif activation_lower.startswith("gelu/app"):
# gelu/approximate
return functional.gelu(inputs, approximate=True)
elif activation_lower.startswith("gelu/linear"):
return gelu_linear(inputs)
elif activation_lower.startswith("gelu/quick"):
return gelu_quick(inputs)
elif activation_lower.startswith("leaky_relu/"):
# leaky_relu with alpha parameter
alpha = float(activation_lower.split("/")[-1])
return layers.LeakyReLU(alpha=alpha, name=layer_name)(inputs)
elif activation_lower == ("hard_sigmoid_torch"):
return layers.Activation(activation=hard_sigmoid_torch, name=layer_name)(inputs)
elif activation_lower == ("squaredrelu") or activation_lower == ("squared_relu"):
return functional.pow(functional.relu(inputs), 2) # Squared ReLU: https://arxiv.org/abs/2109.08668
elif activation_lower == ("starrelu") or activation_lower == ("star_relu"):
from keras_cv_attention_models.nfnets.nfnets import ZeroInitGain
# StarReLU: s * relu(x) ** 2 + b
return ZeroInitGain(use_bias=True, weight_init_value=1.0, name=layer_name)(functional.pow(functional.relu(inputs), 2))
else:
return layers.Activation(activation=activation, name=layer_name)(inputs)
@backend.register_keras_serializable(package="kecamCommon")
class EvoNormalization(layers.Layer):
def __init__(self, nonlinearity=True, num_groups=-1, zero_gamma=False, momentum=0.99, epsilon=0.001, data_format="auto", **kwargs):
# [evonorm](https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py)
# EVONORM_B0: nonlinearity=True, num_groups=-1
# EVONORM_S0: nonlinearity=True, num_groups > 0
# EVONORM_B0 / EVONORM_S0 linearity: nonlinearity=False, num_groups=-1
# EVONORM_S0A linearity: nonlinearity=False, num_groups > 0
super().__init__(**kwargs)
self.data_format, self.nonlinearity, self.zero_gamma, self.num_groups = data_format, nonlinearity, zero_gamma, num_groups
self.momentum, self.epsilon = momentum, epsilon
self.is_channels_first = (
True if data_format == "channels_first" or (data_format == "auto" and backend.image_data_format() == "channels_first") else False
)
def build(self, input_shape):
all_axes = list(range(len(input_shape)))
param_shape = [1] * len(input_shape)
if self.is_channels_first:
param_shape[1] = input_shape[1]
self.reduction_axes = all_axes[:1] + all_axes[2:]
else:
param_shape[-1] = input_shape[-1]
self.reduction_axes = all_axes[:-1]
self.gamma = self.add_weight(name="gamma", shape=param_shape, initializer="zeros" if self.zero_gamma else "ones", trainable=True)
self.beta = self.add_weight(name="beta", shape=param_shape, initializer="zeros", trainable=True)
if self.num_groups <= 0: # EVONORM_B0
self.moving_variance = self.add_weight(
name="moving_variance",
shape=param_shape,
initializer="ones",
# synchronization=tf.VariableSynchronization.ON_READ,
trainable=False,
# aggregation=tf.VariableAggregation.MEAN,
)
if self.nonlinearity:
self.vv = self.add_weight(name="vv", shape=param_shape, initializer="ones", trainable=True)
if self.num_groups > 0: # EVONORM_S0
channels_dim = input_shape[1] if self.is_channels_first else input_shape[-1]
num_groups = int(self.num_groups)
while num_groups > 1:
if channels_dim % num_groups == 0:
break
num_groups -= 1
self.__num_groups__ = num_groups
self.groups_dim = channels_dim // self.__num_groups__
if self.is_channels_first:
self.group_shape = [-1, self.__num_groups__, self.groups_dim, *input_shape[2:]]
self.group_reduction_axes = list(range(2, len(self.group_shape))) # [2, 3, 4]
self.group_axes = 2
self.var_shape = [-1, *param_shape[1:]]
else:
self.group_shape = [-1, *input_shape[1:-1], self.__num_groups__, self.groups_dim]
self.group_reduction_axes = list(range(1, len(self.group_shape) - 2)) + [len(self.group_shape) - 1] # [1, 2, 4]
self.group_axes = -1
self.var_shape = [-1, *param_shape[1:]]
def __group_std__(self, inputs):
# _group_std, https://github.com/tensorflow/tpu/blob/main/models/official/resnet/resnet_model.py#L171
grouped = functional.reshape(inputs, self.group_shape)
_, var = functional.moments(grouped, self.group_reduction_axes, keepdims=True)
std = functional.sqrt(var + self.epsilon)
std = functional.repeat(std, self.groups_dim, axis=self.group_axes)
return functional.reshape(std, self.var_shape)
def __batch_std__(self, inputs, training=None):
# _batch_std, https://github.com/tensorflow/tpu/blob/main/models/official/resnet/resnet_model.py#L120
def _call_train_():
_, var = functional.moments(inputs, self.reduction_axes, keepdims=True)
# update_op = tf.assign_sub(moving_variance, (moving_variance - variance) * (1 - decay))
delta = (self.moving_variance - var) * (1 - self.momentum)
self.moving_variance.assign_sub(delta)
return var
def _call_test_():
return self.moving_variance
var = backend.in_train_phase(_call_train_, _call_test_, training=training)
return functional.sqrt(var + self.epsilon)
def __instance_std__(self, inputs):
# _instance_std, https://github.com/tensorflow/tpu/blob/main/models/official/resnet/resnet_model.py#L111
# axes = [1, 2] if data_format == 'channels_last' else [2, 3]
_, var = functional.moments(inputs, self.reduction_axes[1:], keepdims=True)
return functional.sqrt(var + self.epsilon)
def call(self, inputs, training=None, **kwargs):
if self.nonlinearity and self.num_groups > 0: # EVONORM_S0
den = self.__group_std__(inputs)
inputs = inputs * functional.sigmoid(self.vv * inputs) / den
elif self.num_groups > 0: # EVONORM_S0a
# EvoNorm2dS0a https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/layers/evo_norm.py#L239
den = self.__group_std__(inputs)
inputs = inputs / den
elif self.nonlinearity: # EVONORM_B0
left = self.__batch_std__(inputs, training)
right = self.vv * inputs + self.__instance_std__(inputs)
inputs = inputs / functional.maximum(left, right)
return inputs * self.gamma + self.beta
def get_config(self):
config = super().get_config()
config.update(
{
"nonlinearity": self.nonlinearity,
"zero_gamma": self.zero_gamma,
"num_groups": self.num_groups,
"momentum": self.momentum,
"epsilon": self.epsilon,
"data_format": self.data_format,
}
)
return config
def batchnorm_with_activation(
inputs, activation=None, zero_gamma=False, epsilon=1e-5, momentum=0.9, axis="auto", act_first=False, use_evo_norm=False, evo_norm_group_size=-1, name=None
):
"""Performs a batch normalization followed by an activation."""
if use_evo_norm:
nonlinearity = False if activation is None else True
        num_groups = inputs.shape[-1] // evo_norm_group_size  # Currently using group_size as parameter only
return EvoNormalization(nonlinearity, num_groups=num_groups, zero_gamma=zero_gamma, epsilon=epsilon, momentum=momentum, name=name + "evo_norm")(inputs)
bn_axis = (-1 if backend.image_data_format() == "channels_last" else 1) if axis == "auto" else axis
gamma_initializer = initializers.zeros() if zero_gamma else initializers.ones()
if act_first and activation:
inputs = activation_by_name(inputs, activation=activation, name=name)
nn = layers.BatchNormalization(
axis=bn_axis,
momentum=momentum,
epsilon=epsilon,
gamma_initializer=gamma_initializer,
name=name and name + "bn",
)(inputs)
if not act_first and activation:
nn = activation_by_name(nn, activation=activation, name=name)
return nn
def layer_norm(inputs, zero_gamma=False, epsilon=LAYER_NORM_EPSILON, center=True, axis="auto", name=None):
"""Typical LayerNormalization with epsilon=1e-5"""
norm_axis = (-1 if backend.image_data_format() == "channels_last" else 1) if axis == "auto" else axis
gamma_init = initializers.zeros() if zero_gamma else initializers.ones()
return layers.LayerNormalization(axis=norm_axis, epsilon=epsilon, gamma_initializer=gamma_init, center=center, name=name and name + "ln")(inputs)
def group_norm(inputs, groups=32, epsilon=BATCH_NORM_EPSILON, axis="auto", name=None):
"""Typical GroupNormalization with epsilon=1e-5"""
if hasattr(layers, "GroupNormalization"):
GroupNormalization = layers.GroupNormalization # GroupNormalization is added after TF 2.11.0
else:
from tensorflow_addons.layers import GroupNormalization
norm_axis = (-1 if backend.image_data_format() == "channels_last" else 1) if axis == "auto" else axis
return GroupNormalization(groups=groups, axis=norm_axis, epsilon=epsilon, name=name and name + "group_norm")(inputs)
def conv2d_no_bias(inputs, filters, kernel_size=1, strides=1, padding="valid", use_bias=False, groups=1, use_torch_padding=True, name=None, **kwargs):
"""Typical Conv2D with `use_bias` default as `False` and fixed padding,
and torch initializer `uniform(-1/sqrt(k), 1/sqrt(k)), where k = weight.size(1) * prod(*kernel_size)`
"""
kernel_size = kernel_size if isinstance(kernel_size, (list, tuple)) else (kernel_size, kernel_size)
if isinstance(padding, str):
padding = padding.lower()
pad = (kernel_size[0] // 2, kernel_size[1] // 2) if use_torch_padding and padding == "same" else (0, 0)
else: # int or list or tuple with specific value
pad = padding if isinstance(padding, (list, tuple)) else (padding, padding)
padding = "same" if max(pad) > 0 else "valid"
if use_torch_padding and not backend.is_torch_backend and padding == "same":
inputs = layers.ZeroPadding2D(padding=pad, name=name and name + "pad")(inputs) if max(pad) > 0 else inputs
padding = "valid"
kernel_initializer = kwargs.get("kernel_initializer", None)
if kernel_initializer is None and not backend.is_torch_backend:
fan_in = 1 / (float(inputs.shape[-1] * kernel_size[0] * kernel_size[1]) ** 0.5)
kernel_initializer = initializers.RandomUniform(-fan_in, fan_in)
groups = max(1, groups)
return layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding="valid" if padding == "valid" else (pad if use_torch_padding else "same"),
use_bias=use_bias,
groups=groups,
kernel_initializer=kernel_initializer,
name=name and name + "conv",
**kwargs,
)(inputs)
def depthwise_conv2d_no_bias(inputs, kernel_size, strides=1, padding="valid", use_bias=False, use_torch_padding=True, name=None, **kwargs):
"""Typical DepthwiseConv2D with `use_bias` default as `False` and fixed padding
and torch initializer `uniform(-1/sqrt(k), 1/sqrt(k)), where k = weight.size(1) * prod(*kernel_size)`
"""
kernel_size = kernel_size if isinstance(kernel_size, (list, tuple)) else (kernel_size, kernel_size)
if isinstance(padding, str):
padding = padding.lower()
pad = (kernel_size[0] // 2, kernel_size[1] // 2) if use_torch_padding and padding == "same" else (0, 0)
else: # int or list or tuple with specific value
pad = padding if isinstance(padding, (list, tuple)) else (padding, padding)
padding = "same" if max(pad) > 0 else "valid"
if use_torch_padding and not backend.is_torch_backend and padding == "same":
inputs = layers.ZeroPadding2D(padding=pad, name=name and name + "pad")(inputs) if max(pad) > 0 else inputs
padding = "valid"
depthwise_initializer = kwargs.get("depthwise_initializer", None)
if depthwise_initializer is None and not backend.is_torch_backend:
fan_in = 1 / (float(inputs.shape[-1] * kernel_size[0] * kernel_size[1]) ** 0.5)
depthwise_initializer = initializers.RandomUniform(-fan_in, fan_in)
return layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding="valid" if padding == "valid" else (pad if use_torch_padding else "same"),
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
name=name and name + "dw_conv",
**kwargs,
)(inputs)
def dense_no_bias(inputs, units, use_bias=False, name=None, **kwargs):
"""Typical Dense with `use_bias` default as `False`, and Torch Linear initializer `uniform(-1/sqrt(in_features), 1/sqrt(in_features))`"""
kernel_initializer = kwargs.get("kernel_initializer", None)
if kernel_initializer is None and not backend.is_torch_backend:
fan_in = 1 / (float(inputs.shape[-1]) ** 0.5)
kernel_initializer = initializers.RandomUniform(-fan_in, fan_in)
return layers.Dense(units, kernel_initializer=kernel_initializer, use_bias=use_bias, name=name, **kwargs)(inputs)
""" Blocks """
def output_block(inputs, filters=0, activation="relu", num_classes=1000, drop_rate=0, classifier_activation="softmax", is_torch_mode=True, act_first=False):
nn = inputs
if filters > 0: # efficientnet like
bn_eps = BATCH_NORM_EPSILON if is_torch_mode else TF_BATCH_NORM_EPSILON
nn = conv2d_no_bias(nn, filters, 1, strides=1, use_bias=act_first, use_torch_padding=is_torch_mode, name="features_") # Also use_bias for act_first
nn = batchnorm_with_activation(nn, activation=activation, act_first=act_first, epsilon=bn_eps, name="features_")
if num_classes > 0:
nn = layers.GlobalAveragePooling2D(name="avg_pool")(nn) if len(nn.shape) == 4 else nn
if drop_rate > 0:
nn = layers.Dropout(drop_rate, name="head_drop")(nn)
nn = layers.Dense(num_classes, dtype="float32", activation=classifier_activation, name="predictions")(nn)
return nn
def global_context_module(inputs, use_attn=True, ratio=0.25, divisor=1, activation="relu", use_bias=True, name=None):
"""Global Context Attention Block, arxiv: https://arxiv.org/pdf/1904.11492.pdf"""
is_channels_last = image_data_format() == "channels_last"
filters = inputs.shape[-1 if is_channels_last else 1]
height_axis, width_axis = (1, 2) if is_channels_last else (2, 3)
height, width = inputs.shape[height_axis], inputs.shape[width_axis]
# activation could be ("relu", "hard_sigmoid")
hidden_activation, output_activation = activation if isinstance(activation, (list, tuple)) else (activation, "sigmoid")
reduction = make_divisible(filters * ratio, divisor, limit_round_down=0.0)
if use_attn:
attn = layers.Conv2D(1, kernel_size=1, use_bias=use_bias, name=name and name + "attn_conv")(inputs)
attn = functional.reshape(attn, [-1, 1, height * width]) # [batch, height, width, 1] or [batch, 1, height, width] -> [batch, 1, height * width]
attn = functional.softmax(attn, axis=-1)
context = inputs if is_channels_last else functional.transpose(inputs, [0, 2, 3, 1])
context = functional.reshape(context, [-1, height * width, filters])
context = attn @ context # [batch, 1, filters]
context = functional.reshape(context, [-1, 1, 1, filters]) if is_channels_last else functional.reshape(context, [-1, filters, 1, 1])
else:
context = functional.reduce_mean(inputs, [height_axis, width_axis], keepdims=True)
mlp = layers.Conv2D(reduction, kernel_size=1, use_bias=use_bias, name=name and name + "mlp_1_conv")(context)
mlp = layers.LayerNormalization(epsilon=LAYER_NORM_EPSILON, name=name and name + "ln")(mlp)
mlp = activation_by_name(mlp, activation=hidden_activation, name=name)
mlp = layers.Conv2D(filters, kernel_size=1, use_bias=use_bias, name=name and name + "mlp_2_conv")(mlp)
mlp = activation_by_name(mlp, activation=output_activation, name=name)
return layers.Multiply(name=name and name + "out")([inputs, mlp])
def se_module(inputs, se_ratio=0.25, divisor=8, limit_round_down=0.9, activation="relu", use_bias=True, use_conv=True, name=None):
"""Squeeze-and-Excitation block, arxiv: https://arxiv.org/pdf/1709.01507.pdf"""
channel_axis = -1 if image_data_format() == "channels_last" else 1
h_axis, w_axis = [1, 2] if image_data_format() == "channels_last" else [2, 3]
# activation could be ("relu", "hard_sigmoid") for mobilenetv3
hidden_activation, output_activation = activation if isinstance(activation, (list, tuple)) else (activation, "sigmoid")
filters = inputs.shape[channel_axis]
reduction = make_divisible(filters * se_ratio, divisor, limit_round_down=limit_round_down)
# print(f"{filters = }, {se_ratio = }, {divisor = }, {reduction = }")
se = functional.reduce_mean(inputs, [h_axis, w_axis], keepdims=True if use_conv else False)
if use_conv:
se = layers.Conv2D(reduction, kernel_size=1, use_bias=use_bias, name=name and name + "1_conv")(se)
else:
se = layers.Dense(reduction, use_bias=use_bias, name=name and name + "1_dense")(se)
se = activation_by_name(se, activation=hidden_activation, name=name)
if use_conv:
se = layers.Conv2D(filters, kernel_size=1, use_bias=use_bias, name=name and name + "2_conv")(se)
else:
se = layers.Dense(filters, use_bias=use_bias, name=name and name + "2_dense")(se)
se = activation_by_name(se, activation=output_activation, name=name)
se = se if use_conv else functional.reshape(se, [-1, 1, 1, filters] if image_data_format() == "channels_last" else [-1, filters, 1, 1])
return layers.Multiply(name=name and name + "out")([inputs, se])
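# Worked example of the squeeze width above (illustrative): with filters=96, se_ratio=0.25 and
# divisor=8, reduction = make_divisible(24, 8) = 24, so the block squeezes 96 -> 24 -> 96 channels.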
def eca_module(inputs, gamma=2.0, beta=1.0, name=None, **kwargs):
"""Efficient Channel Attention block, arxiv: https://arxiv.org/pdf/1910.03151.pdf"""
channel_axis = -1 if image_data_format() == "channels_last" else 1
h_axis, w_axis = [1, 2] if image_data_format() == "channels_last" else [2, 3]
filters = inputs.shape[channel_axis]
beta, gamma = float(beta), float(gamma)
tt = int((np.log(float(filters)) / np.log(2.0) + beta) / gamma)
kernel_size = max(tt if tt % 2 else tt + 1, 3)
pad = kernel_size // 2
nn = functional.reduce_mean(inputs, [h_axis, w_axis], keepdims=False)
nn = functional.pad(nn, [[0, 0], [pad, pad]])
nn = functional.expand_dims(nn, channel_axis)
nn = layers.Conv1D(1, kernel_size=kernel_size, strides=1, padding="valid", use_bias=False, name=name and name + "conv1d")(nn)
nn = functional.squeeze(nn, axis=channel_axis)
nn = activation_by_name(nn, activation="sigmoid", name=name)
nn = nn[:, None, None] if image_data_format() == "channels_last" else nn[:, :, None, None]
# print(f"{inputs.shape = }, {nn.shape = }")
return layers.Multiply(name=name and name + "out")([inputs, nn])
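# Worked example of the adaptive kernel size above (illustrative): for filters=256 with the default
# gamma=2, beta=1, tt = int((log2(256) + 1) / 2) = int(4.5) = 4, which is even, so
# kernel_size = max(4 + 1, 3) = 5.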
def drop_connect_rates_split(num_blocks, start=0.0, end=0.0):
"""split drop connect rate in range `(start, end)` according to `num_blocks`"""
# drop_connect_rates = functional.split(functional.linspace(start, end, sum(num_blocks)), num_blocks)
cum_split = [sum(num_blocks[: id + 1]) for id, _ in enumerate(num_blocks[:-1])]
drop_connect_rates = np.split(np.linspace(start, end, sum(num_blocks)), cum_split)
return [ii.tolist() for ii in drop_connect_rates]
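# Worked example (illustrative): drop_connect_rates_split([2, 3], start=0.0, end=0.4) splits
# np.linspace(0.0, 0.4, 5) = [0.0, 0.1, 0.2, 0.3, 0.4] into [[0.0, 0.1], [0.2, 0.3, 0.4]],
# i.e. one stochastic-depth rate per block, grouped by stage.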
def drop_block(inputs, drop_rate=0, name=None):
"""Stochastic Depth block by Dropout, arxiv: https://arxiv.org/abs/1603.09382"""
if drop_rate > 0:
noise_shape = [None] + [1] * (len(inputs.shape) - 1) # [None, 1, 1, 1]
return layers.Dropout(drop_rate, noise_shape=noise_shape, name=name and name + "drop")(inputs)
else:
return inputs
def addaptive_pooling_2d(inputs, output_size, reduce="mean", data_format="auto", name=None):
"""Auto set `pool_size` and `strides` for `MaxPool2D` or `AvgPool2D` fitting `output_size`.
(in_height - (pool_size - strides)) / strides == out_height
condition: pool_size >= strides, pool_size != 0, strides != 0
strides being as large as possible: strides == in_height // out_height
    ==> pool_size = in_height - (out_height - 1) * strides, rather than in_height % strides (which would be 0 when in_height == strides)
"""
data_format = image_data_format() if data_format == "auto" else data_format
    height, width = inputs.shape[1:-1] if data_format == "channels_last" else inputs.shape[2:]
h_bins, w_bins = output_size[:2] if isinstance(output_size, (list, tuple)) else (output_size, output_size)
reduce_function = layers.MaxPool2D if reduce.lower() == "max" else layers.AvgPool2D
h_strides, w_strides = height // h_bins, width // w_bins
h_pool_size, w_pool_size = height - (h_bins - 1) * h_strides, width - (w_bins - 1) * w_strides
# print(f"{inputs.shape = }, {h_pool_size = }, {w_pool_size = }, {h_strides = }, {w_strides = }")
return reduce_function(pool_size=(h_pool_size, w_pool_size), strides=(h_strides, w_strides), name=name and name + "pool")(inputs)
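# Worked example of the docstring formula (illustrative): pooling a 7x7 feature map down to 3x3
# gives strides = 7 // 3 = 2 and pool_size = 7 - (3 - 1) * 2 = 3, and indeed (7 - (3 - 2)) / 2 == 3.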
""" Other layers / functions """
@backend.register_keras_serializable(package="kecamCommon")
def __anti_alias_downsample_initializer__(weight_shape, dtype="float32"):
import numpy as np
kernel_size, channel = (weight_shape[0], weight_shape[2]) if backend.image_data_format() == "channels_last" else (weight_shape[2], weight_shape[0])
ww = np.array(np.poly1d((0.5, 0.5)) ** (kernel_size - 1)).astype("float32")
ww = np.expand_dims(ww, 0) * np.expand_dims(ww, 1)
if backend.image_data_format() == "channels_last":
ww = np.repeat(ww[:, :, None, None], channel, axis=-2)
else:
ww = np.repeat(ww[None, None, :, :], channel, axis=0)
return functional.convert_to_tensor(ww, dtype=dtype)
def anti_alias_downsample(inputs, kernel_size=3, strides=2, padding="same", trainable=False, name=None):
"""DepthwiseConv2D performing anti-aliasing downsample, arxiv: https://arxiv.org/pdf/1904.11486.pdf"""
return layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=strides,
padding="same",
use_bias=False,
trainable=trainable,
depthwise_initializer=__anti_alias_downsample_initializer__,
name=name and name + "anti_alias_down",
)(inputs)
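# For kernel_size=3 the initializer above expands the binomial coefficients of (0.5 + 0.5x)^2,
# i.e. [0.25, 0.5, 0.25], into their 3x3 outer product, a fixed blur kernel repeated per channel
# (an illustrative description of __anti_alias_downsample_initializer__, not extra behaviour).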
def make_divisible(vv, divisor=4, min_value=None, limit_round_down=0.9):
"""Copied from https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(vv + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < limit_round_down * vv:
new_v += divisor
return new_v
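# Worked examples (illustrative): make_divisible(25, 8) -> 24, rounding 25 to the nearest multiple
# of 8, while make_divisible(27, 8) -> 32, because rounding down to 24 would drop more than 10%
# of the original value (24 < 0.9 * 27).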
@backend.register_keras_serializable(package="kecamCommon")
def __unfold_filters_initializer__(weight_shape, dtype="float32"):
kernel_size = weight_shape[0]
kernel_out = kernel_size * kernel_size
ww = np.reshape(np.eye(kernel_out, dtype="float32"), [kernel_size, kernel_size, 1, kernel_out])
if len(weight_shape) == 5: # Conv3D or Conv3DTranspose
ww = np.expand_dims(ww, 2)
return functional.convert_to_tensor(ww)
def fold_by_conv2d_transpose(patches, output_shape=None, kernel_size=3, strides=2, dilation_rate=1, padding="same", compressed="auto", name=None):
paded = kernel_size // 2 if padding else 0
if compressed == "auto":
compressed = True if len(patches.shape) == 4 else False
if compressed:
_, hh, ww, cc = patches.shape
channel = cc // kernel_size // kernel_size
conv_rr = functional.reshape(patches, [-1, hh * ww, kernel_size * kernel_size, channel])
else:
_, hh, ww, _, _, channel = patches.shape
# conv_rr = patches
conv_rr = functional.reshape(patches, [-1, hh * ww, kernel_size * kernel_size, channel])
conv_rr = functional.transpose(conv_rr, [0, 3, 1, 2]) # [batch, channnel, hh * ww, kernel * kernel]
conv_rr = functional.reshape(conv_rr, [-1, hh, ww, kernel_size * kernel_size])
convtrans_rr = layers.Conv2DTranspose(
filters=1,
kernel_size=kernel_size,
strides=strides,
dilation_rate=dilation_rate,
padding="valid",
output_padding=paded,
use_bias=False,
trainable=False,
kernel_initializer=__unfold_filters_initializer__,
name=name and name + "fold_convtrans",
)(conv_rr)
out = functional.reshape(convtrans_rr[..., 0], [-1, channel, convtrans_rr.shape[1], convtrans_rr.shape[2]])
out = functional.transpose(out, [0, 2, 3, 1])
if output_shape is None:
output_shape = [-paded, -paded]
else:
output_shape = [output_shape[0] + paded, output_shape[1] + paded]
out = out[:, paded : output_shape[0], paded : output_shape[1]]
return out
@backend.register_keras_serializable(package="kecamCommon")
class CompatibleExtractPatches(layers.Layer):
def __init__(self, sizes=3, strides=2, rates=1, padding="same", compressed=True, force_conv=False, **kwargs):
super().__init__(**kwargs)
self.sizes, self.strides, self.rates, self.padding = sizes, strides, rates, padding
self.compressed, self.force_conv = compressed, force_conv
self.kernel_size = sizes[1] if isinstance(sizes, (list, tuple)) else sizes
self.strides = strides[1] if isinstance(strides, (list, tuple)) else strides
# dilation_rate can be 2 different values, used in DiNAT
self.dilation_rate = (rates if len(rates) == 2 else rates[1:3]) if isinstance(rates, (list, tuple)) else (rates, rates)
self.filters = self.kernel_size * self.kernel_size
if backend.backend() == "tensorflow":
import tensorflow as tf
if len(tf.config.experimental.list_logical_devices("TPU")) != 0 or self.force_conv:
self.use_conv = True
else:
self.use_conv = False
else:
self.use_conv = force_conv
def build(self, input_shape):
_, self.height, self.width, self.channel = input_shape
if self.padding.lower() == "same":
pad_value = self.kernel_size // 2
self.pad_value_list = [[0, 0], [pad_value, pad_value], [pad_value, pad_value], [0, 0]]
self.height, self.width = self.height + pad_value * 2, self.width + pad_value * 2
self.pad_value = pad_value
else:
self.pad_value = 0
if self.use_conv:
self.conv = layers.Conv2D(
filters=self.filters,
kernel_size=self.kernel_size,
strides=self.strides,
dilation_rate=self.dilation_rate,
padding="valid",
use_bias=False,
trainable=False,
kernel_initializer=__unfold_filters_initializer__,
name=self.name and self.name + "unfold_conv",
)
self.conv.build([None, *input_shape[1:-1], 1])
else:
self._sizes_ = [1, self.kernel_size, self.kernel_size, 1]
self._strides_ = [1, self.strides, self.strides, 1]
self._rates_ = [1, *self.dilation_rate, 1]
# output_size = backend.compute_conv_output_size([self.height, self.width], self.kernel_size, self.strides, self.padding, self.dilation_rate)
# self.output_height, self.output_width = output_size
super().build(input_shape)
def call(self, inputs):
if self.pad_value > 0:
inputs = functional.pad(inputs, self.pad_value_list)
if self.use_conv:
merge_channel = functional.transpose(inputs, [0, 3, 1, 2])
merge_channel = functional.reshape(merge_channel, [-1, self.height, self.width, 1])
conv_rr = self.conv(merge_channel)
# TFLite not supporting `tf.transpose` with len(perm) > 4...
out = functional.reshape(conv_rr, [-1, self.channel, conv_rr.shape[1] * conv_rr.shape[2], self.filters])
out = functional.transpose(out, [0, 2, 3, 1]) # [batch, hh * ww, kernel * kernel, channnel]
if self.compressed:
out = functional.reshape(out, [-1, conv_rr.shape[1], conv_rr.shape[2], self.filters * self.channel])
else:
out = functional.reshape(out, [-1, conv_rr.shape[1], conv_rr.shape[2], self.kernel_size, self.kernel_size, self.channel])
else:
out = functional.extract_patches(inputs, self._sizes_, self._strides_, self._rates_, "VALID") # must be upper word VALID/SAME
if not self.compressed:
# [batch, hh, ww, kernel, kernel, channnel]
out = functional.reshape(out, [-1, out.shape[1], out.shape[2], self.kernel_size, self.kernel_size, self.channel])
return out
def get_config(self):
base_config = super().get_config()
base_config.update(
{
"sizes": self.sizes,
"strides": self.strides,
"rates": self.rates,
"padding": self.padding,
"compressed": self.compressed,
"force_conv": self.force_conv,
}
)
return base_config
class PreprocessInput:
"""`rescale_mode` `torch` means `(image - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]`, `tf` means `(image - 0.5) / 0.5`"""
def __init__(self, input_shape=(224, 224, 3), rescale_mode="torch"):
self.set_input_shape(input_shape)
self.set_rescale_mode(rescale_mode)
def set_input_shape(self, input_shape):
input_shape = input_shape[1:] if len(input_shape) == 4 else input_shape
if None in input_shape:
self.input_shape = (None, None) # Dynamic input_shape
elif len(input_shape) == 2:
self.input_shape = input_shape
else:
channel_axis, channel_dim = min(enumerate(input_shape), key=lambda xx: xx[1]) # Assume the smallest value is the channel dimension
self.input_shape = [dim for axis, dim in enumerate(input_shape) if axis != channel_axis]
@staticmethod
def init_mean_std_by_rescale_mode(rescale_mode):
if isinstance(rescale_mode, (list, tuple)): # Specific mean and std
mean, std = rescale_mode
elif rescale_mode == "torch":
mean = np.array([0.485, 0.456, 0.406]).astype("float32") * 255.0
std = np.array([0.229, 0.224, 0.225]).astype("float32") * 255.0
if backend.image_data_format() != "channels_last":
mean, std = mean[:, None, None], std[:, None, None]
elif rescale_mode == "tf": # [0, 255] -> [-1, 1]
mean, std = 127.5, 127.5
# mean, std = 127.5, 128.0
elif rescale_mode == "tf128": # [0, 255] -> [-1, 1]
mean, std = 128.0, 128.0
elif rescale_mode == "raw01":
mean, std = 0, 255.0 # [0, 255] -> [0, 1]
elif rescale_mode == "clip": # value from openai/CLIP
mean = np.array([0.48145466, 0.4578275, 0.40821073]).astype("float32") * 255.0
std = np.array([0.26862954, 0.26130258, 0.27577711]).astype("float32") * 255.0
if backend.image_data_format() != "channels_last":
mean, std = mean[:, None, None], std[:, None, None]
else:
mean, std = 0, 1 # raw inputs [0, 255]
return mean, std
def set_rescale_mode(self, rescale_mode):
self.mean, self.std = self.init_mean_std_by_rescale_mode(rescale_mode)
self.rescale_mode = rescale_mode
def __call__(self, image, resize_method="bilinear", resize_antialias=False, input_shape=None):
if input_shape is not None:
self.set_input_shape(input_shape)
images = np.array([image] if len(np.shape(image)) == 3 else image).astype("float32")
images = (images * 255) if images.max() < 2 else images
images = images if backend.image_data_format() == "channels_last" else images.transpose([0, 3, 1, 2])
images = functional.convert_to_tensor(images)
images = functional.resize(images, self.input_shape, method=resize_method, antialias=resize_antialias)
images = (images - self.mean) / self.std
return images
def add_pre_post_process(model, rescale_mode="tf", input_shape=None, post_process=None, featrues=None):
from keras_cv_attention_models.imagenet.eval_func import decode_predictions
input_shape = model.input_shape[1:] if input_shape is None else input_shape
model.preprocess_input = PreprocessInput(input_shape, rescale_mode=rescale_mode)
model.decode_predictions = decode_predictions if post_process is None else post_process
model.rescale_mode = rescale_mode
if featrues is not None:
        model.extract_features = lambda: featrues
| [] |
2024-01-10 | okin1234/opensource_langchain | langchain_pipeline~model_hub.py | import os
import types
from class_resolver import ClassResolver
from langchain import OpenAI
from .huggingface_model import HuggingFacePipeline
from langchain import PromptTemplate, HuggingFaceHub, LLMChain
MODEL_KWARGS = {"temperature": 0, "max_new_tokens": 2048}
LOAD_KWARGS = {"use_safetensors": True, "trust_remote_code": True, "quantize_config": None, "use_triton": True}
def load_model_gptq(model_id, model_basename, load_kwargs=None):
model_kwargs = MODEL_KWARGS
llm = HuggingFacePipeline.from_model_id(model_id, task="text-generation", model_kwargs=model_kwargs, use_gptq=True, model_basename=model_basename, load_kwargs=load_kwargs)
return llm
def llama_v2_7b_gptq(device="all"):
model_id = "TheBloke/Llama-2-7B-GPTQ"
model_basename = "gptq_model-4bit-128g"
load_kwargs = LOAD_KWARGS
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs)
return llm
def llama_v2_13b_gptq(device="all"):
model_id = "TheBloke/Llama-2-13B-GPTQ"
model_basename = "gptq_model-4bit-128g"
load_kwargs = LOAD_KWARGS
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def llama_v2_13b_chat_gptq(device="all"):
model_id = "TheBloke/Llama-2-13B-chat-GPTQ"
model_basename = "gptq_model-4bit-128g"
load_kwargs = LOAD_KWARGS
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def orca_mini_v2_13b_gptq(device="all"):
model_id = "TheBloke/orca_mini_v2_13b-GPTQ"
model_basename = "orca_mini_v2_13b-GPTQ-4bit-128g.no-act.order"
load_kwargs = LOAD_KWARGS
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def falcon_7b_instruct_gptq(device="all"):
    # This model is garbage
model_id = "TheBloke/falcon-7b-instruct-GPTQ"
model_basename = "gptq_model-4bit-64g"
load_kwargs = LOAD_KWARGS
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def wizard_vicuna_7b_uncensored_superhot_gptq(device="all"):
model_id = "TheBloke/Wizard-Vicuna-7B-Uncensored-SuperHOT-8K-GPTQ"
model_basename = "wizard-vicuna-7b-uncensored-superhot-8k-GPTQ-4bit-128g.no-act.order"
load_kwargs = LOAD_KWARGS
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def wizard_vicuna_13b_uncensored_superhot_gptq(device="all"):
model_id = "TheBloke/Wizard-Vicuna-13B-Uncensored-SuperHOT-8K-GPTQ"
model_basename = "wizard-vicuna-13b-uncensored-superhot-8k-GPTQ-4bit-128g.no-act.order"
load_kwargs = LOAD_KWARGS
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def wizard_vicuna_30b_uncensored_gptq(device="all"):
model_id = "TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ"
model_basename = "Wizard-Vicuna-30B-Uncensored-GPTQ-4bit--1g.act.order"
load_kwargs = LOAD_KWARGS
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def llama_v2_13b_guanaco_qlora_gptq(device="all"):
model_id = "TheBloke/llama-2-13B-Guanaco-QLoRA-GPTQ"
model_basename = "gptq_model-4bit-128g"
load_kwargs = LOAD_KWARGS
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def openassistant_llama_v2_13b_orca_8k_gptq(device="all"):
model_id = "TheBloke/OpenAssistant-Llama2-13B-Orca-8K-3319-GPTQ"
model_basename = "gptq_model-4bit-128g"
load_kwargs = {"use_safetensors": True, "trust_remote_code": False, "quantize_config": None, "use_triton": True}
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def stablebeluga_13b_gptq(device="all"):
model_id = "TheBloke/StableBeluga-13B-GPTQ"
model_basename = "gptq_model-4bit-128g"
load_kwargs = {"use_safetensors": True, "trust_remote_code": False, "quantize_config": None, "use_triton": True}
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def stablebeluga_v2_70b_gptq(device="all"):
model_id = "TheBloke/StableBeluga2-70B-GPTQ"
model_basename = "gptq_model-4bit--1g"
load_kwargs = {"inject_fused_attention": False, "use_safetensors": True, "trust_remote_code": False, "quantize_config": None, "use_triton": True}
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def llama_v2_70b_chat_gptq(device="all"):
model_id = "TheBloke/Llama-2-70B-chat-GPTQ"
model_basename = "gptq_model-4bit--1g"
load_kwargs = {"inject_fused_attention": False, "use_safetensors": True, "trust_remote_code": False, "quantize_config": None, "use_triton": True}
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def llama_v2_70b_gptq(device="all"):
model_id = "TheBloke/Llama-2-70B-GPTQ"
model_basename = "gptq_model-4bit--1g"
load_kwargs = {"inject_fused_attention": False, "use_safetensors": True, "trust_remote_code": False, "quantize_config": None, "use_triton": True}
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def llama_v2_70b_instruct_v2_gptq(device="all"):
model_id = "TheBloke/Upstage-Llama-2-70B-instruct-v2-GPTQ"
model_basename = "gptq_model-4bit--1g"
load_kwargs = {"use_safetensors": True, "trust_remote_code": False, "quantize_config": None, "use_triton": True}
load_kwargs = device_setting(device, load_kwargs=load_kwargs)
llm = load_model_gptq(model_id, model_basename, load_kwargs=load_kwargs)
return llm
def gpt():
llm = OpenAI(temperature=0)
return llm
def llm_resolver(model_name, device="all"):
class Base: pass
func_list = []
for name, val in globals().items():
if isinstance(val, types.FunctionType):
func_list.append(val)
resolver = ClassResolver(func_list, base=Base)
model = resolver.make(model_name, device=device)
return model
def device_setting(device, load_kwargs=None):
if device == "all":
if load_kwargs:
load_kwargs["device_map"] = "auto"
else:
load_kwargs = {"device_map": "auto"}
else:
if load_kwargs:
load_kwargs["device"] = f"cuda:{device}"
else:
load_kwargs = {"device": f"cuda:{device}"}
return load_kwargs | [] |
2024-01-10 | okin1234/opensource_langchain | langchain_pipeline~template.py | from langchain.output_parsers import RegexParser
from langchain.prompts import PromptTemplate
from class_resolver import ClassResolver
import types
def default_map_rerank():
template="""Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
In addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:
Question: [question here]
Helpful Answer: [answer here]
Score: [score between 0 and 100]
How to determine the score:
- Higher is a better answer
- Better responds fully to the asked question, with sufficient level of detail
- If you do not know the answer based on the context, that should be a score of 0
- Don't be overconfident!
Example #1
Context:
---------
Apples are red
---------
Question: what color are apples?
Helpful Answer: red
Score: 100
Example #2
Context:
---------
it was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv
---------
Question: what type was the car?
Helpful Answer: a sports car or an suv
Score: 60
Example #3
Context:
---------
Pears are either red or orange
---------
Question: what color are apples?
Helpful Answer: This document does not answer the question
Score: 0
Begin!
Context:
---------
{context}
---------
Question: {question}
Helpful Answer:"""
template = prompt_template(template, output_parser_v1())
return template
def custom_map_rerank(template):
template_="""
Begin!
Context:
---------
{context}
---------
Question: {question} Think through step by step.
Helpful Answer:"""
template = template + template_
template = prompt_template(template, output_parser_v1())
return template
def template_resolver(template):
class Base: pass
func_list = []
for name, val in globals().items():
if isinstance(val, types.FunctionType):
func_list.append(val)
resolver = ClassResolver(func_list, base=Base)
template = resolver.make(template)
return template
def prompt_template(template, output_parser):
prompt = PromptTemplate(template=template, input_variables=["context", "question"], output_parser=output_parser)
return prompt
def output_parser_v1():
output_parser = RegexParser(
regex=r"(.*?)\nScore: (.*)",
output_keys=["answer", "score"],
)
return output_parser
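# Illustrative behaviour of the parser above (assuming langchain's RegexParser.parse applies
# re.search with the given regex): output_parser_v1().parse("red\nScore: 100")
# returns {"answer": "red", "score": "100"}.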
def output_parser_v2():
output_parser = RegexParser(
regex=r"(.*?)\nScore: (.*)\nReason: (.*)",
output_keys=["answer", "score", "reason"],
)
return output_parser
def output_parser_v3():
output_parser = RegexParser(
regex=r"(.*?)\nHelpful Answer: (.*)\nScore: (.*)",
output_keys=["reason", "answer", "score"],
)
return output_parser
def old_old_template():
template="""### System:
You must answer the given question based on the given Input.
### User:
{question}
### Input:
{context}
### Response:"""
prompt = PromptTemplate(template=template, input_variables=["context", "question"])
return prompt | [
"\nBegin!\n\nContext:\n---------\n{context}\n---------\nQuestion: {question} Think through step by step.\nHelpful Answer:",
"question",
"PLACEHOLDERPLACEHOLDER",
"### System:\nYou must answer the given question based on the given Input.\n\n### User:\n{question}\n\n### Input:\n{context}\n\n### Response:",
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\nIn addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:\n\nQuestion: [question here]\nHelpful Answer: [answer here]\nScore: [score between 0 and 100]\n\nHow to determine the score:\n- Higher is a better answer\n- Better responds fully to the asked question, with sufficient level of detail\n- If you do not know the answer based on the context, that should be a score of 0\n- Don't be overconfident!\n\nExample #1\n\nContext:\n---------\nApples are red\n---------\nQuestion: what color are apples?\nHelpful Answer: red\nScore: 100\n\nExample #2\n\nContext:\n---------\nit was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv\n---------\nQuestion: what type was the car?\nHelpful Answer: a sports car or an suv\nScore: 60\n\nExample #3\n\nContext:\n---------\nPears are either red or orange\n---------\nQuestion: what color are apples?\nHelpful Answer: This document does not answer the question\nScore: 0\n\nBegin!\n\nContext:\n---------\n{context}\n---------\nQuestion: {question}\nHelpful Answer:",
"context"
] |
2024-01-10 | onepointconsulting/kitchen-sink-ai | websocket_server~service~website_query.py | import autogen
from websocket_server.config import cfg
import openai
openai.api_key = cfg.openai_api_key
#### Configuration
config_list = [
{
"model": cfg.openai_model,
"api_key": cfg.openai_api_key,
}
]
llm_config = {
"request_timeout": cfg.request_timeout,
"seed": cfg.seed,
"config_list": config_list,
"temperature": cfg.temperature,
}
TERMINATE_TOKEN = "TERMINATE"
#### Agent Setup
assistant = autogen.AssistantAgent(
name="assistant",
system_message="You are an expert writer who is an expert at summaries",
)
user_proxy = autogen.UserProxyAgent(
name="user_proxy",
human_input_mode=TERMINATE_TOKEN,
max_consecutive_auto_reply=cfg.max_consecutive_auto_reply,
is_termination_msg=lambda x: x.get("content", "")
.rstrip()
.endswith(TERMINATE_TOKEN),
code_execution_config={"work_dir": cfg.code_dir},
llm_config=llm_config,
system_message=f"""Reply {TERMINATE_TOKEN} if the task has been solved at full satisfaction. Otherwise, reply CONTINUE, or the reason why the task is not solved yet.""",
)
### Execution Part
def provide_website_summary(website_name: str) -> str:
task = f"""
Give me a summary of this website: {website_name}
"""
user_proxy.initiate_chat(assistant, message=task)
message_before_terminate = None
for message in user_proxy.chat_messages[assistant]:
content = message["content"]
if content == TERMINATE_TOKEN:
break
message_before_terminate = content
return message_before_terminate
if __name__ == "__main__":
from websocket_server.log_init import logger
import sys
arg_length = len(sys.argv)
website = "https://www.ibm.com"
if arg_length > 1:
website = sys.argv[1]
response = provide_website_summary(website)
logger.info(response)
| [] |