date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---
2024-01-10 | MarthurGalarsy/SampleAI | src~lang_chain_files_read.py | from langchain.llms import OpenAI
from langchain.agents import load_tools
from langchain.agents import initialize_agent
import os
# 環境変数の準備
os.environ["OPENAI_API_KEY"] = "XXX"
os.environ["GOOGLE_CSE_ID"] = "XXX"
os.environ["GOOGLE_API_KEY"] = "XXX"
# LLMの設定
llm = OpenAI(model_name="gpt-3.5-turbo")
# 使用するツールをロード
tools = load_tools(["google-search"], llm=llm)
# エージェントを初期化
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
# 会話履歴を格納するための変数
conversation_history = ""
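# Recursively walk the given directory and return {filename: contents} for files whose names end with the given extension.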
def read_files_recursively(path, type):
files_dict = {}
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(type):
with open(os.path.join(root, file), 'r') as f:
contents = f.read()
files_dict[file] = contents
return files_dict
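# Run the agent on the prompt; if LangChain raises a "Could not parse LLM output" error, unwrap and return the raw output instead.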
def send_gpt(prompt):
try:
response = agent.run(prompt)
except ValueError as e:
# エラーが "Could not parse LLM output: `" で始まる場合、エラーメッセージを整形して response に格納
response = str(e)
if not response.startswith("Could not parse LLM output: `"):
raise e
response = response.removeprefix("Could not parse LLM output: `").removesuffix("`")
return response
if __name__ == "__main__":
# Specify the directory path where your program files are located
directory_path = input("プログラムファイルのパスを入力してください:")
type = input("プログラムの種類を入力してください(ex:.kt):")
files_dict = read_files_recursively(directory_path, type)
for filename, contents in files_dict.items():
conversation_history += f"File: {filename}\nContents:\n{contents}\n---\n"
response = send_gpt(f"ユーザー: 下記のソースコードがあります。\n{conversation_history}\n")
print("回答:", response)
conversation_history += f"ChatGPT: {response}\n"
while True:
# ユーザーからの入力を受け付ける
user_input = input("質問を入力してください (終了するには 'exit' と入力してください):")
# 入力が 'exit' の場合、ループを終了
if user_input.lower() == "exit":
break
# 会話履歴にユーザーの入力を追加
conversation_history += f"ユーザー: {user_input}\n"
# エージェントに会話履歴を与えて回答を生成
response = send_gpt(conversation_history)
# 回答を表示
print("回答:", response)
# 会話履歴にエージェントの回答を追加
conversation_history += f"ChatGPT: {response}\n"
| [] |
2024-01-10 | MarthurGalarsy/SampleAI | src~llama_index_agent_local.py | import os
import streamlit as st
import openai
import re
from dotenv import load_dotenv
from streamlit_chat import message
from langchain.agents import (
initialize_agent
)
from langchain.chat_models import ChatOpenAI
from langchain.tools.base import (
BaseTool,
)
from langchain import GoogleSearchAPIWrapper
from langchain.memory import ConversationBufferMemory
from langchain.schema import (
HumanMessage,
AIMessage
)
from llama_hub.github_repo import GithubClient
from llama_index import (
GPTVectorStoreIndex,
LLMPredictor,
ServiceContext,
SimpleDirectoryReader,
Document,
StorageContext,
load_index_from_storage,
)
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index.vector_stores import SimpleVectorStore
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
google_api_key = os.getenv("GOOGLE_API_KEY")
github_client = GithubClient(os.getenv("GITHUB_TOKEN"))
def ensure_directory_exists(directory):
if not os.path.exists(directory):
os.makedirs(directory)
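# Extract the package and class name from Java source and build the expected relative file path (or None if either is missing).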
def extract_java_info(java_source):
package_pattern = re.compile(r'package\s+([a-zA-Z_][\w.]*);')
class_pattern = re.compile(r'(?:public|private|protected|)\s*(?:class|interface|enum)\s+([a-zA-Z_]\w*)')
# パッケージ名を抽出
package_match = package_pattern.search(java_source)
package_name = package_match.group(1) if package_match else None
# クラス名を抽出
class_match = class_pattern.search(java_source)
class_name = class_match.group(1) if class_match else None
# パッケージ名とクラス名をもとにパスを作成
if package_name and class_name:
path = package_name.replace('.', '/') + '/' + class_name + '.java'
return path
else:
return None
llm4 = ChatOpenAI(temperature=0.5, model_name="gpt-4")
llm3_5 = ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo-16k")
search = GoogleSearchAPIWrapper(google_api_key = google_api_key)
index_directory = "./levica_context"
# セッション内に保存されたチャット履歴のメモリの取得
try:
memory = st.session_state["memory"]
except:
memory = ConversationBufferMemory(return_messages=True)
# チャット履歴(HumanMessageやAIMessageなど)を格納する配列の初期化
history = []
class CustomSearchTool(BaseTool):
name = "Search"
description = "useful for when you need to answer questions about current events"
def _run(self, query: str) -> str:
"""Use the tool."""
return search.run(query)
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
# 画面部分
st.title("LlamaIndex + LangChain + Local + GPT4 AI for GitHub in Streamlit")
st.caption("by Marthur")
targetDir = st.text_input("対象ディレクトリ")
type = st.text_input("プログラムの種類(ex:.kt)")
local_read_button = st.button("ローカルファイル読み込み")
git_user_input = st.text_input("質問")
git_send_button = st.button("送信")
# ローカルファイル読み込みボタン押下処理
if local_read_button:
local_read_button = False
ensure_directory_exists(index_directory)
llm_predictor = LLMPredictor(llm=llm3_5)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor
)
tools = [
CustomSearchTool(),
]
docs = SimpleDirectoryReader(
input_dir = targetDir,
recursive = True,
required_exts = type.split(","),
).load_data()
if not os.path.exists(index_directory + "/repository"):
repository_index = GPTVectorStoreIndex.from_documents(documents=docs, service_context=service_context)
repository_index.storage_context.persist(index_directory + "/repository")
else:
storage_context_repo = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_directory + "/repository"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_directory + "/repository"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_directory + "/repository"),
)
repository_index = load_index_from_storage(storage_context_repo, service_context=service_context)
repository_query_engine = repository_index.as_query_engine(service_context=service_context)
class RepositoryClass(BaseTool):
name="Repository"
description = targetDir + "内に存在する" + type + " のプログラム、ソースコードについて情報の取得、表示するために使用します。"
def _run(self, query: str) -> str:
"""Use the tool."""
return repository_query_engine.query(query).response
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
tools.append(RepositoryClass())
file_list = []
for doc in docs:
source = doc.get_text()
path = extract_java_info(source)
file_list.append(Document(text = path))
if not os.path.exists(index_directory + "/source/" + path):
source_index = GPTVectorStoreIndex.from_documents(documents=[Document(text = source)], service_context=service_context)
source_index.storage_context.persist(index_directory + "/source/" + path)
else:
storage_context_src = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_directory + "/source/" + path),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_directory + "/source/" + path),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_directory + "/source/" + path),
)
source_index = load_index_from_storage(storage_context_src, service_context=service_context)
source_query_engine = source_index.as_query_engine(service_context=service_context)
class path_class(BaseTool):
name = path
description = path + "のソースコードを取得、表示するために使用します。"
def _run(self, query: str) -> str:
"""Use the tool."""
return source_query_engine.query(query).response
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
tools.append(path_class())
if not os.path.exists(index_directory + "/filelist"):
file_list_index = GPTVectorStoreIndex.from_documents(documents=file_list, service_context=service_context)
file_list_index.storage_context.persist(index_directory + "/filelist")
else:
storage_context_file_list = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_directory + "/filelist"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_directory + "/filelist"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_directory + "/filelist"),
)
file_list_index = load_index_from_storage(storage_context_file_list, service_context=service_context)
file_list_query_engine = file_list_index.as_query_engine(service_context=service_context)
class FileListClass(BaseTool):
name="FileList"
description="Javaのファイルリスト一覧やファイルパス一覧を表示するために使用します。"
def _run(self, query: str) -> str:
"""Use the tool."""
return file_list_query_engine.query(query).response
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
tools.append(FileListClass())
agent = initialize_agent(tools, llm4, agent="zero-shot-react-description", memory=memory, verbose=True)
agent.save_agent(index_directory + "/agent.json")
st.session_state["agent"] = agent
if agent:
memory.chat_memory.add_ai_message("読み込みました")
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
if git_send_button :
git_send_button = False
agent = st.session_state["agent"]
memory.chat_memory.add_user_message(git_user_input)
response = agent.run(git_user_input)
response = response.replace("mermaid", "")
memory.chat_memory.add_ai_message(response)
st.session_state["memory"] = memory
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# チャット履歴の表示
for index, chat_message in enumerate(reversed(history)):
if isinstance(chat_message, HumanMessage):
message(chat_message.content, is_user=True, key=2 * index)
elif isinstance(chat_message, AIMessage):
message(chat_message.content, is_user=False, key=2 * index + 1)
| [
"Javaのファイルリスト一覧やファイルパス一覧を表示するために使用します。",
"useful for when you need to answer questions about current events"
] |
2024-01-10 | MarthurGalarsy/SampleAI | src~llama_index_git_ui_gpt4.py | import os
import streamlit as st
import openai
from dotenv import load_dotenv
from streamlit_chat import message
from langchain.memory import ConversationBufferMemory
from llama_index import (
download_loader,
GPTVectorStoreIndex,
LLMPredictor,
ServiceContext,
SimpleDirectoryReader,
)
from llama_hub.github_repo import GithubRepositoryReader, GithubClient
from langchain.schema import HumanMessage
from langchain.schema import AIMessage
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import GitLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
github_client = GithubClient(os.getenv("GITHUB_TOKEN"))
download_loader("GithubRepositoryReader")
# セッション内に保存されたチャット履歴のメモリの取得
try:
memory = st.session_state["memory"]
except:
memory = ConversationBufferMemory(return_messages=True)
# 会話履歴を格納するための変数
conversation_history = []
# チャット履歴(HumanMessageやAIMessageなど)を格納する配列の初期化
history = []
# 画面部分
st.title("LlamaIndex + LangChain + Local + GPT4 AI for GitHub in Streamlit")
st.caption("by Marthur")
place_type = ["Git(LlamaIndex)", "Git(LangChain)","Local(LlamaIndex)"]
place_selector = st.radio("読み込み方切り替え", place_type)
if place_selector == "Git(LlamaIndex)" :
owner = st.text_input("GitHubのOwner")
repository = st.text_input("GitHubのRepository")
type = st.text_input("プログラムの種類(ex:.kt)")
targetDir = st.text_input("対象ディレクトリ")
branch = st.text_input("ブランチ")
git_read_button = st.button("GitHub読み込み")
elif place_selector == "Git(LangChain)" :
clone_url = st.text_input("GitHubのURL")
type = st.text_input("プログラムの種類(ex:.kt)")
branch = st.text_input("ブランチ")
repo_path = "./temp"
git_read_button = st.button("GitHub読み込み")
elif place_selector == "Local(LlamaIndex)" :
targetDir = st.text_input("対象ディレクトリ")
type = st.text_input("プログラムの種類(ex:.kt)")
local_read_button = st.button("ローカルファイル読み込み")
target_type = ["Repository", "SingleFile"]
target_type_selector = st.radio("対象切り替え", target_type)
if target_type_selector == "Repository":
git_user_input = st.text_input("質問")
git_send_button = st.button("送信")
elif target_type_selector == "SingleFile":
git_user_input = st.text_input("対象ファイル名")
gpt_user_input = st.text_input("質問")
gpt_send_button = st.button("送信")
# GitHub読み込みボタン(LlamaIndex)押下処理
if place_selector == "Git(LlamaIndex)" and git_read_button:
git_read_button = False
loader = GithubRepositoryReader(
github_client,
owner = owner,
repo = repository,
filter_directories = ([targetDir], GithubRepositoryReader.FilterType.INCLUDE),
filter_file_extensions = (type.split(","), GithubRepositoryReader.FilterType.INCLUDE),
verbose = True,
concurrent_requests = 10,
use_parser = True,
)
docs = loader.load_data(branch=branch)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-4"))
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor
)
index = GPTVectorStoreIndex.from_documents(documents=docs, service_context=service_context)
query_engine = index.as_query_engine(service_context=service_context)
st.session_state["query_engine"] = query_engine
if query_engine:
memory.chat_memory.add_ai_message("読み込みました")
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# GitHub読み込みボタン(LangChain)押下処理
if place_selector == "Git(LangChain)" and git_read_button:
git_read_button = False
if os.path.exists(repo_path):
clone_url = None
loader = GitLoader(
clone_url=clone_url,
branch=branch,
repo_path=repo_path,
file_filter=lambda file_path: file_path.endswith(type),
)
index = VectorstoreIndexCreator(
vectorstore_cls=Chroma, # default
embedding=OpenAIEmbeddings(
disallowed_special=(),
chunk_size=1
), #default
).from_loaders([loader])
st.session_state["index"] = index
if index :
memory.chat_memory.add_ai_message("読み込みました")
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# ローカルファイル読み込みボタン押下処理
if place_selector == "Local(LlamaIndex)" and local_read_button:
local_read_button = False
docs = SimpleDirectoryReader(
input_dir = targetDir,
recursive = True,
required_exts = type.split(","),
).load_data()
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-4"))
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor
)
index = GPTVectorStoreIndex.from_documents(documents=docs, service_context=service_context)
query_engine = index.as_query_engine(service_context=service_context)
st.session_state["query_engine"] = query_engine
if query_engine:
memory.chat_memory.add_ai_message("読み込みました")
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# Repositoryで送信ボタン押下処理
if target_type_selector == "Repository" and git_send_button :
git_send_button = False
memory.chat_memory.add_user_message(git_user_input)
if place_selector == "Git(LangChain)":
index = st.session_state["index"]
response = index.query(git_user_input)
response = response.replace("mermaid", "")
else:
query_engine = st.session_state["query_engine"]
response = str(query_engine.query(git_user_input).response)
response = response.replace("mermaid", "")
memory.chat_memory.add_ai_message(response)
st.session_state["memory"] = memory
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# SingleFileで送信ボタン押下処理
if target_type_selector == "SingleFile" and gpt_send_button :
gpt_send_button = False
git_user_input += "のソースコードを表示してください"
memory.chat_memory.add_user_message(git_user_input)
if place_selector == "Git(LangChain)":
index = st.session_state["index"]
code_res = index.query(git_user_input)
else:
query_engine = st.session_state["query_engine"]
code_res = query_engine.query(git_user_input).response
memory.chat_memory.add_ai_message(code_res)
prompt = "下記のコードがあります。\n下記のコードに対して" + gpt_user_input + "\n" + code_res
memory.chat_memory.add_user_message(prompt)
st.session_state["memory"] = memory
# ユーザーの質問を会話履歴に追加
conversation_history.append({"role": "user", "content": prompt})
# GPT-4モデルを使用してテキストを生成
gpt_response = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "system", "content": f"You are a excellent system engineer."}] + conversation_history,
max_tokens=3500,
n=1,
temperature=0.8,
)
gpt_message = gpt_response.choices[0].message['content'].strip()
# アシスタントの回答を会話履歴に追加
conversation_history.append({"role": "assistant", "content": gpt_message})
memory.chat_memory.add_ai_message(gpt_message)
st.session_state["memory"] = memory
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# チャット履歴の表示
for index, chat_message in enumerate(reversed(history)):
if isinstance(chat_message, HumanMessage):
message(chat_message.content, is_user=True, key=2 * index)
elif isinstance(chat_message, AIMessage):
message(chat_message.content, is_user=False, key=2 * index + 1)
| [
"content",
"You are a excellent system engineer.",
"下記のコードがあります。\n下記のコードに対してPLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | MarthurGalarsy/SampleAI | src~lang_chain_git.py | import os
from dotenv import load_dotenv
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import GitLoader
load_dotenv()
clone_url = input("GitHubのURLを入力してください:")
type = input("プログラムの種類を入力してください(ex:.kt):")
repo_path = "./temp"
branch = input("ブランチを入力してください:")
if os.path.exists(repo_path):
clone_url = None
loader = GitLoader(
clone_url=clone_url,
branch=branch,
repo_path=repo_path,
file_filter=lambda file_path: file_path.endswith(type),
)
index = VectorstoreIndexCreator(
vectorstore_cls=Chroma, # default
embedding=OpenAIEmbeddings(disallowed_special=()), #default
).from_loaders([loader])
while True:
# ユーザーからの入力を受け付ける
user_input = input("質問を入力してください (終了するには 'exit' と入力してください):")
# 入力が 'exit' の場合、ループを終了
if user_input.lower() == "exit":
break
if user_input == "":
break
response = index.query(user_input)
print("回答:", response)
| [] |
2024-01-10 | MarthurGalarsy/SampleAI | src~gpt_only.py | import openai
# APIキーを設定してください。例: 'your-api-key'
api_key = 'XXX'
openai.api_key = api_key
def generate_text(prompt, role, conversation_history):
# ユーザーの質問を会話履歴に追加
conversation_history.append({"role": "user", "content": prompt})
# GPT-4モデルを使用してテキストを生成
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": f"You are a {role}."}] + conversation_history,
max_tokens=50,
n=1,
temperature=0.8,
)
message = response.choices[0].message['content'].strip()
# アシスタントの回答を会話履歴に追加
conversation_history.append({"role": "assistant", "content": message})
return message
if __name__ == "__main__":
# ロールプレイのモデルをユーザーに入力させる
role = input("ロールプレイのモデルを指定してください(例: helpful assistant): ")
# 会話履歴を格納するためのリストを初期化
conversation_history = []
while True:
# ユーザーに質問を入力させる
input_prompt = input("質問を入力してください(終了するには'q'を入力): ")
# 終了条件の確認
if input_prompt.lower() == 'q':
break
# GPT-4からの回答を生成
generated_text = generate_text(input_prompt, role, conversation_history)
# 回答を表示
print("GPT-4からの回答:", generated_text) | [
"質問を入力してください(終了するには'q'を入力): ",
"content",
"You are a PLACEHOLDER."
] |
2024-01-10 | MarthurGalarsy/SampleAI | src~llama_index_agent.py | import os
import streamlit as st
import openai
import re
from dotenv import load_dotenv
from streamlit_chat import message
from langchain import (
GoogleSearchAPIWrapper,
LLMChain
)
from langchain.memory import ConversationBufferMemory
from langchain.schema import (
HumanMessage,
AIMessage,
)
from langchain.agents import (
load_tools,
ZeroShotAgent,
AgentExecutor
)
from langchain.chat_models import ChatOpenAI
from langchain.tools.base import (
BaseTool,
)
from llama_hub.github_repo import GithubRepositoryReader, GithubClient
from llama_index import (
GPTVectorStoreIndex,
LLMPredictor,
ServiceContext,
Document,
StorageContext,
load_index_from_storage,
)
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index.vector_stores import SimpleVectorStore
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
google_api_key = os.getenv("GOOGLE_API_KEY")
github_client = GithubClient(os.getenv("GITHUB_TOKEN"))
def get_input() -> str:
print("Insert your text. Enter 'q' or press Ctrl-D (or Ctrl-Z on Windows) to end.")
contents = []
while True:
try:
line = input()
except EOFError:
break
if line == "q":
break
contents.append(line)
return "\n".join(contents)
def ensure_directory_exists(directory):
if not os.path.exists(directory):
os.makedirs(directory)
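# Extract the package and class/object name from Kotlin source and build the expected relative file path (or None if either is missing).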
def extract_kotlin_info(kotlin_source):
package_pattern = re.compile(r'package\s+([a-zA-Z_][\w.]*)')
class_pattern = re.compile(r'(?:public|private|protected|)\s*(?:class|interface|enum|object)\s+([a-zA-Z_]\w*)')
# パッケージ名を抽出
package_match = package_pattern.search(kotlin_source)
package_name = package_match.group(1) if package_match else None
# クラス名を抽出
class_match = class_pattern.search(kotlin_source)
class_name = class_match.group(1) if class_match else None
# パッケージ名とクラス名をもとにパスを作成
if package_name and class_name:
path = package_name.replace('.', '/') + '/' + class_name + '.kt'
return path
else:
return None
llm4 = ChatOpenAI(temperature=0.5, model_name="gpt-4-1106-preview")
llm3_5 = ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo-16k")
search = GoogleSearchAPIWrapper(google_api_key = google_api_key)
index_directory = "./index_context"
# セッション内に保存されたチャット履歴のメモリの取得
try:
memory = st.session_state["memory"]
except:
memory = ConversationBufferMemory(return_messages=True)
# チャット履歴(HumanMessageやAIMessageなど)を格納する配列の初期化
history = []
class CustomSearchTool(BaseTool):
name = "Search"
description = "useful for when you need to answer questions about current events"
def _run(self, query: str) -> str:
"""Use the tool."""
return search.run(query)
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
# 画面部分
st.title("LlamaIndex + LangChain + Local + GPT4 AI for GitHub in Streamlit")
st.caption("by Marthur")
owner = st.text_input("GitHubのOwner")
repository = st.text_input("GitHubのRepository")
type = st.text_input("プログラムの種類(ex:.kt)")
targetDir = st.text_input("対象ディレクトリ")
branch = st.text_input("ブランチ")
git_read_button = st.button("GitHub読み込み")
git_user_input = st.text_input("質問")
git_send_button = st.button("送信")
# GitHub読み込みボタン(LlamaIndex)押下処理
if git_read_button:
git_read_button = False
ensure_directory_exists(index_directory)
llm_predictor = LLMPredictor(llm=llm3_5)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor
)
tools = [
CustomSearchTool(),
load_tools(["human"], input_func=get_input)[0]
]
loader = GithubRepositoryReader(
github_client,
owner = owner,
repo = repository,
filter_directories = ([targetDir], GithubRepositoryReader.FilterType.INCLUDE),
filter_file_extensions = (type.split(","), GithubRepositoryReader.FilterType.INCLUDE),
verbose = True,
concurrent_requests = 10,
use_parser = True,
)
docs = loader.load_data(branch=branch)
if not os.path.exists(index_directory + "/" + repository + "/repository"):
repository_index = GPTVectorStoreIndex.from_documents(documents=docs, service_context=service_context)
repository_index.storage_context.persist(index_directory + "/" + repository + "/repository")
else:
storage_context_repo = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_directory + "/" + repository + "/repository"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_directory + "/" + repository + "/repository"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_directory + "/" + repository + "/repository"),
)
repository_index = load_index_from_storage(storage_context_repo, service_context=service_context)
repository_query_engine = repository_index.as_query_engine(service_context=service_context)
class RepositoryClass(BaseTool):
name="Repository"
description = repository + "内に存在する" + type + " のプログラム、ソースコードについて情報の取得、表示するために使用します。"
def _run(self, query: str) -> str:
"""Use the tool."""
return repository_query_engine.query(query).response
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
tools.append(RepositoryClass())
file_list = []
for doc in docs:
source = doc.get_text()
path = extract_kotlin_info(source)
file_list.append(Document(text = path))
path_class = path + "_class"
if not os.path.exists(index_directory + "/" + repository + "/source/" + path):
source_index = GPTVectorStoreIndex.from_documents(documents=[Document(text = source)], service_context=service_context)
source_index.storage_context.persist(index_directory + "/" + repository + "/source/" + path)
else:
storage_context_src = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_directory + "/" + repository + "/source/" + path),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_directory + "/" + repository + "/source/" + path),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_directory + "/" + repository + "/source/" + path),
)
source_index = load_index_from_storage(storage_context_src, service_context=service_context)
source_query_engine = source_index.as_query_engine(service_context=service_context)
class path_class(BaseTool):
name = path
description = path + "のソースコードを取得、表示するために使用します。"
def _run(self, query: str) -> str:
"""Use the tool."""
return source_query_engine.query(query).response
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
tools.append(path_class())
if not os.path.exists(index_directory + "/" + repository + "/filelist"):
file_list_index = GPTVectorStoreIndex.from_documents(documents=file_list, service_context=service_context)
file_list_index.storage_context.persist(index_directory + "/" + repository + "/filelist")
else:
storage_context_file_list = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_directory + "/" + repository + "/filelist"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_directory + "/" + repository + "/filelist"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_directory + "/" + repository + "/filelist"),
)
file_list_index = load_index_from_storage(storage_context_file_list, service_context=service_context)
file_list_query_engine = file_list_index.as_query_engine(service_context=service_context)
class FileListClass(BaseTool):
name="FileList"
description="Kotlinのファイルリスト一覧やファイルパス一覧を表示するために使用します。"
def _run(self, query: str) -> str:
"""Use the tool."""
return file_list_query_engine.query(query).response
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
tools.append(FileListClass())
prompt = ZeroShotAgent.create_prompt(
tools=tools,
prefix="""あなたはGitHubやKotlinに詳しいシニアエンジニアです。
次の質問にできる限り答えてください。次のツールにアクセスできます:""",
# suffix="""必ずFINAL FANTASY Tacticsのアグリアスの言葉遣いで回答してください。
suffix="""必ずFINAL FANTASY XIIIのライトニングの言葉遣いで回答してください。
ただし、分からないことは人間に質問してください。
質問内容:{question}
{agent_scratchpad}
""",
input_variables=["question", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=llm4, prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
agent_executor.save_agent(index_directory + "/" + repository + "/agent.json")
st.session_state["agent_executor"] = agent_executor
if agent_executor:
memory.chat_memory.add_ai_message("読み込みました")
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
if git_send_button :
git_send_button = False
agent_executor = st.session_state["agent_executor"]
memory.chat_memory.add_user_message(git_user_input)
response = agent_executor.run(git_user_input)
response = response.replace("mermaid", "")
memory.chat_memory.add_ai_message(response)
st.session_state["memory"] = memory
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# チャット履歴の表示
for index, chat_message in enumerate(reversed(history)):
if isinstance(chat_message, HumanMessage):
message(chat_message.content, is_user=True, key=2 * index)
elif isinstance(chat_message, AIMessage):
message(chat_message.content, is_user=False, key=2 * index + 1)
| [
"必ずFINAL FANTASY XIIIのライトニングの言葉遣いで回答してください。\n ただし、分からないことは人間に質問してください。\n 質問内容:{question}\n {agent_scratchpad}\n ",
"question",
"agent_scratchpad",
"Kotlinのファイルリスト一覧やファイルパス一覧を表示するために使用します。",
"useful for when you need to answer questions about current events",
"あなたはGitHubやKotlinに詳しいシニアエンジニアです。\n 次の質問にできる限り答えてください。次のツールにアクセスできます:"
] |
2024-01-10 | MarthurGalarsy/SampleAI | src~llama_index_agent_word.py | import os
import streamlit as st
import openai
from dotenv import load_dotenv
from streamlit_chat import message
from langchain.agents import (
initialize_agent
)
from langchain.chat_models import ChatOpenAI
from langchain.tools.base import (
BaseTool,
)
from langchain import GoogleSearchAPIWrapper
from langchain.memory import ConversationBufferMemory
from langchain.schema import (
HumanMessage,
AIMessage
)
from pathlib import Path
from llama_index import download_loader
from llama_index import (
GPTVectorStoreIndex,
LLMPredictor,
ServiceContext,
StorageContext,
load_index_from_storage,
)
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index.vector_stores import SimpleVectorStore
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
google_api_key = os.getenv("GOOGLE_API_KEY")
llm = ChatOpenAI(temperature=0, model_name="gpt-4")
search = GoogleSearchAPIWrapper(google_api_key = google_api_key)
index_directory = "./index_context"
DocxReader = download_loader("DocxReader")
loader = DocxReader()
# セッション内に保存されたチャット履歴のメモリの取得
try:
memory = st.session_state["memory"]
except:
memory = ConversationBufferMemory(return_messages=True)
# チャット履歴(HumanMessageやAIMessageなど)を格納する配列の初期化
history = []
class CustomSearchTool(BaseTool):
name = "Search"
description = "useful for when you need to answer questions about current events"
def _run(self, query: str) -> str:
"""Use the tool."""
return search.run(query)
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
# 画面部分
st.title("LlamaIndex + LangChain + Word + GPT4 AI for GitHub in Streamlit")
st.caption("by Marthur")
target_file_path = st.text_input("対象ファイルパス")
file_read_button = st.button("ローカルファイル読み込み")
user_input = st.text_input("質問")
send_button = st.button("送信")
# ローカルファイル読み込みボタン押下処理
if file_read_button:
file_read_button = False
docs = loader.load_data(file=Path(target_file_path))
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor
)
tools = [
CustomSearchTool(),
]
if not os.path.exists(index_directory + "/repository"):
repository_index = GPTVectorStoreIndex.from_documents(documents=docs, service_context=service_context)
repository_index.storage_context.persist(index_directory + "/repository")
else:
storage_context_repo = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_directory + "/repository"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_directory + "/repository"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_directory + "/repository"),
)
repository_index = load_index_from_storage(storage_context_repo, service_context=service_context)
repository_query_engine = repository_index.as_query_engine(service_context=service_context)
class RepositoryClass(BaseTool):
name="Repository"
description = target_file_path + "内に存在するmemoQのプラグインについて情報の取得、表示するために使用します。"
def _run(self, query: str) -> str:
"""Use the tool."""
return repository_query_engine.query(query).response
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
tools.append(RepositoryClass())
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", memory=memory, verbose=True)
agent.save_agent(index_directory + "/agent.json")
st.session_state["agent"] = agent
if agent:
memory.chat_memory.add_ai_message("読み込みました")
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
if send_button :
send_button = False
agent = st.session_state["agent"]
memory.chat_memory.add_user_message(user_input)
response = agent.run(user_input)
response = response.replace("mermaid", "")
memory.chat_memory.add_ai_message(response)
st.session_state["memory"] = memory
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# チャット履歴の表示
for index, chat_message in enumerate(reversed(history)):
if isinstance(chat_message, HumanMessage):
message(chat_message.content, is_user=True, key=2 * index)
elif isinstance(chat_message, AIMessage):
message(chat_message.content, is_user=False, key=2 * index + 1)
| [
"useful for when you need to answer questions about current events"
] |
2024-01-10 | MarthurGalarsy/SampleAI | src~lang_chain_git_ui.py | import os
import streamlit as st
import openai
from dotenv import load_dotenv
from streamlit_chat import message
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import GitLoader
from langchain.schema import HumanMessage
from langchain.schema import AIMessage
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# セッション内に保存されたチャット履歴のメモリの取得
try:
memory = st.session_state["memory"]
except:
memory = ConversationBufferMemory(return_messages=True)
st.title("langchain for GitHub in Streamlit")
st.caption("by Marthur")
clone_url = st.text_input("GitHubのURL")
type = st.text_input("プログラムの種類(ex:.kt)")
branch = st.text_input("ブランチ")
repo_path = "./temp"
read_button = st.button("GitHub読み込み")
model_list = ["Git", "GPT"]
model_selector = st.radio("モデル切り替え", model_list)
if model_selector == "Git":
git_user_input = st.text_input("質問")
git_send_button = st.button("送信")
elif model_selector == "GPT":
git_user_input = st.text_input("対象ファイル名")
gpt_user_input = st.text_input("質問")
gpt_send_button = st.button("送信")
# 会話履歴を格納するための変数
conversation_history = []
# チャット履歴(HumanMessageやAIMessageなど)を格納する配列の初期化
history = []
if read_button:
read_button = False
if os.path.exists(repo_path):
clone_url = None
loader = GitLoader(
clone_url=clone_url,
branch=branch,
repo_path=repo_path,
file_filter=lambda file_path: file_path.endswith(type),
)
index = VectorstoreIndexCreator(
vectorstore_cls=Chroma, # default
embedding=OpenAIEmbeddings(
disallowed_special=(),
chunk_size=1
), #default
).from_loaders([loader])
st.session_state["index"] = index
if index :
memory.chat_memory.add_ai_message("読み込みました")
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
if model_selector == "Git" and git_send_button :
git_send_button = False
memory.chat_memory.add_user_message(git_user_input)
index = st.session_state["index"]
response = index.query(git_user_input)
# セッションへのチャット履歴の保存
st.session_state["index"] = index
memory.chat_memory.add_ai_message(response)
st.session_state["memory"] = memory
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
if model_selector == "GPT" and gpt_send_button :
gpt_send_button = False
memory.chat_memory.add_user_message(git_user_input + "のソースコード")
index = st.session_state["index"]
code_res = index.query(git_user_input + "のソースコード")
# セッションへのチャット履歴の保存
st.session_state["index"] = index
memory.chat_memory.add_ai_message(code_res)
st.session_state["memory"] = memory
prompt = "下記のコードがあります。\n下記のコードに対して" + gpt_user_input + "\n" + code_res
memory.chat_memory.add_user_message(prompt)
# ユーザーの質問を会話履歴に追加
conversation_history.append({"role": "user", "content": prompt})
# GPT-4モデルを使用してテキストを生成
gpt_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": f"You are a excellent system engineer."}] + conversation_history,
max_tokens=3500,
n=1,
temperature=0.8,
)
gpt_message = gpt_response.choices[0].message['content'].strip()
# アシスタントの回答を会話履歴に追加
conversation_history.append({"role": "assistant", "content": gpt_message})
memory.chat_memory.add_ai_message(gpt_message)
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# チャット履歴の表示
for index, chat_message in enumerate(reversed(history)):
if isinstance(chat_message, HumanMessage):
message(chat_message.content, is_user=True, key=2 * index)
elif isinstance(chat_message, AIMessage):
message(chat_message.content, is_user=False, key=2 * index + 1)
| [
"content",
"You are a excellent system engineer.",
"下記のコードがあります。\n下記のコードに対してPLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | MarthurGalarsy/SampleAI | src~llama_langchain_web_page.py | import os
import streamlit as st
import openai
from dotenv import load_dotenv
from streamlit_chat import message
from langchain import (
GoogleSearchAPIWrapper,
LLMChain
)
from langchain.memory import ConversationBufferMemory
from langchain.schema import (
HumanMessage,
AIMessage,
)
from langchain.agents import (
load_tools,
ZeroShotAgent,
AgentExecutor
)
from langchain.chat_models import ChatOpenAI
from langchain.tools.base import (
BaseTool,
)
from llama_hub.github_repo import GithubClient
from llama_index import (
GPTVectorStoreIndex,
LLMPredictor,
ServiceContext,
SimpleWebPageReader,
StorageContext,
load_index_from_storage,
)
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index.vector_stores import SimpleVectorStore
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
google_api_key = os.getenv("GOOGLE_API_KEY")
github_client = GithubClient(os.getenv("GITHUB_TOKEN"))
llm4 = ChatOpenAI(temperature=0.5, model_name="gpt-4-1106-preview")
llm3_5 = ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo-16k")
search = GoogleSearchAPIWrapper(google_api_key = google_api_key)
index_directory = "./web_page_context"
# セッション内に保存されたチャット履歴のメモリの取得
try:
memory = st.session_state["memory"]
except:
memory = ConversationBufferMemory(return_messages=True)
# チャット履歴(HumanMessageやAIMessageなど)を格納する配列の初期化
history = []
class CustomSearchTool(BaseTool):
name = "Search"
description = "useful for when you need to answer questions about current events"
def _run(self, query: str) -> str:
"""Use the tool."""
return search.run(query)
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
def ensure_directory_exists(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def get_input() -> str:
print("Insert your text. Enter 'q' or press Ctrl-D (or Ctrl-Z on Windows) to end.")
contents = []
while True:
try:
line = input()
except EOFError:
break
if line == "q":
break
contents.append(line)
return "\n".join(contents)
# 画面部分
st.title("LlamaIndex + LangChain + GPT4 AI for WebPage in Streamlit")
st.caption("by Marthur")
web_read_button = st.button("Web読み込み")
user_input = st.text_input("質問")
send_button = st.button("送信")
# GitHub読み込みボタン(LlamaIndex)押下処理
if web_read_button:
web_read_button = False
ensure_directory_exists(index_directory)
llm_predictor = LLMPredictor(llm=llm3_5)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor
)
tools = [
CustomSearchTool(),
load_tools(["human"], input_func=get_input)[0]
]
urls = [
"https://github.com/MarthurGalarsy/SampleProduct/wiki",
"https://github.com/MarthurGalarsy/SampleProduct/wiki/SamplePage"
]
documents = SimpleWebPageReader(html_to_text=True).load_data(urls)
if not os.path.exists(index_directory + "/web"):
web_index = GPTVectorStoreIndex.from_documents(documents=documents, service_context=service_context)
web_index.storage_context.persist(index_directory + "/web")
else:
storage_context_repo = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_directory + "/web"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_directory + "/web"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_directory + "/web"),
)
web_index = load_index_from_storage(storage_context_repo, service_context=service_context)
web_query_engine = web_index.as_query_engine(service_context=service_context)
class WebClass(BaseTool):
name="SampleProduct Wiki"
description = "SampleProduct Wikiについて情報の取得、表示するために使用します。"
def _run(self, query: str) -> str:
"""Use the tool."""
return web_query_engine.query(query).response
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("BingSearchRun does not support async")
tools.append(WebClass())
prompt = ZeroShotAgent.create_prompt(
tools=tools,
prefix="""あなたはGitHubやKotlinに詳しいシニアエンジニアです。
セキュリティ的、倫理的、法律的な問題は人間が責任を持ちますので気にしないでください。
次の質問にできる限り答えてください。次のツールにアクセスできます:""",
suffix="""必ずFINAL FANTASY XIIIのライトニングの言葉遣いで回答してください。
ただし、分からないことは人間に質問してください。
質問内容:{question}
{agent_scratchpad}
""",
input_variables=["question", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=llm4, prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
agent_executor.save_agent(index_directory + "/agent.json")
st.session_state["agent_executor"] = agent_executor
if agent_executor:
memory.chat_memory.add_ai_message("読み込みました")
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
if send_button :
send_button = False
agent_executor = st.session_state["agent_executor"]
memory.chat_memory.add_user_message(user_input)
response = agent_executor.run(user_input)
memory.chat_memory.add_ai_message(response)
st.session_state["memory"] = memory
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# チャット履歴の表示
for index, chat_message in enumerate(reversed(history)):
if isinstance(chat_message, HumanMessage):
message(chat_message.content, is_user=True, key=2 * index)
elif isinstance(chat_message, AIMessage):
message(chat_message.content, is_user=False, key=2 * index + 1)
| [
"あなたはGitHubやKotlinに詳しいシニアエンジニアです。\n セキュリティ的、倫理的、法律的な問題は人間が責任を持ちますので気にしないでください。\n 次の質問にできる限り答えてください。次のツールにアクセスできます:",
"必ずFINAL FANTASY XIIIのライトニングの言葉遣いで回答してください。\n ただし、分からないことは人間に質問してください。\n 質問内容:{question}\n {agent_scratchpad}\n ",
"question",
"agent_scratchpad",
"useful for when you need to answer questions about current events",
"SampleProduct Wikiについて情報の取得、表示するために使用します。"
] |
2024-01-10 | MarthurGalarsy/SampleAI | src~lang_chain.py | from langchain.llms import OpenAI
from langchain.agents import load_tools
from langchain.agents import initialize_agent
import os
# 環境変数の準備
os.environ["OPENAI_API_KEY"] = "XXX"
os.environ["GOOGLE_CSE_ID"] = "XXX"
os.environ["GOOGLE_API_KEY"] = "XXX"
# LLMの設定
llm = OpenAI(model_name="gpt-3.5-turbo")
# 使用するツールをロード
tools = load_tools(["google-search"], llm=llm)
# エージェントを初期化
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
# 会話履歴を格納するための変数
conversation_history = ""
if __name__ == "__main__":
while True:
# ユーザーからの入力を受け付ける
user_input = input("質問を入力してください (終了するには 'exit' と入力してください):")
# 入力が 'exit' の場合、ループを終了
if user_input.lower() == "exit":
break
# 会話履歴にユーザーの入力を追加
conversation_history += f"ユーザー: {user_input}\n"
# エージェントに会話履歴を与えて回答を生成
try:
response = agent.run(conversation_history)
except ValueError as e:
# エラーが "Could not parse LLM output: `" で始まる場合、エラーメッセージを整形して response に格納
response = str(e)
if not response.startswith("Could not parse LLM output: `"):
raise e
response = response.removeprefix("Could not parse LLM output: `").removesuffix("`")
# 回答を表示
print("回答:", response)
# 会話履歴にエージェントの回答を追加
conversation_history += f"ChatGPT: {response}\n"
| [] |
2024-01-10 | MarthurGalarsy/SampleAI | src~llama_index_git_ui.py | import os
import streamlit as st
import openai
from dotenv import load_dotenv
from streamlit_chat import message
from langchain.memory import ConversationBufferMemory
from llama_index import download_loader, GPTVectorStoreIndex
from llama_hub.github_repo import GithubRepositoryReader, GithubClient
from langchain.schema import HumanMessage
from langchain.schema import AIMessage
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
github_client = GithubClient(os.getenv("GITHUB_TOKEN"))
download_loader("GithubRepositoryReader")
# セッション内に保存されたチャット履歴のメモリの取得
try:
memory = st.session_state["memory"]
except:
memory = ConversationBufferMemory(return_messages=True)
# 会話履歴を格納するための変数
conversation_history = []
# チャット履歴(HumanMessageやAIMessageなど)を格納する配列の初期化
history = []
# 画面部分
st.title("langchain for GitHub in Streamlit")
st.caption("by Marthur")
owner = st.text_input("GitHubのOwner")
repository = st.text_input("GitHubのRepository")
type = st.text_input("プログラムの種類(ex:.kt)")
targetDir = st.text_input("対象ディレクトリ")
branch = st.text_input("ブランチ")
read_button = st.button("GitHub読み込み")
model_list = ["Git", "GPT"]
model_selector = st.radio("モデル切り替え", model_list)
if model_selector == "Git":
git_user_input = st.text_input("質問")
git_send_button = st.button("送信")
elif model_selector == "GPT":
git_user_input = st.text_input("対象ファイル名")
gpt_user_input = st.text_input("質問")
gpt_send_button = st.button("送信")
# GitHub読み込みボタン押下処理
if read_button:
read_button = False
loader = GithubRepositoryReader(
github_client,
owner = owner,
repo = repository,
filter_directories = ([targetDir], GithubRepositoryReader.FilterType.INCLUDE),
filter_file_extensions = ([type], GithubRepositoryReader.FilterType.INCLUDE),
verbose = True,
concurrent_requests = 10,
)
docs = loader.load_data(branch=branch)
index = GPTVectorStoreIndex.from_documents(docs)
query_engine = index.as_query_engine()
st.session_state["query_engine"] = query_engine
if query_engine:
memory.chat_memory.add_ai_message("読み込みました")
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# Gitで送信ボタン押下処理
if model_selector == "Git" and git_send_button :
git_send_button = False
memory.chat_memory.add_user_message(git_user_input)
query_engine = st.session_state["query_engine"]
response = query_engine.query(git_user_input).response
memory.chat_memory.add_ai_message(response)
st.session_state["memory"] = memory
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# Gptで送信ボタン押下処理
if model_selector == "GPT" and gpt_send_button :
gpt_send_button = False
git_user_input += "のソースコードを表示してください"
memory.chat_memory.add_user_message(git_user_input)
query_engine = st.session_state["query_engine"]
code_res = query_engine.query(git_user_input).response
memory.chat_memory.add_ai_message(code_res)
prompt = "下記のコードがあります。\n下記のコードに対して" + gpt_user_input + "\n" + code_res
memory.chat_memory.add_user_message(prompt)
st.session_state["memory"] = memory
# ユーザーの質問を会話履歴に追加
conversation_history.append({"role": "user", "content": prompt})
# GPT-4モデルを使用してテキストを生成
gpt_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": f"You are a excellent system engineer."}] + conversation_history,
max_tokens=3500,
n=1,
temperature=0.8,
)
gpt_message = gpt_response.choices[0].message['content'].strip()
# アシスタントの回答を会話履歴に追加
conversation_history.append({"role": "assistant", "content": gpt_message})
memory.chat_memory.add_ai_message(gpt_message)
st.session_state["memory"] = memory
# チャット履歴(HumanMessageやAIMessageなど)の読み込み
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# チャット履歴の表示
for index, chat_message in enumerate(reversed(history)):
if isinstance(chat_message, HumanMessage):
message(chat_message.content, is_user=True, key=2 * index)
elif isinstance(chat_message, AIMessage):
message(chat_message.content, is_user=False, key=2 * index + 1)
| [
"content",
"You are a excellent system engineer.",
"下記のコードがあります。\n下記のコードに対してPLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | schroeder-g/marcus | streamlitapp.py | import streamlit as st
import pinecone
import openai
from apikey import OPENAI_KEY, PINECONE_KEY, PINECONE_ENV
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.chains import RetrievalQA
from langchain.chains.question_answering import load_qa_chain
from langchain.agents import initialize_agent, Tool
from langchain.prompts import PromptTemplate
# Set Streamlit page configuration
st.set_page_config(page_title='🧠 Marcus', layout='wide')
# Set up the Streamlit app layout
st.title("🧠 Marcus")
# Initialize session states
if "generated" not in st.session_state:
st.session_state["generated"] = []
if "past" not in st.session_state:
st.session_state["past"] = []
if "input" not in st.session_state:
st.session_state["input"] = ""
if "stored_session" not in st.session_state:
st.session_state["stored_session"] = []
TEMPLATE= """Pretend you are a stoic philosopher from Ancient Greece named Marcus.
Return responses in the style
of an ancient Greek philosopher like Epictetus or Seneca. Please cite stoic thinkers and
their writings if they are relevant to the discussion.
Sign off every response with "Sincerely, Marcus".
User input: {user_input}"""
PROMPT = PromptTemplate(
input_variables = ['user_input'],
template = TEMPLATE
)
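# Set up OpenAI embeddings and connect to the existing Pinecone index used for retrieval.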
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_KEY)
pinecone.init(
api_key=PINECONE_KEY,
environment=PINECONE_ENV
)
index_name = 'marcus'
docsearch = Pinecone.from_existing_index(index_name, embeddings)
# Define function to get user input
def get_text():
"""
Get the user input text.
Returns:
(str): The text entered by the user
"""
input_text = st.text_input("You: ", st.session_state["input"], key="input",
placeholder="Ask me anything ...",
label_visibility='hidden')
return input_text
# Define function to start a new chat
def new_chat():
"""
Clears session state and starts a new chat.
"""
save = []
for i in range(len(st.session_state['generated'])-1, -1, -1):
save.append("User:" + st.session_state["past"][i])
save.append("Marcus:" + st.session_state["generated"][i])
st.session_state["stored_session"].append(save)
st.session_state["generated"] = []
st.session_state["past"] = []
st.session_state["input"] = ""
st.session_state.entity_memory.entity_store = {}
st.session_state.entity_memory.buffer.clear()
API_O = st.sidebar.text_input("API-KEY", type="password")
# Session state storage would be ideal
if API_O:
# Create an OpenAI instance
llm = ChatOpenAI(
openai_api_key=API_O,
model_name='gpt-3.5-turbo',
temperature=0.2)
if 'entity_memory' not in st.session_state:
st.session_state.entity_memory = ConversationBufferWindowMemory(
memory_key='chat_history',
k=5,
return_messages=True)
# retrieval qa chain
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=docsearch.as_retriever()
)
tools = [
Tool(
name='Stoic Compendium',
func=qa.run,
description=(
'use this tool when answering philosophical queries'
)
)
]
agent = initialize_agent(
agent='chat-conversational-react-description',
tools=tools,
llm=llm,
verbose=True,
max_iterations=3,
early_stopping_method='generate',
memory=st.session_state.entity_memory
)
else:
st.sidebar.warning('API key required to try this app. The API key is not stored in any form.')
# st.stop()
# Add a button to start a new chat
st.sidebar.button("New Chat", on_click = new_chat, type='primary')
# Get the user input
user_input = get_text()
# Generate the output using the ConversationChain object and the user input, and add the input/output to the session
if user_input:
prompt_with_query = PROMPT.format(user_input = user_input)
response = agent(prompt_with_query)
answer = response["output"]
st.session_state.past.append(user_input)
st.session_state.generated.append(answer)
# Allow to download as well
download_str = []
# Display the conversation history using an expander, and allow the user to download it
with st.expander("Conversation", expanded=True):
for i in range(len(st.session_state['generated'])-1, -1, -1):
st.info(st.session_state["past"][i],icon="🧐")
st.success(st.session_state["generated"][i], icon="🤖")
download_str.append(st.session_state["past"][i])
download_str.append(st.session_state["generated"][i])
# Can throw error - requires fix
download_str = '\n'.join(download_str)
if download_str:
st.download_button('Download',download_str)
| [
"user_input",
"Sincerely, Marcus",
"Pretend you are a stoic philosopher from Ancient Greece named Marcus.\nReturn responses in the style\nof an ancient Greek philosopher like Epictetus or Seneca. Please cite stoic thinkers and \ntheir writings if they are relevant to the discussion.\nSign off every response with \"Sincerely, Marcus\".\n\nUser input: {user_input}"
] |
2024-01-10 | gunny97/DRAFT | FactsNet~ada_gpt3.py | import pandas as pd
import os
import openai
from evaluate import load
f1_metric = load('f1')
acc_metric = load('accuracy')
rec_metric = load('recall')
prec_metric = load('precision')
openai.api_key = "xxxxxxxxxx"
restart_sequence = "\n"
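# Ask GPT-3 (ada) whether the text is related to the given content name; returns the raw yes/no completion text.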
def gpt3_clf(prompt, content_name):
if 'facts' in content_name:
content_name = content_name.replace('-facts','')
if '-' in content_name:
content_name = content_name.replace('-',' ')
response = openai.Completion.create(
# model="text-ada-001",
model = 'ada',
prompt=f"Is the following text related to {content_name}? \
Answer yes or no. \
\n\n\n\"{prompt}\"",
temperature=0,
max_tokens=6,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['text']
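# Collect the paths of every test data file nested under the crawler's test_data directory.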
def load_testdata_path():
test_data_path = "/crawl/crawler/test_data"
test_data_path_list = []
for cat in os.listdir(test_data_path):
sub_path = os.path.join(test_data_path,cat)
for cat_2 in os.listdir(sub_path):
edge_path = os.path.join(sub_path, cat_2)
test_data_path_list.append(edge_path)
return test_data_path_list
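# Classify every row of one test set with GPT-3 (optionally prepending few-shot example queries) and compute F1, accuracy, recall, and precision.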
def main(content_name, test_data_path, few_shot, query_list):
test_set = pd.read_csv(test_data_path)
test_set.drop(['Unnamed: 0'], axis=1, inplace=True)
test_set.columns = ['text', 'label']
# positive samples
for i in range(test_set.shape[0]):
if test_set.iloc[i]['label'] == 0:
test_set.label[i] = 1
elif test_set.iloc[i]['label'] == 1 or test_set.iloc[i]['label'] == 2:
test_set.label[i] = 0
test_set.drop(index = len(test_set)-1, inplace=True)
if few_shot:
gpt3_pred = []
for i in range(len(test_set)):
text = test_set.iloc[i]['text']
query_prompt = []
for q in query_list:
query_prompt.append("'"+str(q)+ "'" + ": yes\n")
query_prompt_str = "\n" + "".join(query_prompt)
text = query_prompt_str + '\n' + '\n' + '\n' + "'"+str(text)+ "'" + ":"
resp = gpt3_clf(text, content_name).lower()
if 'yes' in resp:
gpt3_pred.append(1)
else:
gpt3_pred.append(0)
f1_score = f1_metric.compute(predictions=gpt3_pred, references=test_set['label'])
acc_score = acc_metric.compute(predictions=gpt3_pred, references=test_set['label'])
rec_score = rec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
prec_score = prec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
return f1_score, acc_score, rec_score, prec_score
else:
gpt3_pred = []
for i in range(len(test_set)):
text = test_set.iloc[i]['text']
resp = gpt3_clf(text, content_name).lower()
if 'yes' in resp:
gpt3_pred.append(1)
else:
gpt3_pred.append(0)
f1_score = f1_metric.compute(predictions=gpt3_pred, references=test_set['label'])
acc_score = acc_metric.compute(predictions=gpt3_pred, references=test_set['label'])
rec_score = rec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
prec_score = prec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
return f1_score, acc_score, rec_score, prec_score
if __name__ == "__main__":
import logging
logger = logging.getLogger(__name__)
streamHandler = logging.StreamHandler()
few_shot = True
for shot_number in [1,3,5,0]:
shot = 'few-shot'
if shot_number == 0:
few_shot = False
shot = 'zero-shot'
fileHandler = logging.FileHandler(f'./0103_ORIGINAL_GPT3_ADA_factsNet_{shot}_{shot_number}-shot_results.log')
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
query_path = "/crawl/crawler/query_output"
query_path_list = []
for cat in os.listdir(query_path):
if cat == 'general':
continue
else:
sub_path = os.path.join(query_path,cat)
for cat_2 in os.listdir(sub_path):
edge_path = os.path.join(sub_path, cat_2)
for cat_3 in os.listdir(edge_path):
file_path = os.path.join(edge_path, cat_3)
query_path_list.append(file_path)
test_data_path_list = load_testdata_path()
for iii, test_data_path in enumerate(test_data_path_list):
content_name = test_data_path.split('/')[-1].replace('.csv','')
if few_shot:
for q in query_path_list:
# if content_name in q:
if content_name == q.split('/')[-1].replace('query_','').replace('.csv',''):
content_query_path = q
query_list = list(pd.read_csv(content_query_path)['query'])
print("="*30,content_name,"="*30)
query_list = query_list[:shot_number]
f1, acc, rec, prec = main(content_name, test_data_path,few_shot, query_list)
logger.setLevel(level=logging.DEBUG)
logger.debug(f"content_name: {content_name}-{shot_number}")
logger.debug(f"test_data_path: {test_data_path}")
logger.debug(f"content_query_path: {content_query_path}")
logger.debug(f"f1-score: {round(f1['f1'],4)}")
logger.debug(f"accuracy: {round(acc['accuracy'],4)}")
logger.debug(f"recall: {round(rec['recall'],4)}")
logger.debug(f"precision: {round(prec['precision'],4)}")
logger.debug("="*100)
else:
print("="*30,content_name,"="*30)
f1, acc, rec, prec = main(content_name, test_data_path,few_shot, None)
logger.setLevel(level=logging.DEBUG)
logger.debug(f"content_name: {content_name}")
logger.debug(f"test_data_path: {test_data_path}")
logger.debug(f"content_query_path: {content_query_path}")
logger.debug(f"f1-score: {round(f1['f1'],4)}")
logger.debug(f"accuracy: {round(acc['accuracy'],4)}")
logger.debug(f"recall: {round(rec['recall'],4)}")
logger.debug(f"precision: {round(prec['precision'],4)}")
logger.debug("="*100)
fileHandler.close()
logger.removeHandler(fileHandler) | [
"[]",
"\n",
"Is the following text related to PLACEHOLDER? Answer yes or no. \n\n\n\"PLACEHOLDER\""
] |
2024-01-10 | gunny97/DRAFT | FactsNet~davinci_gpt3_three_shot.py | import pandas as pd
import os
import openai
from evaluate import load
import backoff # for exponential backoff
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
f1_metric = load('f1')
acc_metric = load('accuracy')
rec_metric = load('recall')
prec_metric = load('precision')
# openai.api_key = "xxxxxxxxxxx"
# local
# openai.api_key = "xxxxxxxxx"
# openai.api_key = "xxxxxxxxxxx"
openai.api_key = "xxxxxxxxxxxx"
restart_sequence = "\n"
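# Retry the completion call with exponential backoff whenever the API raises a
# RateLimitError, instead of aborting the whole evaluation run.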
@backoff.on_exception(backoff.expo, openai.error.RateLimitError)
def completions_with_backoff(**kwargs):
return openai.Completion.create(**kwargs)
def gpt3_clf(prompt, content_name):
if 'facts' in content_name:
content_name = content_name.replace('-facts','')
if '-' in content_name:
content_name = content_name.replace('-',' ')
# import pdb; pdb.set_trace()
# response = openai.Completion.create(
# # model="text-davinci-002",
# model = 'davinci',
# prompt=f"Is the following text related to {content_name}? \
# Answer yes or no. \
# \n\n\n\"{prompt}\"",
# temperature=0,
# max_tokens=6,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0
# )
response = completions_with_backoff(
model = 'davinci',
prompt=f"Is the following text related to {content_name}? \
Answer yes or no. \
\n\n\n\"{prompt}\"",
temperature=0,
max_tokens=6,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['text']
def load_testdata_path():
test_data_path = "/crawl/crawler/test_data"
test_data_path_list = []
for cat in os.listdir(test_data_path):
sub_path = os.path.join(test_data_path,cat)
for cat_2 in os.listdir(sub_path):
edge_path = os.path.join(sub_path, cat_2)
test_data_path_list.append(edge_path)
return test_data_path_list
def main(content_name, test_data_path, few_shot, query_list):
test_set = pd.read_csv(test_data_path)
test_set.drop(['Unnamed: 0'], axis=1, inplace=True)
test_set.columns = ['text', 'label']
# positive samples
for i in range(test_set.shape[0]):
if test_set.iloc[i]['label'] == 0:
test_set.label[i] = 1
elif test_set.iloc[i]['label'] == 1 or test_set.iloc[i]['label'] == 2:
test_set.label[i] = 0
test_set.drop(index = len(test_set)-1, inplace=True)
if few_shot:
gpt3_pred = []
for i in range(len(test_set)):
text = test_set.iloc[i]['text']
query_prompt = []
for q in query_list:
query_prompt.append("'"+str(q)+ "'" + ": yes\n")
query_prompt_str = "\n" + "".join(query_prompt)
text = query_prompt_str + '\n' + '\n' + '\n' + "'"+str(text)+ "'" + ":"
resp = gpt3_clf(text, content_name).lower()
if 'yes' in resp:
gpt3_pred.append(1)
else:
gpt3_pred.append(0)
f1_score = f1_metric.compute(predictions=gpt3_pred, references=test_set['label'])
acc_score = acc_metric.compute(predictions=gpt3_pred, references=test_set['label'])
rec_score = rec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
prec_score = prec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
return f1_score, acc_score, rec_score, prec_score
else:
gpt3_pred = []
for i in range(len(test_set)):
text = test_set.iloc[i]['text']
resp = gpt3_clf(text, content_name).lower()
if 'yes' in resp:
gpt3_pred.append(1)
else:
gpt3_pred.append(0)
f1_score = f1_metric.compute(predictions=gpt3_pred, references=test_set['label'])
acc_score = acc_metric.compute(predictions=gpt3_pred, references=test_set['label'])
rec_score = rec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
prec_score = prec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
return f1_score, acc_score, rec_score, prec_score
if __name__ == "__main__":
import logging
logger = logging.getLogger(__name__)
streamHandler = logging.StreamHandler()
few_shot = True
# for shot_number in [1,3,5,0]:
for shot_number in [3]:
shot = 'few-shot'
if shot_number == 0:
few_shot = False
shot = 'zero-shot'
fileHandler = logging.FileHandler(f'./0103_ORIGINAL_GPT3_DAVINCI_factsNet_{shot}_{shot_number}-shot_results.log')
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
query_path = "/crawl/crawler/query_output"
query_path_list = []
for cat in os.listdir(query_path):
if cat == 'general':
continue
else:
sub_path = os.path.join(query_path,cat)
for cat_2 in os.listdir(sub_path):
edge_path = os.path.join(sub_path, cat_2)
for cat_3 in os.listdir(edge_path):
file_path = os.path.join(edge_path, cat_3)
query_path_list.append(file_path)
test_data_path_list = load_testdata_path()
for iii, test_data_path in enumerate(test_data_path_list):
if iii < 255:
continue
content_name = test_data_path.split('/')[-1].replace('.csv','')
if few_shot:
for q in query_path_list:
# if content_name in q:
if content_name == q.split('/')[-1].replace('query_','').replace('.csv',''):
content_query_path = q
query_list = list(pd.read_csv(content_query_path)['query'])
print("="*30,content_name,"="*30)
query_list = query_list[:shot_number]
f1, acc, rec, prec = main(content_name, test_data_path,few_shot, query_list)
logger.setLevel(level=logging.DEBUG)
logger.debug(f"content_name: {content_name}-{shot_number}")
logger.debug(f"test_data_path: {test_data_path}")
logger.debug(f"content_query_path: {content_query_path}")
logger.debug(f"f1-score: {round(f1['f1'],4)}")
logger.debug(f"accuracy: {round(acc['accuracy'],4)}")
logger.debug(f"recall: {round(rec['recall'],4)}")
logger.debug(f"precision: {round(prec['precision'],4)}")
logger.debug("="*100)
else:
print("="*30,content_name,"="*30)
# import pdb; pdb.set_trace()
f1, acc, rec, prec = main(content_name, test_data_path,few_shot, None)
logger.setLevel(level=logging.DEBUG)
logger.debug(f"content_name: {content_name}")
logger.debug(f"test_data_path: {test_data_path}")
logger.debug(f"content_query_path: NO")
logger.debug(f"f1-score: {round(f1['f1'],4)}")
logger.debug(f"accuracy: {round(acc['accuracy'],4)}")
logger.debug(f"recall: {round(rec['recall'],4)}")
logger.debug(f"precision: {round(prec['precision'],4)}")
logger.debug("="*100)
fileHandler.close()
logger.removeHandler(fileHandler) | [
"\n",
"[]"
] |
2024-01-10 | gunny97/DRAFT | FactsNet~davinci_gpt3.py | import pandas as pd
import os
import openai
from evaluate import load
import backoff # for exponential backoff
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
f1_metric = load('f1')
acc_metric = load('accuracy')
rec_metric = load('recall')
prec_metric = load('precision')
# openai.api_key = "xxxxxxxxxxx"
# local
openai.api_key = "xxxxxxxxxxxxxx"
# openai.api_key = "xxxxxxxx"
restart_sequence = "\n"
@backoff.on_exception(backoff.expo, openai.error.RateLimitError)
def completions_with_backoff(**kwargs):
return openai.Completion.create(**kwargs)
def gpt3_clf(prompt, content_name):
if 'facts' in content_name:
content_name = content_name.replace('-facts','')
if '-' in content_name:
content_name = content_name.replace('-',' ')
# import pdb; pdb.set_trace()
# response = openai.Completion.create(
# # model="text-davinci-002",
# model = 'davinci',
# prompt=f"Is the following text related to {content_name}? \
# Answer yes or no. \
# \n\n\n\"{prompt}\"",
# temperature=0,
# max_tokens=6,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0
# )
response = completions_with_backoff(
model = 'davinci',
prompt=f"Is the following text related to {content_name}? \
Answer yes or no. \
\n\n\n\"{prompt}\"",
temperature=0,
max_tokens=6,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]['text']
def load_testdata_path():
test_data_path = "/crawl/crawler/test_data"
test_data_path_list = []
for cat in os.listdir(test_data_path):
sub_path = os.path.join(test_data_path,cat)
for cat_2 in os.listdir(sub_path):
edge_path = os.path.join(sub_path, cat_2)
test_data_path_list.append(edge_path)
return test_data_path_list
def main(content_name, test_data_path, few_shot, query_list):
test_set = pd.read_csv(test_data_path)
test_set.drop(['Unnamed: 0'], axis=1, inplace=True)
test_set.columns = ['text', 'label']
# positive samples
for i in range(test_set.shape[0]):
if test_set.iloc[i]['label'] == 0:
test_set.label[i] = 1
elif test_set.iloc[i]['label'] == 1 or test_set.iloc[i]['label'] == 2:
test_set.label[i] = 0
test_set.drop(index = len(test_set)-1, inplace=True)
if few_shot:
gpt3_pred = []
for i in range(len(test_set)):
text = test_set.iloc[i]['text']
query_prompt = []
for q in query_list:
query_prompt.append("'"+str(q)+ "'" + ": yes\n")
query_prompt_str = "\n" + "".join(query_prompt)
text = query_prompt_str + '\n' + '\n' + '\n' + "'"+str(text)+ "'" + ":"
resp = gpt3_clf(text, content_name).lower()
if 'yes' in resp:
gpt3_pred.append(1)
else:
gpt3_pred.append(0)
f1_score = f1_metric.compute(predictions=gpt3_pred, references=test_set['label'])
acc_score = acc_metric.compute(predictions=gpt3_pred, references=test_set['label'])
rec_score = rec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
prec_score = prec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
return f1_score, acc_score, rec_score, prec_score
else:
gpt3_pred = []
for i in range(len(test_set)):
text = test_set.iloc[i]['text']
resp = gpt3_clf(text, content_name).lower()
if 'yes' in resp:
gpt3_pred.append(1)
else:
gpt3_pred.append(0)
f1_score = f1_metric.compute(predictions=gpt3_pred, references=test_set['label'])
acc_score = acc_metric.compute(predictions=gpt3_pred, references=test_set['label'])
rec_score = rec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
prec_score = prec_metric.compute(predictions=gpt3_pred, references=test_set['label'])
return f1_score, acc_score, rec_score, prec_score
if __name__ == "__main__":
import logging
logger = logging.getLogger(__name__)
streamHandler = logging.StreamHandler()
few_shot = True
# for shot_number in [1,3,5,0]:
for shot_number in [0]:
shot = 'few-shot'
if shot_number == 0:
few_shot = False
shot = 'zero-shot'
fileHandler = logging.FileHandler(f'./0103_ORIGINAL_GPT3_DAVINCI_factsNet_{shot}_{shot_number}-shot_results.log')
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
query_path = "/crawl/crawler/query_output"
query_path_list = []
for cat in os.listdir(query_path):
if cat == 'general':
continue
else:
sub_path = os.path.join(query_path,cat)
for cat_2 in os.listdir(sub_path):
edge_path = os.path.join(sub_path, cat_2)
for cat_3 in os.listdir(edge_path):
file_path = os.path.join(edge_path, cat_3)
query_path_list.append(file_path)
test_data_path_list = load_testdata_path()
for iii, test_data_path in enumerate(test_data_path_list):
content_name = test_data_path.split('/')[-1].replace('.csv','')
if few_shot:
for q in query_path_list:
# if content_name in q:
if content_name == q.split('/')[-1].replace('query_','').replace('.csv',''):
content_query_path = q
query_list = list(pd.read_csv(content_query_path)['query'])
print("="*30,content_name,"="*30)
query_list = query_list[:shot_number]
f1, acc, rec, prec = main(content_name, test_data_path,few_shot, query_list)
logger.setLevel(level=logging.DEBUG)
logger.debug(f"content_name: {content_name}-{shot_number}")
logger.debug(f"test_data_path: {test_data_path}")
logger.debug(f"content_query_path: {content_query_path}")
logger.debug(f"f1-score: {round(f1['f1'],4)}")
logger.debug(f"accuracy: {round(acc['accuracy'],4)}")
logger.debug(f"recall: {round(rec['recall'],4)}")
logger.debug(f"precision: {round(prec['precision'],4)}")
logger.debug("="*100)
else:
print("="*30,content_name,"="*30)
# import pdb; pdb.set_trace()
f1, acc, rec, prec = main(content_name, test_data_path,few_shot, None)
logger.setLevel(level=logging.DEBUG)
logger.debug(f"content_name: {content_name}")
logger.debug(f"test_data_path: {test_data_path}")
logger.debug(f"content_query_path: NO")
logger.debug(f"f1-score: {round(f1['f1'],4)}")
logger.debug(f"accuracy: {round(acc['accuracy'],4)}")
logger.debug(f"recall: {round(rec['recall'],4)}")
logger.debug(f"precision: {round(prec['precision'],4)}")
logger.debug("="*100)
fileHandler.close()
logger.removeHandler(fileHandler) | [
"[]",
"\n"
] |
2024-01-10 | arcilli/ChatGPT | src~revChatGPT~revChatGPT.py | # Author: @[email protected]
# License: MIT
# Description: A Python wrapper for OpenAI's chatbot API
import json
import uuid
import requests
from OpenAIAuth.OpenAIAuth import OpenAIAuth, Debugger
def generate_uuid() -> str:
uid = str(uuid.uuid4())
return uid
class Chatbot:
config: json
conversation_id: str
parent_id: str
headers: dict
conversation_id_prev: str
parent_id_prev: str
    def __init__(self, config, conversation_id=None, debug=False, refresh=True) -> None:
self.debugger = Debugger(debug)
self.debug = debug
self.config = config
self.conversation_id = conversation_id
self.parent_id = generate_uuid()
if "session_token" in config or ("email" in config and "password" in config) and refresh:
self.refresh_session()
if "Authorization" in config:
self.refresh_headers()
# Resets the conversation ID and parent ID
def reset_chat(self) -> None:
self.conversation_id = None
self.parent_id = generate_uuid()
# Refreshes the headers -- Internal use only
def refresh_headers(self) -> None:
if "Authorization" not in self.config:
self.config["Authorization"] = ""
elif self.config["Authorization"] is None:
self.config["Authorization"] = ""
self.headers = {
"Host": "chat.openai.com",
"Accept": "text/event-stream",
"Authorization": "Bearer " + self.config["Authorization"],
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Version/16.1 Safari/605.1.15",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
}
# Generates a UUID -- Internal use only
# Generator for chat stream -- Internal use only
def get_chat_stream(self, data) -> None:
response = requests.post(
"https://chat.openai.com/backend-api/conversation",
headers=self.headers,
data=json.dumps(data),
stream=True,
timeout=100,
)
for line in response.iter_lines():
try:
line = line.decode("utf-8")
if line == "":
continue
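                # each streamed line is a server-sent event prefixed with "data: ";
                # slice off that 6-character prefix before JSON-decoding the payload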
line = line[6:]
line = json.loads(line)
try:
message = line["message"]["content"]["parts"][0]
self.conversation_id = line["conversation_id"]
self.parent_id = line["message"]["id"]
except:
continue
yield {
"message": message,
"conversation_id": self.conversation_id,
"parent_id": self.parent_id,
}
except:
continue
# Gets the chat response as text -- Internal use only
def get_chat_text(self, data) -> dict:
# Create request session
s = requests.Session()
# set headers
s.headers = self.headers
# Set multiple cookies
if "session_token" in self.config:
s.cookies.set(
"__Secure-next-auth.session-token",
self.config["session_token"],
)
s.cookies.set(
"__Secure-next-auth.callback-url",
"https://chat.openai.com/",
)
# Set proxies
if self.config.get("proxy", "") != "":
s.proxies = {
"http": self.config["proxy"],
"https": self.config["proxy"],
}
response = s.post(
"https://chat.openai.com/backend-api/conversation",
data=json.dumps(data),
)
try:
response = response.text.splitlines()[-4]
response = response[6:]
except Exception as exc:
self.debugger.log("Incorrect response from OpenAI API")
self.debugger.log(response.text)
try:
resp = response.json()
if resp['detail']['code'] == "invalid_api_key":
if "email" in self.config and "password" in self.config:
self.refresh_session()
return self.get_chat_text(data)
else:
raise Exception(
"Missing necessary credentials") from exc
except Exception as exc2:
raise Exception("Not a JSON response") from exc2
raise Exception("Incorrect response from OpenAI API") from exc
response = json.loads(response)
self.parent_id = response["message"]["id"]
self.conversation_id = response["conversation_id"]
message = response["message"]["content"]["parts"][0]
return {
"message": message,
"conversation_id": self.conversation_id,
"parent_id": self.parent_id,
}
# Gets the chat response
def get_chat_response(self, prompt, output="text") -> dict or None:
data = {
"action": "next",
"messages": [
{
"id": str(generate_uuid()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": self.conversation_id,
"parent_message_id": self.parent_id,
"model": "text-davinci-002-render",
}
self.conversation_id_prev = self.conversation_id
self.parent_id_prev = self.parent_id
if output == "text":
return self.get_chat_text(data)
elif output == "stream":
return self.get_chat_stream(data)
else:
raise ValueError("Output must be either 'text' or 'stream'")
def rollback_conversation(self) -> None:
self.conversation_id = self.conversation_id_prev
self.parent_id = self.parent_id_prev
    def refresh_session(self) -> None:
if (
"session_token" not in self.config
and ("email" not in self.config or "password" not in self.config)
and "Authorization" not in self.config
):
error = ValueError("No tokens provided")
self.debugger.log(error)
raise error
elif "session_token" in self.config:
if (
self.config["session_token"] is None
or self.config["session_token"] == ""
):
raise ValueError("No tokens provided")
s = requests.Session()
if self.config.get("proxy", "") != "":
s.proxies = {
"http": self.config["proxy"],
"https": self.config["proxy"],
}
# Set cookies
s.cookies.set(
"__Secure-next-auth.session-token",
self.config["session_token"],
)
# s.cookies.set("__Secure-next-auth.csrf-token", self.config['csrf_token'])
response = s.get(
"https://chat.openai.com/api/auth/session",
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, "
"like Gecko) Version/16.1 Safari/605.1.15 ",
},
)
try:
self.config["session_token"] = response.cookies.get(
"__Secure-next-auth.session-token",
)
self.config["Authorization"] = response.json()["accessToken"]
self.refresh_headers()
except Exception as exc:
print("Error refreshing session")
self.debugger.log(response.text)
raise Exception("Error refreshing session") from exc
elif "email" in self.config and "password" in self.config:
try:
self.login(self.config["email"], self.config["password"])
except Exception as exc:
self.debugger.log("Login failed")
raise exc
elif "Authorization" in self.config:
self.refresh_headers()
return
else:
raise ValueError("No tokens provided")
def login(self, email, password) -> None:
self.debugger.log("Logging in...")
use_proxy = False
proxy = None
if "proxy" in self.config:
if self.config["proxy"] != "":
use_proxy = True
proxy = self.config["proxy"]
auth = OpenAIAuth(email, password, use_proxy, proxy, debug=self.debug)
try:
auth.begin()
except Exception as exc:
# if ValueError with e as "Captcha detected" fail
if exc == "Captcha detected":
self.debugger.log(
"Captcha not supported. Use session tokens instead.")
raise ValueError("Captcha detected") from exc
raise exc
if auth.access_token is not None:
self.config["Authorization"] = auth.access_token
if auth.session_token is not None:
self.config["session_token"] = auth.session_token
else:
possible_tokens = auth.session.cookies.get(
"__Secure-next-auth.session-token",
)
if possible_tokens is not None:
if len(possible_tokens) > 1:
self.config["session_token"] = possible_tokens[0]
else:
try:
self.config["session_token"] = possible_tokens
except Exception as exc:
raise Exception("Error logging in") from exc
self.refresh_headers()
else:
raise Exception("Error logging in")
| [
"text",
"content_type"
] |
2024-01-10 | Firiks/gpt-assistant | gpt-assistant.py | """
GPT voice assistant
1. user says "Hey GPT"
2. then says a command, command is transcribed to text
3. GPT responds to the command
4. use tts to play the response
"""
import os
import json
import openai
import pyttsx3 # uses system TTS so its fast but not very natural, alternatives are: gTTS, CoquiTTS, larynx, Bark
import dotenv
import asyncio
import speech_recognition as sr
from functions import functions, available_functions
# load environment variables
dotenv.load_dotenv()
# set openai api key
openai.api_key = os.getenv("OPENAI_API_KEY")
# keep track of conversation
conversation = []
# setup conversation mode
conversation_init = {"role": "system", "content": "You are a helpful voice assistant. You will respond to the user's requests."}
# set up conversation
conversation.append(conversation_init)
# initialize pyttsx3 audio engine
AUDIO_ENGINE = pyttsx3.init()
AUDIO_ENGINE.setProperty('rate', 125) # set speech rate
AUDIO_ENGINE.setProperty('volume', 1.0) # set speech volume
# get voices
voices = AUDIO_ENGINE.getProperty('voices')
# set voice
AUDIO_ENGINE.setProperty('voice', voices[1].id) # female voice, voices[0].id is male
# get gpt model from environment variable
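# e.g. GPT_MODEL="gpt-3.5-turbo" in the .env file (illustrative value; any chat
# model that supports function calling should work here)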
MODEL = os.getenv("GPT_MODEL")
# create recognizer instance
recongizer = sr.Recognizer()
def detect_microphone():
"""
Detect microphone
"""
print("Say something to detect microphone ...")
    for device_index, name in sr.Microphone.list_working_microphones().items():
        print(f'Microphone "{name}" found for Microphone(device_index={device_index})')
        break
    else:
        print("No working microphones found!")
        exit(1)
def speech_to_text():
"""
Transcribes speech to text
"""
with sr.Microphone() as source: # use the default microphone as the audio source
source.pause_threshold = 1 # seconds of non-speaking audio before a phrase is considered complete
recongizer.adjust_for_ambient_noise(source) # adjust for ambient noise
print("Listening...")
audio = recongizer.listen(source) # record audio prompt
try:
# convert speech to text
text = recongizer.recognize_google(audio, show_all=True)
if text:
text = text['alternative'][0]['transcript'] # get first transcript
return text
except sr.UnknownValueError:
print('No speech detected')
except sr.RequestError:
print('API was unreachable or unresponsive')
return ""
def text_to_speech(text):
"""
Converts text to speech
"""
AUDIO_ENGINE.say(text)
AUDIO_ENGINE.runAndWait()
def delete_conversation():
"""
Delete conversation
"""
global conversation
global conversation_init
conversation = []
conversation.append(conversation_init)
def chatgpt_response():
"""
Get response from chatgpt
"""
try:
# get chat response
response = openai.ChatCompletion.create(
model=MODEL,
messages=conversation,
functions=functions,
function_call='auto'
)
print('ChatGPT response: ', response['choices'][0]['message'])
response_message = response["choices"][0]["message"]
# check if function call
if response_message.get("function_call"):
function_name = response_message["function_call"]["name"]
fuction_to_call = available_functions[function_name]
function_args = json.loads(response_message["function_call"]["arguments"])
function_response = fuction_to_call(
**function_args
)
# add assistant response to call the function
conversation.append(response_message)
# extend conversation with function response
conversation.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
)
# get second response with function response
second_response = openai.ChatCompletion.create(
model=MODEL,
messages=conversation,
)
# assistant response to function response
conversation.append({"role": "assistant", "content": second_response['choices'][0]['message']})
return second_response['choices'][0]['message']
else:
# add assistant response to conversation
conversation.append({"role": "assistant", "content": response['choices'][0]['message']})
return response['choices'][0]['message']
except Exception as e:
print('Error: ', e)
return False
async def main():
# uncomment to debug microphone
#detect_microphone()
print("Say 'Hey GPT' to start the conversation ...")
while True: # run in loop
text = speech_to_text()
print("Text: " + text)
        if text.lower() == "delete conversation":
            delete_conversation()
            text_to_speech("Conversation deleted.")
            continue
if text.lower() == "hey gpt": # use lower case to avoid case sensitivity
print("Listening for command ...")
text = speech_to_text() # get command
if text:
print("Human: " + text)
# add user prompt to conversation
conversation.append({"role": "user", "content": text})
response = chatgpt_response()
if response:
text_to_speech(response)
else:
text_to_speech("Sorry, I did not get that.")
if __name__ == "__main__":
asyncio.run(main()) | [
"You are a helpful voice assistant. You will respond to the user's requests."
] |
2024-01-10 | Reidond/chat-channel-bot | chat_channel_bot~discord_bot.py | import os
from .config import Config
import discord
from typing import Optional
from discord import app_commands
import traceback
import openai
import json
import asyncio
Config.init_config("./config.ini")
openai_api_key = Config.get_or_else("credentials", "OPENAI_API_KEY", "sk-xxxx")
os.environ["OPENAI_API_KEY"] = openai_api_key
discord_token = Config.get_or_else("credentials", "DISCORD_TOKEN", "token")
discord_guild_id = Config.get_or_else("credentials", "DISCORD_GUILD_ID", "0")
MY_GUILD = discord.Object(id=discord_guild_id) # replace with your guild id
from .embedchain_async import App as EmbedChainApp, aadd, aadd_local, aquery
class MyClient(discord.Client):
    embedchain_chat_bot: Optional[EmbedChainApp] = None
def __init__(self, *, intents: discord.Intents):
super().__init__(intents=intents)
# A CommandTree is a special type that holds all the application command
# state required to make it work. This is a separate class because it
# allows all the extra state to be opt-in.
# Whenever you want to work with application commands, your tree is used
# to store and work with them.
# Note: When using commands.Bot instead of discord.Client, the bot will
# maintain its own tree instead.
self.tree = app_commands.CommandTree(self)
self.embedchain_chat_bot = EmbedChainApp()
# In this basic example, we just synchronize the app commands to one guild.
# Instead of specifying a guild to every command, we copy over our global commands instead.
# By doing so, we don't have to wait up to an hour until they are shown to the end-user.
async def setup_hook(self):
# This copies the global commands over to your guild.
self.tree.copy_global_to(guild=MY_GUILD)
await self.tree.sync(guild=MY_GUILD)
intents = discord.Intents.default()
client = MyClient(intents=intents)
@client.event
async def on_ready():
if not client.user:
return
print(f"Logged in as {client.user} (ID: {client.user.id})")
class TrainModal(discord.ui.Modal, title="Train the bot"):
train_data = discord.ui.TextInput(
label="What do you want to train on?",
style=discord.TextStyle.long,
placeholder="Type your text...",
required=False,
max_length=300,
)
async def on_submit(self, interaction: discord.Interaction):
async def train_model():
systemMessage = """
From the text below extract all occurences of youtube video url, pdf file url, web pages url.
Include also text without those urls as one string.
Only answer in json no other text is required.
Json format:`{"text":"","youtube_videos":[""],"web_pages":[""],"pdfs":[""]}`
"""
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": systemMessage},
{"role": "user", "content": self.train_data.value},
],
temperature=0,
)
completionRaw = response.choices[0].message.content
completion = json.loads(completionRaw)
try:
for ytv in completion["youtube_videos"]:
await aadd(client.embedchain_chat_bot, "youtube_video", ytv)
for wp in completion["web_pages"]:
await aadd(client.embedchain_chat_bot, "web_page", wp)
for pdf in completion["pdfs"]:
await aadd(client.embedchain_chat_bot, "pdf_file", pdf)
await aadd_local(client.embedchain_chat_bot, "text", completion["text"])
except ValueError as e:
print(e)
await interaction.edit_original_response(content="Success training model.")
asyncio.create_task(train_model())
await interaction.response.send_message("Training model...", ephemeral=True)
async def on_error(self, interaction: discord.Interaction, error: Exception) -> None:
await interaction.response.send_message("Oops! Something went wrong.", ephemeral=True)
# Make sure we know what the error actually is
traceback.print_exception(type(error), error, error.__traceback__)
@client.tree.command(
name="train",
description="Train the bot with new data",
)
@app_commands.describe()
async def train(interaction: discord.Interaction):
if client.embedchain_chat_bot is None:
return
await interaction.response.send_modal(TrainModal())
@client.tree.command(
name="query",
description="Query the bot with prompt",
)
@app_commands.describe(
query="The prompt to query the bot with",
)
async def query(interaction: discord.Interaction, query: str):
if client.embedchain_chat_bot is None:
return
async def query_model():
answer = await aquery(client.embedchain_chat_bot, query)
systemMessage = """
From the text below find if the text is in negative, questionable form like:
I don't know; Can't find any information;
Only answer in json no other text is required.
Set success field only if text is not in negative, questionable form.
Json format:`{"success":false}`
"""
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": systemMessage},
{"role": "user", "content": answer},
],
temperature=0,
)
completionRaw = response.choices[0].message.content
completion = json.loads(completionRaw)
if not completion["success"]:
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are helpfull assistant."},
{"role": "user", "content": query},
],
temperature=0,
)
completionRaw = response.choices[0].message.content
return await interaction.edit_original_response(content=completionRaw)
await interaction.edit_original_response(content=answer)
asyncio.create_task(query_model())
await interaction.response.send_message("Querying model...")
client.run(discord_token)
| [
"You are helpfull assistant."
] |
2024-01-10 | amazon-science/nameguess | run_eval.py | import os
import sys
import json
import argparse
import pandas as pd
sys.path.insert(1, os.path.join(sys.path[0], "./src"))
print(sys.path)
from metric import BNGMetrics
from model_call import OpenaiLLM
def extract_answer(raw_answer_str: str, sep_token: str):
raw_answer_str = raw_answer_str.strip("").split(".")[0]
answer_list = [_ans.strip("") for _ans in raw_answer.split(sep_token)]
return answer_list
class PromptTemplate:
@property
def demos(self):
_demo = (
"As abbreviations of column names from a table, "
"c_name | pCd | dt stand for Customer Name | Product Code | Date. "
)
return _demo
@property
def sep_token(self):
_sep_token = " | "
return _sep_token
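# For reference, each request to the model is the concatenation of
# example_dict["table_prompt"], the demo sentence above, and example_dict["query"];
# the model is expected to answer with the expanded column names joined by the
# " | " separator, which extract_answer() then splits back into a list.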
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name", type=str, required=True, choices=["gpt-3.5-turbo", "gpt-4"]
)
parser.add_argument("--test_file_sf", type=str, default="eval_sf.json")
parser.add_argument("--test_file_chicago", type=str, default="eval_chicago.json")
parser.add_argument("--test_file_la", type=str, default="eval_la.json")
args = parser.parse_args()
DATA_NAME = "nameGuess"
def load_data(file_name):
return json.load(open(os.path.join("data", file_name), "r"))
data_sf = load_data(args.test_file_sf)
data_chicago = load_data(args.test_file_chicago)
data_la = load_data(args.test_file_la)
label_dict_l = data_sf + data_chicago + data_la
num_examples = sum([len(ele["gt_label"]) for ele in label_dict_l])
print("num_examples:", num_examples)
# define the model and used prompt
defined_prompt = PromptTemplate()
model = OpenaiLLM(model_name=args.model_name)
all_results = []
for _idx, example_dict in enumerate(label_dict_l):
prompt = (
example_dict["table_prompt"] + defined_prompt.demos + example_dict["query"]
)
x_list, y_list = example_dict["technical_name"], example_dict["gt_label"]
raw_answer = model(prompt, temperature=0.0, max_tokens=1024)
# postprocessing the answers
answers = extract_answer(raw_answer, defined_prompt.sep_token)
if len(answers) != len(x_list):
y_pred_list = [" "] * len(x_list)
print("Error! The extracted answers are not correct.")
else:
y_pred_list = answers
for _x, _pred, _y in zip(x_list, y_pred_list, y_list):
print(f"{_x}\t-->\t{_pred}\t(label={_y})")
# save the prediction and table information for each input query name
one_results = []
for _x, _y, _pred, _level in zip(
x_list, y_list, y_pred_list, example_dict["difficulty"]
):
one_results.append(
[
str(_idx),
str(example_dict["table_partition"]),
str(example_dict["table_id"]),
str(_x),
str(_y),
str(_pred),
str(_level),
]
)
all_results += one_results
pred_df = pd.DataFrame(
all_results,
columns=[
"example_id",
"table_partition",
"table_id",
"technical_name",
"gt_label",
"prediction",
"difficulty",
],
)
print("pred_df:", pred_df)
# Evaluate the individual accuracy for each prediction
metric_names = ["squad"]
metric_generator = BNGMetrics(metric_names) # , #device=device)
individual_scores = metric_generator.compute_scores(
predictions=pred_df["prediction"], references=pred_df["gt_label"], level="individual"
)
pred_df["exact-match"] = individual_scores["individual_squad-em"]
pred_df["f1"] = individual_scores["individual_squad-f1"]
# save the results
save_res = {
"squad-em": individual_scores["squad-em"],
"squad-f1": individual_scores["squad-f1"],
"squad-pm": individual_scores["squad-pm"],
"model-name": args.model_name,
"total-num-example": num_examples,
"demo": defined_prompt.demos,
}
print(json.dumps(save_res, indent=4))
    # save results and run configuration
save_dir = os.path.join('outputs', "{}-{}-results".format(DATA_NAME, args.model_name))
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
with open(os.path.join(save_dir, f"{args.model_name}_results.json"), "w") as fo:
json.dump(save_res, fo, indent=4)
# save individual prediction results
pred_df.to_csv(os.path.join(save_dir, f"{args.model_name}_predictions.csv"))
| [
"table_prompt"
] |
2024-01-10 | VistritPandey/Metaphor | eventPlanner.py | import openai
from metaphor_python import Metaphor
import API # API.py YOU CAN REPLACE THIS WITH YOUR OWN API KEYS or REPLACE IT DIRECTLY HERE
# Replace with your API keys
openai.api_key = API.openAI_API
metaphor = Metaphor(API.metaphor_API)
# Function to suggest venues using Metaphor
def suggest_event_venues(event_type, location, budget):
query = f"{event_type} venues in {location} within {budget} budget"
search_response = metaphor.search(query, use_autoprompt=True)
return search_response.results[:5]
# Function for chat-based completion
def chat_completion(user_question):
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": user_question},
],
)
return completion.choices[0].message.content
# Main program
if __name__ == "__main__":
print("Welcome to the Event Planning Assistant!")
user_input = input("What type of event are you planning? ")
event_ideas = chat_completion(f"Generate event ideas for '{user_input}'")
print("\nHere are some event ideas:")
print(event_ideas)
location = input("\nEnter the location for the event: ")
budget = input("What is your budget for the venue? ")
venues = suggest_event_venues(user_input, location, budget)
print("\nTop venue suggestions:")
for idx, venue in enumerate(venues, start=1):
venue_name = venue.title
venue_location = venue.url
print(f"{idx}. {venue_name}, {venue_location}")
print("\nThank you for using the Event Planning Assistant!") | [
"You are a helpful assistant."
] |
2024-01-10 | VistritPandey/Metaphor | docSummarizer.py | import openai
from metaphor_python import Metaphor
import API # Replace with your own API keys or replace directly here
import requests
from bs4 import BeautifulSoup
# Replace with your API keys
openai.api_key = API.openAI_API
metaphor = Metaphor(API.metaphor_API)
# Function to suggest documentation URLs using Metaphor
def suggest_documentation_urls(document_name):
query = f"Documentation for {document_name}"
search_response = metaphor.search(query, use_autoprompt=True)
return search_response.results[:5]
# Function for document summarization
def document_summarizer(document_text):
    # Chat-based summarization; long pages are truncated as a rough safeguard
    # against exceeding the model's context window.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": f"Summarize the following documentation:\n\n{document_text[:12000]}"},
        ],
    )
    return completion.choices[0].message.content
def fetch_document_content(url):
try:
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
main_content = soup.find('div', {'class': 'main-content'})
if main_content:
document_text = main_content.get_text()
return document_text
else:
return "Document content not found on the page."
else:
return "Failed to retrieve document content. Please check the URL."
except Exception as e:
return f"An error occurred: {str(e)}"
# Main program
if __name__ == "__main__":
print("Welcome to the Documentation Summarizer!")
document_name = input("Enter the name or topic of the documentation you want to summarize: ")
documentation_urls = suggest_documentation_urls(document_name)
print("\nTop documentation suggestions:")
for idx, doc in enumerate(documentation_urls, start=1):
doc_name = doc.title
doc_url = doc.url
print(f"{idx}. {doc_name}, {doc_url}")
choice = input("\nSelect a documentation source (enter the number): ")
selected_doc = documentation_urls[int(choice) - 1]
doc_content = fetch_document_content(selected_doc.url)
summary = document_summarizer(doc_content)
print("\nSummary of the documentation:")
print(summary)
print("\nThank you for using the Documentation Summarizer!")
| [] |
2024-01-10 | Jinneo/RefugeeAIChatBot | chatbot~logic.py | from PyPDF2 import PdfReader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
import os
os.environ["OPENAI_API_KEY"] = "KEYOPEN"
pdf_reader = PdfReader('/Users/praveenvadlamani/Downloads/OIRAChatbot/data.pdf')
document_text = ''
for page in pdf_reader.pages:
page_text = page.extract_text()
if page_text:
document_text += page_text
text_separator = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len,
)
text_chunks = text_separator.split_text(document_text)
embeddings_model = OpenAIEmbeddings()
document_search = FAISS.from_texts(text_chunks, embeddings_model)
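# chain_type="stuff" concatenates every retrieved chunk into a single prompt, so
# answers are bounded by the model's context window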
qa_chain = load_qa_chain(OpenAI(), chain_type="stuff")
conversation_history = []
def chatbot_response(user_input):
global conversation_history
if user_input.lower() == "exit":
return "Goodbye!"
conversation_history.append(("User:", user_input))
conversation_text = ' '.join([f"{role} {message}" for role, message in conversation_history])
document_results = document_search.similarity_search(conversation_text)
response = qa_chain.run(
input_documents=document_results,
question=user_input + "",
temperature=0.6,
)
conversation_history.append(("Chatbot:", response))
return response
| [] |
2024-01-10 | a3ro-dev/AutoGit | utils~git.py | import os
import platform
import subprocess
import sys
import openai
import utils.keys as keys
openai.api_key = keys.api
class GitSSH:
"""
A class responsible for the installation of Git and SSH related operations
"""
def __init__(self, email):
"""
Initializes the object with the email. This is called by __init__ and should not be called directly
@param email - The email to set
"""
self.email = email
def install_git(self):
"""
Install Git on the operating system based on the distribution or macOS package manager
@return True if Git is installed False if Git is not
"""
# Check if Git is installed
try:
subprocess.run(["git", "--version"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except FileNotFoundError as e:
print(f"{e}\n")
print("Git is not installed. Installing Git...")
# Install Git based on the operating system
            # On Windows, install Git through winget
            if os.name == "nt":  # Windows
                subprocess.run(["winget", "install", "--id", "Git.Git", "-e", "--source", "winget"])
            # On Linux and macOS, pick the package manager for the detected distribution
            elif sys.platform.startswith("linux") or sys.platform.startswith("darwin"):  # Linux and macOS
# Check the specific Linux distribution or macOS package manager
# Returns the distro of the current platform.
if sys.platform.startswith("linux"):
distro = self.get_linux_distribution().lower()
else:
distro = platform.mac_ver()[0].lower()
# Install Git based on the Linux distribution or macOS package manager
                # Install Git with the distribution's package manager
if distro in ["debian", "ubuntu"]:
subprocess.run(["sudo", "apt-get", "install", "-y", "git"])
elif distro == "fedora":
subprocess.run(["sudo", "dnf", "install", "-y", "git"])
elif distro == "gentoo":
subprocess.run(["sudo", "emerge", "--ask", "--verbose", "dev-vcs/git"])
elif distro == "arch":
subprocess.run(["sudo", "pacman", "-S", "git"])
elif distro == "opensuse":
subprocess.run(["sudo", "zypper", "install", "git"])
elif distro == "mageia":
subprocess.run(["sudo", "urpmi", "git"])
elif distro == "nixos":
subprocess.run(["nix-env", "-i", "git"])
elif distro == "freebsd":
subprocess.run(["sudo", "pkg", "install", "git"])
elif distro == "openbsd":
subprocess.run(["sudo", "pkg_add", "git"])
elif distro == "alpine":
subprocess.run(["sudo", "apk", "add", "git"])
elif distro == "darwin":
                    # On macOS, use Homebrew if available, otherwise MacPorts
if subprocess.run(["which", "brew"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).returncode == 0:
subprocess.run(["brew", "install", "git"])
elif subprocess.run(["which", "port"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).returncode == 0:
subprocess.run(["sudo", "port", "install", "git"])
else:
print("Homebrew or MacPorts not found. Please install Git manually.")
return
else:
print("Unsupported Linux distribution or macOS version. Please install Git manually.")
return
else:
print("Unsupported operating system. Please install Git manually.")
return
def get_linux_distribution(self):
"""
Get the Linux distribution. This is used to determine whether or not we are running on Linux or not.
@return A string of the Linux distribution or " " if not
"""
try:
with open("/etc/os-release", "r") as f:
lines = f.readlines()
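                # /etc/os-release reports the distribution as a lowercase ID,
                # e.g. ID=ubuntu, ID=fedora or ID=arch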
                # Find the line that starts with "ID=" and return its value
                for line in lines:
                    if line.startswith("ID="):
return line.split("=")[1].strip().lower()
except FileNotFoundError as e:
print(e)
return ""
def generate_ssh_key(self):
"""
Generate SSH key and print documentation on how to connect to GitHub. This is done by running ssh - keygen on every file
"""
# Generate SSH key pair
home_dir = os.path.expanduser("~")
ssh_dir = os.path.join(home_dir, ".ssh")
key_file = os.path.join(ssh_dir, "id_rsa.pub")
print("Contents of .ssh directory:")
# Prints out the files in the ssh_dir
for file_name in os.listdir(ssh_dir):
print(f">-+-< {file_name} >-+-<")
subprocess.run(["ssh-keygen", "-t", "rsa", "-b", "4096", "-C", self.email])
# Print SSH key
with open(key_file, "r") as f:
ssh_key = f.read()
print("SSH key:")
print(ssh_key)
# Print documentation on how to connect to GitHub
print("Documentation:")
print("1. Copy the SSH key above.")
print("2. Go to your GitHub account settings.")
print("3. Click on 'SSH and GPG keys'.")
print("4. Click on 'New SSH key' or 'Add SSH key'.")
print("5. Paste the copied SSH key into the 'Key' field.")
print("6. Provide a suitable title for the key.")
print("7. Click 'Add SSH key' or 'Add key'.")
confirmation: str = str(input("Are you done with these steps?: [y/n]"))
        # Proceed only if the user confirms they completed the GitHub setup steps
if confirmation == "y":
# Check if an existing SSH connection to GitHub exists
github_host = "github.com"
            ssh_config_file = os.path.join(ssh_dir, "config")
            ssh_config = ""
            if os.path.exists(ssh_config_file):
                with open(ssh_config_file, "r") as f:
                    ssh_config = f.read()
# If there is an existing SSH connection to GitHub
if github_host in ssh_config:
print("Existing SSH connection to GitHub:")
print(ssh_config)
subprocess.run(["ssh", "-T", "[email protected]"])
else:
issue: str = str(input("What is the issue that you are you facing?: "))
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": f"You are a github expert and a DevOps enthusiast, your name is AutoGit and you help people setup git and github."},
{"role": "user", "content": issue}
],
)
assistant_reply = response.choices[0].message.content.strip() #type: ignore
print(assistant_reply)
confirmation: str = str(input("Is Your Issue Solved? [y/n]: "))
            # Proceed only if the user confirms the issue was resolved
if confirmation == "y":
# Check if an existing SSH connection to GitHub exists
github_host = "github.com"
                ssh_config_file = os.path.join(ssh_dir, "config")
                ssh_config = ""
                if os.path.exists(ssh_config_file):
                    with open(ssh_config_file, "r") as f:
                        ssh_config = f.read()
# If there is an existing SSH connection to GitHub
if github_host in ssh_config:
print("Existing SSH connection to GitHub:")
print(ssh_config)
subprocess.run(["ssh", "-T", "[email protected]"])
else:
print("Issue Still Not Solved? Search for the issue on Google.")
sys.exit()
# This is the main entry point for the main module.
if __name__ == "__main__":
# Example usage
email = input("Enter Your Email: ")
git = GitSSH(email=email)
git.install_git()
git.generate_ssh_key()
| [
"You are a github expert and a DevOps enthusiast, your name is AutoGit and you help people setup git and github."
] |
2024-01-10 | AmitMY/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
  howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
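                # collapse runs of three or more newlines into a single blank line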
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | yusufali98/ovon | ovon~dataset~visualise_objects.py | import argparse
import json
import os
import os.path as osp
import pickle
from typing import Dict, List, Union
import multiprocessing
import openai
import GPUtil
import habitat
import habitat_sim
import numpy as np
from habitat.config.default import get_agent_config, get_config
from habitat.config.default_structured_configs import (
HabitatSimSemanticSensorConfig,
)
from habitat.config.read_write import read_write
from habitat_sim._ext.habitat_sim_bindings import BBox, SemanticObject
from habitat_sim.agent.agent import Agent, AgentState
from habitat_sim.simulator import Simulator
from ovon.dataset.pose_sampler import PoseSampler
from ovon.dataset.semantic_utils import (
get_hm3d_semantic_scenes,
ObjectCategoryMapping,
)
from ovon.dataset.generate_viewpoints import config_sim
from ovon.dataset.visualization import (
get_best_viewpoint_with_posesampler,
get_bounding_box,
get_color, get_depth
)
from torchvision.transforms import ToPILImage
from tqdm import tqdm
SCENES_ROOT = "data/scene_datasets/hm3d"
NUM_GPUS = len(GPUtil.getAvailable(limit=256))
TASKS_PER_GPU = 12
def create_html(
file_name: str,
objects_mapping: Dict,
visualised: bool = True,
threshold: float = 0.05,
):
html_head = """
<html>
<head>
<meta charset="utf-8">
<title>Objects Visualisation</title>
</head>"""
html_style = """
<style>
/* Three image containers (use 25% for four, and 50% for two, etc) */
.column {
float: left;
width: 20.00%;
padding: 5px;
}
    /* Use border-box sizing for all elements */
    * {
box-sizing: border-box;
}
.row {
display: flex;
}
</style>
"""
html_script = """
<script>
var li_categories = []
const download0 = () => (
encodeURIComponent(
JSON.stringify(
localStorage.getItem('categories')
),
null,
2
)
)
function addObjectToCategories(cb) {
if (cb.checked) {
li_categories.push(cb.id);
}
else {
var index = li_categories.indexOf(cb.id);
if (index > -1) {
li_categories.splice(index, 1);
}
}
console.log(li_categories)
localStorage.setItem("categories",li_categories)
download0()
}
</script>
"""
cnt = 0
html_body = ""
for obj in objects_mapping.keys():
# Visualized Objects
if visualised and objects_mapping[obj][0][1] >= threshold:
cnt += 1
html_body += f"""<h3>{obj}</h3><input name="checkbox" onclick="addObjectToCategories(this);" type="checkbox" id="{obj}" />
<div class="row">
"""
for cov, frac, scene in objects_mapping[obj][:5]:
html_body += f"""
<div class="column">
<img src="../images/objects/{scene}/{obj}.png" alt="{obj}" style="width:100%">
<h5>cov = {cov:.3f}, frac = {frac:.3f}</h5>
</div>
"""
html_body += "</div>"
# Filtered Objects
elif not visualised and objects_mapping[obj][0][1] < threshold:
cnt += 1
html_body += f"""<h3>{obj}</h3>
<div class="row">
"""
for cov, frac, scene in objects_mapping[obj][:5]:
html_body += f"""
<div class="column">
<img src="../images/objects/{scene}/{obj}.png" alt="{obj}" style="width:100%">
<h5>cov = {cov:.3f}, frac = {frac:.3f}</h5>
</div>
"""
html_body += "</div>"
html_body = (
f"""
<body>
<h2> Visualising {cnt} objects </h2>
"""
+ html_body
)
html_body += """</body>
</html>"""
f = open(file_name, "w")
f.write(html_head + html_style + html_script + html_body)
f.close()
def save_img(img, path):
(ToPILImage()(img)).convert("RGB").save(path)
def get_objnav_config(i: int, scene: str):
CFG = "habitat-lab/habitat-lab/habitat/config/benchmark/nav/objectnav/objectnav_hm3d.yaml"
SCENE_CFG = f"{SCENES_ROOT}/hm3d_annotated_basis.scene_dataset_config.json"
objnav_config = get_config(CFG)
with read_write(objnav_config):
agent_config = get_agent_config(objnav_config.habitat.simulator)
# Stretch agent
agent_config.height = 1.41
agent_config.radius = 0.17
sensor_pos = [0, 1.31, 0]
agent_config.sim_sensors.update(
{"semantic_sensor": HabitatSimSemanticSensorConfig()}
)
FOV = 90
for sensor, sensor_config in agent_config.sim_sensors.items():
agent_config.sim_sensors[sensor].hfov = FOV
agent_config.sim_sensors[sensor].width //= 2
agent_config.sim_sensors[sensor].height //= 2
agent_config.sim_sensors[sensor].position = sensor_pos
objnav_config.habitat.task.measurements = {}
deviceIds = GPUtil.getAvailable(
order="memory", limit=1, maxLoad=1.0, maxMemory=1.0
)
if i < NUM_GPUS * TASKS_PER_GPU or len(deviceIds) == 0:
deviceId = i % NUM_GPUS
else:
deviceId = deviceIds[0]
objnav_config.habitat.simulator.habitat_sim_v0.gpu_device_id = (
deviceId # i % NUM_GPUS
)
objnav_config.habitat.dataset.scenes_dir = "./data/scene_datasets/"
objnav_config.habitat.dataset.split = "train"
objnav_config.habitat.simulator.scene = scene
objnav_config.habitat.simulator.scene_dataset = SCENE_CFG
return objnav_config
def get_simulator(objnav_config) -> Simulator:
sim = habitat.sims.make_sim(
"Sim-v0", config=objnav_config.habitat.simulator
)
navmesh_settings = habitat_sim.NavMeshSettings()
navmesh_settings.set_defaults()
navmesh_settings.agent_radius = (
objnav_config.habitat.simulator.agents.main_agent.radius
)
navmesh_settings.agent_height = (
objnav_config.habitat.simulator.agents.main_agent.height
)
sim.recompute_navmesh(sim.pathfinder, navmesh_settings)
return sim
def is_on_ceiling(sim: Simulator, aabb: BBox):
point = np.asarray(aabb.center)
snapped = sim.pathfinder.snap_point(point)
# The snapped point is on the floor above
# It is more than 20 cms above the object's upper edge
if snapped[1] > point[1] + aabb.sizes[0] / 2 + 0.20:
return True
# Snapped point is on the ground
if snapped[1] < point[1] - 1.5:
return True
return False
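# Returns the semantic ids of all objects, other than the target, that cover more
# than `threshold` of the pixels in the current semantic observation.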
def get_all_objects_in_view(obs, target_obj, threshold=0.01):
area = np.prod(obs["semantic"].shape)
obj_ids, num_pixels = np.unique(obs["semantic"], return_counts=True)
objects = [
obj_ids[i]
for i in range(len(num_pixels))
if num_pixels[i] / (area) > threshold and obj_ids[i] != target_obj
]
return objects
def get_objects_for_scene(args) -> None:
scene, outpath, device_id = args
scene_key = os.path.basename(scene).split(".")[0]
split = outpath
"""
if os.path.isfile(os.path.join(outpath, f"meta/{scene_key}.pkl")):
return
"""
cfg = get_objnav_config(device_id, scene_key)
sim = get_simulator(cfg)
objects_info = sim.semantic_scene.objects
objects_dict = {obj.semantic_id: obj for obj in objects_info}
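    # The pose sampler proposes candidate camera poses around each object
    # (radii between r_min and r_max, small rotation/height increments), from
    # which get_best_viewpoint_with_posesampler picks the highest-coverage view.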
pose_sampler = PoseSampler(
sim=sim,
r_min=1.0,
r_max=2.0,
r_step=0.5,
rot_deg_delta=10.0,
h_min=0.8,
h_max=1.4,
sample_lookat_deg_delta=5.0,
)
split = outpath.split("/")[-2]
print(split)
objects_visualized = []
cnt = 0
agent = sim.get_agent(0)
cat_map = ObjectCategoryMapping(
mapping_file="ovon/dataset/source_data/Mp3d_category_mapping.tsv",
allowed_categories=None,
coverage_meta_file="data/coverage_meta/{}.pkl".format(split),
frame_coverage_threshold=0.05,
)
os.makedirs(os.path.join(outpath, f"images/{scene_key}"), exist_ok=True)
object_view_data = []
objects_info = list(
filter(
lambda obj: cat_map[obj.category.name()] is not None, objects_info
)
)
for object in objects_info:
name = object.category.name().replace("/", "_")
if is_on_ceiling(sim, object.aabb):
continue
check, view = get_best_viewpoint_with_posesampler(
sim, pose_sampler, [object]
)
if check:
cov, pose, _ = view
if cov < 0.05:
continue
agent.set_state(pose)
obs = sim.get_sensor_observations()
object_ids_in_view = get_all_objects_in_view(
obs, object.semantic_id
)
objects_in_view = list(
filter(
lambda obj: obj is not None
and (
cat_map[obj.category.name()] is not None
or "wall" in obj.category.name().lower()
),
[*map(objects_dict.get, object_ids_in_view)],
)
)
colors = get_color(obs, [object] + objects_in_view)
depths = get_depth(obs, [object] + objects_in_view)
drawn_img, bbs, area_covered = get_bounding_box(
obs, [object] + objects_in_view, depths = depths
)
if np.sum(area_covered) > 0:
# Save information of this object and all objects on top
path = os.path.join(
outpath,
f"images/{scene_key}/{name}_{object.semantic_id}.png",
)
save_img(drawn_img, path)
view_info = {
"target_obj_id": object.semantic_id,
"target_obj_name": object.category.name(),
"target_obj_2d_bb": bbs[0],
"target_obj_3d_bb": {
"center": object.aabb.center,
"sizes_x_y_z": object.aabb.sizes,
},
"target_obj_depth": depths[0],
"target_obj_color": colors[0],
"ref_objects": {
f"{obj.category.name()}_{obj.semantic_id}": {
"2d_bb": bbs[i + 1],
"3d_bb": {
"center": obj.aabb.center,
"sizes_x_y_z": obj.aabb.sizes,
},
}
for i, obj in enumerate(objects_in_view)
},
"scene": scene_key,
"img_ref": path,
}
object_view_data.append(view_info)
objects_visualized.append(object.category.name().strip())
cnt += 1
meta_save_path = os.path.join(outpath, f"meta/{scene_key}.pkl")
with open(meta_save_path, "wb") as handle:
pickle.dump(object_view_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
def get_objects_for_split(
split: str,
outpath: str,
num_scenes: Union[int, None],
tasks_per_gpu: int = 1,
multiprocessing_enabled: bool = False,
):
"""Makes episodes for all scenes in a split"""
scenes = sorted(
list(
get_hm3d_semantic_scenes("data/scene_datasets/hm3d", [split])[
split
]
)
)
num_scenes = len(scenes) if num_scenes is None else num_scenes
scenes = scenes[:num_scenes]
    scenes = ["vLpv2VX547B"]  # hard-coded override: restricts processing to this single scene
print(scenes)
print(
"Starting visualisation for split {} with {} scenes".format(
split, len(scenes)
)
)
os.makedirs(os.path.join(outpath.format(split), "meta"), exist_ok=True)
if multiprocessing_enabled:
gpus = len(GPUtil.getAvailable(limit=256))
cpu_threads = gpus * 16
deviceIds = GPUtil.getAvailable(
order="memory", limit=1, maxLoad=1.0, maxMemory=1.0
)
print(
"In multiprocessing setup - cpu {}, GPU: {}".format(
cpu_threads, gpus
)
)
items = []
for i, s in enumerate(scenes):
deviceId = deviceIds[0]
if i < gpus * tasks_per_gpu or len(deviceIds) == 0:
deviceId = i % gpus
items.append((s, outpath.format(split), deviceId))
mp_ctx = multiprocessing.get_context("forkserver")
with mp_ctx.Pool(cpu_threads) as pool, tqdm(
total=len(scenes), position=0
) as pbar:
for _ in pool.imap_unordered(get_objects_for_scene, items):
pbar.update()
else:
for scene in tqdm(scenes, total=len(scenes), dynamic_ncols=True):
get_objects_for_scene((scene, outpath.format(split), 0))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--split",
help="split of data to be used",
type=str,
required=True,
)
parser.add_argument(
"-o",
"--outpath",
help="output path",
type=str,
default="data/object_views/{}/",
)
parser.add_argument(
"-n",
"--num_scenes",
help="number of scenes",
type=int,
)
    parser.add_argument(
        "--tasks_per_gpu", help="number of tasks per GPU", type=int, default=1
    )
parser.add_argument(
"-m",
"--multiprocessing_enabled",
dest="multiprocessing_enabled",
action="store_true",
)
args = parser.parse_args()
split = args.split
num_scenes = args.num_scenes
outpath = args.outpath
tasks_per_gpu = args.tasks_per_gpu
multiprocessing_enabled = args.multiprocessing_enabled
get_objects_for_split(
split, outpath, num_scenes, tasks_per_gpu, multiprocessing_enabled
)
| [] |
2024-01-10 | argmaxml/llm_workshop | src~llm_helpers.py | import json
import requests
from openai import OpenAI
from decouple import config
openai_client = OpenAI(api_key=config("OPENAI_API_KEY"))
huggingfacehub_api_token = config("HUGGINGFACE_API_TOKEN")
def hf_ask(question: str, model_url="https://api-inference.huggingface.co/models/google/flan-t5-xxl") -> str:
"""Ask a question to Huggingface, apply it to every row of a pandas dataframe and return the answer"""
def pandas_func(row) -> str:
prompt = question.format(**(dict(row.items())))
headers = {"Authorization": f"Bearer {huggingfacehub_api_token}"}
response = requests.post(
model_url, headers=headers, json={"inputs": prompt})
if response.status_code != 200:
return None
return json.loads(response.content.decode("utf-8"))[0]['generated_text']
return pandas_func
def chatgpt_ask(question: str, model_name="gpt-3.5-turbo") -> str:
"""Ask a question to chatgpt, apply it to every row of a pandas dataframe and return the answer"""
def pandas_func(row)-> str:
try:
prompt = question.format(**(dict(row.items())))
completion = openai_client.chat.completions.create(
model=model_name,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
)
ret = completion.choices[0].message.content.strip()
return ret
except:
return None
return pandas_func
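# Illustrative usage sketch (not in the original module): both helpers return a
# row-wise function intended for pandas DataFrame.apply. The DataFrame and the
# prompt template below are assumptions; names in braces must match column names.
def _example_apply():
    import pandas as pd
    df = pd.DataFrame({"title": ["The Matrix", "Amelie"]})
    ask = chatgpt_ask("In one word, what genre is the movie {title}?")
    df["genre"] = df.apply(ask, axis=1)  # one API call per row
    return df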
| [
"You are a helpful assistant."
] |
2024-01-10 | philipperoubert/chloe | src~chloe.py | import os
import openai
import speech_recognition as sr
from google.cloud import texttospeech
import pygame
import io
import pickle
import pvporcupine
import pyaudio
import time
import soundfile as sf
import whisper
from speech_recognition import AudioData
import numpy as np
import torch
import plugins
import sys
def audio_data_to_numpy(audio_data: AudioData) -> np.ndarray:
"""Converts an AudioData object to a numpy array.
Args:
audio_data (AudioData): The AudioData object to be converted.
Returns:
np.ndarray: The numpy array.
"""
wav_bytes = audio_data.get_wav_data(convert_rate=16000)
wav_stream = io.BytesIO(wav_bytes)
audio_array, _ = sf.read(wav_stream)
audio_array = audio_array.astype(np.float32)
return audio_array
def ask_gpt(prompt, model="gpt-3.5-turbo", conversation=[]):
"""Asks GPT-3 a question, and get a response.
Args:
prompt (str): The prompt to be sent to GPT-3.
model (str): The model to be used. Please see https://platform.openai.com/docs/models/overview for a list of available models.
Returns:
str: The response from GPT-3.
"""
openai.api_key = os.environ["OPENAI_API_KEY"]
conversation.append({"role": "user", "content": prompt})
response = openai.ChatCompletion.create(model=model, messages=conversation)
conversation.append(response.choices[0].message)
with open("resources/conversation.pkl", "wb") as fp:
pickle.dump(conversation, fp)
message = response.choices[0].message.content
return message
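# Illustrative sketch (not part of the assistant loop): ask_gpt appends both the user
# message and the reply to `conversation` and pickles it to resources/conversation.pkl,
# so that directory must exist. Note that the default `conversation=[]` argument is a
# mutable default shared across calls; passing an explicit list, as below, avoids surprises.
def _ask_gpt_example():
    conversation = [{"role": "system", "content": "You are a concise assistant."}]
    return ask_gpt("What is the capital of France?", conversation=conversation)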
def play_audio(audio_content):
"""Plays audio from a byte array.
Args:
audio_content (bytes): The audio content to be played.
"""
pygame.mixer.init()
audio_file = io.BytesIO(audio_content)
pygame.mixer.music.load(audio_file)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
def synthesize_speech(text, voice_name="en-US-Studio-O"):
"""Synthesizes speech from the input string of text.
Args:
text (str): The text to be synthesized.
voice_name (str): The name of the voice to be used. Please see https://cloud.google.com/text-to-speech/docs/voices for a list of available voices.
Returns:
bytes: The synthesized audio.
"""
client = texttospeech.TextToSpeechClient()
input_text = texttospeech.SynthesisInput(text=text)
voice = texttospeech.VoiceSelectionParams(
language_code="en-US",
name=voice_name,
)
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.MP3
)
response = client.synthesize_speech(
input=input_text, voice=voice, audio_config=audio_config
)
return response.audio_content
def main(transcription_mode="online"):
if not os.path.exists("resources/conversation.pkl"):
conversation = [
# Initialisation Prompt
{
"role": "system",
"content": "I want you to act as a human that could become a friend. Your name is Chloe. You are kind and affectionate, funny and sometimes even a little bit sassy. You can evolve some further personality traits over time and with the conversation. Your messages should be short unless you deem necessary to have a longer message just as if you had an actual real life conversation. The conversation is spoken, not written. You should get to know each other over time, so don't rush to introduce yourself, let the user find out more about you, such as your name. You should mention the user's name from time to time to increase realism. You must get to know the user initially, e.g. what their name is, how old they are, etc.",
},
# Weather Plugin
{
"role": "system",
"content": 'If the user asks for the weather, you must respond "Request get_weather {cityname}", for example "Request get_weather Paris". You aren\'t allowed to add more text after that. The system will provide you with weather data. If you don\'t know where the user lives, ask the user.',
},
# Time Plugin
            {
                "role": "system",
                "content": 'If the user asks for the current time, you must respond "Request get_time". You aren\'t allowed to add more text after that. The system will provide you with the current time data.',
            },
]
else:
with open("resources/conversation.pkl", "rb") as f:
conversation = pickle.load(f)
if transcription_mode == "offline":
model = whisper.load_model("base.en")
recognizer = sr.Recognizer()
porcupine = pvporcupine.create(
keyword_paths=["resources/hey_chloe_hotword.ppn"],
access_key=os.environ["PORCUPINE_ACCESS_KEY"],
)
openai.api_key = os.environ["OPENAI_API_KEY"]
while True:
# Configure PyAudio
pa = pyaudio.PyAudio()
audio_stream = pa.open(
rate=porcupine.sample_rate,
channels=1,
format=pyaudio.paInt16,
input=True,
frames_per_buffer=porcupine.frame_length,
)
print("Listening for hotword...")
try:
while True:
# Read audio data from the microphone
pcm = audio_stream.read(porcupine.frame_length)
pcm = [
int.from_bytes(pcm[i:i+2], byteorder="little", signed=True)
for i in range(0, len(pcm), 2)
]
# Check if the hotword is detected
keyword_index = porcupine.process(pcm)
if keyword_index >= 0:
print("Hotword detected")
break
finally:
audio_stream.stop_stream()
audio_stream.close()
pa.terminate()
# 15 second timeout, if no audio is detected, Chloe will stop listening and go back to listening for the hotword.
start_time = time.time()
while time.time() - start_time < 15:
try:
with sr.Microphone() as source:
print("Please start speaking...")
audio = recognizer.listen(source)
# Transcribe the audio using OpenAI's Whisper model
print("Transcribing audio...")
if transcription_mode == "online":
transcript = recognizer.recognize_whisper_api(audio, api_key=os.environ["OPENAI_API_KEY"])
text = transcript
else:
audio_numpy = audio_data_to_numpy(audio)
transcript = model.transcribe(
audio_numpy,
fp16=torch.cuda.is_available(),
)
text = transcript["text"]
print("You said: ", text)
# if text is only composed of punctuation or whitespace ignore it
if (
len(text) == 0
or text.isspace()
or text.isalpha()
or text == ". . . . ."
):
pass
elif text == "Thank you." and conversation[-1]["content"][-1] == "?":
pass
else:
gpt_response = ask_gpt(text, model="gpt-3.5-turbo", conversation=conversation)
print("GPT-3.5-turbo response: ", gpt_response)
if "request get_weather" in gpt_response.lower():
audio_content = synthesize_speech(
"Let me check that for you...", "en-US-Studio-O"
)
play_audio(audio_content)
city = gpt_response.split("Request get_weather ")[1]
weather = plugins.get_weather(city)
conversation.append({"role": "system", "content": str(weather)})
gpt_response = ask_gpt(text, model="gpt-3.5-turbo", conversation=conversation)
print("GPT-3.5-turbo response: ", gpt_response)
if "request get_time" in gpt_response.lower():
audio_content = synthesize_speech(
"Let me check that for you...", "en-US-Studio-O"
)
play_audio(audio_content)
                        current_time = plugins.get_time()
                        conversation.append({"role": "system", "content": current_time})
gpt_response = ask_gpt(text, model="gpt-3.5-turbo", conversation=conversation)
print("GPT-3.5-turbo response: ", gpt_response)
audio_content = synthesize_speech(gpt_response, "en-US-Studio-O")
play_audio(audio_content)
start_time = time.time()
except sr.WaitTimeoutError:
continue
except sr.UnknownValueError:
print("Sorry, I couldn't understand that.")
except sr.RequestError as e:
print(f"Could not request results; {e}")
break
if __name__ == "__main__":
if len(sys.argv) > 1:
main(transcription_mode=sys.argv[1])
else:
main()
| [
"I want you to act as a human that could become a friend. Your name is Chloe. You are kind and affectionate, funny and sometimes even a little bit sassy. You can evolve some further personality traits over time and with the conversation. Your messages should be short unless you deem necessary to have a longer message just as if you had an actual real life conversation. The conversation is spoken, not written. You should get to know each other over time, so don't rush to introduce yourself, let the user find out more about you, such as your name. You should mention the user's name from time to time to increase realism. You must get to know the user initially, e.g. what their name is, how old they are, etc.",
"If the user asks for the current time, you must respond \"Request get_time\". You aren't allowed to add more text after that. The system will provide you with the curent time data.",
"If the user asks for the weather, you must respond \"Request get_weather {cityname}\", for example \"Request get_weather Paris\". You aren't allowed to add more text after that. The system will provide you with weather data. If you don't know where the user lives, ask the user."
] |
2024-01-10 | takitsuba/openai-api | openai_api~clean_text.py | import os
import sys
import openai
if __name__ == "__main__":
redundant_text = sys.argv[1]
    direction = "次の文章を簡潔にしてください。"  # i.e. "Please make the following text concise."
prompt = direction + "\n" + redundant_text
openai.api_key = os.environ.get("API_KEY")
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
cleaned_text = response["choices"][0]["text"].strip()
print(cleaned_text)
| [
"次の文章を簡潔にしてください。\nPLACEHOLDER"
] |
2024-01-10 | takitsuba/openai-api | openai_api~request_cg.py | import os
import sys
import openai
# import codecs
# sys.stdout = codecs.getwriter('utf_8')(sys.stdout)
if __name__ == "__main__":
message = sys.argv[1]
openai.api_key = os.environ.get("API_KEY")
completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "user", "content": message}]
)
print(completion.choices[0]["message"]["content"].strip())
| [] |
2024-01-10 | Josiah-Hill/Twitchsight | WebApp.py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import streamlit as st
import os
import pickle
import torch
import openai
from sentence_transformers import SentenceTransformer
label_encoder_classes = {0: 'Account Management', 1: 'Ads', 2: 'Badges/Emotes',
3: 'Bits', 4: 'Channel Page', 5: 'Channel Points',
6: 'Charity', 7: 'Chat', 8: 'Creator Camp',
9: 'Creator Dashboard', 10: 'Creator Dashboard: Stream Manager',
11: 'Creators and Stream Features', 12: 'Customer Experience',
13: 'Developers', 14: 'Discover', 15: 'Extensions', 16: 'IGDB',
17: 'IRL Events and Merch', 18: 'Localization', 19: 'Moderation',
20: 'Purchase Management', 21: 'Safety', 22: 'Subscriptions',
23: 'Twitch Applications: Consoles', 24: 'Twitch Applications: Mobile',
25: 'Twitch Applications: TV Apps', 26: 'Twitch Studio',
27: 'User Accessibility', 28: 'Video Features', 29: 'Video Performance'}
from torch import nn
class NN_CLF_GPT(nn.Module):
def __init__(self, input_size=1536, output_size=30):
super(NN_CLF_GPT, self).__init__()
self.layer1 = nn.Linear(input_size, 128)
self.layer2 = nn.Linear(128, 64)
self.layer3 = nn.Linear(64, output_size)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.layer1(x))
x = self.relu(self.layer2(x))
x = self.layer3(x)
return x
class NN_CLF_BERT(nn.Module):
def __init__(self, input_size=384, output_size=30):
super(NN_CLF_BERT, self).__init__()
self.layer1 = nn.Linear(input_size, 128)
self.layer2 = nn.Linear(128, 64)
self.layer3 = nn.Linear(64, output_size)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.layer1(x))
x = self.relu(self.layer2(x))
x = self.layer3(x)
return x
class NN_REG_GPT(nn.Module):
def __init__(self, input_size=1536):
super(NN_REG_GPT, self).__init__()
self.layer1 = nn.Linear(input_size, 128)
self.layer2 = nn.Linear(128, 64)
self.layer3 = nn.Linear(64, 1) # Output size is 1 for regression
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.layer1(x))
x = self.relu(self.layer2(x))
x = self.layer3(x)
return x
class NN_REG_BERT(nn.Module):
def __init__(self, input_size=384):
super(NN_REG_BERT, self).__init__()
self.layer1 = nn.Linear(input_size, 128)
self.layer2 = nn.Linear(128, 64)
self.layer3 = nn.Linear(64, 1) # Output size is 1 for regression
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.layer1(x))
x = self.relu(self.layer2(x))
x = self.layer3(x)
return x
class NN_REGSIF_GPT(nn.Module):
def __init__(self, input_size=1536, num_classes=30):
super(NN_REGSIF_GPT, self).__init__()
# Shared layers
self.base_layer1 = nn.Linear(input_size, 128)
self.base_layer2 = nn.Linear(128, 64)
self.relu = nn.ReLU()
# Regression head
self.regression_head = nn.Linear(64, 1) # Output one value for regression
# Classification head
self.classification_head = nn.Linear(64, num_classes) # Output for each class
def forward(self, x):
# Shared layers
x = self.relu(self.base_layer1(x))
x = self.relu(self.base_layer2(x))
# Regression and classification heads
regression_output = self.regression_head(x)
classification_output = self.classification_head(x)
return regression_output, classification_output
class NN_REGSIF_BERT(nn.Module):
def __init__(self, input_size=384, num_classes=30):
super(NN_REGSIF_BERT, self).__init__()
# Shared layers
self.base_layer1 = nn.Linear(input_size, 128)
self.base_layer2 = nn.Linear(128, 64)
self.relu = nn.ReLU()
# Regression head
self.regression_head = nn.Linear(64, 1) # Output one value for regression
# Classification head
self.classification_head = nn.Linear(64, num_classes) # Output for each class
def forward(self, x):
# Shared layers
x = self.relu(self.base_layer1(x))
x = self.relu(self.base_layer2(x))
# Regression and classification heads
regression_output = self.regression_head(x)
classification_output = self.classification_head(x)
return regression_output, classification_output
nn_clf_gpt = NN_CLF_GPT()
nn_clf_bert = NN_CLF_BERT()
nn_reg_gpt = NN_REG_GPT()
nn_reg_bert = NN_REG_BERT()
nn_regsif_gpt = NN_REGSIF_GPT()
nn_regsif_bert = NN_REGSIF_BERT()
models = {}
ml_model_names, dl_model_names = [], []
for model_name in os.listdir():
model_first_name = model_name.split('.')[0]
    if model_name.endswith('.pkl') or model_name.endswith('.pickle'):
ml_model_names.append(model_first_name)
with open(f'{model_name}', 'rb') as f:
models[model_first_name] = pickle.load(f)
    elif model_name.endswith('.pt') or model_name.endswith('.pth'):
dl_model_names.append(model_first_name)
if model_first_name == 'neural_network_classification_GPT':
nn_clf_gpt.load_state_dict(torch.load(f'{model_name}'))
models[model_first_name] = nn_clf_gpt.to('cpu').eval()
if model_first_name == 'neural_network_classification_BERT':
nn_clf_bert.load_state_dict(torch.load(f'{model_name}'))
models[model_first_name] = nn_clf_bert.to('cpu').eval()
if model_first_name == 'neural_network_regression_GPT':
nn_reg_gpt.load_state_dict(torch.load(f'{model_name}'))
models[model_first_name] = nn_reg_gpt.to('cpu').eval()
if model_first_name == 'neural_network_regression_BERT':
nn_reg_bert.load_state_dict(torch.load(f'{model_name}'))
models[model_first_name] = nn_reg_bert.to('cpu').eval()
if model_first_name == 'regsification_GPT':
nn_regsif_gpt.load_state_dict(torch.load(f'{model_name}'))
models[model_first_name] = nn_regsif_gpt.to('cpu').eval()
if model_first_name == 'regsification_BERT':
nn_regsif_bert.load_state_dict(torch.load(f'{model_name}'))
models[model_first_name] = nn_regsif_bert.to('cpu').eval()
st.write('All models loaded.')
sbert_model_name = 'paraphrase-MiniLM-L6-v2'
device = 'cpu'
sbert = SentenceTransformer(sbert_model_name, device=device)
def embed_text_openai(text, model="text-embedding-ada-002"):
client = openai.OpenAI(api_key=st.secrets['OPENAI_API_KEY'])
text = str(text).replace("\n", " ")
response = client.embeddings.create(input=[text], model=model)
return response.data[0].embedding
def embed_text_BERT(text, emb_model=sbert):
embeddings = emb_model.encode(text, convert_to_tensor=True).cpu().numpy()
return embeddings
def inverse_transform_prediction(normalized_prediction, min_val= 0.0, max_val=18563.0):
raw_prediction = normalized_prediction * (max_val - min_val) + min_val
return raw_prediction
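# Worked example (assumed values): with the default range 0.0-18563.0, a normalized
# prediction of 0.5 maps back to 0.5 * (18563.0 - 0.0) + 0.0 = 9281.5, i.e.
# inverse_transform_prediction(0.5) -> 9281.5.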
def get_prediction(text, model_name, emb_type):
if emb_type == 'openai':
emb = embed_text_openai(text)
elif emb_type == 'bert':
emb = embed_text_BERT(text)
else:
raise ValueError(f'Unknown embedding type: {emb_type}')
if model_name in ml_model_names:
model = models[model_name]
pred = model.predict([emb])[0]
return pred
elif model_name in dl_model_names:
if 'classification' in model_name:
model = models[model_name]
emb = torch.tensor(emb).float().to('cpu')
outputs = model(emb)
_, predicted = torch.max(outputs.data, 0)
pred = predicted.item()
return pred
elif 'regression' in model_name:
model = models[model_name]
emb = torch.tensor(emb).float().to('cpu')
pred = model(emb).detach().cpu().numpy()[0]
return pred
else:
raise ValueError(f'Unknown model name: {model_name}')
def run_app():
st.title('TwitchSight: Predictive Modeling and Analysis of Twitch User Ideas')
st.header('Reza Khan Mohammadi, Patrick Govan, and Josiah Hill')
st.write(
'The project "TwitchSight" is currently focusing on predicting the popularity\
of user ideas on Twitch and classifying them into respective themes. \
We have scraped data from the Twitch UserVoice platform, resulting in 13,233\
ideas spanning across 30 categories. We have taken this data and trained a\
number of models to assign each idea into one of the UserVoice categories. \
With this web app, you can classify a new idea into an appropriate category\
using any of our models.'
)
text = st.text_input('UserVoice Idea', 'Enter a new prompt to classify.')
model_name = st.selectbox(
'Select a model to predict your data:',
ml_model_names + dl_model_names)
emb_type = "openai" if "GPT" in model_name else "bert"
if st.button('Execute Model'):
if 'classification' in model_name:
pred_class_idx = get_prediction(text, model_name, emb_type)
pred_class_label = label_encoder_classes[pred_class_idx]
st.write(f'Predicted class: {pred_class_label}')
elif 'regression' in model_name:
pred_class_reg_norm = get_prediction(text, model_name, emb_type)
pred_class_reg_raw = inverse_transform_prediction(pred_class_reg_norm)
st.write(f'Predicted regression value: {pred_class_reg_raw}')
else:
st.write('Invalid model name')
run_app()
| [] |
2024-01-10 | Jman4190/gpt3-codenames | api~demo_web_app.py | """Runs the web app given a GPT object and UI configuration."""
from http import HTTPStatus
import json
import subprocess
import openai
from flask import Flask, request, Response
from .gpt import set_openai_key, Example
from .ui_config import UIConfig
from dotenv import load_dotenv
import os
load_dotenv()
KEY_NAME = os.getenv('OPENAI_KEY')
def demo_web_app(gpt, config=UIConfig()):
"""Creates Flask app to serve the React app."""
app = Flask(__name__)
#app.config.from_envvar(CONFIG_VAR)
#set_openai_key(app.config[KEY_NAME])
set_openai_key(KEY_NAME)
@app.route("/params", methods=["GET"])
def get_params():
# pylint: disable=unused-variable
response = config.json()
return response
def error(err_msg, status_code):
return Response(json.dumps({"error": err_msg}), status=status_code)
def get_example(example_id):
"""Gets a single example or all the examples."""
# return all examples
if not example_id:
return json.dumps(gpt.get_all_examples())
example = gpt.get_example(example_id)
if not example:
return error("id not found", HTTPStatus.NOT_FOUND)
return json.dumps(example.as_dict())
def post_example():
"""Adds an empty example."""
new_example = Example("", "")
gpt.add_example(new_example)
return json.dumps(gpt.get_all_examples())
def put_example(args, example_id):
"""Modifies an existing example."""
if not example_id:
return error("id required", HTTPStatus.BAD_REQUEST)
example = gpt.get_example(example_id)
if not example:
return error("id not found", HTTPStatus.NOT_FOUND)
if "input" in args:
example.input = args["input"]
if "output" in args:
example.output = args["output"]
# update the example
gpt.add_example(example)
return json.dumps(example.as_dict())
def delete_example(example_id):
"""Deletes an example."""
if not example_id:
return error("id required", HTTPStatus.BAD_REQUEST)
gpt.delete_example(example_id)
return json.dumps(gpt.get_all_examples())
@app.route(
"/examples",
methods=["GET", "POST"],
defaults={"example_id": ""},
)
@app.route(
"/examples/<example_id>",
methods=["GET", "PUT", "DELETE"],
)
def examples(example_id):
method = request.method
args = request.json
if method == "GET":
return get_example(example_id)
if method == "POST":
return post_example()
if method == "PUT":
return put_example(args, example_id)
if method == "DELETE":
return delete_example(example_id)
return error("Not implemented", HTTPStatus.NOT_IMPLEMENTED)
@app.route("/translate", methods=["GET", "POST"])
def translate():
# pylint: disable=unused-variable
prompt = request.json["prompt"]
response = gpt.submit_request(prompt)
offset = 0
if not gpt.append_output_prefix_to_query:
offset = len(gpt.output_prefix)
return {'text': response['choices'][0]['text'][offset:]}
subprocess.Popen(["yarn", "start"])
app.run()
| [] |
2024-01-10 | karthikbharadwaj/HrGpt | jd_chatgpt.py | import openai
import streamlit as st
openai.api_key = "replace with api key"
model_engine = "text-davinci-003"
prompt = "Generate a data scientist Job Description"
st.title("HR Job Description Generator")
# Note: submit_callback is defined but never attached to a widget; the
# "Generate JD" button handler at the bottom of the script calls the API directly.
def submit_callback():
completion = openai.Completion.create(
engine=model_engine,
prompt=st.session_state["query"],
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
result = completion.choices[0].text
st.info(result)
st.download_button("Download Job Description",result)
role = st.selectbox('Select a role to generate the JD',("Data Scientist",
"Data Engineer",
"Solution Architect",
"Chief Technology Officer"))
exp = st.selectbox("Minimum Experience",(range(1,25)))
specs = st.text_area(label="Add specifications of the role",value="")
st.session_state["query"] = "Generate a job description for " + role + " with minimum experience " + str(exp) + " having skills in " + specs
if st.button("Generate JD", type='primary'):
# Use GPT-3 to generate a summary of the article
response = openai.Completion.create(
engine=model_engine,
prompt=st.session_state["query"],
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
# Print the generated summary
res = response["choices"][0]["text"]
st.success(res)
st.download_button('Download JD', res)
| [
"Generate a data scientist Job Description"
] |
2024-01-10 | OsherKoren/langchain_with_openai | use_cases~chain_summarize_uc.py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""This module if for chain summarization use case"""
import warnings
warnings.filterwarnings("ignore")
import os
from langchain.chains.summarize import load_summarize_chain
import get_data
import models
import text_preparation
# Get the absolute path by joining the current directory with the relative path
current_directory = os.path.dirname(os.path.abspath(__file__))
# The relative path from the current location to the target file
txt_relative_path = "../data_files/alice_in_wonderland.txt"
txt_file_path = os.path.join(current_directory, txt_relative_path)
if __name__ == "__main__":
docs = get_data.load_local_file(txt_file_path)
chunks = text_preparation.split_docs_recursively(docs=docs, chunk_size=500)[:2] # Save tokens ...
llm = models.set_openai_chat_model(max_tokens=500)
chain = load_summarize_chain(llm, chain_type="map_reduce", verbose=True)
summary = chain.run(chunks)
print("Summary: \n", summary)
| [] |
2024-01-10 | OsherKoren/langchain_with_openai | models.py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""This module is for setting up the models."""
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
def set_openai_model(model_name: str = "gpt-3.5-turbo", temperature: float = 0.2, **kwargs)\
-> OpenAI:
model = OpenAI(
model_name=model_name,
temperature=temperature,
)
for key, value in kwargs.items():
setattr(model, key, value)
return model
def set_openai_chat_model(model_name: str = "gpt-3.5-turbo", temperature: float = 0.2, **kwargs) -> ChatOpenAI:
model = ChatOpenAI(
model_name=model_name,
temperature=temperature,
)
for key, value in kwargs.items():
setattr(model, key, value)
return model
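# Illustrative usage (not in the original module): extra keyword arguments are set
# as attributes on the returned model, so e.g. max_tokens can be passed through.
def _example_models():
    llm = set_openai_model(temperature=0, max_tokens=256)
    chat_llm = set_openai_chat_model("gpt-3.5-turbo", temperature=0.2)
    return llm, chat_llm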
| [] |
2024-01-10 | OsherKoren/langchain_with_openai | connect.py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""This module is for connecting to the OpenAI API."""
import openai
import os
import serpapi
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.getenv("OPENAI_API_KEY")
serpapi.api_key = os.getenv("SERPAPI_API_KEY")
| [] |
2024-01-10 | OsherKoren/langchain_with_openai | text_preparation.py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""This module contains the prompts for the use cases."""
from typing import List, Union
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.schema.document import Document
import connect, utils
log = utils.setup_logger()
def get_openai_embedding(text: str, model="text-embedding-ada-002", text_type: str = "query"):
embedding = OpenAIEmbeddings(model=model)
if text_type == "query":
embedded_text = embedding.embed_query(text)
elif text_type == "doc":
        embedded_text = embedding.embed_documents([text])[0]  # OpenAIEmbeddings exposes embed_documents (list in, list out), not embed_doc
else:
raise NotImplementedError(f"Text type {text_type} not implemented.")
log.info(f"Embedding length is: {len(embedded_text)}")
return embedded_text
def get_openai_embedding_list(docs: List[Document], model="text-embedding-ada-002"):
embedding = OpenAIEmbeddings(model=model)
embedding_list = embedding.embed_documents([doc.page_content for doc in docs])
log.info(f"Embedding list length is: {len(embedding_list)}")
return embedding_list
def split_docs_recursively(docs: Union[str, List[Document]], chunk_size: int = 1000, chunk_overlap: int = 20):
"""Split text into chunks of characters."""
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
if isinstance(docs, str):
docs = text_splitter.create_documents([docs])
elif isinstance(docs, list):
docs = text_splitter.split_documents(docs)
else:
raise NotImplementedError(f"Type {type(docs)} not implemented.")
log.info(f"Number of documents: {len(docs)}")
return docs
def retrieve_docs(docs: List[Document],
model="text-embedding-ada-002",
chunk_size: int = 1000,
chunk_overlap: int = 20):
"""Retrieve and embed documents."""
texts = split_docs_recursively(docs=docs, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
embedding = OpenAIEmbeddings(model=model)
db = FAISS.from_documents(texts, embedding)
retriever = db.as_retriever()
return retriever
def extract_relevant_docs(
text: str,
docs: List[Document],
model="text-embedding-ada-002",
chunk_size: int = 1000,
chunk_overlap: int = 20):
"""Retrieve and get relevant documents."""
retriever = retrieve_docs(docs=docs, model=model, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
relevant_docs = retriever.get_relevant_documents(text)
log.info(f"Number of relevant documents: {len(relevant_docs)}")
return relevant_docs
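# Illustrative usage sketch (not in the original module): the file path and query
# below are assumptions. Documents are chunked, embedded with OpenAI, indexed in
# FAISS, and the most relevant chunks for the query are returned.
def _example_retrieval():
    import get_data
    docs = get_data.load_local_file("data_files/alice_in_wonderland.txt")
    return extract_relevant_docs("Who does Alice meet first?", docs, chunk_size=500)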
| [] |
2024-01-10 | OsherKoren/langchain_with_openai | use_cases~db_agent.py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""This module is for running the db agent use case."""
import urllib
import warnings
warnings.filterwarnings("ignore")
import os
from langchain import SQLDatabase, SQLDatabaseChain, OpenAI
import connect, models
dsn = os.getenv("DSN")
database = os.getenv("DATABASE")
quoted = urllib.parse.quote_plus(f'DSN={dsn};DATABASE={database}')
connection_string = f"mssql+pyodbc:///?odbc_connect={quoted}"
def run_db_agent(url, llm, query):
db = SQLDatabase.from_uri(url)
db_chain = SQLDatabaseChain.from_llm(llm=llm, db=db, verbose=True)
response = db_chain.run(query)
return response
if __name__ == "__main__":
llm = models.set_openai_model(temperature=0)
query = "How many rooms were rented in Manhattan from January to June 2021?"
response = run_db_agent(url=connection_string, llm=llm, query=query)
print(response) | [] |
2024-01-10 | OsherKoren/langchain_with_openai | use_cases~traditional_meal_chain_uc.py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""This module is for running the traditional meal chain use case."""
import warnings
warnings.filterwarnings("ignore")
from langchain.chains import LLMChain, SimpleSequentialChain
import models, prompts
def run_location_model():
prompt_template = prompts.setup_location_template_prompt()
llm = models.set_openai_model(max_tokens=500)
return LLMChain(llm=llm, prompt=prompt_template)
def run_meal_model():
prompt_template = prompts.setup_meal_template_prompt()
llm = models.set_openai_model(max_tokens=500)
return LLMChain(llm=llm, prompt=prompt_template)
if __name__ == "__main__":
location_llm = run_location_model()
meal_llm = run_meal_model()
llm_chain = SimpleSequentialChain(chains=[location_llm, meal_llm], verbose=True)
response = llm_chain.run("Black Forest, Germany")
print(response)
| [] |
2024-01-10 | OsherKoren/langchain_with_openai | get_data.py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""This module is for loading data """
from langchain.document_loaders import BSHTMLLoader, CSVLoader, PyMuPDFLoader, S3FileLoader, TextLoader
def load_txt_file(file_path: str):
loader = TextLoader(file_path=file_path)
return loader.load()
def load_csv_file(file_path: str):
loader = CSVLoader(file_path=file_path)
return loader.load()
def load_pdf_file(file_path: str):
loader = PyMuPDFLoader(file_path=file_path)
return loader.load()
def load_html_file(file_path: str):
loader = BSHTMLLoader(file_path=file_path)
return loader.load()
def load_local_file(file_path: str):
file_type = file_path.split(".")[-1]
if file_type == "txt":
return load_txt_file(file_path)
elif file_type == "csv":
return load_csv_file(file_path)
elif file_type == "pdf":
return load_pdf_file(file_path)
elif file_type == "html":
return load_html_file(file_path)
else:
raise NotImplementedError(f"File type {file_type} not implemented.")
def read_txt_file(file_path: str):
with open(file_path) as f:
file = f.read()
return file
def load_from_s3(s3_uri: str):
if s3_uri.startswith("s3://"):
s3_uri = s3_uri[5:]
bucket, key = s3_uri.split("/", 1)
loader = S3FileLoader(bucket, key)
return loader.load()
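# Illustrative usage (not in the original module): the paths are assumptions.
# load_local_file dispatches on the file extension; load_from_s3 accepts either an
# "s3://bucket/key" URI or a bare "bucket/key" string.
def _example_loaders():
    local_docs = load_local_file("docs/report.pdf")
    s3_docs = load_from_s3("s3://my-bucket/reports/report.pdf")
    return local_docs, s3_docs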
| [] |
2024-01-10 | OsherKoren/langchain_with_openai | use_cases~travel_agent_uc.py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""This module is for running the travel agent use case."""
from langchain.memory import ChatMessageHistory
import models, prompts
import warnings
warnings.filterwarnings("ignore")
def run_travel_agent(text: str):
messages = prompts.setup_travel_agent_messages(text)
history.add_message(messages[0])
history.add_message(messages[1])
chat_model = models.set_openai_chat_model()
response = chat_model(history.messages)
return response
if __name__ == "__main__":
history = ChatMessageHistory()
history.add_ai_message("Hello, I'm your travel agent. What can I do for you?")
text = "I want to go to the Allgau area in Germany. Can you build me an itinerary for 8 days?"
ai_response = run_travel_agent(text)
history.add_ai_message(ai_response.content)
lines = ai_response.content.splitlines()
for line in lines:
print(line)
print('\n\n', history.messages)
| [] |
2024-01-10 | shenyubao/javachain | src~main~resources~gitLoader_load.py | import sys
import json
from langchain.document_loaders import GitLoader
def load(repoPath, branch):
loader = GitLoader(repo_path=repoPath, branch=branch)
docs = loader.load()
return docs
if __name__ == '__main__':
repoPath = sys.argv[1]
branch = sys.argv[2]
docs = load(repoPath, branch)
print(json.dumps(docs, default=lambda docs: docs.__dict__)) | [] |
2024-01-10 | shenyubao/javachain | src~main~resources~split~tokenTextSpliter.py | import sys
import json
from langchain.text_splitter import TokenTextSplitter
def splitText(text):
text_splitter = TokenTextSplitter(chunk_size=200)
texts = text_splitter.split_text(text)
return texts
if __name__ == '__main__':
text = sys.argv[1]
docs = splitText(text)
print(json.dumps(docs, default=lambda docs: docs.__dict__)) | [] |
2024-01-10 | shenyubao/javachain | src~main~resources~docx2txtLoader.py | import sys
import json
from langchain.document_loaders import Docx2txtLoader
def load(filePath):
loader = Docx2txtLoader(filePath)
docs = loader.load()
return docs
if __name__ == '__main__':
input = sys.argv[1]
docs = load(input)
print(json.dumps(docs, default=lambda docs: docs.__dict__)) | [] |
2024-01-10 | shenyubao/javachain | src~main~resources~bsHtmlLoader.py | import sys
import json
from langchain.document_loaders import BSHTMLLoader
def load(filePath):
loader = BSHTMLLoader(filePath)
docs = loader.load()
return docs
if __name__ == '__main__':
input = sys.argv[1]
docs = load(input)
print(json.dumps(docs, default=lambda docs: docs.__dict__)) | [] |
2024-01-10 | shenyubao/javachain | src~main~resources~csvLoader_load.py | import sys
import json
from langchain.document_loaders.csv_loader import CSVLoader
def load(filePath):
loader = CSVLoader(file_path=filePath)
docs = loader.load()
return docs
if __name__ == '__main__':
input = sys.argv[1]
docs = load(input)
print(json.dumps(docs, default=lambda docs: docs.__dict__)) | [] |
2024-01-10 | shenyubao/javachain | src~main~resources~pdfLoader.py | import sys
import json
from langchain.document_loaders import UnstructuredPDFLoader
def load(filePath):
loader = UnstructuredPDFLoader(filePath)
docs = loader.load()
return docs
if __name__ == '__main__':
input = sys.argv[1]
docs = load(input)
print(json.dumps(docs, default=lambda docs: docs.__dict__)) | [] |
2024-01-10 | charris-msft/chat-with-your-data-solution-accelerator | code~utilities~helpers~LLMHelper.py | import openai
from typing import List
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from .EnvHelper import EnvHelper
class LLMHelper:
def __init__(self):
env_helper: EnvHelper = EnvHelper()
# Configure OpenAI API
openai.api_type = "azure"
openai.api_version = env_helper.AZURE_OPENAI_API_VERSION
openai.api_base = env_helper.OPENAI_API_BASE
openai.api_key = env_helper.OPENAI_API_KEY
self.llm_model = env_helper.AZURE_OPENAI_MODEL
self.llm_max_tokens = env_helper.AZURE_OPENAI_MAX_TOKENS if env_helper.AZURE_OPENAI_MAX_TOKENS != '' else None
self.embedding_model = env_helper.AZURE_OPENAI_EMBEDDING_MODEL
def get_llm(self):
return AzureChatOpenAI(deployment_name=self.llm_model, temperature=0, max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
# TODO: This needs to have a custom callback to stream back to the UI
def get_streaming_llm(self):
        return AzureChatOpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], deployment_name=self.llm_model, temperature=0,
max_tokens=self.llm_max_tokens, openai_api_version=openai.api_version)
def get_embedding_model(self):
return OpenAIEmbeddings(deployment=self.embedding_model, chunk_size=1)
def get_chat_completion_with_functions(self, messages: List[dict], functions: List[dict], function_call: str="auto"):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
functions=functions,
function_call=function_call,
)
def get_chat_completion(self, messages: List[dict]):
return openai.ChatCompletion.create(
deployment_id=self.llm_model,
messages=messages,
)
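# Illustrative usage sketch (not in the original module); assumes the Azure OpenAI
# settings read by EnvHelper are present in the environment.
def _example_llm_helper():
    helper = LLMHelper()
    llm = helper.get_llm()  # LangChain AzureChatOpenAI wrapper
    completion = helper.get_chat_completion([{"role": "user", "content": "Hello"}])
    return llm, completion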
| [] |
2024-01-10 | charris-msft/chat-with-your-data-solution-accelerator | code~utilities~document_chunking~Layout.py | from typing import List
from .DocumentChunkingBase import DocumentChunkingBase
from langchain.text_splitter import MarkdownTextSplitter
from .Strategies import ChunkingSettings
from ..common.SourceDocument import SourceDocument
class LayoutDocumentChunking(DocumentChunkingBase):
def __init__(self) -> None:
pass
def chunk(self, documents: List[SourceDocument], chunking: ChunkingSettings) -> List[SourceDocument]:
full_document_content = "".join(list(map(lambda document: document.content, documents)))
document_url = documents[0].source
splitter = MarkdownTextSplitter.from_tiktoken_encoder(chunk_size=chunking.chunk_size, chunk_overlap=chunking.chunk_overlap)
chunked_content_list = splitter.split_text(full_document_content)
# Create document for each chunk
documents = []
chunk_offset = 0
for idx, chunked_content in enumerate(chunked_content_list):
documents.append(
SourceDocument.from_metadata(
content=chunked_content,
document_url=document_url,
metadata={"offset": chunk_offset},
idx=idx,
)
)
chunk_offset += len(chunked_content)
return documents
| [] |
2024-01-10 | charris-msft/chat-with-your-data-solution-accelerator | code~utilities~orchestrator~Strategies.py | from enum import Enum
class OrchestrationStrategy(Enum):
OPENAI_FUNCTION = 'openai_function'
LANGCHAIN = 'langchain'
def get_orchestrator(orchestration_strategy: str):
if orchestration_strategy == OrchestrationStrategy.OPENAI_FUNCTION.value:
from .OpenAIFunctions import OpenAIFunctionsOrchestrator
return OpenAIFunctionsOrchestrator()
elif orchestration_strategy == OrchestrationStrategy.LANGCHAIN.value:
from .LangChainAgent import LangChainAgent
return LangChainAgent()
else:
raise Exception(f"Unknown orchestration strategy: {orchestration_strategy}")
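# Illustrative usage (not in the original module): the strategy string normally
# comes from configuration; note the concrete orchestrators are loaded via
# relative imports, so this only works inside the package.
def _example_orchestrator():
    return get_orchestrator(OrchestrationStrategy.OPENAI_FUNCTION.value)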
| [] |
2024-01-10 | andevrrr/ExpenseTracker-NodeJs-Python | python~categorize.py | import pandas as pd
import openai
from dotenv import load_dotenv
import os
import sys
if len(sys.argv) < 2:
print("Error: No file path provided")
sys.exit(1)
file_path = sys.argv[1]
# Load the environment variables
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
def categorize_business_with_gpt(business_name, transaction_type, payer, categories):
# Updated prompt to include transaction type
prompt = (
f"'{payer}' is the payer, '{transaction_type}' is the transaction type, and '{business_name}' is the receiver. "
f"This is a bank statement from a user where you can see their purchases, transfers, etc. "
f"To categorize a transaction, consider transactions with both a name and surname in both the receiver and payer fields. "
f"Determine the most appropriate category for this transaction from the following list: {', '.join(categories)}. Answer in just one of the categories from the list."
)
try:
response = openai.Completion.create(
model="gpt-3.5-turbo-instruct", # Use the latest available model
prompt=prompt,
max_tokens=60
)
return response.choices[0].text.strip()
except openai.error.OpenAIError as e:
print(f"An error occurred: {e}")
return "Error: Unable to categorize"
def categorize_transaction(row, categories):
business_name = row['Saajan nimi']
transaction_type = row['Tapahtumalaji']
payer = row['Maksaja']
return categorize_business_with_gpt(business_name, transaction_type, payer, categories)
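# Illustrative sketch (not part of the original script): categorize_transaction only
# reads the three columns used below, so a single made-up row can be tested as a
# pandas Series. An OpenAI API key is still required for the underlying call.
def _example_single_row():
    sample = pd.Series({
        "Saajan nimi": "K-Market Kamppi",
        "Tapahtumalaji": "Card payment",
        "Maksaja": "Matti Meikäläinen",
    })
    return categorize_transaction(sample, categories)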
pd.set_option('display.max_rows', 100)
# Load the CSV file
df = pd.read_csv(file_path, delimiter=';') # Adjust delimiter if needed
transactions = df[['Summa', 'Maksupäivä', 'Maksaja', 'Saajan nimi', 'Tapahtumalaji']].copy()  # keep 'Tapahtumalaji'; .copy() avoids SettingWithCopyWarning when 'Category' is added below
# Define your categories
categories = [
"Groceries",
"Utilities",
"Entertainment",
"Dining and Restaurants",
"Transportation",
"Healthcare",
"Clothing and Apparel",
"Technology and Electronics",
"Subscriptions and Memberships",
"Home and Garden",
"Education",
"Travel and Accommodation",
"Gifts and Donations",
"Financial Services",
"Sports and Recreation",
"Housing and Leasing",
"Transfers",
"Taxi",
]
# Apply categorization
transactions['Category'] = transactions.apply(lambda row: categorize_transaction(row, categories), axis=1)
# Output the categorized data
print(transactions)  # For testing, print the categorized transactions (display capped at 100 rows above)
# Optionally, save to JSON
# transactions.to_json('/mnt/data/categorized_transactions.json')
| [
"To categorize a transaction, consider transactions with both a name and surname in both the receiver and payer fields. ",
", ",
"This is a bank statement from a user where you can see their purchases, transfers, etc. ",
"'PLACEHOLDER' is the payer, 'PLACEHOLDER' is the transaction type, and 'PLACEHOLDER' is the receiver. "
] |
2024-01-10 | htsnet/StreamlitHackathonLLM2023 | llm_stuff.py | from datetime import datetime
import streamlit as st
from langchain import LLMChain
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory, StreamlitChatMessageHistory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langsmith.client import Client
from streamlit_feedback import streamlit_feedback
_DEFAULT_SYSTEM_PROMPT = "You are a helpful chatbot."
def get_langsmith_client():
return Client(
api_key=st.session_state.langsmith_api_key,
)
def get_memory() -> ConversationBufferMemory:
return ConversationBufferMemory(
chat_memory=StreamlitChatMessageHistory(key="langchain_messages"),
return_messages=True,
memory_key="chat_history",
)
def get_llm_chain(
memory: ConversationBufferMemory,
system_prompt: str = _DEFAULT_SYSTEM_PROMPT,
temperature: float = 0.7,
) -> LLMChain:
"""Return a basic LLMChain with memory."""
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
system_prompt + "\nIt's currently {time}.",
),
MessagesPlaceholder(variable_name="chat_history"),
("human", "{input}"),
],
).partial(time=lambda: str(datetime.now()))
llm = ChatOpenAI(
temperature=temperature,
streaming=True,
openai_api_key=st.session_state.openai_api_key,
)
return LLMChain(prompt=prompt, llm=llm, memory=memory or get_memory())
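# Illustrative usage sketch (not in the original module); assumes
# st.session_state.openai_api_key has already been set by the app's sidebar.
def _example_chain_call():
    memory = get_memory()
    chain = get_llm_chain(memory, system_prompt="You are a terse assistant.", temperature=0.2)
    return chain.invoke({"input": "Say hello."})["text"]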
class StreamHandler(BaseCallbackHandler):
def __init__(self, container, initial_text=""):
self.container = container
self.text = initial_text
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.text += token
self.container.markdown(self.text)
def feedback_component(client):
scores = {"😀": 1, "🙂": 0.75, "😐": 0.5, "🙁": 0.25, "😞": 0}
if feedback := streamlit_feedback(
feedback_type="faces",
optional_text_label="[Optional] Please provide an explanation",
key=f"feedback_{st.session_state.run_id}",
):
score = scores[feedback["score"]]
feedback = client.create_feedback(
st.session_state.run_id,
feedback["type"],
score=score,
comment=feedback.get("text", None),
)
st.session_state.feedback = {"feedback_id": str(feedback.id), "score": score}
st.toast("Feedback recorded!", icon="📝") | [
"{input}",
"chat_history",
"You are a helpful chatbot.",
"human",
"\nIt's currently {time}."
] |
2024-01-10 | htsnet/StreamlitHackathonLLM2023 | chat_other.py | import os
import streamlit as st
from langchain.callbacks.manager import tracing_v2_enabled
from langchain.callbacks.tracers.langchain import wait_for_all_tracers
from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler
from langchain.schema.runnable import RunnableConfig
from openai.error import AuthenticationError
from llm_stuff import (
_DEFAULT_SYSTEM_PROMPT,
get_memory,
get_llm_chain,
StreamHandler,
feedback_component,
get_langsmith_client,
)
st.set_page_config(
page_title="Chat LangSmith",
page_icon="🦜",
)
# "# Chat🦜🛠️"
# Initialize State
if "trace_link" not in st.session_state:
st.session_state.trace_link = None
if "run_id" not in st.session_state:
st.session_state.run_id = None
st.sidebar.markdown(
"""
# Menu
""",
)
openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
st.session_state.openai_api_key = openai_api_key
langsmith_api_key = st.sidebar.text_input(
"LangSmith API Key (optional)",
type="password",
)
st.session_state.langsmith_api_key = langsmith_api_key
if st.session_state.langsmith_api_key.startswith("ls__"):
langsmith_project = st.sidebar.text_input(
"LangSmith Project Name",
value="langchain-streamlit-demo",
)
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = st.session_state.langsmith_api_key
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = langsmith_project
client = get_langsmith_client()
else:
langsmith_project = None
client = None
if st.session_state.openai_api_key.startswith("sk-"):
system_prompt = (
st.sidebar.text_area(
"Custom Instructions",
_DEFAULT_SYSTEM_PROMPT,
help="Custom instructions to provide the language model to determine style, personality, etc.",
)
.strip()
.replace("{", "{{")
.replace("}", "}}")
)
temperature = st.sidebar.slider(
"Temperature",
min_value=0.0,
max_value=1.0,
value=0.7,
help="Higher values give more random results.",
)
memory = get_memory()
chain = get_llm_chain(memory, system_prompt, temperature)
run_collector = RunCollectorCallbackHandler()
if st.sidebar.button("Clear message history"):
print("Clearing message history")
memory.clear()
st.session_state.trace_link = None
st.session_state.run_id = None
# Display chat messages from history on app rerun
# NOTE: This won't be necessary for Streamlit 1.26+, you can just pass the type directly
# https://github.com/streamlit/streamlit/pull/7094
def _get_openai_type(msg):
if msg.type == "human":
return "user"
if msg.type == "ai":
return "assistant"
return msg.role if msg.type == "chat" else msg.type
for msg in st.session_state.langchain_messages:
streamlit_type = _get_openai_type(msg)
avatar = "🦜" if streamlit_type == "assistant" else None
with st.chat_message(streamlit_type, avatar=avatar):
st.markdown(msg.content)
if st.session_state.trace_link:
st.sidebar.markdown(
f'<a href="{st.session_state.trace_link}" target="_blank"><button>Latest Trace: 🛠️</button></a>',
unsafe_allow_html=True,
)
def _reset_feedback():
st.session_state.feedback_update = None
st.session_state.feedback = None
if prompt := st.chat_input(placeholder="Ask me a question!"):
st.chat_message("user").write(prompt)
_reset_feedback()
with st.chat_message("assistant", avatar="🦜"):
message_placeholder = st.empty()
stream_handler = StreamHandler(message_placeholder)
runnable_config = RunnableConfig(
callbacks=[run_collector, stream_handler],
tags=["Streamlit Chat"],
)
try:
if client and langsmith_project:
with tracing_v2_enabled(project_name=langsmith_project):
full_response = chain.invoke(
{"input": prompt},
config=runnable_config,
)["text"]
else:
full_response = chain.invoke(
{"input": prompt},
config=runnable_config,
)["text"]
except AuthenticationError:
st.error("Please enter a valid OpenAI API key.", icon="❌")
st.stop()
message_placeholder.markdown(full_response)
if client:
run = run_collector.traced_runs[0]
run_collector.traced_runs = []
st.session_state.run_id = run.id
wait_for_all_tracers()
url = client.read_run(run.id).url
st.session_state.trace_link = url
if client and st.session_state.get("run_id"):
feedback_component(client)
else:
st.error("Please enter a valid OpenAI API key.", icon="❌")
st.stop() | [
"Custom Instructions",
"Custom instructions to provide the language model to determine style, personality, etc."
] |
2024-01-10 | htsnet/StreamlitHackathonLLM2023 | pages~Ask_About_%E2%9D%93.py | import streamlit as st
import pandas as pd
from pytube import YouTube
from matplotlib.patches import Arc
from langchain.callbacks.manager import tracing_v2_enabled
from langchain.callbacks.tracers.langchain import wait_for_all_tracers
from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler
from langchain.schema.runnable import RunnableConfig
from openai.error import AuthenticationError
from langsmith import Client
from llm_stuff import (
_DEFAULT_SYSTEM_PROMPT,
get_memory,
get_llm_chain,
StreamHandler,
get_langsmith_client,
)
# auth_key from secrets
auth_key = st.secrets['auth_key']
st.set_page_config(page_title='LLM with Streamlit',
page_icon='👀', layout='centered', initial_sidebar_state='expanded' )
# to hide streamlit menu
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
# pass javascript to hide streamlit menu
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
def main():
if st.session_state.process_status == 'done':
st.subheader('❓:red[ **Ask about the video**]')
langsmith_project = None
client = None
        instructions = "You are a helpful chatbot. Respond to questions considering the following text. If you don't know the answer, say so and don't create any new information. <text>"
if st.session_state.openai_api_key.startswith("sk-"):
system_prompt = instructions + st.session_state.chat_text.strip().replace("{", "{{").replace("}", "}}") + "</text>"
memory = get_memory()
chain = get_llm_chain(memory, system_prompt, 0)
run_collector = RunCollectorCallbackHandler()
def _get_openai_type(msg):
if msg.type == "human":
return "user"
if msg.type == "ai":
return "assistant"
return msg.role if msg.type == "chat" else msg.type
for msg in st.session_state.langchain_messages:
streamlit_type = _get_openai_type(msg)
avatar = "👀" if streamlit_type == "assistant" else None
with st.chat_message(streamlit_type, avatar=avatar):
st.markdown(msg.content)
if st.session_state.trace_link:
st.sidebar.markdown(
f'<a href="{st.session_state.trace_link}" target="_blank"><button>Latest Trace: 🛠️</button></a>',
unsafe_allow_html=True,
)
def _reset_feedback():
st.session_state.feedback_update = None
st.session_state.feedback = None
if prompt := st.chat_input(placeholder="Ask me a question!"):
st.chat_message("user").write(prompt)
_reset_feedback()
with st.chat_message("assistant", avatar="👀"):
message_placeholder = st.empty()
stream_handler = StreamHandler(message_placeholder)
runnable_config = RunnableConfig(
callbacks=[run_collector, stream_handler],
tags=["Streamlit Chat"],
)
try:
if client and langsmith_project:
with tracing_v2_enabled(project_name=langsmith_project):
full_response = chain.invoke(
{"input": prompt},
config=runnable_config,
)["text"]
else:
full_response = chain.invoke(
{"input": prompt},
config=runnable_config,
)["text"]
except AuthenticationError:
st.error("Please enter a valid OpenAI API key.", icon="❌")
st.stop()
message_placeholder.markdown(full_response)
if client:
run = run_collector.traced_runs[0]
run_collector.traced_runs = []
st.session_state.run_id = run.id
wait_for_all_tracers()
url = client.read_run(run.id).url
st.session_state.trace_link = url
else:
st.error("Please enter a valid OpenAI API key.", icon="❌")
st.stop()
else:
st.markdown('Process the video first!')
if __name__ == '__main__':
main() | [] |
2024-01-10 | htsnet/StreamlitHackathonLLM2023 | app_ai.py | import streamlit as st
import pandas as pd
from pytube import YouTube
import requests
import time
import assemblyai as aai
from collections import defaultdict
import re
import nltk
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
from matplotlib.patches import Arc
from langchain.callbacks.manager import tracing_v2_enabled
from langchain.callbacks.tracers.langchain import wait_for_all_tracers
from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler
from langchain.schema.runnable import RunnableConfig
from openai.error import AuthenticationError
from langsmith import Client
from llm_stuff import (
_DEFAULT_SYSTEM_PROMPT,
get_memory,
get_llm_chain,
StreamHandler,
get_langsmith_client,
)
# auth_key from secrets
auth_key = st.secrets['auth_key']
# global variables
audio_location = ''
audio_url = ''
link = ''
link_new = ''
# Initialize State
if "trace_link" not in st.session_state:
st.session_state.trace_link = None
if "run_id" not in st.session_state:
st.session_state.run_id = None
if "openai_api_key" not in st.session_state:
st.session_state.openai_api_key = ''
st.session_state.transcription = ''
st.session_state.process_status = ''
st.session_state.chat_text = ''
# youtube-dl options
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': './%(id)s.%(ext)s',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
}
CHUNK_SIZE = 5242880
# endpoints
upload_endpoint = 'https://api.assemblyai.com/v2/upload'
headers = {
"authorization": auth_key,
"content-type": "application/json"
}
st.set_page_config(page_title='LLM with Streamlit',
page_icon='👀', layout='centered', initial_sidebar_state='expanded' )
# to hide streamlit menu
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
# pass javascript to hide streamlit menu
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
# @st.cache_data(ttl=600)
def download_audio(link):
global audio_location
_id = link.strip()
def get_vid(_id):
# create object YouTube
yt = YouTube(_id)
audio_stream = yt.streams.filter(only_audio=True).first()
# print(audio_stream)
return audio_stream
# download the audio of the Youtube video locally
audio_stream = get_vid(_id)
download_path = './'
audio_location = audio_stream.download(output_path=download_path)
# print('Saved audio to', audio_location)
def read_file(filename):
with open(filename, 'rb') as _file:
while True:
data = _file.read(CHUNK_SIZE)
if not data:
break
yield data
def upload_audio():
global audio_location
global audio_url
upload_response = requests.post(
upload_endpoint,
headers=headers,
data=read_file(audio_location)
)
audio_url = upload_response.json()['upload_url']
def gauge_chart(value, max_value, label):
fig, ax = plt.subplots(figsize=(6, 5))
    # Define angles
start_angle = 0
end_angle_red = 180
    end_angle_green = 180 - (value / max_value) * 180  # counted back from 180, so a higher value leaves more green
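    # Worked example (illustrative): value=75, max_value=100 gives
    # end_angle_green = 180 - 0.75 * 180 = 45, so the red overlay spans 45 degrees
    # and the remaining 135 degrees of the half-circle stay green.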
    # arc width and radius
arc_width = 0.2 # width of the arc
arc_radius = 0.4 # Radius of the arc
# Arc green
arc_green = Arc((0.5, 0.5), arc_radius * 2, arc_radius * 2, angle=0, theta1=start_angle, theta2=end_angle_red, color='green', lw=40)
ax.add_patch(arc_green)
# Arc red
arc_red = Arc((0.5, 0.5), arc_radius * 2, arc_radius * 2, angle=0, theta1=start_angle, theta2=end_angle_green, color='red', lw=40)
ax.add_patch(arc_red)
# aditional settings
ax.axis('off')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
    # explanatory text labels
ax.text(0.5, 0.6, "{:.1f}%".format(round(value, 1)), ha='center', va='center', fontsize=20)
ax.text(0.5, 0.5, label, ha='center', va='center', fontsize=16)
ax.text(0.5, 0.25, "Global Confidence", ha='center', va='center', fontsize=26, color='black')
ax.text(0.5, 0.1, "Greater green bar is better", ha='center', va='center', fontsize=18, color='green')
return fig
def main():
global audio_location
global audio_url
global link
global link_new
with st.sidebar:
# st.image('logo-250-transparente.png')
st.header('Information')
st.write("""
This project was created with the goal of participating in the 'Streamlit LLM Hackathon 2023'.
\nThis site uses **AssemblyAI** to transcribe audio from YouTube videos and **LangChain** to handle chat.
\nTo chat about the video, please, supply your OPENAI API KEY.
\nAt this point, the video must be in English.
""")
st.header('OpenAI API KEY')
if st.session_state.openai_api_key == '':
st.write("""
❗ If you want to "ask questions about" the video, please, supply your OPENAI API KEY **before** starting.
""")
st.session_state.openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password", value=st.session_state.openai_api_key)
else:
st.write('Using the OPENAI API KEY already supplied before.')
st.header('About')
st.write('Details about this project can be found in https://github.com/htsnet/StreamlitHackathonLLM2023')
    # title
title = f'Audio transcription and analysis with LLM'
st.title(title)
subTitle = f'Using a Youtube video link, this site will transcribe the audio and show relevant information.'
st.subheader(subTitle)
# information tabs
st.markdown('<style>[id^="tabs-bui3-tab-"] > div > p{font-size:20px;}</style>', unsafe_allow_html=True)
# emoji list https://streamlit-emoji-shortcodes-streamlit-app-gwckff.streamlit.app/
    tab1, tab2, tab3, tab4 = st.tabs(['📹:red[ **Video Process**]', '📖:red[ **Transcription**]', '📄:red[ **Summary**]', '🏷️:red[ **Categories**]'])
with tab1:
st.subheader('Start here!')
# link
link = st.text_input('Paste your Youtube video link and press Enter')
# download stopwords
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
if link != '':
if link_new == link:
st.toast('Video already processed!', icon='❗')
else:
link_new = link
time_start = time.time()
aai.settings.api_key = auth_key
col1, col2, col3 = st.columns(3)
# using col1 to reduce width of video
with col1:
st.video(link)
try:
with st.spinner('Getting audio... (1/3)'):
download_audio(link)
with st.spinner('Uploading audio... (2/3)'):
upload_audio()
with col2:
st.write('Uploaded audio to', audio_url)
with st.spinner('Transcribing audio... (3/3)'):
config = aai.TranscriptionConfig(
speaker_labels=True,
iab_categories=True
)
transcriber = aai.Transcriber()
transcript = transcriber.transcribe(
audio_url,
config=config
)
# st.markdown(transcript.text)
st.session_state.transcription = transcript
# dictionary to store the words
word_counts = defaultdict(int)
# regular expression to remove punctuation
word_pattern = re.compile(r'\b\w+\b')
word_count = 0
confidence_values = 0
if st.session_state.transcription.error:
st.write(st.session_state.transcription.error)
st.session_state.process_status = 'error'
st.toast('Problem with the video!', icon='❗')
st.stop()
try:
# read json result and count words
for pieces in transcript.utterances:
words = pieces.words
for word in words:
# remove punctuation and convert to lowercase
text = word_pattern.findall(word.text)
# sum 1 for each word found, if not empty
if text and text[0] not in stop_words:
word_counts[text[0].lower()] += 1
word_count += 1
confidence_values += word.confidence
except Exception as e:
st.write(e)
st.session_state.process_status = 'done'
if st.session_state.openai_api_key != '':
st.write("Ask question about the content! **Click on sidebar** to go to chat.")
else:
st.write("Sorry, there wasn't an OPENAI API KEY to ask questions...")
time_stop = time.time()
except Exception as e:
st.write('Error! Maybe the video is private. Try another')
st.write(e)
st.session_state.process_status = ''
time_stop = time.time()
st.toast('Problem with the video!', icon='❗')
st.stop()
with col2:
time_total = time_stop - time_start
st.write('🕔 Processed in', "{:.1f}".format(round(time_total, 1)), 'seconds!')
with col3:
# st.markdown(f"Total words: {word_count}")
# st.markdown(f"Total confidence: {confidence_values}")
# st.markdown(f"Average confidence: {confidence_values/word_count}")
if word_count > 0:
confidence = confidence_values/word_count * 100
else:
confidence = 0
# Gauge Chart
max_value = 100
st.pyplot(gauge_chart(confidence, max_value, f'{word_count} words'))
st.markdown('See the tabs above for information about the audio!')
st.toast('Great. Video processed! Enjoy', icon='🎉')
with tab2:
st.subheader('Audio Transcription')
if st.session_state.process_status == 'done':
# Get the parts of the transcript that were tagged with topics
st.session_state.chat_text = ''
for result in st.session_state.transcription.iab_categories.results:
st.session_state.chat_text += result.text + ' '
st.markdown(result.text)
# st.markdown(f"Timestamp: {result.timestamp.start} - {result.timestamp.end}")
# for label in result.labels:
# st.markdown(label.label) # topic
# st.markdown(label.relevance) # how relevant the label is for the portion of text
else:
st.markdown('Process the video first!')
with tab3:
        st.subheader('Summary')
if st.session_state.process_status == 'done':
# sort descending
sorted_word_counts = dict(sorted(word_counts.items(), key=lambda item: item[1], reverse=True))
# show the words more used
st.write("WORDS USED MORE THAN 3 TIMES")
word_count_tuples = [(word, count) for word, count in sorted_word_counts.items() if count > 3]
# create a dataframe with the list of tuples
df = pd.DataFrame(word_count_tuples, columns=["Word", "Count"])
# show the dataframe
st.table(df)
else:
st.markdown('Process the video first!')
with tab4:
st.subheader('Relevant Categories')
if st.session_state.process_status == 'done':
# Get a summary of all topics in the transcript
for label, relevance in st.session_state.transcription.iab_categories.summary.items():
relevance = "{:.1f}%".format(round(relevance * 100, 1))
st.markdown(f"{label} ({relevance})")
else:
st.markdown('Process the video first!')
if __name__ == '__main__':
main() | [
"application/json"
] |
2024-01-10 | htsnet/StreamlitHackathonLLM2023 | chat_tutorial.py | import streamlit as st
import os
from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationBufferWindowMemory, ConversationSummaryBufferMemory, VectorStoreRetrieverMemory
from langchain.chains import ConversationChain
from langchain.prompts.prompt import PromptTemplate
from langchain.docstore import InMemoryDocstore
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.schema import messages_from_dict, messages_to_dict
os.environ["OPENAI_API_KEY"] = st.secrets["openai_key"]
history = ChatMessageHistory()
history.add_user_message("Hello")
history.add_ai_message("Hi, what do you want to know about the video?")
text_to_talk = """We are talking about this: O serviço best seller em utilização. Consiste na preparação do evento e registro de presença em tablets de forma off-line (sem internet no local).
Você faz a criação do evento, importa ou digita os nomes dos convidados, marca quem é VIP, indica grupos e vários outro recursos.
Para esta versão você baixa o aplicativo ConfirmAki PRO (disponível para Android e iOS) gratuitamente.
No dia do evento, você transfere o evento para o app nos tablets que serão usados na recepção e está tudo pronto para iniciar o registro de presença dos convidados. A partir deste ponto você não precisa mais da internet. Sua recepção pode ser em um parque, um salão, na praia, em um sítio ou qualquer lugar. Não é mais preciso da internet.
Terminado o evento, retorne com os tablets a um local com internet e, com um simples toque, transfira para o servidor todos os registros realizados. Você já pode emitir um relatório PDF com os que compareceram ao evento ou baixar uma planilha com todos os dados e usar como desejar.
CONFIRMAKI PREMIUM
Quer mais segurança na recepção de convidados? Então use a versão Premium, que trabalha on-line (com internet de forma constante).
Usa o mesmo conceito da versão PRO, sendo que o registro da presença é sempre marcada no servidor. Assim, uma vez registrada a presença de um convidado, qualquer nova busca por este convidado indica que o mesmo já foi registrado, independente da recepcionista que atenda.
Para esta versão você baixa o aplicativo ConfirmAki Premium (disponível para Android e iOS) gratuitamente.
Não é preciso transferir antecipadamente o evento para o tablet. Nesta versão, toda informação é buscada no servidor e atualizada imediatamente. Você pode aumentar ou reduzir o número de recepcionistas à vontade, sem interferir no registro de presença. Tudo está centralizado no servidor.
A qualquer momento, ou no final do evento, você tem todas as informações de presença, horário de registro, quem registrou e outras informações de apoio."""
memory = ConversationBufferMemory(chat_memory=history)
history.buffer = memory.load_memory_variables({})
memory = ConversationBufferMemory(chat_memory=history, ai_prefix="AI", human_prefix="User")
chat_gpt = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
conversation = ConversationChain(llm=chat_gpt,
memory=ConversationBufferMemory(),
verbose=True
)
# The ConversationChain prompt must keep the {history} and {input} placeholders.
conversation.prompt.template = (
    "The following is a friendly conversation between a human and an AI.\n"
    "The AI is talkative and provides lots of specific details about the text that follows.\n"
    "If the AI does not know the answer to a question, it says so and does not invent new information.\n"
    "<text>" + text_to_talk + "</text>\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:"
)
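# Minimal usage sketch (assumption, not part of the original tutorial): once the prompt
# is set, the chain answers questions grounded in ``text_to_talk``.
#     answer = conversation.predict(input="What does the ConfirmAki PRO version do offline?")
#     print(answer)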
| [] |
2024-01-10 | htsnet/StreamlitHackathonLLM2023 | chat_langchain.py | import os
import tempfile
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import ConversationalRetrievalChain
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.text_splitter import RecursiveCharacterTextSplitter
st.set_page_config(page_title="LangChain: Chat with Documents", page_icon="🦜")
st.title("🦜 LangChain: Chat with Documents")
@st.cache_resource(ttl="1h")
def configure_retriever(uploaded_files):
# Read documents
docs = []
temp_dir = tempfile.TemporaryDirectory()
for file in uploaded_files:
temp_filepath = os.path.join(temp_dir.name, file.name)
with open(temp_filepath, "wb") as f:
f.write(file.getvalue())
loader = PyPDFLoader(temp_filepath)
docs.extend(loader.load())
# Split documents
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
# Create embeddings and store in vectordb
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
vectordb = DocArrayInMemorySearch.from_documents(splits, embeddings)
# Define retriever
retriever = vectordb.as_retriever(search_type="mmr", search_kwargs={"k": 2, "fetch_k": 4})
return retriever
class StreamHandler(BaseCallbackHandler):
def __init__(self, container: st.delta_generator.DeltaGenerator, initial_text: str = ""):
self.container = container
self.text = initial_text
self.run_id_ignore_token = None
def on_llm_start(self, serialized: dict, prompts: list, **kwargs):
# Workaround to prevent showing the rephrased question as output
if prompts[0].startswith("Human"):
self.run_id_ignore_token = kwargs.get("run_id")
def on_llm_new_token(self, token: str, **kwargs) -> None:
if self.run_id_ignore_token == kwargs.get("run_id", False):
return
self.text += token
self.container.markdown(self.text)
class PrintRetrievalHandler(BaseCallbackHandler):
def __init__(self, container):
self.status = container.status("**Context Retrieval**")
def on_retriever_start(self, serialized: dict, query: str, **kwargs):
self.status.write(f"**Question:** {query}")
self.status.update(label=f"**Context Retrieval:** {query}")
def on_retriever_end(self, documents, **kwargs):
for idx, doc in enumerate(documents):
source = os.path.basename(doc.metadata["source"])
self.status.write(f"**Document {idx} from {source}**")
self.status.markdown(doc.page_content)
self.status.update(state="complete")
openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
uploaded_files = st.sidebar.file_uploader(
label="Upload PDF files", type=["pdf"], accept_multiple_files=True
)
if not uploaded_files:
st.info("Please upload PDF documents to continue.")
st.stop()
retriever = configure_retriever(uploaded_files)
# Setup memory for contextual conversation
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(memory_key="chat_history", chat_memory=msgs, return_messages=True)
# Setup LLM and QA chain
llm = ChatOpenAI(
model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, temperature=0, streaming=True
)
qa_chain = ConversationalRetrievalChain.from_llm(
llm, retriever=retriever, memory=memory, verbose=True
)
if len(msgs.messages) == 0 or st.sidebar.button("Clear message history"):
msgs.clear()
msgs.add_ai_message("How can I help you?")
avatars = {"human": "user", "ai": "assistant"}
for msg in msgs.messages:
st.chat_message(avatars[msg.type]).write(msg.content)
if user_query := st.chat_input(placeholder="Ask me anything!"):
st.chat_message("user").write(user_query)
with st.chat_message("assistant"):
retrieval_handler = PrintRetrievalHandler(st.container())
stream_handler = StreamHandler(st.empty())
response = qa_chain.run(user_query, callbacks=[retrieval_handler, stream_handler]) | [] |
2024-01-10 | jacoblee93/oss-model-extraction-evals | bootstrap_dataset.py | import os
import glob
from typing import Optional, List
from enum import Enum
from langchain.pydantic_v1 import BaseModel, Field
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains.openai_functions import (
convert_to_openai_function,
get_openai_output_parser,
)
class ToneEnum(str, Enum):
positive = "positive"
negative = "negative"
class Email(BaseModel):
"""Relevant information about an email."""
sender: Optional[str] = Field(None, description="The sender's name, if available")
sender_phone_number: Optional[str] = Field(None, description="The sender's phone number, if available")
sender_address: Optional[str] = Field(None, description="The sender's address, if available")
action_items: List[str] = Field(..., description="A list of action items requested by the email")
topic: str = Field(..., description="High level description of what the email is about")
tone: ToneEnum = Field(..., description="The tone of the email.")
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are an expert researcher."),
(
"human",
"What can you tell me about the following email? Make sure to answer in the correct format: {email}",
),
]
)
openai_functions = [convert_to_openai_function(Email)]
llm_kwargs = {
"functions": openai_functions,
"function_call": {"name": openai_functions[0]["name"]}
}
llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
output_parser = get_openai_output_parser([Email])
extraction_chain = prompt | llm.bind(**llm_kwargs) | output_parser
files = glob.glob('./dataset/*')
for file in files:
with open(file, 'r') as f:
content = f.read()
print(file)
extraction_chain.invoke({
"email": content
})
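# Hypothetical variant (not in the original script): keep the parsed ``Email`` object
# instead of discarding the chain's return value, e.g.
#     extracted = extraction_chain.invoke({"email": content})
#     print(extracted.topic, extracted.tone, extracted.action_items)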
| [
"What can you tell me about the following email? Make sure to answer in the correct format: {email}",
"[('system', 'You are an expert researcher.'), ('human', 'What can you tell me about the following email? Make sure to answer in the correct format: {email}')]",
"human",
"You are an expert researcher."
] |
2024-01-10 | jacoblee93/oss-model-extraction-evals | format.py | import mailbox
from langchain.schema import Document
from langchain.document_transformers import Html2TextTransformer
# Replace 'input.mbox' with your MBOX file name
mbox_file = '../Mail/Spam.mbox'
mbox = mailbox.mbox(mbox_file)
html2text = Html2TextTransformer()
for i, message in enumerate(mbox):
print(i)
with open(f'./dataset/email_{i}.eml', 'wb') as f:
raw_doc = Document(page_content=message.as_string())
transformed_docs = html2text.transform_documents([raw_doc])
f.write(transformed_docs[0].page_content.replace("jacoblee93", "jacob").replace("= ", "").replace("=E2=80=94", "").encode("utf-8")) | [] |
2024-01-10 | jacoblee93/oss-model-extraction-evals | run_evals.py | import os
from typing import Optional, List
from enum import Enum
from langsmith import Client
from langchain.smith import RunEvalConfig, run_on_dataset
from langchain_experimental.llms.ollama_functions import OllamaFunctions
from langchain_experimental.llms.anthropic_functions import AnthropicFunctions
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain.pydantic_v1 import BaseModel, Field
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains.openai_functions import (
convert_to_openai_function
)
class ToneEnum(str, Enum):
positive = "positive"
negative = "negative"
class Email(BaseModel):
"""Relevant information about an email."""
sender: Optional[str] = Field(None, description="The sender's name, if available")
sender_phone_number: Optional[str] = Field(None, description="The sender's phone number, if available")
sender_address: Optional[str] = Field(None, description="The sender's address, if available")
action_items: List[str] = Field(..., description="A list of action items requested by the email")
topic: str = Field(..., description="High level description of what the email is about")
tone: ToneEnum = Field(..., description="The tone of the email.")
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are an expert researcher."),
(
"human",
"What can you tell me about the following email? Make sure to answer in the correct format: {email}",
),
]
)
openai_functions = [convert_to_openai_function(Email)]
llm_kwargs = {
"functions": openai_functions,
"function_call": {"name": openai_functions[0]["name"]}
}
# Ollama JSON mode has a bug where it infintely generates newlines. This stop sequence hack fixes it
llm = OllamaFunctions(temperature=0, model="llama2", timeout=300, stop=["\n\n\n\n"])
# llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
# llm = AnthropicFunctions(temperature=0, model="claude-2")
# output_parser = get_openai_output_parser([Email])
output_parser = JsonOutputFunctionsParser()
extraction_chain = prompt | llm.bind(**llm_kwargs) | output_parser | (lambda x: { "output": x })
eval_llm = ChatOpenAI(model="gpt-4", temperature=0.0, model_kwargs={"seed": 42})
evaluation_config = RunEvalConfig(
evaluators=[
RunEvalConfig.LabeledScoreString(
criteria={
"accuracy": """
Score 1: The answer is incorrect and unrelated to the question or reference document.
Score 3: The answer is partially correct but has more than one omission or major errors.
Score 5: The answer is mostly correct but has more than one omission or major error.
Score 7: The answer is mostly correct but has at most one omission or major error.
Score 9: The answer is mostly correct with no omissions and only minor errors, and aligns with the reference document.
Score 10: The answer is correct, complete, and aligns with the reference document. Extra information is acceptable if it is sensible.
If the reference answer contains multiple alternatives, the predicted answer must only match one of the alternatives to be considered correct.
If the predicted answer contains additional helpful and accurate information that is not present in the reference answer, it should still be considered correct and not be penalized.
""" # noqa
}, llm=eval_llm, normalize_by=10.0
),
],
)
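# Illustrative note (assumption about the scorer's behaviour): with ``normalize_by=10.0``
# the grader's 1-10 score is divided by 10, so a raw score of 7 is recorded as 0.7.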
client = Client()
run_on_dataset(
dataset_name="Extraction Over Spam Emails",
llm_or_chain_factory=extraction_chain,
client=client,
evaluation=evaluation_config,
project_name="llama2-test",
concurrency_level=1,
)
| [
"What can you tell me about the following email? Make sure to answer in the correct format: {email}",
"[('system', 'You are an expert researcher.'), ('human', 'What can you tell me about the following email? Make sure to answer in the correct format: {email}')]",
"human",
"You are an expert researcher."
] |
2024-01-10 | Wenxuan-Zhou/stable-baselines3 | stable_baselines3~common~policies.py | """Policies: abstract base class and concrete implementations."""
import collections
import copy
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
import gym
import numpy as np
import torch as th
from torch import nn
from stable_baselines3.common.distributions import (
BernoulliDistribution,
CategoricalDistribution,
DiagGaussianDistribution,
Distribution,
MultiCategoricalDistribution,
StateDependentNoiseDistribution,
make_proba_distribution,
)
from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, maybe_transpose, preprocess_obs
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
create_mlp,
)
from stable_baselines3.common.type_aliases import Schedule
from stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor
BaseModelSelf = TypeVar("BaseModelSelf", bound="BaseModel")
class BaseModel(nn.Module):
"""
The base model object: makes predictions in response to observations.
In the case of policies, the prediction is an action. In the case of critics, it is the
estimated value of the observation.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
features_extractor: Optional[nn.Module] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__()
if optimizer_kwargs is None:
optimizer_kwargs = {}
if features_extractor_kwargs is None:
features_extractor_kwargs = {}
self.observation_space = observation_space
self.action_space = action_space
self.features_extractor = features_extractor
self.normalize_images = normalize_images
self.optimizer_class = optimizer_class
self.optimizer_kwargs = optimizer_kwargs
self.optimizer = None # type: Optional[th.optim.Optimizer]
self.features_extractor_class = features_extractor_class
self.features_extractor_kwargs = features_extractor_kwargs
def _update_features_extractor(
self,
net_kwargs: Dict[str, Any],
features_extractor: Optional[BaseFeaturesExtractor] = None,
) -> Dict[str, Any]:
"""
Update the network keyword arguments and create a new features extractor object if needed.
If a ``features_extractor`` object is passed, then it will be shared.
:param net_kwargs: the base network keyword arguments, without the ones
related to features extractor
:param features_extractor: a features extractor object.
If None, a new object will be created.
:return: The updated keyword arguments
"""
net_kwargs = net_kwargs.copy()
if features_extractor is None:
# The features extractor is not shared, create a new one
features_extractor = self.make_features_extractor()
net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))
return net_kwargs
def make_features_extractor(self) -> BaseFeaturesExtractor:
"""Helper method to create a features extractor."""
return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
def extract_features(self, obs: th.Tensor) -> th.Tensor:
"""
Preprocess the observation if needed and extract features.
:param obs:
:return:
"""
assert self.features_extractor is not None, "No features extractor was set"
preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
return self.features_extractor(preprocessed_obs)
def _get_constructor_parameters(self) -> Dict[str, Any]:
"""
Get data that need to be saved in order to re-create the model when loading it from disk.
        :return: The dictionary to pass as kwargs to the constructor when reconstructing this model.
"""
return dict(
observation_space=self.observation_space,
action_space=self.action_space,
# Passed to the constructor by child class
# squash_output=self.squash_output,
# features_extractor=self.features_extractor
normalize_images=self.normalize_images,
)
@property
def device(self) -> th.device:
"""Infer which device this policy lives on by inspecting its parameters.
If it has no parameters, the 'cpu' device is used as a fallback.
:return:"""
for param in self.parameters():
return param.device
return get_device("cpu")
def save(self, path: str) -> None:
"""
Save model to a given location.
:param path:
"""
th.save({"state_dict": self.state_dict(), "data": self._get_constructor_parameters()}, path)
@classmethod
def load(cls: Type[BaseModelSelf], path: str, device: Union[th.device, str] = "auto") -> BaseModelSelf:
"""
Load model from path.
:param path:
:param device: Device on which the policy should be loaded.
:return:
"""
device = get_device(device)
saved_variables = th.load(path, map_location=device)
# Allow to load policy saved with older version of SB3
if "sde_net_arch" in saved_variables["data"]:
warnings.warn(
"sde_net_arch is deprecated, please downgrade to SB3 v1.2.0 if you need such parameter.",
DeprecationWarning,
)
del saved_variables["data"]["sde_net_arch"]
# Create policy object
model = cls(**saved_variables["data"]) # pytype: disable=not-instantiable
# Load weights
model.load_state_dict(saved_variables["state_dict"])
model.to(device)
return model
def load_from_vector(self, vector: np.ndarray) -> None:
"""
Load parameters from a 1D vector.
:param vector:
"""
th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())
def parameters_to_vector(self) -> np.ndarray:
"""
Convert the parameters to a 1D vector.
:return:
"""
return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()
def set_training_mode(self, mode: bool) -> None:
"""
Put the policy in either training or evaluation mode.
This affects certain modules, such as batch normalisation and dropout.
:param mode: if true, set to training mode, else set to evaluation mode
"""
self.train(mode)
def obs_to_tensor(self, observation: Union[np.ndarray, Dict[str, np.ndarray]]) -> Tuple[th.Tensor, bool]:
"""
Convert an input observation to a PyTorch tensor that can be fed to a model.
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:return: The observation as PyTorch tensor
and whether the observation is vectorized or not
"""
vectorized_env = False
if isinstance(observation, dict):
# need to copy the dict as the dict in VecFrameStack will become a torch tensor
observation = copy.deepcopy(observation)
for key, obs in observation.items():
if key in self.observation_space.spaces.keys():
obs_space = self.observation_space.spaces[key]
if is_image_space(obs_space):
obs_ = maybe_transpose(obs, obs_space)
else:
obs_ = np.array(obs)
if obs_.shape == ():
obs_ = obs_.reshape(-1)
vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space)
# Add batch dimension if needed
observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape)
elif is_image_space(self.observation_space):
# Handle the different cases for images
# as PyTorch use channel first format
observation = maybe_transpose(observation, self.observation_space)
else:
observation = np.array(observation)
if not isinstance(observation, dict):
# Dict obs need to be handled separately
vectorized_env = is_vectorized_observation(observation, self.observation_space)
# Add batch dimension if needed
observation = observation.reshape((-1,) + self.observation_space.shape)
observation = obs_as_tensor(observation, self.device)
return observation, vectorized_env
class BasePolicy(BaseModel, ABC):
"""The base policy object.
Parameters are mostly the same as `BaseModel`; additions are documented below.
:param args: positional arguments passed through to `BaseModel`.
:param kwargs: keyword arguments passed through to `BaseModel`.
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function.
"""
def __init__(self, *args, squash_output: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self._squash_output = squash_output
@staticmethod
def _dummy_schedule(progress_remaining: float) -> float:
"""(float) Useful for pickling policy."""
del progress_remaining
return 0.0
@property
def squash_output(self) -> bool:
"""(bool) Getter for squash_output."""
return self._squash_output
@staticmethod
def init_weights(module: nn.Module, gain: float = 1) -> None:
"""
Orthogonal initialization (used in PPO and A2C)
"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.orthogonal_(module.weight, gain=gain)
if module.bias is not None:
module.bias.data.fill_(0.0)
@abstractmethod
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
By default provides a dummy implementation -- not all BasePolicy classes
implement this, e.g. if they are a Critic in an Actor-Critic method.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[Tuple[np.ndarray, ...]] = None,
episode_start: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
"""
Get the policy action from an observation (and optional hidden state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last hidden states (can be None, used in recurrent policies)
:param episode_start: The last masks (can be None, used in recurrent policies)
            this corresponds to the beginning of episodes,
where the hidden states of the RNN must be reset.
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next hidden state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if episode_start is None:
# episode_start = [False for _ in range(self.n_envs)]
# Switch to eval mode (this affects batch norm / dropout)
self.set_training_mode(False)
observation, vectorized_env = self.obs_to_tensor(observation)
with th.no_grad():
actions = self._predict(observation, deterministic=deterministic)
# Convert to numpy, and reshape to the original action shape
actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
# Remove batch dimension if needed
if not vectorized_env:
actions = actions.squeeze(axis=0)
return actions, state
def scale_action(self, action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [low, high] to [-1, 1]
(no need for symmetric action space)
:param action: Action to scale
:return: Scaled action
"""
low, high = self.action_space.low, self.action_space.high
return 2.0 * ((action - low) / (high - low)) - 1.0
def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [-1, 1] to [low, high]
(no need for symmetric action space)
:param scaled_action: Action to un-scale
"""
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))
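    # Worked example (illustrative, not part of the original code): for a Box space with
    # low=0.0 and high=10.0, scale_action(7.5) == 2.0 * (7.5 - 0.0) / (10.0 - 0.0) - 1.0 == 0.5,
    # and unscale_action(0.5) == 0.0 + 0.5 * (0.5 + 1.0) * (10.0 - 0.0) == 7.5.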
class ActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super().__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
self.log_std_init = log_std_init
dist_kwargs = None
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
"full_std": full_std,
"squash_output": squash_output,
"use_expln": use_expln,
"learn_features": False,
}
if sde_net_arch is not None:
warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=default_none_kwargs["squash_output"],
full_std=default_none_kwargs["full_std"],
use_expln=default_none_kwargs["use_expln"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs:
"""
assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE"
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
actions = actions.reshape((-1,) + self.action_space.shape)
return actions, values, log_prob
def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_pi)
else:
raise ValueError("Invalid action distribution")
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
return self.get_distribution(observation).get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
distribution = self._get_action_dist_from_latent(latent_pi)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
def get_distribution(self, obs: th.Tensor) -> Distribution:
"""
Get the current policy distribution given the observations.
:param obs:
:return: the action distribution.
"""
features = self.extract_features(obs)
latent_pi = self.mlp_extractor.forward_actor(features)
return self._get_action_dist_from_latent(latent_pi)
def predict_values(self, obs: th.Tensor) -> th.Tensor:
"""
Get the estimated values according to the current policy given the observations.
:param obs:
:return: the estimated values.
"""
features = self.extract_features(obs)
latent_vf = self.mlp_extractor.forward_critic(features)
return self.value_net(latent_vf)
class ActorCriticCnnPolicy(ActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class MultiInputActorCriticPolicy(ActorCriticPolicy):
"""
    MultiInputActorCriticPolicy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space (Tuple)
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Uses the CombinedExtractor
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Dict,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class ContinuousCritic(BaseModel):
"""
Critic network(s) for DDPG/SAC/TD3.
It represents the action-state value function (Q-value function).
Compared to A2C/PPO critics, this one represents the Q-value
and takes the continuous action as input. It is concatenated with the state
and then fed to the network which outputs a single value: Q(s, a).
For more recent algorithms like SAC/TD3, multiple networks
are created to give different estimates.
By default, it creates two critic networks used to reduce overestimation
thanks to clipped Q-learning (cf TD3 paper).
    :param observation_space: Observation space
:param action_space: Action space
:param net_arch: Network architecture
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param features_dim: Number of features
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param n_critics: Number of critic networks to create.
:param share_features_extractor: Whether the features extractor is shared or not
between the actor and the critic (this saves computation time)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
net_arch: List[int],
features_extractor: nn.Module,
features_dim: int,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
n_critics: int = 2,
share_features_extractor: bool = True,
):
super().__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
action_dim = get_action_dim(self.action_space)
self.share_features_extractor = share_features_extractor
self.n_critics = n_critics
self.q_networks = []
for idx in range(n_critics):
q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)
q_net = nn.Sequential(*q_net)
self.add_module(f"qf{idx}", q_net)
self.q_networks.append(q_net)
def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:
# Learn the features extractor using the policy loss only
# when the features_extractor is shared with the actor
with th.set_grad_enabled(not self.share_features_extractor):
features = self.extract_features(obs)
qvalue_input = th.cat([features, actions], dim=1)
return tuple(q_net(qvalue_input) for q_net in self.q_networks)
def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
"""
Only predict the Q-value using the first network.
This allows to reduce computation when all the estimates are not needed
(e.g. when updating the policy in TD3).
"""
with th.no_grad():
features = self.extract_features(obs)
return self.q_networks[0](th.cat([features, actions], dim=1))
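    # Illustrative sketch (assumption, not part of the library): TD3/SAC reduce
    # overestimation by taking the element-wise minimum over the critic ensemble, e.g.
    #     q1, q2 = critic(next_obs, next_actions)
    #     target_q = reward + gamma * th.min(q1, q2)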
| [] |
2024-01-10 | EdinburghClinicalNLP/chatgpt_icd_coding | scripts~inference.py | import argparse
import os
import sys
sys.path.append(os.getcwd())
import openai
import pandas as pd
from dotenv import load_dotenv
load_dotenv("env/.env")
from src import utils
from src.api import ChatGPTCall
from src.configs import Configs
def parse_configs() -> Configs:
parser = argparse.ArgumentParser()
parser.add_argument("--config_filepath", type=str, required=True)
parser.add_argument("--existing_output_dir", type=str)
args = parser.parse_args()
configs = Configs(**utils.load_yaml(args.config_filepath))
return configs, args
def main():
configs, args = parse_configs()
utils.setup_random_seed(configs.training_configs.random_seed)
if not args.existing_output_dir:
# Setup experiment folder to store config file used for the API call and the predictions
outputs_dir = utils.setup_experiment_folder(
os.path.join(os.getcwd(), configs.training_configs.outputs_dir)
)
utils.save_training_configs(configs, outputs_dir)
else:
outputs_dir = args.existing_output_dir
# Setup OpenAI API configs
openai.api_type = "azure"
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
openai.api_version = configs.api_configs.api_version
dataset = pd.read_csv(configs.training_configs.dataset_path)
print(f"Predicting {len(dataset)} test clinical notes")
chatgpt_api = ChatGPTCall(configs, outputs_dir)
chatgpt_outputs = chatgpt_api.predict(dataset)
print("Prediction finished!")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | apigpt-ai/langchain-cn | libs~langchain~langchain~vectorstores~qdrant.py | """Wrapper around Qdrant vector database."""
from __future__ import annotations
import asyncio
import functools
import uuid
import warnings
from itertools import islice
from operator import itemgetter
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from qdrant_client import grpc # noqa
from qdrant_client.conversions import common_types
from qdrant_client.http import models as rest
DictFilter = Dict[str, Union[str, int, bool, dict, list]]
MetadataFilter = Union[DictFilter, common_types.Filter]
class QdrantException(Exception):
"""Base class for all the Qdrant related exceptions"""
def sync_call_fallback(method: Callable) -> Callable:
"""
Decorator to call the synchronous method of the class if the async method is not
    implemented. This decorator should only be used for methods that are
    defined as async in the class.
"""
@functools.wraps(method)
async def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
try:
return await method(self, *args, **kwargs)
except NotImplementedError:
# If the async method is not implemented, call the synchronous method
# by removing the first letter from the method name. For example,
            # if the async method is called ``aadd_texts``, the synchronous method
            # will be called ``add_texts``.
sync_method = functools.partial(
getattr(self, method.__name__[1:]), *args, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, sync_method)
return wrapper
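# Illustrative use of the decorator above (a sketch, not part of the original file):
# an async method that raises NotImplementedError falls back to its synchronous
# counterpart, which is run in a thread executor:
#
#     @sync_call_fallback
#     async def aadd_texts(self, texts):
#         raise NotImplementedError  # -> falls back to self.add_texts(texts)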
class Qdrant(VectorStore):
"""Wrapper around Qdrant vector database.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from qdrant_client import QdrantClient
from langchain import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
            qdrant = Qdrant(client, collection_name, embeddings)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
VECTOR_NAME = None
def __init__(
self,
client: Any,
collection_name: str,
embeddings: Optional[Embeddings] = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
distance_strategy: str = "COSINE",
vector_name: Optional[str] = VECTOR_NAME,
embedding_function: Optional[Callable] = None, # deprecated
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
if embeddings is None and embedding_function is None:
raise ValueError(
"`embeddings` value can't be None. Pass `Embeddings` instance."
)
if embeddings is not None and embedding_function is not None:
raise ValueError(
"Both `embeddings` and `embedding_function` are passed. "
"Use `embeddings` only."
)
self._embeddings = embeddings
self._embeddings_function = embedding_function
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
self.vector_name = vector_name or self.VECTOR_NAME
if embedding_function is not None:
warnings.warn(
"Using `embedding_function` is deprecated. "
"Pass `Embeddings` instance to `embeddings` instead."
)
if not isinstance(embeddings, Embeddings):
warnings.warn(
"`embeddings` should be an instance of `Embeddings`."
"Using `embeddings` as `embedding_function` which is deprecated"
)
self._embeddings_function = embeddings
self._embeddings = None
self.distance_strategy = distance_strategy.upper()
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embeddings
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors upload per-request.
Default: 64
Returns:
List of ids from adding the texts into the vectorstore.
"""
added_ids = []
for batch_ids, points in self._generate_rest_batches(
texts, metadatas, ids, batch_size
):
self.client.upsert(
collection_name=self.collection_name, points=points, **kwargs
)
added_ids.extend(batch_ids)
return added_ids
@sync_call_fallback
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors upload per-request.
Default: 64
Returns:
List of ids from adding the texts into the vectorstore.
"""
from qdrant_client import grpc # noqa
from qdrant_client.conversions.conversion import RestToGrpc
added_ids = []
for batch_ids, points in self._generate_rest_batches(
texts, metadatas, ids, batch_size
):
await self.client.async_grpc_points.Upsert(
grpc.UpsertPoints(
collection_name=self.collection_name,
points=[RestToGrpc.convert_point_struct(point) for point in points],
)
)
added_ids.extend(batch_ids)
return added_ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(
query,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
results = await self.asimilarity_search_with_score(query, k, filter, **kwargs)
return list(map(itemgetter(0), results))
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of documents most similar to the query text and distance for each.
"""
return self.similarity_search_with_score_by_vector(
self._embed_query(query),
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
@sync_call_fallback
async def asimilarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of documents most similar to the query text and distance for each.
"""
return await self.asimilarity_search_with_score_by_vector(
self._embed_query(query),
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of Documents most similar to the query.
"""
results = await self.asimilarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of documents most similar to the query text and distance for each.
"""
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, embedding) # type: ignore[assignment]
results = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=qdrant_filter,
search_params=search_params,
limit=k,
offset=offset,
with_payload=True,
with_vectors=False, # Langchain does not expect vectors to be returned
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
@sync_call_fallback
async def asimilarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[MetadataFilter] = None,
search_params: Optional[common_types.SearchParams] = None,
offset: int = 0,
score_threshold: Optional[float] = None,
consistency: Optional[common_types.ReadConsistency] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
Returns:
List of documents most similar to the query text and distance for each.
"""
from qdrant_client import grpc # noqa
from qdrant_client.conversions.conversion import RestToGrpc
from qdrant_client.http import models as rest
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
if qdrant_filter is not None and isinstance(qdrant_filter, rest.Filter):
qdrant_filter = RestToGrpc.convert_filter(qdrant_filter)
response = await self.client.async_grpc_points.Search(
grpc.SearchPoints(
collection_name=self.collection_name,
vector_name=self.vector_name,
vector=embedding,
filter=qdrant_filter,
params=search_params,
limit=k,
offset=offset,
with_payload=grpc.WithPayloadSelector(enable=True),
with_vectors=grpc.WithVectorsSelector(enable=False),
score_threshold=score_threshold,
read_consistency=consistency,
**kwargs,
)
)
return [
(
self._document_from_scored_point_grpc(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in response.result
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
query_embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(
query_embedding, k, fetch_k, lambda_mult, **kwargs
)
@sync_call_fallback
async def amax_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
query_embedding = self._embed_query(query)
return await self.amax_marginal_relevance_search_by_vector(
query_embedding, k, fetch_k, lambda_mult, **kwargs
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self.max_marginal_relevance_search_with_score_by_vector(
embedding=embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
results = await self.amax_marginal_relevance_search_with_score_by_vector(
embedding, k, fetch_k, lambda_mult, **kwargs
)
return list(map(itemgetter(0), results))
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, query_vector) # type: ignore[assignment]
results = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
with_payload=True,
with_vectors=True,
limit=fetch_k,
)
embeddings = [
result.vector.get(self.vector_name) # type: ignore[index, union-attr]
if self.vector_name is not None
else result.vector
for result in results
]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
(
self._document_from_scored_point(
results[i], self.content_payload_key, self.metadata_payload_key
),
results[i].score,
)
for i in mmr_selected
]
@sync_call_fallback
async def amax_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
from qdrant_client import grpc # noqa
from qdrant_client.conversions.conversion import GrpcToRest
response = await self.client.async_grpc_points.Search(
grpc.SearchPoints(
collection_name=self.collection_name,
vector_name=self.vector_name,
vector=embedding,
with_payload=grpc.WithPayloadSelector(enable=True),
with_vectors=grpc.WithVectorsSelector(enable=True),
limit=fetch_k,
)
)
results = [
GrpcToRest.convert_vectors(result.vectors) for result in response.result
]
embeddings: List[List[float]] = [
result.get(self.vector_name) # type: ignore
if isinstance(result, dict)
else result
for result in results
]
mmr_selected: List[int] = maximal_marginal_relevance(
np.array(embedding),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
return [
(
self._document_from_scored_point_grpc(
response.result[i],
self.content_payload_key,
self.metadata_payload_key,
),
response.result[i].score,
)
for i in mmr_selected
]
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
from qdrant_client.http import models as rest
result = self.client.delete(
collection_name=self.collection_name,
points_selector=ids,
)
return result.status == rest.UpdateStatus.COMPLETED
@classmethod
def from_texts(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
batch_size: int = 64,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
                If true - use gRPC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS(SSL) protocol. Default: None
api_key: API key for authentication in Qdrant Cloud. Default: None
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key:
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
vector_name:
Name of the vector to be used internally in Qdrant.
Default: None
batch_size:
How many vectors upload per-request.
Default: 64
shard_number: Number of shards in collection. Default is 1, minimum is 1.
replication_factor:
Replication factor for collection. Default is 1, minimum is 1.
Defines how many copies of each shard will be created.
Have effect only in distributed mode.
write_consistency_factor:
Write consistency factor for collection. Default is 1, minimum is 1.
Defines how many replicas should apply the operation for us to consider
it successful. Increasing this number will make the collection more
resilient to inconsistencies, but will also make it fail if not enough
replicas are available.
Does not have any performance impact.
Have effect only in distributed mode.
on_disk_payload:
                If true - point's payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
hnsw_config: Params for HNSW index
optimizers_config: Params for optimizer
wal_config: Params for Write-Ahead-Log
quantization_config:
Params for quantization, if None - quantization will be disabled
init_from:
Use data stored in another collection to initialize this collection
force_recreate:
Force recreating the collection
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
"""
qdrant = cls._construct_instance(
texts,
embedding,
metadatas,
ids,
location,
url,
port,
grpc_port,
prefer_grpc,
https,
api_key,
prefix,
timeout,
host,
path,
collection_name,
distance_func,
content_payload_key,
metadata_payload_key,
vector_name,
shard_number,
replication_factor,
write_consistency_factor,
on_disk_payload,
hnsw_config,
optimizers_config,
wal_config,
quantization_config,
init_from,
force_recreate,
**kwargs,
)
qdrant.add_texts(texts, metadatas, ids, batch_size)
return qdrant
@classmethod
@sync_call_fallback
async def afrom_texts(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
batch_size: int = 64,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If `:memory:` - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
                If true - use gRPC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS(SSL) protocol. Default: None
api_key: API key for authentication in Qdrant Cloud. Default: None
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key:
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
vector_name:
Name of the vector to be used internally in Qdrant.
Default: None
batch_size:
How many vectors upload per-request.
Default: 64
shard_number: Number of shards in collection. Default is 1, minimum is 1.
replication_factor:
Replication factor for collection. Default is 1, minimum is 1.
Defines how many copies of each shard will be created.
Have effect only in distributed mode.
write_consistency_factor:
Write consistency factor for collection. Default is 1, minimum is 1.
Defines how many replicas should apply the operation for us to consider
it successful. Increasing this number will make the collection more
resilient to inconsistencies, but will also make it fail if not enough
replicas are available.
Does not have any performance impact.
Have effect only in distributed mode.
on_disk_payload:
                If true - point's payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
hnsw_config: Params for HNSW index
optimizers_config: Params for optimizer
wal_config: Params for Write-Ahead-Log
quantization_config:
Params for quantization, if None - quantization will be disabled
init_from:
Use data stored in another collection to initialize this collection
force_recreate:
Force recreating the collection
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = await Qdrant.afrom_texts(texts, embeddings, "localhost")
"""
qdrant = cls._construct_instance(
texts,
embedding,
metadatas,
ids,
location,
url,
port,
grpc_port,
prefer_grpc,
https,
api_key,
prefix,
timeout,
host,
path,
collection_name,
distance_func,
content_payload_key,
metadata_payload_key,
vector_name,
shard_number,
replication_factor,
write_consistency_factor,
on_disk_payload,
hnsw_config,
optimizers_config,
wal_config,
quantization_config,
init_from,
force_recreate,
**kwargs,
)
await qdrant.aadd_texts(texts, metadatas, ids, batch_size)
return qdrant
@classmethod
def _construct_instance(
cls: Type[Qdrant],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
location: Optional[str] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
path: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: Optional[str] = VECTOR_NAME,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[common_types.HnswConfigDiff] = None,
optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
wal_config: Optional[common_types.WalConfigDiff] = None,
quantization_config: Optional[common_types.QuantizationConfig] = None,
init_from: Optional[common_types.InitFrom] = None,
force_recreate: bool = False,
**kwargs: Any,
) -> Qdrant:
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from grpc import RpcError
from qdrant_client.http import models as rest
from qdrant_client.http.exceptions import UnexpectedResponse
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
try:
# Skip any validation in case of forced collection recreate.
if force_recreate:
raise ValueError
# Get the vector configuration of the existing collection and vector, if it
# was specified. If the old configuration does not match the current one,
# an exception is being thrown.
collection_info = client.get_collection(collection_name=collection_name)
current_vector_config = collection_info.config.params.vectors
if isinstance(current_vector_config, dict) and vector_name is not None:
if vector_name not in current_vector_config:
raise QdrantException(
f"Existing Qdrant collection {collection_name} does not "
f"contain vector named {vector_name}. Did you mean one of the "
f"existing vectors: {', '.join(current_vector_config.keys())}? "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_vector_config = current_vector_config.get(
vector_name
) # type: ignore[assignment]
elif isinstance(current_vector_config, dict) and vector_name is None:
raise QdrantException(
f"Existing Qdrant collection {collection_name} uses named vectors. "
f"If you want to reuse it, please set `vector_name` to any of the "
f"existing named vectors: "
f"{', '.join(current_vector_config.keys())}." # noqa
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
elif (
not isinstance(current_vector_config, dict) and vector_name is not None
):
raise QdrantException(
f"Existing Qdrant collection {collection_name} doesn't use named "
f"vectors. If you want to reuse it, please set `vector_name` to "
f"`None`. If you want to recreate the collection, set "
f"`force_recreate` parameter to `True`."
)
# Check if the vector configuration has the same dimensionality.
if current_vector_config.size != vector_size: # type: ignore[union-attr]
raise QdrantException(
f"Existing Qdrant collection is configured for vectors with "
f"{current_vector_config.size} " # type: ignore[union-attr]
f"dimensions. Selected embeddings are {vector_size}-dimensional. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
current_distance_func = (
current_vector_config.distance.name.upper() # type: ignore[union-attr]
)
if current_distance_func != distance_func:
raise QdrantException(
f"Existing Qdrant collection is configured for "
f"{current_vector_config.distance} " # type: ignore[union-attr]
f"similarity. Please set `distance_func` parameter to "
f"`{distance_func}` if you want to reuse it. If you want to "
f"recreate the collection, set `force_recreate` parameter to "
f"`True`."
)
except (UnexpectedResponse, RpcError, ValueError):
vectors_config = rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
)
# If vector name was provided, we're going to use the named vectors feature
# with just a single vector.
if vector_name is not None:
vectors_config = { # type: ignore[assignment]
vector_name: vectors_config,
}
client.recreate_collection(
collection_name=collection_name,
vectors_config=vectors_config,
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
init_from=init_from,
timeout=timeout, # type: ignore[arg-type]
)
qdrant = cls(
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
distance_strategy=distance_func,
vector_name=vector_name,
)
return qdrant
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.distance_strategy == "COSINE":
return self._cosine_relevance_score_fn
elif self.distance_strategy == "DOT":
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == "EUCLID":
return self._euclidean_relevance_score_fn
else:
raise ValueError(
"Unknown distance strategy, must be cosine, "
"max_inner_product, or euclidean"
)
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
"""
return self.similarity_search_with_score(query, k, **kwargs)
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
@classmethod
def _document_from_scored_point_grpc(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
from qdrant_client.conversions.conversion import grpc_to_payload
payload = grpc_to_payload(scored_point.payload)
return Document(
page_content=payload[content_payload_key],
metadata=payload.get(metadata_payload_key) or {},
)
def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]:
from qdrant_client.http import models as rest
out = []
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition(f"{key}.{_key}", value))
elif isinstance(value, list):
for _value in value:
if isinstance(_value, dict):
out.extend(self._build_condition(f"{key}[]", _value))
else:
out.extend(self._build_condition(f"{key}", _value))
else:
out.append(
rest.FieldCondition(
key=f"{self.metadata_payload_key}.{key}",
match=rest.MatchValue(value=value),
)
)
return out
def _qdrant_filter_from_dict(
self, filter: Optional[DictFilter]
) -> Optional[rest.Filter]:
from qdrant_client.http import models as rest
if not filter:
return None
return rest.Filter(
must=[
condition
for key, value in filter.items()
for condition in self._build_condition(key, value)
]
)
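    # Illustrative conversion (a sketch, not from the original source): with the
    # default metadata_payload_key, a dict filter like {"page": 1} becomes
    #   rest.Filter(must=[rest.FieldCondition(key="metadata.page",
    #                                         match=rest.MatchValue(value=1))])
    # Nested dicts and lists are flattened by _build_condition into dotted keys.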
def _embed_query(self, query: str) -> List[float]:
"""Embed query text.
Used to provide backward compatibility with `embedding_function` argument.
Args:
query: Query text.
Returns:
List of floats representing the query embedding.
"""
if self.embeddings is not None:
embedding = self.embeddings.embed_query(query)
else:
if self._embeddings_function is not None:
embedding = self._embeddings_function(query)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embedding.tolist() if hasattr(embedding, "tolist") else embedding
def _embed_texts(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns:
List of floats representing the texts embedding.
"""
if self.embeddings is not None:
embeddings = self.embeddings.embed_documents(list(texts))
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist()
elif self._embeddings_function is not None:
embeddings = []
for text in texts:
embedding = self._embeddings_function(text)
                if hasattr(embedding, "tolist"):
embedding = embedding.tolist()
embeddings.append(embedding)
else:
raise ValueError("Neither of embeddings or embedding_function is set")
return embeddings
def _generate_rest_batches(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[Sequence[str]] = None,
batch_size: int = 64,
) -> Generator[Tuple[List[str], List[rest.PointStruct]], None, None]:
from qdrant_client.http import models as rest
texts_iterator = iter(texts)
metadatas_iterator = iter(metadatas or [])
ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
while batch_texts := list(islice(texts_iterator, batch_size)):
# Take the corresponding metadata and id for each text in a batch
batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
batch_ids = list(islice(ids_iterator, batch_size))
# Generate the embeddings for all the texts in a batch
batch_embeddings = self._embed_texts(batch_texts)
points = [
rest.PointStruct(
id=point_id,
vector=vector
if self.vector_name is None
else {self.vector_name: vector},
payload=payload,
)
for point_id, vector, payload in zip(
batch_ids,
batch_embeddings,
self._build_payloads(
batch_texts,
batch_metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
)
]
yield batch_ids, points
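    # Illustrative behaviour (an assumption based on the islice batching above, not a
    # statement from the original source): with 150 texts and the default batch_size
    # of 64, this generator yields three (ids, points) batches of sizes 64, 64 and 22,
    # calling the embedding model once per batch.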
| [] |
2024-01-10 | apigpt-ai/langchain-cn | libs~langchain~tests~integration_tests~vectorstores~qdrant~async_api~test_from_texts.py | import uuid
from typing import Optional
import pytest
from langchain.schema import Document
from langchain.vectorstores import Qdrant
from langchain.vectorstores.qdrant import QdrantException
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import (
qdrant_locations,
)
from tests.integration_tests.vectorstores.qdrant.common import qdrant_is_not_running
@pytest.mark.asyncio
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_from_texts_stores_duplicated_texts(qdrant_location: str) -> None:
"""Test end to end Qdrant.afrom_texts stores duplicated texts separately."""
collection_name = uuid.uuid4().hex
vec_store = await Qdrant.afrom_texts(
["abc", "abc"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
location=qdrant_location,
)
client = vec_store.client
assert 2 == client.count(collection_name).count
@pytest.mark.asyncio
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("vector_name", [None, "my-vector"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_from_texts_stores_ids(
batch_size: int, vector_name: Optional[str], qdrant_location: str
) -> None:
"""Test end to end Qdrant.afrom_texts stores provided ids."""
collection_name = uuid.uuid4().hex
ids = [
"fa38d572-4c31-4579-aedc-1960d79df6df",
"cdc1aa36-d6ab-4fb2-8a94-56674fd27484",
]
vec_store = await Qdrant.afrom_texts(
["abc", "def"],
ConsistentFakeEmbeddings(),
ids=ids,
collection_name=collection_name,
batch_size=batch_size,
vector_name=vector_name,
location=qdrant_location,
)
client = vec_store.client
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
@pytest.mark.asyncio
@pytest.mark.parametrize("vector_name", ["custom-vector"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_from_texts_stores_embeddings_as_named_vectors(
vector_name: str,
qdrant_location: str,
) -> None:
"""Test end to end Qdrant.afrom_texts stores named vectors if name is provided."""
collection_name = uuid.uuid4().hex
vec_store = await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(),
collection_name=collection_name,
vector_name=vector_name,
location=qdrant_location,
)
client = vec_store.client
assert 5 == client.count(collection_name).count
assert all(
vector_name in point.vector # type: ignore[operator]
for point in client.scroll(collection_name, with_vectors=True)[0]
)
@pytest.mark.asyncio
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_reuses_same_collection(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.afrom_texts reuses the same collection"""
collection_name = uuid.uuid4().hex
embeddings = ConsistentFakeEmbeddings()
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
embeddings,
collection_name=collection_name,
vector_name=vector_name,
)
vec_store = await Qdrant.afrom_texts(
["foo", "bar"],
embeddings,
collection_name=collection_name,
vector_name=vector_name,
)
client = vec_store.client
assert 7 == client.count(collection_name).count
@pytest.mark.asyncio
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_raises_error_on_different_dimensionality(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.afrom_texts raises an exception if dimensionality does not
match"""
collection_name = uuid.uuid4().hex
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
vector_name=vector_name,
)
with pytest.raises(QdrantException):
await Qdrant.afrom_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
vector_name=vector_name,
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
["first_vector_name", "second_vector_name"],
[
(None, "custom-vector"),
("custom-vector", None),
("my-first-vector", "my-second_vector"),
],
)
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_raises_error_on_different_vector_name(
first_vector_name: Optional[str],
second_vector_name: Optional[str],
) -> None:
"""Test if Qdrant.afrom_texts raises an exception if vector name does not match"""
collection_name = uuid.uuid4().hex
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
vector_name=first_vector_name,
)
with pytest.raises(QdrantException):
await Qdrant.afrom_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
vector_name=second_vector_name,
)
@pytest.mark.asyncio
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_raises_error_on_different_distance() -> None:
"""Test if Qdrant.afrom_texts raises an exception if distance does not match"""
collection_name = uuid.uuid4().hex
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
distance_func="Cosine",
)
with pytest.raises(QdrantException):
await Qdrant.afrom_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
distance_func="Euclid",
)
@pytest.mark.asyncio
@pytest.mark.parametrize("vector_name", [None, "custom-vector"])
@pytest.mark.skipif(qdrant_is_not_running(), reason="Qdrant is not running")
async def test_qdrant_from_texts_recreates_collection_on_force_recreate(
vector_name: Optional[str],
) -> None:
"""Test if Qdrant.afrom_texts recreates the collection even if config mismatches"""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
await Qdrant.afrom_texts(
["lorem", "ipsum", "dolor", "sit", "amet"],
ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name,
vector_name=vector_name,
)
await Qdrant.afrom_texts(
["foo", "bar"],
ConsistentFakeEmbeddings(dimensionality=5),
collection_name=collection_name,
vector_name=vector_name,
force_recreate=True,
)
client = QdrantClient()
assert 2 == client.count(collection_name).count
@pytest.mark.asyncio
@pytest.mark.parametrize("batch_size", [1, 64])
@pytest.mark.parametrize("content_payload_key", [Qdrant.CONTENT_KEY, "foo"])
@pytest.mark.parametrize("metadata_payload_key", [Qdrant.METADATA_KEY, "bar"])
@pytest.mark.parametrize("qdrant_location", qdrant_locations())
async def test_qdrant_from_texts_stores_metadatas(
batch_size: int,
content_payload_key: str,
metadata_payload_key: str,
qdrant_location: str,
) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = await Qdrant.afrom_texts(
texts,
ConsistentFakeEmbeddings(),
metadatas=metadatas,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
batch_size=batch_size,
location=qdrant_location,
)
output = await docsearch.asimilarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": 0})]
| [] |
2024-01-10 | gokamoda/TTA4FactualProbing | src~modules~augmenter.py | import pandas as pd
from tqdm import tqdm
import numpy as np
import torch
import itertools
from omegaconf import DictConfig
import torch.nn.functional as F
from collections import OrderedDict
from torch.utils.data import Dataset, DataLoader
import openai
import re
import os
import sys
import time
from textattack.augmentation import Augmenter, WordNetAugmenter
from textattack.transformations import WordSwapEmbedding
import texthero as hero
from modules.models import get_model
tqdm.pandas()
# from pandarallel import pandarallel
# pandarallel.initialize(progress_bar=True)
class PromptDataset(Dataset):
def __init__(self, tokenized_texts, attention_mask):
self.tokenized_texts = tokenized_texts
self.attention_mask = attention_mask
def __len__(self):
return len(self.tokenized_texts)
def __getitem__(self, idx):
return self.tokenized_texts[idx], self.attention_mask[idx]
def split_list(sequence: list, num_cols = None, num_rows = None):
assert sequence != None
sequence_len = len(sequence)
if num_cols != None and num_rows != None:
assert num_cols * num_rows == sequence_len, "need num_cols * num_rows == sequence_len"
if num_cols == None:
assert num_rows != None, "at least one of num_cols or num_rows need to be set"
assert sequence_len % num_rows == 0, "sequence length not multiple of num_rows"
num_cols = int(sequence_len / num_rows)
return [sequence[i:i+num_cols] for i in range(0, sequence_len, num_cols)]
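# Illustrative example (not part of the original module): split_list reshapes a flat list
# into rows, e.g. split_list([0, 1, 2, 3, 4, 5], num_rows=2) -> [[0, 1, 2], [3, 4, 5]],
# while passing num_cols=2 instead would yield [[0, 1], [2, 3], [4, 5]].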
def augment_head(original_df: pd.DataFrame, method_name: str, args: DictConfig) -> pd.DataFrame:
augmenters = {
'back_translation': back_translation,
'word_swapping': word_swapping,
'stopwords_filtering': stopword_filtering,
'openai': openai_paraphrase
}
augmented_prompts: pd.DataFrame = augmenters[method_name](original_df.copy(), **args)
return augmented_prompts
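# Illustrative call (hypothetical values; args is an OmegaConf DictConfig as in the type hint):
#   from omegaconf import OmegaConf
#   args = OmegaConf.create({"num_return_sequences": 1, "label": "stopwords"})
#   augmented_df = augment_head(original_df, "stopwords_filtering", args)
# The result is a DataFrame with fact_id, score, prompt and label columns.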
def openai_paraphrase(original_df: pd.DataFrame, model: str, num_return_sequences: int, label: str) -> pd.DataFrame:
args = {
'model': model,
'max_tokens': 1024,
'temperature': 0.8,
'top_p': 1,
'frequency_penalty': 0,
'presence_penalty': 0
}
format_pattern = re.compile(r'^[0-9]+.(.+)')
openai.api_key = os.environ['CHATGPT_API']
def paraphrase(row):
original_prompt = row['prompt']
prompt = "Would you provide 10 paraphrases for the following question?\n{}".format(original_prompt)
response = None
received = False
while not received:
try:
response = openai.Completion.create(
prompt=prompt,
**args
)
received = True
except:
error = sys.exc_info()[0]
if error == openai.error.InvalidRequestError: # something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(1)
prompts = response["choices"][0]["text"]
prompts = prompts.strip().split('\n')
prompts = [format_pattern.match(prompt).group(1).strip() for prompt in prompts]
n_more_prompts = num_return_sequences - len(prompts)
prompts += ['']*n_more_prompts
assert len(prompts) == num_return_sequences, 'could not get enough augmented prompts'
row['augmented_prompt'] = prompts
return row
original_df = original_df.apply(paraphrase, axis=1)
augmented_prompts = original_df['augmented_prompt'].to_list()
augmented_prompts = list(itertools.chain.from_iterable(augmented_prompts))
assert len(augmented_prompts) == num_return_sequences * original_df.shape[0], 'could not get enough augmented prompts'
fact_id = [x for x in range(0, original_df.shape[0]) for _ in range(num_return_sequences)]
augmented_prompts_df = pd.DataFrame({'fact_id': fact_id, 'score': 1, 'prompt': augmented_prompts, 'label': label})
    return augmented_prompts_df
def word_swapping(original_df: pd.DataFrame, type: str, num_return_sequences: int, label: str) -> pd.DataFrame:
augmenters = {
'wordswap': Augmenter(transformation = WordSwapEmbedding()),
'wordnet': WordNetAugmenter(pct_words_to_swap=0.2)
}
augmenter = augmenters[type]
augmenter.transformations_per_example = num_return_sequences
def augment(row):
augment_result = augmenter.augment(row['prompt'])
augmented_scores = [1]*len(augment_result)
if len(augment_result) < num_return_sequences:
n_more = num_return_sequences - len(augment_result)
augment_result += ['']*n_more
            augmented_scores += [0]*n_more
row['augmented_prompt'] = augment_result
row['augmented_scores'] = augmented_scores
return row
original_df = original_df.apply(augment, axis=1)
augmented_prompts = original_df['augmented_prompt'].to_list()
augmented_prompts = list(itertools.chain.from_iterable(augmented_prompts))
augmented_scores = original_df['augmented_scores'].to_list()
augmented_scores = list(itertools.chain.from_iterable(augmented_scores))
assert len(augmented_prompts) == num_return_sequences * original_df.shape[0], 'could not get enough augmented prompts'
fact_id = [x for x in range(0, original_df.shape[0]) for _ in range(num_return_sequences)]
augmented_prompts_df = pd.DataFrame({'fact_id': fact_id, 'score': augmented_scores, 'prompt': augmented_prompts, 'label': label})
return augmented_prompts_df
def stopword_filtering(original_df: pd.DataFrame, num_return_sequences: int, label: str):
original_prompts = original_df['prompt']
augmented_prompts = hero.remove_stopwords(original_prompts)
augmented_prompts = hero.remove_diacritics(augmented_prompts)
augmented_prompts_df = pd.DataFrame({
'fact_id': list(range(original_df.shape[0])),
'score': 1,
'prompt': augmented_prompts,
'label': label
})
return augmented_prompts_df
def back_translation(original_df: pd.DataFrame, target_language: str, num_return_sequences: int, label: str):
print(original_df.shape[0])
sequence_per_transform = num_return_sequences * 2
lm_src2tar = {
'family': 'marianmt',
'label': 'en-{}'.format(target_language),
'model_path': 'Helsinki-NLP/opus-mt-en-{}'.format(target_language),
'device_map': 'auto'
}
lm_tar2src = {
'family': 'marianmt',
'label': '{}-en'.format(target_language),
'model_path': 'Helsinki-NLP/opus-mt-{}-en'.format(target_language),
'device_map': 'auto'
}
beam_search_args = {
"do_sample": False, # do greedy or greedy beam-search
"output_scores": True,
"return_dict_in_generate": True,
"num_beams": sequence_per_transform, # beam-search if >2
"num_return_sequences": sequence_per_transform, # need to be <= num_beams
"max_new_tokens": 100,
}
original_prompts = original_df['prompt'].to_list()
tar_result = translate(
texts=original_prompts,
model_args=lm_src2tar,
generation_args=beam_search_args,
batch_size=16
)
## tar to src translation
back_result = translate(
tar_result["texts"],
model_args=lm_tar2src,
generation_args=beam_search_args,
batch_size=16
)
## aggregate backtranslation
final_texts = []
final_scores = []
back_result_per_fact_text = split_list(back_result["texts"], num_cols=sequence_per_transform ** 2)
back_result_per_fact_score = np.reshape(back_result["scores"], [-1, sequence_per_transform])
back_result_per_fact_score = np.reshape((back_result_per_fact_score.T * tar_result["scores"]).T, [-1, sequence_per_transform**2])
for i in range(len(back_result_per_fact_text)):
aggregated_backtranslation = aggregate(
texts = back_result_per_fact_text[i],
scores=back_result_per_fact_score[i],
score_max= 1.0
)
if len(aggregated_backtranslation["texts"]) < num_return_sequences:
n_more = num_return_sequences - len(aggregated_backtranslation["texts"])
aggregated_backtranslation["texts"] += [""]*n_more
aggregated_backtranslation["scores"] += [0.0]*n_more
final_texts += aggregated_backtranslation["texts"][:num_return_sequences]
final_scores += aggregated_backtranslation["scores"][:num_return_sequences]
fact_id = [x for x in range(0, original_df.shape[0]) for _ in range(num_return_sequences)]
augmented_prompts_df = pd.DataFrame({
'fact_id': fact_id,
'score': final_scores,
'prompt': final_texts,
'label': label
})
return augmented_prompts_df
def translate(
texts: list,
model_args: dict,
generation_args: dict,
batch_size: int
):
model, tokenizer = get_model(model_args)
result = generate(
texts=texts,
model=model,
tokenizer=tokenizer,
generation_args=generation_args,
batch_size=batch_size
)
return result
def generate(
texts: list,
model,
tokenizer,
generation_args: dict,
batch_size: int
):
tokenized_inputs = tokenizer(
texts,
return_tensors="pt",
padding=True
)
prompt_dataset = PromptDataset(tokenized_inputs.input_ids, tokenized_inputs.attention_mask)
prompt_loader = DataLoader(prompt_dataset, batch_size=batch_size, shuffle=False)
generated_tensors = []
generation_scores = []
model.eval()
for _, batch in enumerate(tqdm(prompt_loader)):
input_ids, attention_mask = batch
output = model.generate(
input_ids = input_ids.to(model.device),
attention_mask = attention_mask.to(model.device),
**generation_args
)
generated_tensors.append(tokenizer.batch_decode(output.sequences, skip_special_tokens=True))
generation_scores.append(output.sequences_scores.to("cpu").detach().numpy())
generated_tensors = list(itertools.chain.from_iterable(generated_tensors))
generation_scores = np.exp(np.hstack(generation_scores))
return {"texts": generated_tensors, "scores": generation_scores}
def aggregate(texts: list, scores:list, score_max=None, method = 'sum'):
if score_max != None:
assert isinstance(score_max, float), "score_max need to be a float"
aggregated_result = {}
for text, score in zip(texts, scores):
if method == 'sum':
aggregated_result[text] = aggregated_result.get(text, 0) + score
elif method == 'count':
aggregated_result[text] = aggregated_result.get(text, 0) + 1
aggregated_result = OrderedDict(sorted(aggregated_result.items(), key=lambda x: x[1], reverse=True))
text_list = list(aggregated_result.keys())
score_list = list(aggregated_result.values())
if score_max != None:
score_list = list(score_list / score_list[0] * score_max)
return {"texts": text_list, "scores": score_list} | [
"augmented_prompt",
"\n",
"Would you provide 10 paraphrases for the following question?\nPLACEHOLDER"
] |
2024-01-10 | Shubhamsaboo/kairos_gpt3 | GPT-3_Sandbox~email_generation~model_training_service.py | from training_data import defaultPrompt
from const import API_KEY
import openai
def set_openai_key(key):
"""Sets OpenAI key."""
openai.api_key = key
class Code:
def __init__(self):
        print("Model Initialization--->")
#set_openai_key(API_KEY)
def query(self, prompt, myKwargs={}):
"""
wrapper for the API to save the prompt and the result
"""
# arguments to send the API
kwargs = {
"engine": "text-davinci-003",
"temperature": 0.50,
"max_tokens": 70,
"best_of": 2,
"stop": ["Input:"]
}
for kwarg in myKwargs:
kwargs[kwarg] = myKwargs[kwarg]
r = openai.Completion.create(prompt=prompt, **kwargs)["choices"][0]["text"].strip()
return r
def model_prediction(self, input, api_key):
"""
wrapper for the API to save the prompt and the result
"""
# Setting the OpenAI API key got from the OpenAI dashboard
set_openai_key(api_key)
output = self.query(defaultPrompt.format(input))
return output | [] |
2024-01-10 | kalebbroo/Str-AI-hd_von_ZaroBot | strahd.py | import os
import discord
from discord.ext import commands
from dotenv import load_dotenv
import openai
from discord.ext.commands.errors import ExtensionAlreadyLoaded
load_dotenv()
token = os.getenv('DISCORD_TOKEN')
#gpt_token = os.getenv('GPT_TOKEN')
intents = discord.Intents.all()
bot = commands.Bot(command_prefix='!', intents=intents)
#openai.api_key = gpt_token
#openai.Model.list()
async def load_extensions():
for filename in os.listdir('./core'):
if filename.endswith('.py'):
try:
await bot.load_extension(f'core.{filename[:-3]}')
except ExtensionAlreadyLoaded:
pass
@bot.event
async def on_ready():
print(f"Rising from the grave as {bot.user.name}")
await load_extensions()
fmt = await bot.tree.sync()
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=f"with your mind"))
#print(f"synced {len(fmt)} commands")
print(f"Loaded: {len(bot.cogs)} core files")
@bot.event
async def on_command_error(ctx, error):
# handle your errors here
if isinstance(error, commands.CommandNotFound):
await ctx.send(f"Command not found. Use {bot.command_prefix}help to see available commands.")
else:
print(f'Error occurred: {error}')
if __name__ == "__main__":
bot.run(token) | [] |
2024-01-10 | deyh2020/scikit-llm | skllm~models~gpt_zero_shot_clf.py | from typing import Optional, Union, List, Any
import numpy as np
import pandas as pd
from collections import Counter
import random
from tqdm import tqdm
from abc import ABC, abstractmethod
from sklearn.base import BaseEstimator, ClassifierMixin
from skllm.openai.prompts import get_zero_shot_prompt_slc, get_zero_shot_prompt_mlc
from skllm.openai.chatgpt import (
construct_message,
get_chat_completion,
extract_json_key,
)
from skllm.config import SKLLMConfig as _Config
from skllm.utils import to_numpy as _to_numpy
from skllm.openai.mixin import OpenAIMixin as _OAIMixin
class _BaseZeroShotGPTClassifier(ABC, BaseEstimator, ClassifierMixin, _OAIMixin):
def __init__(
self,
openai_key: Optional[str] = None,
openai_org: Optional[str] = None,
openai_model: str = "gpt-3.5-turbo",
):
self._set_keys(openai_key, openai_org)
self.openai_model = openai_model
def _to_np(self, X):
return _to_numpy(X)
def fit(
self,
X: Optional[Union[np.ndarray, pd.Series, List[str]]],
y: Union[np.ndarray, pd.Series, List[str], List[List[str]]],
):
X = self._to_np(X)
self.classes_, self.probabilities_ = self._get_unique_targets(y)
return self
def predict(self, X: Union[np.ndarray, pd.Series, List[str]]):
X = self._to_np(X)
predictions = []
for i in tqdm(range(len(X))):
predictions.append(self._predict_single(X[i]))
return predictions
@abstractmethod
def _extract_labels(self, y: Any) -> List[str]:
pass
def _get_unique_targets(self, y):
labels = self._extract_labels(y)
counts = Counter(labels)
total = sum(counts.values())
classes, probs = [], []
for l, c in counts.items():
classes.append(l)
probs.append(c / total)
return classes, probs
def _get_chat_completion(self, x):
prompt = self._get_prompt(x)
msgs = []
msgs.append(construct_message("system", "You are a text classification model."))
msgs.append(construct_message("user", prompt))
completion = get_chat_completion(
msgs, self._get_openai_key(), self._get_openai_org(), self.openai_model
)
return completion
class ZeroShotGPTClassifier(_BaseZeroShotGPTClassifier):
def __init__(
self,
openai_key: Optional[str] = None,
openai_org: Optional[str] = None,
openai_model: str = "gpt-3.5-turbo",
):
super().__init__(openai_key, openai_org, openai_model)
def _extract_labels(self, y: Any) -> List[str]:
if isinstance(y, (pd.Series, np.ndarray)):
labels = y.tolist()
else:
labels = y
return labels
def _get_prompt(self, x) -> str:
return get_zero_shot_prompt_slc(x, self.classes_)
def _predict_single(self, x):
completion = self._get_chat_completion(x)
try:
label = str(
extract_json_key(completion.choices[0].message["content"], "label")
)
except Exception as e:
label = ""
if label not in self.classes_:
label = random.choices(self.classes_, self.probabilities_)[0]
return label
def fit(
self,
X: Optional[Union[np.ndarray, pd.Series, List[str]]],
y: Union[np.ndarray, pd.Series, List[str]],
):
y = self._to_np(y)
return super().fit(X, y)
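# Illustrative usage sketch (not part of the original file; assumes a valid OpenAI key):
#   clf = ZeroShotGPTClassifier(openai_key="sk-...", openai_model="gpt-3.5-turbo")
#   clf.fit(["great room", "rude staff"], ["positive", "negative"])
#   clf.predict(["the bed was very comfortable"])  # -> e.g. ["positive"]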
class MultiLabelZeroShotGPTClassifier(_BaseZeroShotGPTClassifier):
def __init__(
self,
openai_key: Optional[str] = None,
openai_org: Optional[str] = None,
openai_model: str = "gpt-3.5-turbo",
max_labels: int = 3,
):
super().__init__(openai_key, openai_org, openai_model)
if max_labels < 2:
raise ValueError("max_labels should be at least 2")
self.max_labels = max_labels
def _extract_labels(self, y) -> List[str]:
labels = []
for l in y:
for j in l:
labels.append(j)
return labels
def _get_prompt(self, x) -> str:
return get_zero_shot_prompt_mlc(x, self.classes_, self.max_labels)
def _predict_single(self, x):
completion = self._get_chat_completion(x)
try:
labels = extract_json_key(completion.choices[0].message["content"], "label")
if not isinstance(labels, list):
raise RuntimeError("Invalid labels type, expected list")
except Exception as e:
labels = []
labels = list(filter(lambda l: l in self.classes_, labels))
if len(labels) > self.max_labels:
labels = labels[: self.max_labels - 1]
elif len(labels) < 1:
labels = [random.choices(self.classes_, self.probabilities_)[0]]
return labels
def fit(
self,
X: Optional[Union[np.ndarray, pd.Series, List[str]]],
y: List[List[str]],
):
return super().fit(X, y)
| [] |
2024-01-10 | chaliy/play-llamaindex | api_query.py | # %%
import nest_asyncio
nest_asyncio.apply()
from llama_index import download_loader
download_loader("GithubRepositoryReader")
from dotenv import load_dotenv
load_dotenv()
import os
import logging
import sys
import pickle
from pathlib import Path
import faiss
from llama_index import (
VectorStoreIndex,
load_index_from_storage,
get_response_synthesizer,
StorageContext,
ServiceContext
)
from llama_index.vector_stores.faiss import FaissVectorStore
from llama_hub.github_repo import GithubClient, GithubRepositoryReader
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
DATA_PATH = Path("./data")
def get_service_context(preset: str):
if preset == "bedrock":
from llama_index.llms import Bedrock
from llama_index.embeddings import BedrockEmbedding
# Requires AWS Environment Variables
return ServiceContext.from_defaults(
llm=Bedrock(
model="anthropic.claude-v2"
),
embed_model=BedrockEmbedding.from_credentials()
)
elif preset == "openai":
from llama_index.llms import OpenAI
# Requires OpenAI/Azure OpenAI Environment Variables
return ServiceContext.from_defaults(
llm=OpenAI(temperature=0.1, model="gpt-4")
)
raise ValueError(f"Unknown preset: {preset}")
def load_documents():
documents_pkl_path = DATA_PATH / "local-api-documents.pkl"
if os.path.exists(documents_pkl_path):
with open(documents_pkl_path, "rb") as f:
return pickle.load(f)
github_client = GithubClient()
loader = GithubRepositoryReader(
github_client,
owner = "gpsinsight",
repo = "api-v2-documentation",
filter_file_extensions = ([".md"], GithubRepositoryReader.FilterType.INCLUDE),
verbose = True,
concurrent_requests = 1,
)
documents = loader.load_data(branch="master")
with open(documents_pkl_path, "wb") as f:
pickle.dump(documents, f)
return documents
def get_or_build_index(service_context, preset: str):
faiss_index = faiss.IndexFlatL2(1536)
index_path = DATA_PATH / f"local-api-index-{preset}"
if os.path.exists(index_path):
vector_store = FaissVectorStore.from_persist_dir(index_path)
storage_context = StorageContext.from_defaults(
vector_store=vector_store, persist_dir=index_path
)
return load_index_from_storage(
storage_context=storage_context,
service_context=service_context,
)
documents = load_documents()
vector_store = FaissVectorStore(faiss_index=faiss_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
service_context=service_context
)
    index.storage_context.persist(index_path)
return index
preset = os.getenv("PRESET", "openai")
service_context = get_service_context(preset)
index = get_or_build_index(service_context, preset)
response_synthesizer = get_response_synthesizer(
service_context=service_context,
response_mode="compact",
verbose=True,
)
query_engine = index.as_query_engine(
response_synthesizer=response_synthesizer
)
def qa(question: str):
print(f"\033[94mQ: {question}\033[0m")
response = query_engine.query(question)
print(f"\033[92mA: {response}\033[0m")
# %%
qa("Can you please write code to get all alerts? In python.")
# %%
qa("Please write code to get alerts from last 40 days. In python.")
# %%
| [] |
2024-01-10 | ketwong/groceries_io | dev_vision.py | import base64
import requests
import os
from openai import OpenAI
api_key = os.environ.get("OPENAI_API_KEY")
# Function to encode the image
def encode_image(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
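# Illustrative note (not part of the original script): encode_image("image.JPG") returns the
# file's bytes as a base64 string, embedded below in the "data:image/jpeg;base64,..." image URL.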
# Path to your image
image_path = "image.JPG"
# Getting the base64 string
base64_image = encode_image(image_path)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
payload = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "Determine what is in the image with the object name only, i.e. 'Banana', 'Orange', 'Bread' etc..."
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}",
"detail": "low"
}
}
]
}
],
"max_tokens": 300
}
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
#print(response.json())
content = response.json()['choices'][0]['message']['content']
print(content)
| [
"[{'type': 'text', 'text': \"Determine what is in the image with the object name only, i.e. 'Banana', 'Orange', 'Bread' etc...\"}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER', 'detail': 'low'}}]"
] |
2024-01-10 | ketwong/groceries_io | dev_api.py | import os
import time
from openai import OpenAI
# Start the timer
start_time = time.time()
client = OpenAI(
api_key=os.environ.get("OPENAI_API_KEY"),
)
# Create a random message prompt
prompt = "Write a creative and random message about the day in the life of a space-traveling cat."
chat_completion = client.chat.completions.create(
messages=[
{
"role": "user",
"content": prompt,
}
],
model="gpt-4", # Assuming GPT-4 is available and this is the correct identifier
max_tokens=50 # Limiting the response to 50 tokens
)
# Extract and print the response message
response_message = chat_completion.choices[0].message.content
print(response_message)
# Stop the timer and print the elapsed time
end_time = time.time()
elapsed_time = end_time - start_time
print(f"Elapsed time: {elapsed_time} seconds")
| [
"Write a creative and random message about the day in the life of a space-traveling cat."
] |
2024-01-10 | ketwong/groceries_io | dev_visionMulti.py | from openai import OpenAI
import os
client = OpenAI(
api_key=os.environ.get("OPENAI_API_KEY"),
)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "What are in these images? Is there any difference between them?",
},
{
"type": "image_url",
"image_url": {
"url": "https://contents.mediadecathlon.com/p1875564/k$4eec0a36fb3a9b4124fd59e676fc3a0d/sq/8529877.jpg",
},
},
{
"type": "image_url",
"image_url": {
"url": "https://contents.mediadecathlon.com/p2254190/k$b6432c2fb00743221776def4824206c1/sq/8558559.jpg",
},
},
],
}
],
max_tokens=300,
)
print(response.choices[0]) | [
"[{'type': 'text', 'text': 'What are in these images? Is there any difference between them?'}, {'type': 'image_url', 'image_url': {'url': 'https://contents.mediadecathlon.com/p1875564/k$4eec0a36fb3a9b4124fd59e676fc3a0d/sq/8529877.jpg'}}, {'type': 'image_url', 'image_url': {'url': 'https://contents.mediadecathlon.com/p2254190/k$b6432c2fb00743221776def4824206c1/sq/8558559.jpg'}}]"
] |
2024-01-10 | armans-code/schedulr | ml-service~ml_tag_generator.py | import os
import openai
import json
import random
# If it doesn't work, open "Install Certificates.command"
# import nltk
# nltk.download('wordnet')
# nltk.download('omw-1.4')
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
# import nltk
# nltk.download('punkt')
from collections import defaultdict
import pprint
openai.api_key = os.environ['OPENAI_API_KEY']
def text_parser_synonym_finder(text: str):
tokens = word_tokenize(text)
# print(tokens)
synonyms = defaultdict(list)
for token in tokens:
for syn in wordnet.synsets(token):
for i in syn.lemmas():
synonyms[token].append(i.name())
return synonyms
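# Illustrative example (not part of the original script; exact WordNet output may vary):
#   text_parser_synonym_finder("fast car") -> {'fast': ['fast', 'fasting', ...],
#                                              'car': ['car', 'auto', 'automobile', ...]}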
def generate_tags(textInput):
# Keywords Parameters
response = openai.Completion.create(
model="text-davinci-002",
prompt="Extract keywords from this text:\n\n" + textInput,
temperature=0.3,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
# Find all Keywords/Phrases
keywords = response["choices"][0]["text"].lower()
index = keywords.index("\n\n")
keywords = keywords[index + 2:]
# print(keywords)
# Move Keywords into Array
keywordsArray = keywords.split()
for x in range(len(keywordsArray)):
keywordsArray[x] = ''.join(letter for letter in keywordsArray[x] if letter.isalnum())
keywordsNoFormatting = ""
for x in range(len(keywordsArray)):
keywordsNoFormatting += keywordsArray[x] + " "
keywordsNoFormatting = keywordsNoFormatting.strip()
# print(keywordsArray)
# Run Function to find synonyms with keywords
synonymsDictionary = text_parser_synonym_finder(text=keywordsNoFormatting)
# print(synonymsDictionary)
# print(keywordsArray)
# print(keywordsArray)
# x is keyword; add 2 synonyms from each word
shortlistSynonyms = []
for x in keywordsArray:
# Remove Duplicate Synonyms
synonymsOfWord = synonymsDictionary.get(x)
        allSynonyms = []
        if synonymsOfWord is not None:
            allSynonyms = list(dict.fromkeys(synonymsOfWord))
for y in range(len(allSynonyms)):
allSynonyms[y] = allSynonyms[y].lower()
#print(allSynonyms)
# Remove Key Word is Present
if x in allSynonyms:
allSynonyms.remove(x)
# Get 2 random synonyms if 2 or more synonyms present, get 1 synonym if 1 present, leave alone if 0 synonyms
if len(allSynonyms) >= 2:
firstRandom = random.randint(0, len(allSynonyms) - 1)
secondRandom = firstRandom
while secondRandom == firstRandom:
secondRandom = random.randint(0, len(allSynonyms) - 1)
shortlistSynonyms.append(allSynonyms[firstRandom])
shortlistSynonyms.append(allSynonyms[secondRandom])
elif len(allSynonyms) >= 1:
shortlistSynonyms.append(allSynonyms[0])
# print(allSynonyms)
# print(shortlistSynonyms)
allKeywordsAndRelated = []
for x in keywordsArray:
allKeywordsAndRelated.append(x)
for x in shortlistSynonyms:
allKeywordsAndRelated.append(x)
return allKeywordsAndRelated | [
"Extract keywords from this text:\n\nPLACEHOLDER"
] |
2024-01-10 | argha48/smarthotels | models~reviewcompare.py | import nltk
import pandas as pd
import numpy as np
import re
import codecs
import json
import re
import numpy as np
import pandas as pd
from pprint import pprint
import pickle
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.models import Phrases
from gensim.corpora import Dictionary, MmCorpus
from gensim.models.word2vec import LineSentence
from gensim.models.ldamulticore import LdaMulticore
# spacy for lemmatization
import spacy
# NLTK for text cleaning
from nltk.tokenize import sent_tokenize, word_tokenize, RegexpTokenizer
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import stopwords, names
from nltk.tokenize import RegexpTokenizer
from nltk import tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# TextBlob package for translation and spelling correction
from textblob import TextBlob
nlp = spacy.load('en')
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
# %matplotlib inline
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
### Step 1: Initialization.
# Set up tokenizer.
tokenizer = RegexpTokenizer(r'\w+')
# Set up stop words.
en_stop = set(stopwords.words('english'))
# Set up stemmer.
p_stemmer = PorterStemmer()
# For sentiment analysis
senti = SentimentIntensityAnalyzer()
# Load saved models.
# I trained three LDA models from step 5, they have slightly different topics.
# I trained three models because there is randomness in LDA training.
ldamodel1 = pickle.load(open("PATH/model/lda_1.pickle", "rb"))
dict1 = pickle.load(open("PATH/model/dictionary_1.pickle", "rb"))
corpus1 = pickle.load(open("PATH/model/corpus_1.pickle", "rb"))
# # Topic dictionary
# lda_topics={
# 0:'Parking Service',
# 1:'Sleep Quality',
# 2:'WiFi',
# 3:'Online Service',
# 4:'Overall Experience',
# 5:'Value for Money',
# 6:'Swimming Pool/Spa',
# 7:'Front Desk Service',
# 8:'Food', ''
# 9:'Cleanliness',
# 10:'Surroundings',
# 11:'Distance from Transportation',
# 12:'Booking Experience',
# 13:'Hotel Staff',
# 14:'Early Check-in/Late Check-out',
# 15:'Noise'
# }
mallet_lda_topics={
0:'Hotel Staff',
1:'Accessibility',
2:'Food',
3:'Overall Experience',
4:'Noise',
5:'Value for Money',
6:'Room Amenities',
7:'Location in the city',
8:'Overall Experience',
9:'Cleanliness',
10:'Early Check-in/Late Check-out',
11:'Health and Wellness Amenities',
12:'Booking Experience',
13:'Sleep Quality',
14:'Parking Facility'
}
# helper functions for text preprocessing & LDA modeling:
def punct_space(token):
"""
helper function to eliminate tokens
that are pure punctuation or whitespace
"""
return token.is_punct or token.is_space or token.like_num or token.is_digit
def line_review(filename):
"""
generator function to read in reviews from Pandas Series
and un-escape the original line breaks in the text
"""
#with codecs.open(filename, encoding='utf_8') as f:
for review in filename:
yield review.replace('\\n', '\n')
def lemmatized_sentence_corpus(filename):
"""
generator function to use spaCy to parse reviews,
lemmatize the text, and yield sentences
"""
for parsed_review in nlp.pipe(line_review(filename), batch_size=10000, n_threads=10):
for sent in parsed_review.sents:
yield u' '.join([token.lemma_ for token in sent
if not punct_space(token)])
def trigram_bow_generator(filepath):
"""
generator function to read reviews from a file
and yield a bag-of-words representation
"""
# load finished dictionary from disk
trigram_dictionary = Dictionary.load('./models2/trigram_dict_all.dict')
for review in LineSentence(filepath):
yield trigram_dictionary.doc2bow(review)
def LDA_Review(review_df,min_topic_freq=0):
"""
Takes Pandas series as input,
consisting of one review as
text string per row
"""
    from tqdm import tqdm
    from operator import itemgetter
"""
accept the original text of a review and (1) parse it with spaCy,
(2) apply text pre-proccessing steps, (3) create a bag-of-words
representation, (4) create an LDA representation, and
(5) print a sorted list of the top topics in the LDA representation
"""
text = review_df['FullReview']
# parse the review text with spaCy
with codecs.open('./uni_temporary.txt', 'w', encoding='utf_8') as f:
for sentence in tqdm(lemmatized_sentence_corpus(text)):
# print(sentence)
f.write(sentence + '\n')
f.close()
# load and apply the first-order and secord-order phrase models
bigram_model = Phrases.load('./models2/bigram_model.txt')
trigram_model = Phrases.load('./models2/trigram_model.txt')
unigram_review = LineSentence('./uni_temporary.txt')
bigram_review = bigram_model[unigram_review]
trigram_review = trigram_model[bigram_review]
# remove any remaining stopwords
trigram_review = [term for term in trigram_review
if term not in spacy.lang.en.stop_words.STOP_WORDS]
with codecs.open('./tri_temporary.txt', 'w', encoding='utf_8') as ftri:
for sentence in trigram_review:
sentence = u' '.join(sentence)
ftri.write(sentence + '\n')
ftri.close()
trigram_dictionary = Dictionary.load('./models2/trigram_dict_all.dict')
lda = LdaMulticore.load('./models2/lda_model')
trigram_review = LineSentence('./tri_temporary.txt')
# create a bag-of-words representation
review_bow = trigram_dictionary.doc2bow(trigram_review)
# create an LDA representation
review_lda = lda.get_document_topics(review_bow)
review_lda = sorted(review_lda, key=itemgetter(1),reverse=True)
for topic_number, freq in review_lda:
if freq < min_topic_freq:
break
# print the most highly related topic names and frequencies
        print('{:25} {}'.format(mallet_lda_topics[topic_number], round(freq, 3)))
### Step 2: Generate the contents of the doctors' snapshots.
counter = 0
# The temporary string that stores all of the review highlights in each round of the for loop below.
big_str = []
# For every doctor, find two things:
# 1. The most mentioned FIVE topics in their reviews.
# 1.1 The sentiments of these topics.
# 2. The 3 most positive sentences and the 3 most negative sentences.
# 2.1 Rank all sentences according to sentiment analysis.
# I do NOT keep info about individual reviews. All sentences are stored in a
# long list regardless of whether they are from the same reviews or not!
###########################################################################
# Build sentence dataframe for the current doctor.
###########################################################################
this_hotel = pd.DataFrame(columns = ["HotelName","Sentence", "Sentiment_neg",
"Sentiment_neu","Sentiment_pos",
"Sentiment_compound", "topic_1", "topic1_score",
"topic_2", "topic2_score"])
sent_count = 0
# For every review sentence
for sentence in unigram_review:
# Assess sentiment.
sentiments = senti.polarity_scores(sentence)
sentiment_neg = sentiments["neg"]
sentiment_neu = sentiments["neu"]
sentiment_pos = sentiments["pos"]
sentiment_compound = sentiments["compound"]
# Assign topic.
# Default topic to -1.
this_topic = -1
# Preprocess sentence.
sent_tokens = tokenizer.tokenize(str(sentence).lower())
cleaned_sent = [p_stemmer.stem(i) for i in sent_tokens]
# Evaluate for topic.
sent_topics = []
for mod_id in range(0, mod_num):
model = ldamodel[mod_id]
dicti = dictionary[mod_id]
lda_score = model[dicti.doc2bow(cleaned_sent)]
for item in lda_score:
sent_topics.append((mod_id, item[0], item[1]))
sent_topics = sorted(sent_topics, key=lambda x: x[2], reverse=True)
# Assign the most relevant topic to a sentence only if the topic is more than 70% dominant.
if sent_topics[0][2] > 0.7:
this_topic = topics_matrix[sent_topics[0][0]][sent_topics[0][1]]
# Add procressed sentence and its meta information to the sentence dataframe.
this_doc.loc[sent_count] = [sentence, sentiment, this_topic, sent_topics[0][2]]
sent_count += 1
###########################################################################
# Compiling results for a hotel.
###########################################################################
# Review highlights.
# Save the most positive and negative sentiments.
this_doc2 = this_doc.sort_values(["sentiment"], ascending=[0]).reset_index(drop=True)
this_doc2 = this_doc2.loc[this_doc2["topic"] != -1].reset_index(drop=True)
this_doc2 = this_doc2.loc[this_doc2["topic_score"] > 0.5].reset_index(drop=True)
sent_count_2 = len(this_doc2)
composite = "NONE"
# Save the most polarizing sentiments only if there are at least 6 sentences.
if sent_count_2 > 5:
sent1 = sent2 = sent3 = sent4 = sent5 = sent6 = ""
# Only keep positive sentiment if its score is above 0.4 (within [-1, 1]).
if this_doc2.loc[0]["sentiment"] > 0.4:
sent1 = this_doc2.loc[0]["sentence"]
if this_doc2.loc[1]["sentiment"] > 0.4:
sent2 = this_doc2.loc[1]["sentence"]
if this_doc2.loc[2]["sentiment"] > 0.4:
sent3 = this_doc2.loc[2]["sentence"]
# Only keep positive sentiment if its score is below -0.2 (within [-1, 1]).
if this_doc2.loc[sent_count_2-1]["sentiment"] < -0.2:
sent4 = this_doc2.loc[sent_count_2-1]["sentence"]
if this_doc2.loc[sent_count_2-2]["sentiment"] < -0.2:
sent5 = this_doc2.loc[sent_count_2-2]["sentence"]
if this_doc2.loc[sent_count_2-3]["sentiment"] < -0.2:
sent6 = this_doc2.loc[sent_count_2-3]["sentence"]
composite = sent1 + "SSEEPP" + sent2 + "SSEEPP" + sent3 + "SSEEPP" + sent4 + "SSEEPP" + sent5 + "SSEEPP" + sent6 + "SSEEPP" + str(sent_count)
# Add review highlights to the doctor dataframe.
doctor_info.set_value(doctor_id, "summary", composite)
# Top topics and their ratings.
# Ratings are the percent positive sentences belonging to a topic.
doc_topics = [ [ 0 for i in range(2) ] for j in range(topic_num) ] # [total count, count positive]
for index2 in range(0, len(this_doc2)):
topic_index = this_doc2.loc[index2]["topic"]
if topic_index != -1:
doc_topics[topic_index][0] += 1
topic_sentiment = this_doc2.loc[index2]["sentiment"]
# A topic sentence if positive if its sentiment is bigger than 0.1.
if topic_sentiment > 0.1:
doc_topics[topic_index][1] += 1
# Do not display dentist stuff for non-dentist
if not is_dentist:
doc_topics[3][0] = 0
# Do not output "positive comment" as a topic. It is non-informative.
doc_topics[0][0] = 0
# Putting the results into a format to be sparsed by the webapp.
doc_topic_tuples = []
for index3, item in enumerate(doc_topics):
doc_topic_tuples.append((index3, item[0], item[1]))
doc_topic_tuples = sorted(doc_topic_tuples, key=lambda x: x[1], reverse=True)
for index4 in range(0, 5):
if doc_topic_tuples[index4][1] >= 10:
topic_name = topics[doc_topic_tuples[index4][0]][0]
percent_positive = str(int(doc_topic_tuples[index4][2]/doc_topic_tuples[index4][1] * 100))
composite = topic_name + "SSEEPP" + percent_positive + "SSEEPP" + str(doc_topic_tuples[index4][1])
doctor_info.set_value(doctor_id, "percent{0}".format(str(index4+1)), composite)
print(topic_name, "XXXXXX", doctor_info.loc[doctor_id]["specialty"])
big_str.append(topic_name + "XXXXXX" + str(doctor_info.loc[doctor_id]["specialty"]))
else:
doctor_info.set_value(doctor_id, "percent{0}".format(str(index4+1)), "NONE")
# Print progress.
print(counter/5088)
counter += doctor_review_count
del this_doc
del this_doc2
# Save the updated doctor dataframe containing snapshot information.
doctor_info.to_csv("PATH/result/all_doctors.csv")
| [] |
2024-01-10 | argha48/smarthotels | models~ngram.py | import nltk
import pandas as pd
import numpy as np
import re
import codecs
import json
import re
import numpy as np
import pandas as pd
from pprint import pprint
import _pickle as pickle
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.models import Phrases
from gensim.corpora import Dictionary, MmCorpus
from gensim.models.word2vec import LineSentence
from gensim.models.ldamulticore import LdaMulticore
# spacy for lemmatization
import spacy
from nltk.tokenize import sent_tokenize, word_tokenize, RegexpTokenizer
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import stopwords, names
nlp = spacy.load('en')
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
# %matplotlib inline
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
def get_text(rev):
return rev[0] if str(rev)!='nan' else ''
def clean_text(text):
import re, string
regex = re.compile('[%s]' % re.escape(string.punctuation.replace('.','')))
text = regex.sub('', text)
text = re.sub('/', ' ', text)
# text = re.sub('\s+', ' ', text)
# text = re.sub("\'", "", text)
return text.lower()
import spacy
spacy.load('en')
from spacy.lang.en import English
parser = English()
def tokenize(text):
lda_tokens = []
tokens = parser(text)
for token in tokens:
if token.orth_.isspace():
continue
else:
lda_tokens.append(token.lower_)
return lda_tokens
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet as wn
def get_lemma(word):
lemma = wn.morphy(word)
if lemma is None:
return word
else:
return lemma
from nltk.stem.wordnet import WordNetLemmatizer
def get_lemma2(word):
return WordNetLemmatizer().lemmatize(word)
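# Illustrative example (not part of the original script): get_lemma("reviews") and
# get_lemma2("reviews") both return "review"; get_lemma falls back to the original word
# when WordNet has no base form for it.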
def spell_correction(text):
from textblob import TextBlob
return TextBlob(text).correct()
en_stop = set(stopwords.words('english'))
def prepare_text_for_lda(text):
tokens = tokenize(text)
# tokens = [token for token in tokens if len(token) > 4]
tokens = [token for token in tokens if token not in en_stop]
tokens = [get_lemma(token) for token in tokens]
return tokens
# %%time
# alldata = pd.DataFrame(pd.concat([nydata['FullReview'],calidata['FullReview']],ignore_index=True))
# alldata.to_json('all_reviews.json', orient='columns')
alldata = pd.read_json('all_reviews.json',orient='columns', encoding='utf-8')
# alldata.sample()
# helper functions for text preprocessing & LDA modeling:
def punct_space(token):
"""
helper function to eliminate tokens
that are pure punctuation or whitespace
"""
return token.is_punct or token.is_space or token.like_num or token.is_digit
def line_review(filename):
"""
generator function to read in reviews from Pandas Series
and un-escape the original line breaks in the text
"""
#with codecs.open(filename, encoding='utf_8') as f:
for review in filename:
yield review.replace('\\n', '\n')
def lemmatized_sentence_corpus(filename):
"""
generator function to use spaCy to parse reviews,
lemmatize the text, and yield sentences
"""
for parsed_review in nlp.pipe(line_review(filename), batch_size=10000, n_threads=10):
for sent in parsed_review.sents:
yield u' '.join([token.lemma_ for token in sent
if not punct_space(token)])
def trigram_bow_generator(filepath):
"""
generator function to read reviews from a file
and yield a bag-of-words representation
"""
# load finished dictionary from disk
trigram_dictionary = Dictionary.load('./models2/trigram_dict_all.dict')
for review in LineSentence(filepath):
yield trigram_dictionary.doc2bow(review)
def reviewPreProcess(text):
"""
Takes Pandas series as input,
consisting of one review as
text string per row
"""
from tqdm import tqdm
# lemmatized_sentence_corpus generator loops over original review text, segments the reviews into individual sentences
# and normalizes text. Writes data to new file with one normalized sentence per line:
# with codecs.open('./models2/unigram_sentences_p5.txt', 'a', encoding='utf_8') as f:
# for sentence in tqdm(lemmatized_sentence_corpus(text)):
# # print(sentence)
# f.write(sentence + '\n')
# f.close()
# Create object to stream unigram sentences from disk, rather than hold in memory:
# unigram_sentences = LineSentence('./models2/unigram_sentences.txt')
#
# # Train phrase model to link individual words into two-word phrases:
# bigram_model = Phrases(unigram_sentences)
# bigram_model.save('./models2/bigram_model.txt')
#
# # Apply trained bigram phrase model to the review sentences data:
# with codecs.open('./models2/bigram_sentences.txt', 'w', encoding='utf_8') as f:
# for unigram_sentence in tqdm(unigram_sentences):
# bigram_sentence = u' '.join(bigram_model[unigram_sentence])
# f.write(bigram_sentence + '\n')
#
# # Create object to stream bigram sentences from disk, rather than hold in memory:
# bigram_sentences = LineSentence('./models2/bigram_sentences.txt')
#
# # Train second-order phrase model to to generate trigrams:
# trigram_model = Phrases(bigram_sentences)
# trigram_model.save('./models2/trigram_model.txt')
#
# # Apply trained second-order phrase model to our first-order transformed sentences and write to a new file:
# with codecs.open('./models2/trigram_sentences.txt', 'w', encoding='utf_8') as f:
# for bigram_sentence in tqdm(bigram_sentences):
# trigram_sentence = u' '.join(trigram_model[bigram_sentence])
# f.write(trigram_sentence + '\n')
# Run complete text of the reviews through a pipeline that applies text normalization and phrase models.
# Also remove stopwords and write transformed text to a new file, one review per line:
# bigram_model = Phrases.load('./models2/bigram_model.txt')
# trigram_model = Phrases.load('./models2/trigram_model.txt')
# with codecs.open('./models2/tri/trigram_transformed_reviews_p6.txt', 'a', encoding='utf_8') as f:
# for parsed_review in tqdm(nlp.pipe(line_review(text),
# batch_size=10000, n_threads=10)):
#
# # lemmatize the text, removing punctuation and whitespace
# unigram_review = [token.lemma_ for token in parsed_review
# if not punct_space(token)]
#
# # apply the first-order and second-order phrase models
# bigram_review = bigram_model[unigram_review]
# trigram_review = trigram_model[bigram_review]
#
# # common_terms = ['order', 'come', 'bad', 'good', \
# # 'place', 'time', '\'s'] #'service',
#
# # remove any remaining stopwords
# trigram_review = [term for term in trigram_review
# if term not in spacy.lang.en.stop_words.STOP_WORDS]
# # trigram_review = [term for term in trigram_review
# # if term not in common_terms]
#
# # write the transformed review as a line in the new file
# trigram_review = u' '.join(trigram_review)
# f.write(trigram_review + '\n')
# Learn full vocabulary of corpus to be modeled, using gensim's Dictionary class. Stream
# reviews off of disk using LineSentence:
trigram_reviews = LineSentence('./models2/trigram_transformed_reviews.txt')
# learn the dictionary by iterating over all of the reviews
trigram_dictionary = Dictionary(trigram_reviews)
# filter tokens that are very rare or too common from
# the dictionary (filter_extremes) and reassign integer ids (compactify)
trigram_dictionary.filter_extremes(no_below=3, no_above=0.4)
trigram_dictionary.compactify()
trigram_dictionary.save('./models2/trigram_dict_all.dict')
return 1#bigram_model, trigram_model, trigram_dictionary
# def LDA_Model(topics, cores=11):
# """
# Topics represents desired LDA topics,
# cores should be physical cores minus one.
# Both should be integers.
# """
#
# # load finished dictionary from disk
# trigram_dictionary = Dictionary.load('./models2/trigram_dict_all.dict')
#
# # generate bag-of-words representations for
# # all reviews and save them as a matrix
# MmCorpus.serialize('./models2/trigram_bow_corpus.nm',
# trigram_bow_generator('./models2/trigram_transformed_reviews.txt'))
#
# # load finished bag-of-words corpus from disk
# trigram_bow_corpus = MmCorpus('./models2/trigram_bow_corpus.nm')
#
#
# # Pass the bag-of-words matrix and Dictionary from previous steps to LdaMulticore as inputs,
# # along with the number of topics the model should learn
#
# # workers => sets the parallelism, and should be
# # set to your number of physical cores minus one
# lda = LdaMulticore(trigram_bow_corpus,
# num_topics=topics,
# id2word=trigram_dictionary,
# workers=cores)
#
# lda.save('./models2/lda_model')
#
# # load the finished LDA model from disk
# #lda = LdaMulticore.load('./models/lda_model_neg')
#
# return trigram_bow_corpus, lda
def guidedLDA_Model(topics, cores=11):
"""
Topics represents desired LDA topics,
cores should be physical cores minus one.
Both should be integers.
"""
# load finished dictionary from disk
trigram_dictionary = Dictionary.load('./models2/trigram_dict_all.dict')
# generate bag-of-words representations for
# all reviews and save them as a matrix
MmCorpus.serialize('./models2/trigram_bow_corpus.nm',
trigram_bow_generator('./models2/trigram_transformed_reviews.txt'))
# load finished bag-of-words corpus from disk
trigram_bow_corpus = MmCorpus('./models2/trigram_bow_corpus.nm')
# Pass the bag-of-words matrix and Dictionary from previous steps to LdaMulticore as inputs,
# along with the number of topics the model should learn
# workers => sets the parallelism, and should be
# set to your number of physical cores minus one
lda = LdaMulticore(trigram_bow_corpus,
num_topics=topics,
id2word=trigram_dictionary,
workers=cores)
lda.save('./models2/lda_model')
# load the finished LDA model from disk
#lda = LdaMulticore.load('./models/lda_model_neg')
return trigram_bow_corpus, lda
# %%time
# N = len(alldata)
# ii=800000
# ff=ii+20000
# while ff<N:
# aa = reviewPreProcess(alldata['FullReview'][ii:ff])
# ii=ff
# ff=ii+20000
# print(ff)
# else:
# aa = reviewPreProcess(alldata['FullReview'][ii:N])
d = reviewPreProcess(alldata['FullReview'])
# bigram_model, trigram_model, trigram_dictionary = reviewPreProcess(alldata['FullReview'])
trigram_bow_corpus, lda = guidedLDA_Model(15)
import pickle
trigram_dictionary = Dictionary.load('./models2/trigram_dict_all.dict')
trigram_bow_corpus = MmCorpus('./models2/trigram_bow_corpus.nm')
lda = LdaMulticore.load('./models2/lda_model')
LDAvis_prepared = pyLDAvis.gensim.prepare(lda, trigram_bow_corpus, trigram_dictionary)
# Save pre-prepared pyLDAvis data to disk:
with open('./models2/ldavis_prepared', 'wb') as f:
pickle.dump(LDAvis_prepared, f)
# load the pre-prepared pyLDAvis data from disk:
with open('./models2/ldavis_prepared', 'rb') as f:
LDAvis_prepared = pickle.load(f)
# pyLDAvis.display(LDAvis_prepared)
pyLDAvis.save_html(LDAvis_prepared,'./models2/lda.html')
| [] |
2024-01-10 | argha48/smarthotels | application~src~demo.py | #############################################################
# Copyright (C) 2019 Argha Mondal
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#############################################################
import nltk
import pandas as pd
import numpy as np
import re
import codecs
import json
import re
import pickle
# # Gensim
# import gensim
# import gensim.corpora as corpora
# from gensim.utils import simple_preprocess
# from gensim.models import CoherenceModel
# from gensim.models import Phrases
# from gensim.corpora import Dictionary, MmCorpus
# from gensim.models.word2vec import LineSentence
# from gensim.models.ldamulticore import LdaMulticore
# # spacy for lemmatization
# import spacy
# # NLTK for text cleaning
# from nltk.tokenize import sent_tokenize, word_tokenize, RegexpTokenizer
# from nltk.classify import NaiveBayesClassifier
# from nltk.corpus import stopwords, names
# from nltk.tokenize import RegexpTokenizer
# from nltk import tokenize
# from nltk.corpus import stopwords
# from nltk.stem.porter import PorterStemmer
# from nltk.sentiment.vader import SentimentIntensityAnalyzer
# nltk.download('vader_lexicon')
# # TextBlob package for translation and spelling correction
# from textblob import TextBlob
#
# nlp = spacy.load('en')
# # Plotting tools
# import pyLDAvis
# import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
# %matplotlib inline
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
alldata = pd.read_json('./src/minidemo.json',orient='columns',encoding='utf-8')
star_score_df = pd.read_json('./src/hotel_star_scores.json',encoding='utf-8')
mallet_lda_topics={
0:'Hotel Staff',
1:'Accessibility',
2:'Food',
3:'Overall Experience',
4:'Noise',
5:'Value for Money',
6:'Room Amenities',
7:'Location in the city',
8:'Overall Experience',
9:'Cleanliness',
10:'Early Check-in/Late Check-out',
11:'Health and Wellness Amenities',
12:'Booking Experience',
13:'Sleep Quality',
14:'Parking Facility'
}
#
# def get_text(rev):
# if pd.DataFrame(rev).empty:
# return ''
# else:
# return rev[0] if str(rev)!='nan' else ''
#
# def review_cleaned(review_df):
# df = review_df[['HotelName','PositiveReview','NegativeReview','StayDate']].copy()#.applymap(get_text)
# df['FullReview'] = [pos+' '+neg for pos,neg in zip(df['PositiveReview'],df['NegativeReview'])]
# # df['StayDate'] = df['StayDate'].apply(lambda x: x.replace('\n','')).apply(lambda x: x.replace('Stayed in ',''))
# return df
#
# def review_to_sentence(df):
# all_sentences = []
# from nltk.tokenize import sent_tokenize
# import pandas as pd
# allreview = df['FullReview']
# for areview in allreview:
# all_sentences.extend(sent_tokenize(areview))
# tokensentence = pd.DataFrame(data=all_sentences,columns=['TokenSentence'])
# return tokensentence
#
# def sentence_sentiment(text):
# from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# analyzer = SentimentIntensityAnalyzer()
# compound_sentiment = analyzer.polarity_scores(text)['compound']
# return compound_sentiment
#
# def token_to_sentiment(df):
# df['CompoundSentiment'] = df['TokenSentence'].apply(sentence_sentiment)
# return df
#
# # helper functions for text preprocessing & LDA modeling:
#
# def punct_space(token):
# """
# helper function to eliminate tokens
# that are pure punctuation or whitespace
# """
#
# return token.is_punct or token.is_space or token.like_num or token.is_digit
#
# def line_review(filename):
# """
# generator function to read in reviews from Pandas Series
# and un-escape the original line breaks in the text
# """
#
# #with codecs.open(filename, encoding='utf_8') as f:
# for review in filename:
# yield review.replace('\\n', '\n')
#
# def lemmatized_sentence_corpus(filename):
# """
# generator function to use spaCy to parse reviews,
# lemmatize the text, and yield sentences
# """
#
# for parsed_review in nlp.pipe(line_review(filename), batch_size=10000, n_threads=10):
# for sent in parsed_review.sents:
# yield u' '.join([token.lemma_ for token in sent
# if not punct_space(token)])
#
# def trigram_bow_generator(filepath):
# """
# generator function to read reviews from a file
# and yield a bag-of-words representation
# """
# # load finished dictionary from disk
# trigram_dictionary = Dictionary.load('./src/saved_model/models2/trigram_dict_all.dict')
#
# for review in LineSentence(filepath):
# yield trigram_dictionary.doc2bow(review)
#
# def nCPU():
# import multiprocessing
# N = multiprocessing.cpu_count()-1
# return N
#
# def topic_extractor(df, min_topic_freq=0.10):
# from tqdm import tqdm
# from operator import itemgetter
# ncpu = nCPU()
# dfc=df.copy()
# text = dfc['TokenSentence'].copy()
# trigram_dictionary = Dictionary.load('./src/saved_model/models2/trigram_dict_all.dict')
# lda = LdaMulticore.load('./src/saved_model/models2/mallet_lda_model')
# # trigram_review = LineSentence('./tri_temporary.txt')
# bigram_model = Phrases.load('./src/saved_model/models2/bigram_model.txt')
# trigram_model = Phrases.load('./src/saved_model/models2/trigram_model.txt')
# topic_list = []
# trigram_list = []
# freq_list = []
# # parse the review text with spaCy
# for parsed_review in tqdm(nlp.pipe(line_review(text),
# batch_size=10000, n_threads=ncpu)):
# # lemmatize the text, removing punctuation and whitespace
# unigram_review = [token.lemma_ for token in parsed_review
# if not punct_space(token)]
# # apply the first-order and second-order phrase models
# bigram_review = bigram_model[unigram_review]
# trigram_review = trigram_model[bigram_review]
#
# common_terms = ['-PRON-','hotel'] #'service',
# # remove any remaining stopwords
# trigram_review = [term for term in trigram_review
# if term not in spacy.lang.en.stop_words.STOP_WORDS]
# trigram_review = [term for term in trigram_review
# if term not in common_terms]
# if len(trigram_review)==0:
# topic_number=-1
# freq = 0.0
# tri = str([])
# else:
# # create a bag-of-words representation
# review_bow = trigram_dictionary.doc2bow(trigram_review)
# # create an LDA representation
# review_lda = lda.get_document_topics(review_bow)
# # print the most highly related topic name and frequency
# review_lda = sorted(review_lda, key=itemgetter(1),reverse=True)[0]
# topic_number = review_lda[0]
# freq = review_lda[1]
# if freq < min_topic_freq:
# topic_number=-1
# freq = 0.0
#
# topic_list.append(topic_number)
# freq_list.append(round(freq,2))
# trigram_list.append(trigram_review)
# dfc['Topic']=topic_list
# dfc['TopicFrequency']=freq_list
# dfc['Trigram']=trigram_list
# return dfc
#
# def topic_scorer(df):
# xdf = pd.get_dummies(df,prefix='Topic',
# prefix_sep='_', dummy_na=False,
# columns=['Topic'])
# topics = ['Topic_0', 'Topic_1', 'Topic_2','Topic_3',
# 'Topic_4', 'Topic_5', 'Topic_6', 'Topic_7',
# 'Topic_8','Topic_9', 'Topic_10', 'Topic_11',
# 'Topic_12', 'Topic_13','Topic_14']
# topic_dict = {}
# for atopic in topics:
# if atopic in xdf.columns.values:
# xdf[atopic] = xdf[atopic] * xdf['CompoundSentiment']
# m = np.mean(list(filter(lambda a: a != 0, xdf[atopic])))
# topic_dict[mallet_lda_topics[int(atopic.replace('Topic_',''))]] = round(m,2)
# else:
# topic_dict[mallet_lda_topics[int(atopic.replace('Topic_',''))]] = 'No information available'
# return topic_dict
#
# def demo_(hotel_name):
# new_doc = alldata[alldata['HotelName']==hotel_name]
# # print(new_doc.shape)
# text = review_cleaned(new_doc)
# tokensentence = review_to_sentence(text)
# sentencesentiment = token_to_sentiment(tokensentence)
# topicdf = topic_extractor(sentencesentiment)
# topic_dict = topic_scorer(topicdf)
# return [(key,int(100*topic_dict[key])) for key in topic_dict]
def score_compare(hotel_name, hotel_star):
topics = ['Topic_0', 'Topic_1', 'Topic_2','Topic_3',
'Topic_4', 'Topic_5', 'Topic_6', 'Topic_7',
'Topic_8','Topic_9', 'Topic_10', 'Topic_11',
'Topic_12', 'Topic_13','Topic_14']
df_myhotel = star_score_df[star_score_df['HotelName']==hotel_name]
df = star_score_df[star_score_df['HotelStar']==str(hotel_star)]
topic_dict = {}
for i in range(len(topics)):
atopic = topics[i]
if atopic in df_myhotel.columns.values:
if df_myhotel[atopic].values >= -10.:
# print(str(df_myhotel[atopic]))
myscore = int(100.*float(df_myhotel[atopic]))
otherscore = pd.Series(df[atopic]).dropna().astype('int')
otherscore = otherscore.dropna()
# print(otherscore)
otherscore = 100*otherscore
othermean = int(100.*np.mean(otherscore))
if myscore > othermean:
topic_dict[mallet_lda_topics[int(atopic.replace('Topic_',''))]] = [myscore, othermean, 'Good']
else:
topic_dict[mallet_lda_topics[int(atopic.replace('Topic_',''))]] = [myscore, othermean, 'Bad']
topic_dict['Hotel_info'] = [hotel_name, str(int(hotel_star)+1), 'info']
return [(str(key),topic_dict[key][0],topic_dict[key][1]) for key in topic_dict]
def make_plot(title,data, hist, edges, x):
import numpy as np
import scipy.special
from scipy import stats
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show, output_file
kernel = stats.gaussian_kde(data)
pdf = kernel(x)
p = figure(title=title, tools='', background_fill_color="#fafafa")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
fill_color="navy", line_color="white", alpha=0.5, legend="Histogram")
p.line(x, pdf, line_color="#ff8888", line_width=4, alpha=0.7, legend="PDF")
p.y_range.start = 0
p.legend.location = "center_right"
p.legend.background_fill_color = "#fefefe"
p.xaxis.axis_label = 'Sentiment Score'
p.yaxis.axis_label = 'Frequency'
p.grid.grid_line_color="white"
return p
def compare_plot(hotel_name, hotel_star, atopic):
from bokeh.embed import components
from bokeh.models import Span
# Create the main plot
df_myhotel = star_score_df[star_score_df['HotelName']==hotel_name]
df = star_score_df[star_score_df['HotelStar']==str(hotel_star)]
myscore = 100.*float(df_myhotel[atopic])
otherscore = 100.*pd.Series(df[atopic].values).dropna()
hist, edges = np.histogram(otherscore, density=True, bins=50)
x = np.linspace(-100,100,5000)
p = make_plot(mallet_lda_topics[int(atopic.replace('Topic_',''))],otherscore, hist, edges, x)
othermean = int(100.*np.mean(otherscore))
    # mark this hotel's score with a red vertical line on top of the peer distribution
    vline = Span(location=myscore, dimension='height', line_color='red', line_width=5)
    p.add_layout(vline)
    p.title.text_font_size = '18pt'
    p.xaxis.axis_label_text_font_size = "18pt"
    p.yaxis.axis_label_text_font_size = "18pt"
return p
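# Illustrative usage sketch (an assumption, not part of the original script): chain
# score_compare and compare_plot for one hotel. Relies on the module-level globals
# star_score_df and mallet_lda_topics being loaded; the hotel name below is a
# hypothetical placeholder.
def _demo_compare(hotel_name='Some Hotel', hotel_star=4):
    rows = score_compare(hotel_name, hotel_star)             # per-topic comparison table
    plot = compare_plot(hotel_name, hotel_star, 'Topic_0')   # distribution for one topic
    return rows, plot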
| [] |
2024-01-10 | aalexsmithh/jaco-gym | jaco_gym~envs~gazebo_env.py | '''
This file is from gym_gazebo (https://github.com/erlerobot/gym-gazebo), a repo developed by Erle Robotics.
It is released under GPLv3.
It has been moved into this project to simplify the organization of the project.
'''
import gym
import rospy
import roslaunch
import os
import signal
import subprocess
from os import path
from std_srvs.srv import Empty
class GazeboEnv(gym.Env):
"""Superclass for all Gazebo environments.
"""
metadata = {'render.modes': ['human']}
def __init__(self, launchfile):
#start roscore
# subprocess.Popen("roscore")
# print ("Roscore launched!")
# Launch the simulation with the given launchfile name
# rospy.init_node('gym', anonymous=True)
if launchfile.startswith("/"):
fullpath = launchfile
else:
fullpath = os.path.join(os.path.dirname(__file__), "assets","launch", launchfile)
if not path.exists(fullpath):
raise IOError("File "+fullpath+" does not exist")
# uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
# roslaunch.configure_logging(uuid)
# launch = roslaunch.parent.ROSLaunchParent(uuid, [fullpath])
# launch.start()
subprocess.Popen(["roslaunch",fullpath])
print ("Gazebo launched!")
self.gzclient_pid = 0
def _step(self, action):
# Implement this method in every subclass
# Perform a step in gazebo. E.g. move the robot
raise NotImplementedError
def _reset(self):
# Implemented in subclass
raise NotImplementedError
def _render(self, mode="human", close=False):
if close:
tmp = os.popen("ps -Af").read()
proccount = tmp.count('gzclient')
if proccount > 0:
if self.gzclient_pid != 0:
os.kill(self.gzclient_pid, signal.SIGTERM)
os.wait()
return
tmp = os.popen("ps -Af").read()
proccount = tmp.count('gzclient')
if proccount < 1:
subprocess.Popen("gzclient")
self.gzclient_pid = int(subprocess.check_output(["pidof","-s","gzclient"]))
else:
self.gzclient_pid = 0
def _close(self):
# Kill gzclient, gzserver and roscore
tmp = os.popen("ps -Af").read()
gzclient_count = tmp.count('gzclient')
gzserver_count = tmp.count('gzserver')
roscore_count = tmp.count('roscore')
rosmaster_count = tmp.count('rosmaster')
if gzclient_count > 0:
os.system("killall -9 gzclient")
if gzserver_count > 0:
os.system("killall -9 gzserver")
if rosmaster_count > 0:
os.system("killall -9 rosmaster")
if roscore_count > 0:
os.system("killall -9 roscore")
        if gzclient_count > 0 or gzserver_count > 0 or roscore_count > 0 or rosmaster_count > 0:
os.wait()
def _configure(self):
# TODO
# From OpenAI API: Provides runtime configuration to the enviroment
# Maybe set the Real Time Factor?
pass
def _seed(self):
# TODO
# From OpenAI API: Sets the seed for this env's random number generator(s)
pass
| [] |
2024-01-10 | aalexsmithh/jaco-gym | run~gym-testing.py | import gym
import gym_gazebo
import jaco_gym
import time
import numpy
import random
import time
import csv
from tensorforce.agents import Agent
from tensorforce.execution import Runner
from tensorforce.contrib.openai_gym import OpenAIGym
# from tensorforce.agents.ppo_agent import PPOAgent
from tensorforce.agents.trpo_agent import TRPOAgent
network_spec = [
dict(type='dense', size=32, activation='relu'),
dict(type='dense', size=32, activation='relu')
]
train_data = []
def main():
#tensorforce
env = OpenAIGym('JacoArm-v0')
agent = TRPOAgent(
states_spec=env.states,
actions_spec=env.actions,
network_spec=network_spec,
batch_size=512
)
# agent = PPOAgent(
# states_spec=env.states,
# actions_spec=env.actions,
# network_spec=network_spec,
# batch_size=512,
# step_optimizer=dict(
# type='adam',
# learning_rate=1e-4
# )
# )
runner = Runner(agent=agent, environment=env)
raw_input("hit enter when gazebo is loaded...")
print()
env.gym.unpause()
env.gym.hold_init_robot_pos([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0])
runner.run(episodes=1500, max_episode_timesteps=1000, episode_finished=episode_finished)
#old-fashioned way
# env = gym.make('JacoArm-v0')
# print "launching the world..."
# #gz loaing issues, let user start the learning
# raw_input("hit enter when gazebo is loaded...")
# env.set_physics_update(0.0001, 10000)
# raw_input("hit enter when gazebo is loaded...")
# # env.set_goal([0.167840578046, 0.297489331432, 0.857454500127])
# total_episodes = 100
# action = [1,1,1,1,1,1,1,1,1,1]
# x = 0
# # for x in range(total_episodes):
# while True:
# # if x % 10 is 0:
# action = numpy.random.rand(1, 10)[0]
# # print 'new action is', action
# state, reward, done, _ = env.step(action)
# print reward
# time.sleep(0.2)
# x += 1
write_to_csv(train_data, 'test.csv')
env.close()
def episode_finished(r):
# print("Finished episode {ep} after {ts} timesteps (reward: {reward})".format(ep=r.episode, ts=r.episode_timestep, reward=r.episode_rewards[-1]))
print("{ep}, {ts}, {reward}".format(ep=r.episode, ts=r.episode_timestep, reward=r.episode_rewards[-1]))
train_data.append([r.episode_timestep, r.episode_rewards[-1]])
return True
def write_to_csv(data, fn):
with open(fn, 'wb') as f:
w = csv.writer(f, dialect='excel')
for row in data:
w.writerow(row)
f.close()
if __name__ == '__main__':
main() | [] |
2024-01-10 | liuweisj/nas-tools | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
彩虹岛签到
如果填写openai key则调用chatgpt获取答案
否则随机
"""
# 匹配的站点Url,每一个实现类都需要设置为自己的站点Url
site_url = "chdbits.co"
# 已签到
_sign_regex = ['今天已经签过到了']
# 签到成功,待补充
_success_regex = ['\\d+点魔力值']
# 存储正确的答案,后续可直接查
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
@classmethod
def match(cls, url):
"""
根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可
:param url: 站点Url
:return: 是否匹配,如匹配则会调用该类的signin方法
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
执行签到操作
:param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息
:return: 签到结果信息
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
# 创建正确答案存储目录
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
# 判断今日是否已签到
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://chdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
# 没有签到则解析html
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
# 获取页面问题、答案
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
# 正则获取问题
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
# 查询已有答案
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
# 查询本地本次验证码hash答案
question_answer = exits_answers[question_str]
# question_answer是数组
if not isinstance(question_answer, list):
question_answer = [question_answer]
# 本地存在本次hash对应的正确答案再遍历查询
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
# 正确答案,默认随机,如果gpt返回则用gpt返回的答案提交
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
# 组装gpt问题
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
# chatgpt获取答案
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt返回结果 {answer}")
# 处理chatgpt返回的答案信息
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
# 正则获取字符串中的数字
            answer_nums = list(map(int, re.findall(r"\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
# 如果返回的数字在option_ids范围内,则直接作为答案
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
签到请求
questionid: 450
choice[]: 8
choice[]: 4
usercomment: 此刻心情:无
submit: 提交
多选会有多个choice[]....
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://chdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
# 判断是否签到成功
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"签到成功")
if exits_answers and question:
# 签到成功写入本地文件
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
签到成功写入本地文件
"""
try:
exits_answers[question] = answer
# 序列化数据
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
2024-01-10 | liuweisj/nas-tools | app~plugins~modules~autosub.py | import copy
import os
import re
import subprocess
import tempfile
import time
import traceback
from datetime import timedelta
import iso639
import psutil
import srt
from lxml import etree
from app.helper import FfmpegHelper
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._base import _IPluginModule
from app.utils import SystemUtils
from config import RMT_MEDIAEXT
class AutoSub(_IPluginModule):
# 插件名称
module_name = "AI字幕自动生成"
# 插件描述
module_desc = "使用whisper自动生成视频文件字幕。"
# 插件图标
module_icon = "autosubtitles.jpeg"
# 主题色
module_color = "#2C4F7E"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "olly"
# 作者主页
author_url = "https://github.com/lightolly"
# 插件配置项ID前缀
module_config_prefix = "autosub"
# 加载顺序
module_order = 14
# 可使用的用户级别
auth_level = 1
# 私有属性
_running = False
# 语句结束符
_end_token = ['.', '!', '?', '。', '!', '?', '。"', '!"', '?"', '."', '!"', '?"']
_noisy_token = [('(', ')'), ('[', ']'), ('{', '}'), ('【', '】'), ('♪', '♪'), ('♫', '♫'), ('♪♪', '♪♪')]
def __init__(self):
self.additional_args = '-t 4 -p 1'
self.translate_zh = False
self.translate_only = False
self.whisper_model = None
self.whisper_main = None
self.file_size = None
self.process_count = 0
self.skip_count = 0
self.fail_count = 0
self.success_count = 0
self.send_notify = False
self.asr_engine = 'whisper.cpp'
self.faster_whisper_model = 'base'
self.faster_whisper_model_path = None
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
[
{
'title': '媒体路径',
'required': '',
'tooltip': '要进行字幕生成的路径,每行一个路径,请确保路径正确',
'type': 'textarea',
'content':
{
'id': 'path_list',
'placeholder': '文件路径',
'rows': 5
}
}
],
# asr 引擎
[
{
'title': '文件大小(MB)',
'required': "required",
'tooltip': '单位 MB, 大于该大小的文件才会进行字幕生成',
'type': 'text',
'content':
[{
'id': 'file_size',
'placeholder': '文件大小, 单位MB'
}]
},
{
'title': 'ASR引擎',
'required': "required",
'tooltip': '自动语音识别引擎选择',
'type': 'select',
'content': [
{
'id': 'asr_engine',
'options': {
'whisper.cpp': 'whisper.cpp',
'faster-whisper': 'faster-whisper'
},
'default': 'whisper.cpp',
'onchange': 'AutoSub_asr_engine_change(this)'
}
]
}
]
]
},
{
'type': 'details',
'id': 'whisper_config',
'summary': 'whisper.cpp 配置',
'tooltip': '使用 whisper.cpp 引擎时的配置',
'hidden': False,
'content': [
[
{
'title': 'whisper.cpp路径',
'required': "",
'tooltip': '填写whisper.cpp主程序路径,如/config/plugin/autosub/main \n'
'推荐教程 https://ddsrem.com/autosub',
'type': 'text',
'content': [
{
'id': 'whisper_main',
'placeholder': 'whisper.cpp主程序路径'
}
]
}
],
[
{
'title': 'whisper.cpp模型路径',
'required': "",
'tooltip': '填写whisper.cpp模型路径,如/config/plugin/autosub/models/ggml-base.en.bin\n'
'可从https://github.com/ggerganov/whisper.cpp/tree/master/models处下载',
'type': 'text',
'content':
[{
'id': 'whisper_model',
'placeholder': 'whisper.cpp模型路径'
}]
}
],
[
{
'title': '高级参数',
'tooltip': 'whisper.cpp的高级参数,请勿随意修改',
'required': "",
'type': 'text',
'content': [
{
'id': 'additional_args',
'placeholder': '-t 4 -p 1'
}
]
}
]
]
},
{
'type': 'details',
'id': 'faster_whisper_config',
'summary': 'faster-whisper 配置',
'tooltip': '使用 faster-whisper 引擎时的配置,安装参考 https://github.com/guillaumekln/faster-whisper',
'hidden': True,
'content': [
[
{
'title': '模型',
'required': "",
'tooltip': '选择模型后第一次运行会从Hugging Face Hub下载模型,可能需要一段时间',
'type': 'select',
'content': [
{
'id': 'faster_whisper_model',
'options': {
# tiny, tiny.en, base, base.en,
# small, small.en, medium, medium.en,
# large-v1, or large-v2
'tiny': 'tiny',
'tiny.en': 'tiny.en',
'base': 'base',
'base.en': 'base.en',
'small': 'small',
'small.en': 'small.en',
'medium': 'medium',
'medium.en': 'medium.en',
'large-v1': 'large-v1',
'large-v2': 'large-v2',
},
'default': 'base'
}
]
}
],
[
{
'title': '模型保存路径',
'required': "",
'tooltip': '配置模型保存路径,如/config/plugin/autosub/faster-whisper/models',
'type': 'text',
'content': [
{
'id': 'faster_whisper_model_path',
'placeholder': 'faster-whisper配置模型保存路径'
}
]
}
]
]
},
{
'type': 'div',
'content': [
[
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次',
'type': 'switch',
'id': 'run_now',
},
{
'title': '翻译为中文',
'required': "",
'tooltip': '打开后将自动翻译非中文字幕,生成双语字幕,关闭后只生成英文字幕,需要配置OpenAI API Key',
'type': 'switch',
'id': 'translate_zh',
},
{
'title': '仅已有字幕翻译',
'required': "",
'tooltip': '打开后仅翻译已有字幕,不做语音识别,关闭后将自动识别语音并生成字幕',
'type': 'switch',
'id': 'translate_only',
}
],
[
{
'title': '运行时通知',
'required': "",
'tooltip': '打开后将在单个字幕生成开始和完成后发送通知, 需要开启插件消息推送通知',
'type': 'switch',
'id': 'send_notify',
}
]
]
}
]
@staticmethod
def get_script():
"""
返回插件额外的JS代码
"""
return """
function AutoSub_asr_engine_change(obj) {
if ($(obj).val() == 'faster-whisper') {
$('#autosubwhisper_config').hide();
$('#autosubfaster_whisper_config').show();
}else{
$('#autosubwhisper_config').show();
$('#autosubfaster_whisper_config').hide();
}
}
"""
def init_config(self, config=None):
# 如果没有配置信息, 则不处理
if not config:
return
# config.get('path_list') 用 \n 分割为 list 并去除重复值和空值
path_list = list(set(config.get('path_list').split('\n')))
# file_size 转成数字
self.file_size = config.get('file_size')
self.whisper_main = config.get('whisper_main')
self.whisper_model = config.get('whisper_model')
self.translate_zh = config.get('translate_zh', False)
self.translate_only = config.get('translate_only', False)
self.additional_args = config.get('additional_args', '-t 4 -p 1')
self.send_notify = config.get('send_notify', False)
self.asr_engine = config.get('asr_engine', 'whisper.cpp')
self.faster_whisper_model = config.get('faster_whisper_model', 'base')
self.faster_whisper_model_path = config.get('faster_whisper_model_path')
run_now = config.get('run_now')
if not run_now:
return
config['run_now'] = False
self.update_config(config)
# 如果没有配置信息, 则不处理
if not path_list or not self.file_size:
self.warn(f"配置信息不完整,不进行处理")
return
# 校验文件大小是否为数字
if not self.file_size.isdigit():
self.warn(f"文件大小不是数字,不进行处理")
return
# asr 配置检查
if not self.translate_only and not self.__check_asr():
return
if self._running:
self.warn(f"上一次任务还未完成,不进行处理")
return
# 依次处理每个目录
try:
self._running = True
self.success_count = self.skip_count = self.fail_count = self.process_count = 0
for path in path_list:
self.info(f"开始处理目录:{path} ...")
# 如果目录不存在, 则不处理
if not os.path.exists(path):
self.warn(f"目录不存在,不进行处理")
continue
# 如果目录不是文件夹, 则不处理
if not os.path.isdir(path):
self.warn(f"目录不是文件夹,不进行处理")
continue
# 如果目录不是绝对路径, 则不处理
if not os.path.isabs(path):
self.warn(f"目录不是绝对路径,不进行处理")
continue
# 处理目录
self.__process_folder_subtitle(path)
except Exception as e:
self.error(f"处理异常: {e}")
finally:
self.info(f"处理完成: "
f"成功{self.success_count} / 跳过{self.skip_count} / 失败{self.fail_count} / 共{self.process_count}")
self._running = False
def __check_asr(self):
if self.asr_engine == 'whisper.cpp':
if not self.whisper_main or not self.whisper_model:
self.warn(f"配置信息不完整,不进行处理")
return
if not os.path.exists(self.whisper_main):
self.warn(f"whisper.cpp主程序不存在,不进行处理")
return False
if not os.path.exists(self.whisper_model):
self.warn(f"whisper.cpp模型文件不存在,不进行处理")
return False
# 校验扩展参数是否包含异常字符
if self.additional_args and re.search(r'[;|&]', self.additional_args):
self.warn(f"扩展参数包含异常字符,不进行处理")
return False
elif self.asr_engine == 'faster-whisper':
if not self.faster_whisper_model_path or not self.faster_whisper_model:
self.warn(f"配置信息不完整,不进行处理")
return
if not os.path.exists(self.faster_whisper_model_path):
self.warn(f"faster-whisper模型文件夹不存在,不进行处理")
return False
try:
from faster_whisper import WhisperModel, download_model
except ImportError:
self.warn(f"faster-whisper 未安装,不进行处理")
return False
return True
else:
self.warn(f"未配置asr引擎,不进行处理")
return False
return True
def __process_folder_subtitle(self, path):
"""
处理目录字幕
:param path:
:return:
"""
# 获取目录媒体文件列表
for video_file in self.__get_library_files(path):
if not video_file:
continue
# 如果文件大小小于指定大小, 则不处理
if os.path.getsize(video_file) < int(self.file_size):
continue
self.process_count += 1
start_time = time.time()
file_path, file_ext = os.path.splitext(video_file)
file_name = os.path.basename(video_file)
try:
self.info(f"开始处理文件:{video_file} ...")
# 判断目的字幕(和内嵌)是否已存在
if self.__target_subtitle_exists(video_file):
self.warn(f"字幕文件已经存在,不进行处理")
self.skip_count += 1
continue
# 生成字幕
if self.send_notify:
self.send_message(title="自动字幕生成",
text=f" 媒体: {file_name}\n 开始处理文件 ... ")
ret, lang = self.__generate_subtitle(video_file, file_path, self.translate_only)
if not ret:
message = f" 媒体: {file_name}\n "
if self.translate_only:
message += "内嵌&外挂字幕不存在,不进行翻译"
self.skip_count += 1
else:
message += "生成字幕失败,跳过后续处理"
self.fail_count += 1
if self.send_notify:
self.send_message(title="自动字幕生成", text=message)
continue
if self.translate_zh:
# 翻译字幕
self.info(f"开始翻译字幕为中文 ...")
if self.send_notify:
self.send_message(title="自动字幕生成",
text=f" 媒体: {file_name}\n 开始翻译字幕为中文 ... ")
self.__translate_zh_subtitle(lang, f"{file_path}.{lang}.srt", f"{file_path}.zh.srt")
self.info(f"翻译字幕完成:{file_name}.zh.srt")
end_time = time.time()
message = f" 媒体: {file_name}\n 处理完成\n 字幕原始语言: {lang}\n "
if self.translate_zh:
message += f"字幕翻译语言: zh\n "
message += f"耗时:{round(end_time - start_time, 2)}秒"
self.info(f"自动字幕生成 处理完成:{message}")
if self.send_notify:
self.send_message(title="自动字幕生成", text=message)
self.success_count += 1
except Exception as e:
self.error(f"自动字幕生成 处理异常:{e}")
end_time = time.time()
message = f" 媒体: {file_name}\n 处理失败\n 耗时:{round(end_time - start_time, 2)}秒"
if self.send_notify:
self.send_message(title="自动字幕生成", text=message)
# 打印调用栈
traceback.print_exc()
self.fail_count += 1
def __do_speech_recognition(self, audio_lang, audio_file):
"""
语音识别, 生成字幕
:param audio_lang:
:param audio_file:
:return:
"""
lang = audio_lang
if self.asr_engine == 'whisper.cpp':
command = [self.whisper_main] + self.additional_args.split()
command += ['-l', lang, '-m', self.whisper_model, '-osrt', '-of', audio_file, audio_file]
ret = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if ret.returncode == 0:
if lang == 'auto':
# 从output中获取语言 "whisper_full_with_state: auto-detected language: en (p = 0.973642)"
output = ret.stdout.decode('utf-8') if ret.stdout else ""
lang = re.search(r"auto-detected language: (\w+)", output)
if lang and lang.group(1):
lang = lang.group(1)
else:
lang = "en"
return True, lang
elif self.asr_engine == 'faster-whisper':
try:
from faster_whisper import WhisperModel, download_model
# 设置缓存目录, 防止缓存同目录出现 cross-device 错误
cache_dir = os.path.join(self.faster_whisper_model_path, "cache")
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
os.environ["HUGGINGFACE_HUB_CACHE"] = cache_dir
model = WhisperModel(download_model(self.faster_whisper_model),
device="cpu", compute_type="int8", cpu_threads=psutil.cpu_count(logical=False))
segments, info = model.transcribe(audio_file,
language=lang if lang != 'auto' else None,
word_timestamps=True,
temperature=0,
beam_size=5)
if lang == 'auto':
lang = info.language
subs = []
if lang in ['en', 'eng']:
# 英文先生成单词级别字幕,再合并
idx = 0
for segment in segments:
for word in segment.words:
idx += 1
subs.append(srt.Subtitle(index=idx,
start=timedelta(seconds=word.start),
end=timedelta(seconds=word.end),
content=word.word))
subs = self.__merge_srt(subs)
else:
for i, segment in enumerate(segments):
subs.append(srt.Subtitle(index=i,
start=timedelta(seconds=segment.start),
end=timedelta(seconds=segment.end),
content=segment.text))
self.__save_srt(f"{audio_file}.srt", subs)
return True, lang
except ImportError:
self.warn(f"faster-whisper 未安装,不进行处理")
return False, None
except Exception as e:
traceback.print_exc()
self.error(f"faster-whisper 处理异常:{e}")
return False, None
return False, None
def __generate_subtitle(self, video_file, subtitle_file, only_extract=False):
"""
生成字幕
:param video_file: 视频文件
:param subtitle_file: 字幕文件, 不包含后缀
:return: 生成成功返回True,字幕语言,否则返回False, None
"""
# 获取文件元数据
video_meta = FfmpegHelper().get_video_metadata(video_file)
if not video_meta:
self.error(f"获取视频文件元数据失败,跳过后续处理")
return False, None
# 获取视频文件音轨和语言信息
ret, audio_index, audio_lang = self.__get_video_prefer_audio(video_meta)
if not ret:
return False, None
if not iso639.find(audio_lang) or not iso639.to_iso639_1(audio_lang):
self.info(f"未知语言音轨")
audio_lang = 'auto'
expert_subtitle_langs = ['en', 'eng'] if audio_lang == 'auto' else [audio_lang, iso639.to_iso639_1(audio_lang)]
self.info(f"使用 {expert_subtitle_langs} 匹配已有外挂字幕文件 ...")
exist, lang = self.__external_subtitle_exists(video_file, expert_subtitle_langs)
if exist:
self.info(f"外挂字幕文件已经存在,字幕语言 {lang}")
return True, iso639.to_iso639_1(lang)
self.info(f"外挂字幕文件不存在,使用 {expert_subtitle_langs} 匹配内嵌字幕文件 ...")
# 获取视频文件字幕信息
ret, subtitle_index, \
subtitle_lang, subtitle_count = self.__get_video_prefer_subtitle(video_meta, expert_subtitle_langs)
if ret and (audio_lang == subtitle_lang or subtitle_count == 1):
if audio_lang == subtitle_lang:
# 如果音轨和字幕语言一致, 则直接提取字幕
self.info(f"内嵌音轨和字幕语言一致,直接提取字幕 ...")
elif subtitle_count == 1:
# 如果音轨和字幕语言不一致,但只有一个字幕, 则直接提取字幕
self.info(f"内嵌音轨和字幕语言不一致,但只有一个字幕,直接提取字幕 ...")
audio_lang = iso639.to_iso639_1(subtitle_lang) \
if (iso639.find(subtitle_lang) and iso639.to_iso639_1(subtitle_lang)) else 'und'
FfmpegHelper().extract_subtitle_from_video(video_file, f"{subtitle_file}.{audio_lang}.srt", subtitle_index)
self.info(f"提取字幕完成:{subtitle_file}.{audio_lang}.srt")
return True, audio_lang
if audio_lang != 'auto':
audio_lang = iso639.to_iso639_1(audio_lang)
if only_extract:
self.info(f"未开启语音识别,且无已有字幕文件,跳过后续处理")
return False, None
# 清理异常退出的临时文件
tempdir = tempfile.gettempdir()
for file in os.listdir(tempdir):
if file.startswith('autosub-'):
os.remove(os.path.join(tempdir, file))
with tempfile.NamedTemporaryFile(prefix='autosub-', suffix='.wav', delete=True) as audio_file:
# 提取音频
self.info(f"提取音频:{audio_file.name} ...")
FfmpegHelper().extract_wav_from_video(video_file, audio_file.name, audio_index)
self.info(f"提取音频完成:{audio_file.name}")
# 生成字幕
self.info(f"开始生成字幕, 语言 {audio_lang} ...")
ret, lang = self.__do_speech_recognition(audio_lang, audio_file.name)
if ret:
self.info(f"生成字幕成功,原始语言:{lang}")
# 复制字幕文件
SystemUtils.copy(f"{audio_file.name}.srt", f"{subtitle_file}.{lang}.srt")
self.info(f"复制字幕文件:{subtitle_file}.{lang}.srt")
# 删除临时文件
os.remove(f"{audio_file.name}.srt")
return ret, lang
else:
self.error(f"生成字幕失败")
return False, None
@staticmethod
def __get_library_files(in_path, exclude_path=None):
"""
获取目录媒体文件列表
"""
if not os.path.isdir(in_path):
yield in_path
return
for root, dirs, files in os.walk(in_path):
if exclude_path and any(os.path.abspath(root).startswith(os.path.abspath(path))
for path in exclude_path.split(",")):
continue
for file in files:
cur_path = os.path.join(root, file)
# 检查后缀
if os.path.splitext(file)[-1].lower() in RMT_MEDIAEXT:
yield cur_path
@staticmethod
def __load_srt(file_path):
"""
加载字幕文件
:param file_path: 字幕文件路径
:return:
"""
with open(file_path, 'r', encoding="utf8") as f:
srt_text = f.read()
return list(srt.parse(srt_text))
@staticmethod
def __save_srt(file_path, srt_data):
"""
保存字幕文件
:param file_path: 字幕文件路径
:param srt_data: 字幕数据
:return:
"""
with open(file_path, 'w', encoding="utf8") as f:
f.write(srt.compose(srt_data))
def __get_video_prefer_audio(self, video_meta, prefer_lang=None):
"""
获取视频的首选音轨,如果有多音轨, 优先指定语言音轨,否则获取默认音轨
:param video_meta
:return:
"""
if type(prefer_lang) == str and prefer_lang:
prefer_lang = [prefer_lang]
# 获取首选音轨
audio_lang = None
audio_index = None
audio_stream = filter(lambda x: x.get('codec_type') == 'audio', video_meta.get('streams', []))
for index, stream in enumerate(audio_stream):
if not audio_index:
audio_index = index
audio_lang = stream.get('tags', {}).get('language', 'und')
# 获取默认音轨
if stream.get('disposition', {}).get('default'):
audio_index = index
audio_lang = stream.get('tags', {}).get('language', 'und')
# 获取指定语言音轨
if prefer_lang and stream.get('tags', {}).get('language') in prefer_lang:
audio_index = index
audio_lang = stream.get('tags', {}).get('language', 'und')
break
# 如果没有音轨, 则不处理
if audio_index is None:
self.warn(f"没有音轨,不进行处理")
return False, None, None
self.info(f"选中音轨信息:{audio_index}, {audio_lang}")
return True, audio_index, audio_lang
def __get_video_prefer_subtitle(self, video_meta, prefer_lang=None):
"""
获取视频的首选字幕,如果有多字幕, 优先指定语言字幕, 否则获取默认字幕
:param video_meta:
:return:
"""
# from https://wiki.videolan.org/Subtitles_codecs/
"""
https://trac.ffmpeg.org/wiki/ExtractSubtitles
ffmpeg -codecs | grep subtitle
DES... ass ASS (Advanced SSA) subtitle (decoders: ssa ass ) (encoders: ssa ass )
DES... dvb_subtitle DVB subtitles (decoders: dvbsub ) (encoders: dvbsub )
DES... dvd_subtitle DVD subtitles (decoders: dvdsub ) (encoders: dvdsub )
D.S... hdmv_pgs_subtitle HDMV Presentation Graphic Stream subtitles (decoders: pgssub )
..S... hdmv_text_subtitle HDMV Text subtitle
D.S... jacosub JACOsub subtitle
D.S... microdvd MicroDVD subtitle
D.S... mpl2 MPL2 subtitle
D.S... pjs PJS (Phoenix Japanimation Society) subtitle
D.S... realtext RealText subtitle
D.S... sami SAMI subtitle
..S... srt SubRip subtitle with embedded timing
..S... ssa SSA (SubStation Alpha) subtitle
D.S... stl Spruce subtitle format
DES... subrip SubRip subtitle (decoders: srt subrip ) (encoders: srt subrip )
D.S... subviewer SubViewer subtitle
D.S... subviewer1 SubViewer v1 subtitle
D.S... vplayer VPlayer subtitle
DES... webvtt WebVTT subtitle
"""
image_based_subtitle_codecs = (
'dvd_subtitle',
'dvb_subtitle',
'hdmv_pgs_subtitle',
)
if type(prefer_lang) == str and prefer_lang:
prefer_lang = [prefer_lang]
# 获取首选字幕
subtitle_lang = None
subtitle_index = None
subtitle_count = 0
subtitle_stream = filter(lambda x: x.get('codec_type') == 'subtitle', video_meta.get('streams', []))
for index, stream in enumerate(subtitle_stream):
# 如果是强制字幕,则跳过
if stream.get('disposition', {}).get('forced'):
continue
# image-based 字幕,跳过
if (
'width' in stream
or stream.get('codec_name') in image_based_subtitle_codecs
):
continue
if not subtitle_index:
subtitle_index = index
subtitle_lang = stream.get('tags', {}).get('language')
# 获取默认字幕
if stream.get('disposition', {}).get('default'):
subtitle_index = index
subtitle_lang = stream.get('tags', {}).get('language')
# 获取指定语言字幕
if prefer_lang and stream.get('tags', {}).get('language') in prefer_lang:
subtitle_index = index
subtitle_lang = stream.get('tags', {}).get('language')
subtitle_count += 1
# 如果没有字幕, 则不处理
if subtitle_index is None:
self.debug(f"没有内嵌字幕")
return False, None, None, None
self.debug(f"命中内嵌字幕信息:{subtitle_index}, {subtitle_lang}")
return True, subtitle_index, subtitle_lang, subtitle_count
def __is_noisy_subtitle(self, content):
"""
判断是否为背景音等字幕
:param content:
:return:
"""
for token in self._noisy_token:
if content.startswith(token[0]) and content.endswith(token[1]):
return True
return False
def __merge_srt(self, subtitle_data):
"""
合并整句字幕
:param subtitle_data:
:return:
"""
subtitle_data = copy.deepcopy(subtitle_data)
# 合并字幕
merged_subtitle = []
sentence_end = True
for index, item in enumerate(subtitle_data):
# 当前字幕先将多行合并为一行,再去除首尾空格
content = item.content.replace('\n', ' ').strip()
# 去除html标签
parse = etree.HTML(content)
if parse is not None:
content = parse.xpath('string(.)')
if content == '':
continue
item.content = content
# 背景音等字幕,跳过
if self.__is_noisy_subtitle(content):
merged_subtitle.append(item)
sentence_end = True
continue
if not merged_subtitle or sentence_end:
merged_subtitle.append(item)
elif not sentence_end:
merged_subtitle[-1].content = f"{merged_subtitle[-1].content} {content}"
merged_subtitle[-1].end = item.end
# 如果当前字幕内容以标志符结尾,则设置语句已经终结
if content.endswith(tuple(self._end_token)):
sentence_end = True
# 如果上句字幕超过一定长度,则设置语句已经终结
elif len(merged_subtitle[-1].content) > 350:
sentence_end = True
else:
sentence_end = False
return merged_subtitle
def __do_translate_with_retry(self, text, retry=3):
# 调用OpenAI翻译
# 免费OpenAI Api Limit: 20 / minute
ret, result = OpenAiHelper().translate_to_zh(text)
for i in range(retry):
if ret and result:
break
if "Rate limit reached" in result:
self.info(f"OpenAI Api Rate limit reached, sleep 60s ...")
time.sleep(60)
else:
self.warn(f"翻译失败,重试第{i + 1}次")
ret, result = OpenAiHelper().translate_to_zh(text)
if not ret or not result:
return None
return result
def __translate_zh_subtitle(self, source_lang, source_subtitle, dest_subtitle):
"""
调用OpenAI 翻译字幕
:param source_subtitle:
:param dest_subtitle:
:return:
"""
# 读取字幕文件
srt_data = self.__load_srt(source_subtitle)
# 合并字幕语句,目前带标点带英文效果较好,非英文或者无标点的需要NLP处理
if source_lang in ['en', 'eng']:
self.info(f"开始合并字幕语句 ...")
merged_data = self.__merge_srt(srt_data)
self.info(f"合并字幕语句完成,合并前字幕数量:{len(srt_data)}, 合并后字幕数量:{len(merged_data)}")
srt_data = merged_data
batch = []
max_batch_tokens = 1000
for srt_item in srt_data:
# 跳过空行和无意义的字幕
if not srt_item.content:
continue
if self.__is_noisy_subtitle(srt_item.content):
continue
# 批量翻译,减少调用次数
batch.append(srt_item)
# 当前批次字符数
batch_tokens = sum([len(x.content) for x in batch])
# 如果当前批次字符数小于最大批次字符数,且不是最后一条字幕,则继续
if batch_tokens < max_batch_tokens and srt_item != srt_data[-1]:
continue
batch_content = '\n'.join([x.content for x in batch])
result = self.__do_translate_with_retry(batch_content)
# 如果翻译失败,则跳过
if not result:
batch = []
continue
translated = result.split('\n')
if len(translated) != len(batch):
self.info(
f"翻译结果数量不匹配,翻译结果数量:{len(translated)}, 需要翻译数量:{len(batch)}, 退化为单条翻译 ...")
# 如果翻译结果数量不匹配,则退化为单条翻译
for index, item in enumerate(batch):
result = self.__do_translate_with_retry(item.content)
if not result:
continue
item.content = result + '\n' + item.content
else:
self.debug(f"翻译结果数量匹配,翻译结果数量:{len(translated)}")
for index, item in enumerate(batch):
item.content = translated[index].strip() + '\n' + item.content
batch = []
# 保存字幕文件
self.__save_srt(dest_subtitle, srt_data)
@staticmethod
def __external_subtitle_exists(video_file, prefer_langs=None):
"""
外部字幕文件是否存在
:param video_file:
:return:
"""
video_dir, video_name = os.path.split(video_file)
video_name, video_ext = os.path.splitext(video_name)
if type(prefer_langs) == str and prefer_langs:
prefer_langs = [prefer_langs]
for subtitle_lang in prefer_langs:
dest_subtitle = os.path.join(video_dir, f"{video_name}.{subtitle_lang}.srt")
if os.path.exists(dest_subtitle):
return True, subtitle_lang
return False, None
def __target_subtitle_exists(self, video_file):
"""
目标字幕文件是否存在
:param video_file:
:return:
"""
if self.translate_zh:
prefer_langs = ['zh', 'chi']
else:
prefer_langs = ['en', 'eng']
exist, lang = self.__external_subtitle_exists(video_file, prefer_langs)
if exist:
return True
video_meta = FfmpegHelper().get_video_metadata(video_file)
if not video_meta:
return False
ret, subtitle_index, subtitle_lang, _ = self.__get_video_prefer_subtitle(video_meta, prefer_lang=prefer_langs)
if ret and subtitle_lang in prefer_langs:
return True
return False
def get_state(self):
return False
def stop_service(self):
"""
退出插件
"""
pass
| [
"{'id': 'path_list', 'placeholder': '文件路径', 'rows': 5}",
"[{'id': 'faster_whisper_model_path', 'placeholder': 'faster-whisper配置模型保存路径'}]",
"[{'id': 'asr_engine', 'options': {'whisper.cpp': 'whisper.cpp', 'faster-whisper': 'faster-whisper'}, 'default': 'whisper.cpp', 'onchange': 'AutoSub_asr_engine_change(this)'}]",
"[{'id': 'faster_whisper_model', 'options': {'tiny': 'tiny', 'tiny.en': 'tiny.en', 'base': 'base', 'base.en': 'base.en', 'small': 'small', 'small.en': 'small.en', 'medium': 'medium', 'medium.en': 'medium.en', 'large-v1': 'large-v1', 'large-v2': 'large-v2'}, 'default': 'base'}]",
"[[{'title': '媒体路径', 'required': '', 'tooltip': '要进行字幕生成的路径,每行一个路径,请确保路径正确', 'type': 'textarea', 'content': {'id': 'path_list', 'placeholder': '文件路径', 'rows': 5}}], [{'title': '文件大小(MB)', 'required': 'required', 'tooltip': '单位 MB, 大于该大小的文件才会进行字幕生成', 'type': 'text', 'content': [{'id': 'file_size', 'placeholder': '文件大小, 单位MB'}]}, {'title': 'ASR引擎', 'required': 'required', 'tooltip': '自动语音识别引擎选择', 'type': 'select', 'content': [{'id': 'asr_engine', 'options': {'whisper.cpp': 'whisper.cpp', 'faster-whisper': 'faster-whisper'}, 'default': 'whisper.cpp', 'onchange': 'AutoSub_asr_engine_change(this)'}]}]]",
"[{'id': 'additional_args', 'placeholder': '-t 4 -p 1'}]",
"[[{'title': '立即运行一次', 'required': '', 'tooltip': '打开后立即运行一次', 'type': 'switch', 'id': 'run_now'}, {'title': '翻译为中文', 'required': '', 'tooltip': '打开后将自动翻译非中文字幕,生成双语字幕,关闭后只生成英文字幕,需要配置OpenAI API Key', 'type': 'switch', 'id': 'translate_zh'}, {'title': '仅已有字幕翻译', 'required': '', 'tooltip': '打开后仅翻译已有字幕,不做语音识别,关闭后将自动识别语音并生成字幕', 'type': 'switch', 'id': 'translate_only'}], [{'title': '运行时通知', 'required': '', 'tooltip': '打开后将在单个字幕生成开始和完成后发送通知, 需要开启插件消息推送通知', 'type': 'switch', 'id': 'send_notify'}]]",
"[{'id': 'whisper_model', 'placeholder': 'whisper.cpp模型路径'}]",
"[{'id': 'whisper_main', 'placeholder': 'whisper.cpp主程序路径'}]",
"[[{'title': 'whisper.cpp路径', 'required': '', 'tooltip': '填写whisper.cpp主程序路径,如/config/plugin/autosub/main \\n推荐教程 https://ddsrem.com/autosub', 'type': 'text', 'content': [{'id': 'whisper_main', 'placeholder': 'whisper.cpp主程序路径'}]}], [{'title': 'whisper.cpp模型路径', 'required': '', 'tooltip': '填写whisper.cpp模型路径,如/config/plugin/autosub/models/ggml-base.en.bin\\n可从https://github.com/ggerganov/whisper.cpp/tree/master/models处下载', 'type': 'text', 'content': [{'id': 'whisper_model', 'placeholder': 'whisper.cpp模型路径'}]}], [{'title': '高级参数', 'tooltip': 'whisper.cpp的高级参数,请勿随意修改', 'required': '', 'type': 'text', 'content': [{'id': 'additional_args', 'placeholder': '-t 4 -p 1'}]}]]",
"[[{'title': '模型', 'required': '', 'tooltip': '选择模型后第一次运行会从Hugging Face Hub下载模型,可能需要一段时间', 'type': 'select', 'content': [{'id': 'faster_whisper_model', 'options': {'tiny': 'tiny', 'tiny.en': 'tiny.en', 'base': 'base', 'base.en': 'base.en', 'small': 'small', 'small.en': 'small.en', 'medium': 'medium', 'medium.en': 'medium.en', 'large-v1': 'large-v1', 'large-v2': 'large-v2'}, 'default': 'base'}]}], [{'title': '模型保存路径', 'required': '', 'tooltip': '配置模型保存路径,如/config/plugin/autosub/faster-whisper/models', 'type': 'text', 'content': [{'id': 'faster_whisper_model_path', 'placeholder': 'faster-whisper配置模型保存路径'}]}]]",
"[{'id': 'file_size', 'placeholder': '文件大小, 单位MB'}]"
] |
2024-01-10 | raffg/harry_potter_nlp | src~helper_functions.py | from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import pandas as pd
from datetime import timedelta
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# NLTK stopwords
from nltk.corpus import stopwords
# spacy for lemmatization
import spacy
# sumy
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from sumy.summarizers.edmundson import EdmundsonSummarizer
# Download File: http://mallet.cs.umass.edu/dist/mallet-2.0.8.zip
mallet_path = '~/mallet-2.0.8/bin/mallet' # update this path
def print_time(t1, t2):
    '''
    Print the elapsed time t2 - t1 (seconds), formatted with datetime.timedelta and
    trimmed to a compact string depending on the duration.
    '''
tm = t2 - t1
if tm > 3600:
print('Complete. Elapsed time: {}'.
format(timedelta(seconds=tm).__str__()[:-4]))
elif tm > 60:
print('Complete. Elapsed time: {}'.
format(timedelta(seconds=tm).__str__()[2:-4]))
elif tm > 10:
print('Complete. Elapsed time: {}'.
format(timedelta(seconds=tm).__str__()[5:-4]))
else:
print('Complete. Elapsed time: {}'.
format(timedelta(seconds=tm).__str__()[6:-4]))
def sent_to_words(sentences):
    '''
    Generator: tokenize each document into lowercase words using gensim's
    simple_preprocess (deacc=True strips punctuation).
    '''
for sentence in sentences:
# deacc=True removes punctuations
yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))
def text_to_words(texts):
    '''
    Return the tokenized documents as a list of token lists.
    '''
return list(sent_to_words(texts))
# Define functions for stopwords, bigrams, trigrams and lemmatization
def remove_stopwords(texts):
    '''
    Remove English (NLTK) stopwords from each tokenized document.
    '''
stop_words = stopwords.words('english')
return ([[word for word
in simple_preprocess(str(doc))
if word not in stop_words] for doc in texts])
def make_bigrams(texts):
    '''
    Train a gensim Phrases/Phraser model on the documents and merge frequently
    co-occurring word pairs into single bigram tokens.
    '''
data_words = text_to_words(texts)
# higher threshold fewer phrases.
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
return [bigram_mod[doc] for doc in texts]
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
'''
https://spacy.io/api/annotation
'''
texts_out = []
nlp = spacy.load('en', disable=['parser', 'ner'])
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc
if token.pos_ in allowed_postags])
return texts_out
def create_id2word(data):
    '''
    Build a gensim Dictionary (id-to-word mapping) from the tokenized documents.
    '''
return corpora.Dictionary(data)
def create_corpus(id2word, data):
    '''
    Convert the tokenized documents to bag-of-words vectors using the dictionary.
    '''
return [id2word.doc2bow(text) for text in data]
def compute_coherence_values(texts, start=2, stop=30, step=3):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with
respective number of topics
"""
coherence_values = []
model_list = []
id2word = create_id2word(texts)
corpus = create_corpus(id2word, texts)
for num_topics in range(start, stop, step):
print('Calculating {}-topic model'.format(num_topics))
model = gensim.models.wrappers.LdaMallet(mallet_path,
corpus=corpus,
num_topics=num_topics,
id2word=id2word)
model_list.append((num_topics, model))
coherencemodel = CoherenceModel(model=model,
texts=texts,
dictionary=id2word,
coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
return model_list, coherence_values, id2word, corpus
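# Illustrative sketch (an assumption, not part of the original helpers): pick the
# topic count with the highest c_v coherence returned by compute_coherence_values.
# `processed_texts` is a hypothetical list of lemmatized token lists.
def _select_best_model(processed_texts):
    model_list, coherence_values, id2word, corpus = compute_coherence_values(
        processed_texts, start=2, stop=30, step=3)
    best = coherence_values.index(max(coherence_values))
    num_topics, best_model = model_list[best]
    return num_topics, best_model, id2word, corpus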
def format_topics_sentences(ldamodel, corpus, texts):
    '''
    Return a dataframe with the dominant topic, its percentage contribution and the
    topic keywords for each document, with the original text appended as a column.
    '''
# Init output
sent_topics_df = pd.DataFrame()
# Get main topic in each document
for i, row in enumerate(ldamodel[corpus]):
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
sent_topics_df.columns = ['dominant_topic', 'percent_contribution', 'topic_keywords']
# Add original text to the end of the output
contents = pd.Series(texts)
sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
return(sent_topics_df)
def summarize(text, sentence_count, bonus_words, language='english'):
    '''
    Extractive summary of `text` with `sentence_count` sentences using the Edmundson
    summarizer, boosting sentences that contain `bonus_words`.
    '''
summarizer = EdmundsonSummarizer(Stemmer(language))
summarizer.stop_words = get_stop_words(language)
summarizer.bonus_words = bonus_words
summarizer.stigma_words = ['zdfgthdvndadv']
summarizer.null_words = stopwords.words('english')
summary = summarizer(PlaintextParser(text, Tokenizer(language)).document, sentence_count)
return summary
| [] |
2024-01-10 | dnvgl/qats | qats~signal.py | #!/usr/bin/env python
# encoding: utf8
"""
Module with functions for signal processing.
"""
import warnings
import numpy as np
from scipy.fftpack import fft, ifft, irfft, rfft
from scipy.signal import butter
from scipy.signal import coherence as spcoherence
from scipy.signal import csd as spcsd
from scipy.signal import filtfilt, sosfiltfilt, welch
def extend_signal_ends(x: np.ndarray, n: int) -> np.ndarray:
"""Extend the signal ends with `n` values to mitigate the edge effect.
Parameters
----------
x : array_like
Signal
n : int
Number of values prepended and appended to signal.
Notes
-----
At each end of the signal `n` values of the signal are replicated, flipped and joined with the signal to maintain
continuity in the signal level and slope at the joining points. This should mitigate end effects when filterin
the signal.
The original signal is retrieved as `x[n:-n:1]`.
"""
start = 2. * x[0] - 1. * x[n:0:-1]
end = 2. * x[-1] - 1. * x[-2:-(n + 2):-1]
return np.concatenate((start, x, end))
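# Usage sketch (an assumption, not part of the original module): pad a signal before
# filtering to reduce edge effects, then slice the padding away, as the Notes describe.
def _example_extend_and_filter(x, dt, fc, n=100):
    xe = extend_signal_ends(x, n)   # n mirrored values prepended and appended
    ye = lowpass(xe, dt, fc)        # filter the padded signal
    return ye[n:-n:1]               # recover the samples matching the original x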
def smooth(x: np.ndarray, window_len: int = 11, window: str = 'rectangular', mode: str = 'same') -> np.ndarray:
"""
Smooth time serie based on convolution of a window function and the time serie.
Parameters
----------
x : array
The input signal.
window_len : int, optional
The dimension of the smoothing window.
window : {'rectangular', 'hanning', 'hamming', 'bartlett', 'blackman'}, optional
The type of window. Rectangular window will produce a moving average smoothing.
mode : {‘same’, ‘valid’, ‘full’}, optional
full:
This returns the convolution at each point of overlap, with an output
shape of (N+M-1,). At the end-points of the convolution, the signals
do not overlap completely, and boundary effects may be seen.
same:
By default mode is 'same' which returns output of length max(M, N).
Boundary effects are still visible.
valid:
Mode valid returns output of length max(M, N) - min(M, N) + 1. The
convolution product is only given for points where the signals overlap
completely. Values outside the signal boundary have no effect
Returns
-------
array
The smoothed signal.
Notes
-----
This method is based on the convolution of a scaled window with the signal. The signal is prepared by introducing
reflected copies of the signal (with the window size) in both ends so that transient parts are minimized
in the beginning and end of the output signal.
Examples
--------
    >>> from numpy import linspace, sin
    >>> from numpy.random import randn
    >>> t = linspace(-2, 2, 200)
    >>> x = sin(t) + randn(len(t)) * 0.1
>>> y = smooth(x)
References
----------
1. Wikipedia, http://en.wikipedia.org/wiki/Convolution
See Also
--------
numpy.convolve
"""
x = np.asarray(x)
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if window not in ('rectangular', 'hanning', 'hamming', 'bartlett', 'blackman'):
raise ValueError("Window is not one of '{0}', '{1}', '{2}', '{3}', '{4}'".format(
*('rectangular', 'hanning', 'hamming', 'bartlett', 'blackman')))
if window == 'rectangular': # moving average
w = np.ones(window_len, 'd')
else:
        w = getattr(np, window)(window_len)
if mode == 'valid':
s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
y = np.convolve(w / w.sum(), s, mode=mode)
else:
s = np.r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
y = np.convolve(w / w.sum(), s, mode=mode)
y = y[window_len - 1:-window_len + 1]
return y
def taper(x: np.ndarray, window: str = 'tukey', alpha: float = 0.001) -> (np.ndarray, float):
"""
Taper the input time serie using a window function
Parameters
----------
x : array
Time series (without time vector), dimension `n*1`.
window : {'tukey','cosine','hanning', 'flat', ....}
Window function type. See numpy documentation for more windows
alpha : float, optional
Fraction of time domain signal to be tapered. Applies only
to tukey and kaiser windows.
Returns
-------
array
Tapered time domain signal
float
correction factor to prevent FFT components from diminishing after the windowing.
Notes
-----
All FFT based measurements assume that the signal is periodic in the time frame. When the measured signal is
not periodic then leakage occurs. Leakage results in misleading information about the spectral amplitude and
frequency. A window is shaped so that it is exactly zero at the beginning and end of the data block and has
some special shape in between. This function is then multiplied with the time data block forcing the signal to be
periodic and ultimately reduces the effects of leakage. There are many windows to choose from, each with advantages
for specific applications. You must understand the effects of leakage and know the tradeoffs and advantages of the
various windowing functions to accurately interpret frequency domain measurements.
The cosine window is also known as the sine window.
The Tukey window is also known as the tapered cosine window.
See Also
--------
numpy.bartlett, numpy.blackman, numpy.hamming, numpy.hanning, numpy.kaiser
References
----------
1. Wikipedia, http://en.wikipedia.org/wiki/Window_function
2. Melbourne G. Briscoe (1972), Energy loss in surface wave spectra due to data windowing, North Atlantic Treaty Organization (NATO), Saclant ASW Research Centre,
"""
window_len = np.size(x)
window = window.lower()
# choice of window function
if window == 'rectangular':
w = np.ones(window_len)
elif window == 'tukey':
# alpha = 0 - rectangular window, alpha - Hann window
w = np.zeros(window_len)
for i in range(window_len):
if (i >= 0) & (i < alpha * window_len / 2):
w[i] = 0.5 * (1 + np.cos(np.pi * (2 * i / (alpha * window_len) - 1)))
if (i >= alpha * window_len / 2) & (i <= window_len * (1 - alpha / 2)):
w[i] = 1
if (i > window_len * (1 - alpha / 2)) & (i <= window_len):
w[i] = 0.5 * (1 + np.cos(np.pi * (2 * i / (alpha * window_len) - 2 / alpha + 1)))
elif window == 'cosine':
# also known as sine window
n = np.arange(window_len)
w = np.sin(np.pi * n / (window_len - 1))
    elif window == 'kaiser':
        w = np.kaiser(window_len, alpha)
    else:
        w = getattr(np, window)(window_len)
# calculate tapered time series
y = x * w
# calculate weighting factor that should be applied so that the correct FFT signal amplitude level is recovered
# after the windowing.
wcorr = np.sum(w ** 2) / window_len
return y, wcorr
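# Usage sketch (an assumption, not part of the original module): taper a record with a
# Tukey window before an FFT and compensate the spectral power with the returned factor.
def _example_taper_fft(x):
    y, wcorr = taper(x, window='tukey', alpha=0.1)
    return np.abs(fft(y)) ** 2 / wcorr   # window-loss corrected power spectrum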
def lowpass(x: np.ndarray, dt: float, fc: float, order: int = 5) -> np.ndarray:
"""
Low pass filter data signal x at cut off frequency fc, blocking harmonic content above fc.
Parameters
----------
x : array_like
Signal
dt : float
Signal sampling rate (s)
fc : float
Cut off frequency (Hz)
order : int, optional
Butterworth filter order. Default 5.
Returns
-------
array
Filtered signal
See Also
--------
scipy.signal.butter, scipy.signal.filtfilt
"""
nyq = 0.5 * 1. / dt # nyquist frequency
normal_cutoff = fc / nyq # normalized cut off frequency
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, x)
return y
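# Usage sketch (an assumption, not part of the original module): keep a 0.05 Hz
# component and suppress a 2 Hz component from a signal sampled at 10 Hz.
def _example_lowpass():
    dt = 0.1
    t = np.arange(0., 1000., dt)
    x = np.sin(2. * np.pi * 0.05 * t) + 0.2 * np.sin(2. * np.pi * 2. * t)
    return lowpass(x, dt, fc=0.5)   # only the 0.05 Hz component passes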
def highpass(x: np.ndarray, dt: float, fc: float, order: int = 5) -> np.ndarray:
"""
High pass filter data signal x at cut off frequency fc, blocking harmonic content below fc.
Parameters
----------
x : array_like
Signal
dt : float
Signal sampling rate (s)
fc : float
Cut off frequency (Hz)
order : int, optional
Butterworth filter order. Default 5.
Returns
-------
array
Filtered signal
See Also
--------
scipy.signal.butter, scipy.signal.filtfilt
"""
nyq = 0.5 * 1. / dt # nyquist frequency
normal_cutoff = fc / nyq # normalized cut off frequency
b, a = butter(order, normal_cutoff, btype='highpass', analog=False)
y = filtfilt(b, a, x)
return y
def bandpass(x: np.ndarray, dt: float, flow: float, fupp: float, order: int = 5) -> np.ndarray:
"""
Band pass filter data signal x at cut off frequencies flow and fupp, blocking harmonic content outside the
frequency band [flow, fupp]
Parameters
----------
x : array_like
Signal
dt : float
Signal sampling rate (s)
flow, fupp : float
Passing frequency band (Hz)
order : int, optional
Butterworth filter order. Default 5.
Returns
-------
array
Filtered signal
See Also
--------
scipy.signal.butter, scipy.signal.sosfiltfilt
"""
nyq = 0.5 * 1. / dt # nyquist frequency
normal_cutoff = (flow / nyq, fupp / nyq) # normalized cut off frequencies
sos = butter(order, normal_cutoff, btype='bandpass', analog=False, output='sos')
y = sosfiltfilt(sos, x)
return y
def bandblock(x: np.ndarray, dt: float, flow: float, fupp: float, order: int = 5) -> np.ndarray:
"""
Band block filter data signal x at cut off frequencies flow and fupp, blocking harmonic content inside the
frequency band [flow, fupp]
Parameters
----------
x : array_like
Signal
dt : float
Signal sampling rate (s)
flow, fupp : float
Blocked frequency band (Hz)
order : int, optional
Butterworth filter order. Default 5.
Returns
-------
array
Filtered signal
Notes
-----
SciPy bandpass/bandstop filters designed with b, a are unstable and may result in erroneous filters at higher
filter orders. Here we use sos (second-order sections) output of filter design instead.
See Also
--------
scipy.signal.butter, scipy.signal.sosfiltfilt
"""
nyq = 0.5 * 1. / dt # nyquist frequency
normal_cutoff = (flow / nyq, fupp / nyq) # normalized cut off frequencies
sos = butter(order, normal_cutoff, btype='bandstop', analog=False, output='sos')
y = sosfiltfilt(sos, x)
return y
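# Usage sketch (an assumption, not part of the original module): split a response into
# the content inside a frequency band and the remainder using complementary filters.
def _example_band_split(x, dt, flow=0.05, fupp=0.2):
    inside = bandpass(x, dt, flow, fupp)     # harmonics within [flow, fupp]
    outside = bandblock(x, dt, flow, fupp)   # harmonics outside [flow, fupp]
    return inside, outside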
def threshold(x: np.ndarray, thresholds: tuple) -> np.ndarray:
"""
Allow only frequency components whose amplitudes are between the lower threshold value and the upper threshold
value to pass.
Parameters
----------
x : array_like
input data signal
thresholds : tuple
passing amplitude range, thresholds as fraction of maximum frequency component amplitude
Returns
-------
array
filtered data signal
Notes
-----
FFT filter.
See Also
--------
scipy.fftpack
"""
real_signal = np.all(np.isreal(x))
n = x.size
nfft = int(pow(2, np.ceil(np.log(n) / np.log(2))))
lth, uth = thresholds # unpack lower and upper thresholds of passing range
if real_signal:
fa = rfft(x, nfft)
h = np.zeros(np.shape(fa))
h[(lth*max(abs(fa)) < abs(fa)) & (abs(fa) <= uth*max(abs(fa)))] = 1.0
x1 = irfft(fa * h, nfft)
else:
fa = fft(x, nfft)
h = np.zeros(np.shape(fa))
h[(lth*max(abs(fa)) < abs(fa)) & (abs(fa) <= uth*max(abs(fa)))] = 1.0
x1 = ifft(fa * h, nfft)
return x1[:n]
def autocorrelation(series: np.ndarray) -> list:
"""
Estimation of the auto-correlation coefficients of *series*
Parameters
----------
series : array_like
data series
Returns
-------
list
arrays of autocorrelation coefficients for the entire *series* for lags in the range [dt, dt, duration]
Notes
-----
I took a part of code from pandas autocorrelation_plot() function. I checked the answers and the values are
matching exactly.
The auto-correlation coefficients can be plotted against the time vector associated with series.
References
----------
1. Wikipedia, http://en.wikipedia.org/wiki/Autocorrelation
"""
n = len(series)
data = np.asarray(series)
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
"""
Calculation of autocorrelation coefficients for lag *h*
Parameters
----------
h : float
lag
Returns
-------
array
autocorrelation coefficients for lag *h*
"""
acf_lag = ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
return round(acf_lag, 3)
    x = np.arange(n) + 1  # avoid the trivial lag-0 coefficient (always 1.0)
return list(map(r, x))
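# Usage sketch (an assumption, not part of the original module): estimate the lag in
# seconds at which the autocorrelation first drops below zero.
def _example_decorrelation_time(t, x):
    acf = np.asarray(autocorrelation(x))
    dt = t[1] - t[0]                 # assumes a constant time step
    below = np.where(acf < 0.)[0]
    return below[0] * dt if below.size else None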
def average_frequency(t: np.ndarray, x: np.ndarray, up: bool = True) -> float:
"""
Average frequency of mean level crossings.
Parameters
----------
t : array_like
Time (seconds).
x : array_like
Signal.
up : bool, optional
- True: Period based on average time between up-crossings
- False: Period based on average time between down-crossings
Returns
-------
float
Average frequency of mean level crossings (Hz)
"""
# remove mean value from time series
x_ = x - np.mean(x)
if up:
crossings = 1 * (x_ > 0.)
indicator = 1
    else:
        crossings = 1 * (x_ > 0.)
        indicator = -1
crossings = np.diff(crossings) # array with value=1 at position of each up-crossing and -1 at each down-crossing
crossings[crossings != indicator] = 0 # remove crossings with opposite direction
i = np.where(crossings == indicator)[0] + 1 # indices for crossings
d = (t[i[-1]] - t[i[0]]) / (np.abs(np.sum(crossings)) - 1) # duration between first and last crossing
return 1./d
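# Usage sketch (an assumption, not part of the original module): the mean up-crossing
# period of a 0.1 Hz sine should be close to 10 seconds.
def _example_mean_crossing_period():
    t = np.arange(0., 600., 0.05)
    x = np.sin(2. * np.pi * 0.1 * t)
    return 1. / average_frequency(t, x, up=True)   # approximately 10.0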
def find_maxima(x, local: bool = False, threshold: float = None, up: bool = True) -> (np.ndarray, np.ndarray):
"""
Return sorted maxima
Parameters
----------
x : array
Signal.
local : bool, optional
If True, local maxima are also included (see notes below). Default is to include only global maxima.
threshold : float, optional
Include only maxima larger than specified treshold. Default is not to remove any of the maxima identified.
up : bool, optional
This parameter is deprecated and has no effect on the peak/maxima identification.
Returns
-------
array
Signal maxima, sorted from smallest to largest.
array
Indices of signal maxima.
Notes
-----
By default only 'global' maxima are considered, i.e. the largest maximum between each mean-level up-crossing.
If ``local=True``, local maxima are also included (first derivative is zero, second derivative is negative).
*Changes from version 4.12.*
When extracting global maxima (``local=False``, the default), positive half-cycles at the beginning or end of the
time series are included. For example, if there is a mean-level up-crossing before the first down-crossing, the
maximum value between these crossings is included. Similarly, if there is a down-crossing after the last
up-crossing, the maximum value between these crossings is included. This also implies that there is no difference
in considering mean level up-crossings (`up=True`) vs. down-crossings (`up=False`). The `up` parameter is
therefore deprecated.
Examples
--------
Extract global maxima from time series signal `x`:
>>> maxima, _ = find_maxima(x)
Extract global maxima and corresponding indices:
>>> maxima, indices = find_maxima(x)
Assuming `time` is the time vector (numpy array) for signal `x`, the following example will provide an array of
time instants associated with the maxima sample:
>>> time_maxima = time[indices]
Note that the returned maxima (and corresponding indices) are sorted in ascending order. To reverse this and obtain
an array with the maxima arranged in order of appearance, the following code may be used:
>>> indsort = np.argsort(indices)
>>> maxima = maxima[indsort]
>>> indices = indices[indsort]
>>> time_maxima = time[indices]
"""
# parameter `up` is deprecated and has no effect
if up is not True:
warnings.warn("qats.signal.find_maxima: parameter `up` is deprecated and has no effect", category=DeprecationWarning)
# remove mean value from time series to identify crossings
x_ = x - np.mean(x)
# find maxima
if not local:
# global maxima (largest between mean-level crossings)
# identify crossings
crossings = 1 * (x_ > 0.)
crossings = np.diff(crossings) # array with 1 at position of each up-crossing and -1 at each down-crossing
# get array indices for up-/down-crossings
crossing_indices_up = np.where(crossings == 1)[0] + 1 # up-crossings
crossing_indices_do = np.where(crossings == -1)[0] + 1 # down-crossings
# use up-crossings as the basis
crossing_indices = crossing_indices_up
# if there is a down-crossing after the last up-crossing, add that crossing as well
# (this is to avoid losing the last peak, and is particularly important for problems with few crossings,
# e.g., due to low-frequent oscillations)
if crossing_indices_up[-1] < crossing_indices_do[-1]:
crossing_indices = np.append(crossing_indices, crossing_indices_do[-1])
# number of crossings and number of peaks
n_crossings = crossing_indices.size
n_peaks = n_crossings - 1
# no global maxima if the signal crosses mean only once
if n_crossings < 2:
return np.array([]), np.array([], dtype=int)
# initiate arrays to be populated subsequently
maxima = np.zeros(n_peaks)
maxima_indices = np.zeros(n_peaks, dtype=int)
# loop to find max. between each up-crossing:
for j, start in enumerate(crossing_indices[:-1]):
stop = crossing_indices[j + 1]
maxima[j] = x[start:stop].max()
maxima_indices[j] = start + np.argmax(x[start:stop])
else:
# local maxima (all peaks)
ds = 1 * (np.diff(x) < 0) # zero while ascending (positive derivative) and 1 while descending
ds = np.append(ds, [0]) # lost data points when differentiating, close cycles by adding 0 at end
d2s = np.diff(ds) # equal to +/-1 at each turning point, +1 indicates maxima
d2s = np.insert(d2s, 0, [0]) # lost data points when differentiating, close cycles by adding 0 at start
maxima_indices = np.where(d2s == 1)[0] # unpack tuple returned from np.where
maxima = x[maxima_indices]
n_peaks = maxima.size
# return quickly if no peaks/local maxima were found (e.g., if time series is monotonically increasing)
if n_peaks == 0:
return np.array([]), np.array([], dtype=int)
# discard maxima lower than specified threshold
if threshold is not None:
above_threshold = (maxima >= threshold)
maxima = maxima[above_threshold]
maxima_indices = maxima_indices[above_threshold]
else:
pass
# sort ascending
ascending = np.argsort(maxima)
maxima = maxima[ascending]
maxima_indices = maxima_indices[ascending]
return maxima, maxima_indices
def psd(x: np.ndarray, dt: float, **kwargs) -> (np.ndarray, np.ndarray):
"""
Estimate power spectral density of discrete time signal X using Welch’s method.
Parameters
----------
x : array_like
Time series data.
dt : float
Time step.
kwargs : optional
See `scipy.signal.welch` documentation for available options.
Returns
-------
array
Frequencies
array
Corresponding power spectral density
Notes
-----
This function basically wraps `scipy.signal.welch` to control defaults etc.
See also
--------
scipy.signal.welch, scipy.signal.periodogram
"""
x = np.asarray(x)
# estimate psd using welch's definition
f, p = welch(x, fs=1./dt, **kwargs)
return f, p
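# Illustrative usage sketch (added for clarity, not part of the original qats
# module). `psd` forwards keyword arguments to scipy.signal.welch, e.g. nperseg;
# names and values below are assumptions for illustration only.
#
#   dt = 0.05
#   t = np.arange(0., 200., dt)
#   x = np.sin(2. * np.pi * 0.5 * t) + 0.1 * np.random.randn(t.size)
#   f, p = psd(x, dt, nperseg=1024)
#   # the estimated spectrum peaks close to the 0.5 Hz sine frequency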
def csd(x: np.ndarray, y: np.ndarray, dt: float, **kwargs) -> (np.ndarray, np.ndarray):
"""
Estimate cross power spectral density of discrete-time signals X and Y using Welch’s method.
Parameters
----------
x : array_like
Time series data.
y : array_like
Time series data.
dt : float
Time step.
kwargs : optional
See `scipy.signal.csd` documentation for available options.
Returns
-------
array
Frequencies
array
Corresponding cross power spectral density
Notes
-----
This function basically wraps `scipy.signal.csd` to control defaults etc.
See also
--------
scipy.signal.welch, scipy.signal.csd
"""
x = np.asarray(x)
y = np.asarray(y)
# estimate csd using welch's definition
f, p = spcsd(x, y, fs=1./dt, **kwargs)
return f, p
def coherence(x: np.ndarray, y: np.ndarray, dt: float, **kwargs) -> (np.ndarray, np.ndarray):
"""
Estimate the magnitude squared coherence estimate of discrete-time signals X and Y using Welch’s method.
Parameters
----------
x : array_like
Time series data.
y : array_like
Time series data.
dt : float
Time step.
kwargs : optional
See `scipy.signal.coherence` documentation for available options.
Returns
-------
array
Frequencies
array
Corresponding cross power spectral density
Notes
-----
This function basically wraps `scipy.signal.coherence` to control defaults etc.
See also
--------
scipy.signal.welch, scipy.signal.coherence
"""
x = np.asarray(x)
y = np.asarray(y)
# estimate coherence using welch's definition
f, c = spcoherence(x, y, fs=1./dt, **kwargs)
return f, c
def tfe(x: np.ndarray, y: np.ndarray, dt: float, clim: float = None, **kwargs) -> (np.ndarray, np.ndarray):
"""
Estimate the transfer function between two discrete-time signals X and Y using Welch’s method.
Parameters
----------
x : array_like
Time series data.
y : array_like
Time series data.
dt : float
Time step.
clim : float, optional
Discard transfer function estimates where the magnitude squared coherence estimate is below this limit.
kwargs : optional
See `scipy.signal.welch`, `scipy.signal.csd` and `scipy.signal.coherence` documentation for available options.
Returns
-------
array
Frequencies
array
Corresponding transfer function estimate
Examples
--------
Estimate the transfer function between wave elevation and vessel heave motion recorded to a file "somefile.mat".
>>> from qats import TsDB
>>> from qats.signal import tfe
>>> import matplotlib.pyplot as plt
>>>
>>> db = TsDB.fromfile("somefile.mat")
>>> wave = db.get("wave_2")
>>> heave = db.get("heave")
>>>
>>> # discard the transient signal
>>> t, w = wave.get(twin=(1000, 1e8))
>>> _, h = heave.get(twin=(1000, 1e8))
>>> dt = t[1] - t[0]
>>> # discard part of signals with poor coherence and smooth using Welch's method with 1000 values per segment.
>>> f, tf = tfe(w, h, dt, clim=0.3, nperseg=1000)
>>> # plot transfer function against period and limit to periods larger than 2 seconds (0.5Hz)
>>> plt.plot(1. / f[(0. < f) & (f <= 0.5)], abs(tf[(0. < f) & (f <= 0.5)]))
    >>> plt.xlabel("Period (s)")
>>> plt.ylabel("Transfer function (-)")
>>> plt.grid()
>>> plt.show()
Notes
-----
For single input/single-output systems like this the transfer function is estimated as Pyx / Pxx where Pxx is the
power spectral density of x and Pyx is the complex conjugate of the cross power spectral density of x and y.
See also
--------
scipy.signal.welch, scipy.signal.coherence
"""
x = np.asarray(x)
y = np.asarray(y)
f, pxx = psd(x, dt, **kwargs)
_, pyx = csd(x, y, dt, **kwargs)
tf = pyx / pxx
if clim is not None:
_, cyx = coherence(x, y, dt, **kwargs)
return f[cyx >= clim], tf[cyx >= clim]
else:
return f, tf
| [] |
2024-01-10 | dominiwe/matrix-ai-assistant | matrix_ai_assistant~commands.py | from .log import logger_group
from logbook import Logger
import simplematrixbotlib as botlib
import nio
import markdown
import mistletoe
from . import db
import openai
import threading
import time
import asyncio
logger = Logger("bot.commands")
logger_group.add_logger(logger)
async def help(room_id: str, bot: botlib.Bot, command: str):
if command == 'info':
await _send_message(
room_id,
bot,
f"""
The `info` command returns some information about the bot.
Usage:
```plaintext
info
```
""",
f"""
The <code>info</code> command returns some information about the bot.<br>
Usage:<br>
<pre><code>info
</code></pre>
""")
elif command == 'list':
await _send_message(
room_id,
bot,
f"""
The `list` command lists the current sessions in this room.
Usage:
```plaintext
list
```
""",
f"""
The <code>list</code> command lists the current sessions in this room.<br>
Usage:<br>
<pre><code>list
</code></pre>
""")
elif command == 'delete':
await _send_message(
room_id,
bot,
f"""
The `delete` command deletes a session from this room.
Usage:
```plaintext
delete (session-hash)
```
""",
f"""
The <code>delete</code> command deletes a session from this room.<br>
Usage:<br>
<pre><code>delete (session-hash)
</code></pre>
""")
elif command == 'active':
await _send_message(
room_id,
bot,
f"""
The `active` command returns the hash of the currently active session in this room.
Usage:
```plaintext
active
```
""",
f"""
The <code>active</code> command returns the hash of the currently active session in this room.<br>
Usage:<br>
<pre><code>active
</code></pre>
""")
elif command == 'activate':
await _send_message(
room_id,
bot,
f"""
The `activate` command selects the session to activate/use.
Usage:
```plaintext
activate (session-hash)
```
""",
f"""
The <code>activate</code> command selects the session to activate/use.<br>
Usage:<br>
<pre><code>activate (session-hash)
</code></pre>
""")
elif command == 'new':
await _send_message(
room_id,
bot,
f"""
The `new` command starts a new session with the given prompt.
Usage:
```plaintext
new (prompt...)
```
""",
f"""
The <code>new</code> command starts a new session with the given prompt.<br>
Usage:<br>
<pre><code>new (prompt...)
</code></pre>
""")
else:
await _send_message(
room_id,
bot,
f"""
To see some information about this bot, use the `info` command.
This `help` command provides usage information for all the available commands.
Usage:
```plaintext
help [command]
```
Here is a list of commands:
|command|description|
|---|---|
|`help`|Returns usage information about commands.|
|`info`|Returns some information about the bot.|
|`list`|Lists the current sessions in this room.|
|`delete`|Deletes a session from this room.|
|`active`|Returns information about currently active session in this room.|
|`activate`|Activates a session for this room.|
|`new`|Creates a new session starting with the prompt following the command.|
""",
f"""
To see some information about this bot, use the <code>info</code> command.<br>
This <code>help</code> command provides usage information for all the available commands.<br>
Usage:<br>
<pre><code>help [command]
</code></pre>
Here is a list of commands:<br>
<table>
<thead>
<tr>
<th>command</th><th>description</th>
</tr>
</thead>
<tbody>
<tr><td><code>help</code></td><td>Returns usage information about commands.</td></tr>
<tr><td><code>info</code></td><td>Returns some information about the bot.</td></tr>
<tr><td><code>list</code></td><td>Lists the current sessions in this room.</td></tr>
<tr><td><code>delete</code></td><td>Deletes a session from this room.</td></tr>
<tr><td><code>active</code></td><td>Returns information about currently active session in this room.</td></tr>
<tr><td><code>activate</code></td><td>Activates a session for this room.</td></tr>
<tr><td><code>new</code></td><td>Creates a new session starting with the prompt following the command.</td></tr>
</tbody>
</table>
""")
async def info(room_id: str, bot: botlib.Bot):
await bot.api.send_markdown_message(
room_id,
f"""
This bot was created by Domi.
It brings the functionality of ChatGPT into any matrix room.
You are seeing this text because you used `info`.
To interact with the bot, you have to mention it!
To see a list of available commands, use `help`.
""",
"m.text"
)
async def list(room_id: str, bot: botlib.Bot):
result = db.get_session_list(room_id)
if result:
md_table = ''
html_table = ''
for row in result:
md_table += f"|`{row[0]}`|{row[1]}|{row[2]}|\n"
html_table += f"<tr><td><code>{row[0]}</code></td><td>{row[1]}</td><td>{row[2]}</td></tr>"
await _send_message(
room_id,
bot,
f"""
Here is a list of the current sessions in this room:
|session hash|description|timestamp|
|---|---|---|
{md_table}
""",
f"""
Here is a list of the current sessions in this room:<br>
<table>
<thead>
<tr>
<th>session hash</th><th>description</th><th>timestamp</th>
</tr>
</thead>
<tbody>
{html_table}
</tbody>
</table>
""")
else:
await _send_message(
room_id,
bot,
f"There are currently no sessions in this room...",
f"There are currently no sessions in this room...")
async def delete(room_id: str, bot: botlib.Bot, hash_part: str):
if len(hash_part) > 32:
await bot.api.send_markdown_message(room_id, "Specified hash part too long (should be under 33 chars).")
return
# Check if there are multiple session beginning with that hash part
result = db.get_sessions_by_hash(room_id, hash_part)
if len(result) > 1:
md_table = ''
html_table = ''
for row in result:
md_table += f"|`{row[0]}`|{row[1]}|{row[2]}|\n"
html_table += f"<tr><td><code>{row[0]}</code></td><td>{row[1]}</td><td>{row[2]}</td></tr>"
await _send_message(
room_id,
bot,
f"""
Could not delete specified session because there are multiple sessions starting with the same hash part.
Please specify more digits of the hash.
Here is a list of the sessions starting with the specified hash part:
|session hash|description|timestamp|
|---|---|---|
{md_table}
""",
f"""
Could not delete specified session because there are multiple sessions starting with the same hash part.<br>
Please specify more digits of the hash.<br>
Here is a list of the sessions starting with the specified hash part:<br>
<table>
<thead>
<tr>
<th>session hash</th><th>description</th><th>timestamp</th>
</tr>
</thead>
<tbody>
{html_table}
</tbody>
</table>
""")
else:
# delete the session
result = db.delete_session(room_id, hash_part)
if result[1]:
await _send_message(
room_id,
bot,
f"""
Deleted session specified by hash.
Activated session with hash `{result[1][0]}`.
Session description:
> {result[1][1]}
""",
f"""
Deleted session specified by hash.<br>
Activated session with hash <code>{result[1][0]}</code>.<br>
Session description:<br>
<blockquote>{result[1][1]}</blockquote>
""")
else:
await bot.api.send_markdown_message(
room_id,
"Deleted session specified by hash. No session activated because it was the last session of the room.")
async def active(room_id: str, bot: botlib.Bot):
result = db.get_active_session(room_id)
if result:
await _send_message(
room_id,
bot,
f"""
Active session has hash `{result[0]}`.
Session description:
> {result[1]}
""",
f"""
Active session has hash <code>{result[0]}</code>.<br>
Session description:<br>
<blockquote>{result[1]}</blockquote>
""")
else:
await bot.api.send_markdown_message(
room_id,
"No active session because room contains no sessions.")
async def activate(room_id: str, bot: botlib.Bot, hash_part: str):
if len(hash_part) > 32:
await bot.api.send_markdown_message(room_id, "Specified hash part too long (should be under 33 chars).")
return
# Check if there are multiple session beginning with that hash part
result = db.get_sessions_by_hash(room_id, hash_part)
if len(result) > 1:
md_table = ''
html_table = ''
for row in result:
md_table += f"|`{row[0]}`|{row[1]}|{row[2]}|\n"
html_table += f"<tr><td><code>{row[0]}</code></td><td>{row[1]}</td><td>{row[2]}</td></tr>"
await _send_message(
room_id,
bot,
f"""
Could not activate specified session because there are multiple sessions starting with the same hash part.
Please specify more digits of the hash.
Here is a list of the sessions starting with the specified hash part:
|session hash|description|timestamp|
|---|---|---|
{md_table}
""",
f"""
Could not activate specified session because there are multiple sessions starting with the same hash part.<br>
Please specify more digits of the hash.<br>
Here is a list of the sessions starting with the specified hash part:<br>
<table>
<thead>
<tr>
<th>session hash</th><th>description</th><th>timestamp</th>
</tr>
</thead>
<tbody>
{html_table}
</tbody>
</table>
""")
else:
# delete the session
result = db.activate_session(room_id, hash_part)
if result[1]:
await _send_message(
room_id,
bot,
f"""
Activated session with hash `{result[1][0]}`.
Session description:
> {result[1][1]}
""",
f"""
Activated session with hash <code>{result[1][0]}</code>.<br>
Session description:<br>
<blockquote>{result[1][1]}</blockquote>
""")
else:
await bot.api.send_markdown_message(
room_id,
"No session activated because there are no sessions in the room.")
async def generic(room_id: str, bot: botlib.Bot, message: str, new=False):
db.create_room_if_not_exists(room_id)
raw_conversation = db.get_or_create_conversation(room_id, message, 10, new=new)
if raw_conversation[0][0] == 0:
        # most recent message is from the assistant - should not happen here
raise Exception("At this point, the last message should be from the user...")
messages = []
for i in range(len(raw_conversation) - 1, -1, -1):
messages.append(
{
"role": "user" if raw_conversation[i][0] == 1 else "assistant",
"content": raw_conversation[i][1]
})
result = [None]
req_thread = threading.Thread(target=ai_api_request, args=(messages, result))
req_thread.start()
    # keep the typing indicator alive while the blocking API request runs in the
    # background thread, refreshing it periodically instead of busy-looping
    while req_thread.is_alive():
        await bot.api.async_client.room_typing(
            room_id, True, 500
        )
        await asyncio.sleep(0.5)
    await bot.api.async_client.room_typing(room_id, False)
resp = result[0]
if resp:
resp = resp.get("choices")[0].get("message")
if resp:
content = resp.get("content")
db.create_new_message(room_id, content, False)
await bot.api.send_markdown_message(room_id, mistletoe.markdown(content))
def ai_api_request(messages, result):
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = messages,
temperature = 0,
)
result[0] = response
async def new_error(room_id: str, bot: botlib.Bot):
await bot.api.send_markdown_message(room_id, "Could not create new session. Please provide a prompt.")
return
async def _room_send(room_id: str, bot: botlib.Bot, content: dict):
# CAUTION Does not support encryption...
resp = await bot.api.async_client.room_send(
room_id,
"m.room.message", # messagetype
content=content,
ignore_unverified_devices=bot.config.ignore_unverified_devices
)
return resp
async def _send_message(room_id: str, bot: botlib.Bot, message: str, formatted_message: str):
# CAUTION Does not support encryption...
resp = await _room_send(
room_id,
bot,
{
"msgtype": "m.text",
"body": message,
"format": "org.matrix.custom.html",
"formatted_body": formatted_message
}
)
if isinstance(resp, nio.RoomSendResponse):
return resp.event_id
async def _edit_message(room_id: str, bot: botlib.Bot, event_id: str, message: str, formatted_message: str):
# CAUTION Does not support encryption...
resp = await _room_send(
room_id,
bot,
{
"m.new_content": {
"msgtype": "m.text",
"body": message,
"format": "org.matrix.custom.html",
"formatted_body": markdown.markdown(message, extensions=['nl2br'])
},
"m.relates_to": {
"rel_type": "m.replace",
"event_id": event_id
},
"msgtype": "m.text",
"body": message,
"format": "org.matrix.custom.html",
"formatted_body": formatted_message
}
)
if isinstance(resp, nio.RoomSendResponse):
return resp.event_id
def _prepend_start(message: str) -> str:
return " * " + message
def _markdownify(message: str) -> str:
return markdown.markdown(message, extensions=['nl2br']) | [
"nl2br"
] |
2024-01-10 | Mkoot007/Auto-Evaluator | Auto-Evaluator~auto-evaluator.py | import os
import json
import time
from typing import List
import faiss
import pypdf
import random
import itertools
import text_utils
import pandas as pd
import altair as alt
import streamlit as st
from io import StringIO
from llama_index import Document
from langchain.llms import Anthropic
from langchain import HuggingFaceHub
from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS
from llama_index import LangchainEmbedding
from langchain.chat_models import ChatOpenAI
from langchain.retrievers import SVMRetriever
from langchain.chains import QAGenerationChain
from langchain.retrievers import TFIDFRetriever
from langchain.evaluation.qa import QAEvalChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from gpt_index import LLMPredictor, ServiceContext, GPTFaissIndex
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from text_utils import GRADE_DOCS_PROMPT, GRADE_ANSWER_PROMPT, GRADE_DOCS_PROMPT_FAST, GRADE_ANSWER_PROMPT_FAST, GRADE_ANSWER_PROMPT_BIAS_CHECK, GRADE_ANSWER_PROMPT_OPENAI
if "existing_df" not in st.session_state:
summary = pd.DataFrame(columns=['chunk_chars',
'overlap',
'split',
'model',
'retriever',
'embedding',
'num_neighbors',
'Latency',
'Retrieval score',
'Answer score'])
st.session_state.existing_df = summary
else:
summary = st.session_state.existing_df
@st.cache_data
def load_docs(files: List) -> str:
"""
Load docs from files
@param files: list of files to load
@return: string of all docs concatenated
"""
st.info("`Reading doc ...`")
all_text = ""
for file_path in files:
file_extension = os.path.splitext(file_path.name)[1]
if file_extension == ".pdf":
pdf_reader = pypdf.PdfReader(file_path)
file_content = ""
for page in pdf_reader.pages:
file_content += page.extract_text()
file_content = text_utils.clean_pdf_text(file_content)
all_text += file_content
elif file_extension == ".txt":
stringio = StringIO(file_path.getvalue().decode("utf-8"))
file_content = stringio.read()
all_text += file_content
else:
st.warning('Please provide txt or pdf.', icon="⚠️")
return all_text
@st.cache_data
def generate_eval(text: str, num_questions: int, chunk: int):
"""
Generate eval set
@param text: text to generate eval set from
@param num_questions: number of questions to generate
@param chunk: chunk size to draw question from in the doc
@return: eval set as JSON list
"""
st.info("`Generating eval set ...`")
n = len(text)
starting_indices = [random.randint(0, n - chunk) for _ in range(num_questions)]
sub_sequences = [text[i:i + chunk] for i in starting_indices]
chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
eval_set = []
for i, b in enumerate(sub_sequences):
try:
qa = chain.run(b)
eval_set.append(qa)
except:
st.warning('Error generating question %s.' % str(i + 1), icon="⚠️")
eval_set_full = list(itertools.chain.from_iterable(eval_set))
return eval_set_full
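# Note (assumption added for clarity, not from the original code): each item of
# the returned eval set is a dict with "question" and "answer" keys, which is
# the shape consumed by run_evaluation() further below, e.g.:
#
#   eval_set = [
#       {"question": "What is the notice period?", "answer": "30 days."},
#       {"question": "Who signed the agreement?", "answer": "Acme Corp and Beta LLC."},
#   ]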
@st.cache_resource
def split_texts(text, chunk_size: int, overlap, split_method: str):
"""
Split text into chunks
@param text: text to split
@param chunk_size:
@param overlap:
@param split_method:
@return: list of str splits
"""
st.info("`Splitting doc ...`")
if split_method == "RecursiveTextSplitter":
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
chunk_overlap=overlap)
elif split_method == "CharacterTextSplitter":
text_splitter = CharacterTextSplitter(separator=" ",
chunk_size=chunk_size,
chunk_overlap=overlap)
else:
st.warning("`Split method not recognized. Using RecursiveCharacterTextSplitter`", icon="⚠️")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
chunk_overlap=overlap)
split_text = text_splitter.split_text(text)
return split_text
@st.cache_resource
def make_llm(model_version: str):
"""
Make LLM from model version
@param model_version: model_version
@return: LLN
"""
if (model_version == "gpt-3.5-turbo") or (model_version == "gpt-4"):
chosen_model = ChatOpenAI(model_name=model_version, temperature=0)
elif model_version == "anthropic":
chosen_model = Anthropic(temperature=0)
elif model_version == "flan-t5-xl":
chosen_model = HuggingFaceHub(repo_id="google/flan-t5-xl",model_kwargs={"temperature":0,"max_length":64})
else:
st.warning("`Model version not recognized. Using gpt-3.5-turbo`", icon="⚠️")
chosen_model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
return chosen_model
@st.cache_resource
def make_retriever(splits, retriever_type, embedding_type, num_neighbors, _llm):
"""
Make document retriever
@param splits: list of str splits
@param retriever_type: retriever type
@param embedding_type: embedding type
@param num_neighbors: number of neighbors for retrieval
@param _llm: model
@return: retriever
"""
st.info("`Making retriever ...`")
if embedding_type == "OpenAI":
embedding = OpenAIEmbeddings()
elif embedding_type == "HuggingFace":
embedding = HuggingFaceEmbeddings()
else:
st.warning("`Embedding type not recognized. Using OpenAI`", icon="⚠️")
embedding = OpenAIEmbeddings()
if retriever_type == "similarity-search":
try:
vector_store = FAISS.from_texts(splits, embedding)
except ValueError:
st.warning("`Error using OpenAI embeddings (disallowed TikToken token in the text). Using HuggingFace.`",
icon="⚠️")
vector_store = FAISS.from_texts(splits, HuggingFaceEmbeddings())
retriever_obj = vector_store.as_retriever(k=num_neighbors)
elif retriever_type == "SVM":
retriever_obj = SVMRetriever.from_texts(splits, embedding)
elif retriever_type == "TF-IDF":
retriever_obj = TFIDFRetriever.from_texts(splits)
elif retriever_type == "Llama-Index":
documents = [Document(t, LangchainEmbedding(embedding)) for t in splits]
        llm_predictor = LLMPredictor(llm=_llm)  # use the model passed to this function, not the module-level llm
context = ServiceContext.from_defaults(chunk_size_limit=512, llm_predictor=llm_predictor)
d = 1536
faiss_index = faiss.IndexFlatL2(d)
retriever_obj = GPTFaissIndex.from_documents(documents, faiss_index=faiss_index, service_context=context)
else:
st.warning("`Retriever type not recognized. Using SVM`", icon="⚠️")
retriever_obj = SVMRetriever.from_texts(splits, embedding)
return retriever_obj
def make_chain(llm, retriever, retriever_type: str) -> RetrievalQA:
"""
Make chain
@param llm: model
@param retriever: retriever
@param retriever_type: retriever type
@return: chain (or return retriever for Llama-Index)
"""
st.info("`Making chain ...`")
if retriever_type == "Llama-Index":
qa = retriever
else:
qa = RetrievalQA.from_chain_type(llm,
chain_type="stuff",
retriever=retriever,
input_key="question")
return qa
def grade_model_answer(predicted_dataset: List, predictions: List, grade_answer_prompt: str) -> List:
"""
Grades the distilled answer based on ground truth and model predictions.
@param predicted_dataset: A list of dictionaries containing ground truth questions and answers.
@param predictions: A list of dictionaries containing model predictions for the questions.
    @param grade_answer_prompt: The grading prompt style: "Fast", "Descriptive", "Descriptive w/ bias check" or "OpenAI grading prompt".
@return: A list of scores for the distilled answers.
"""
st.info("`Grading model answer ...`")
if grade_answer_prompt == "Fast":
prompt = GRADE_ANSWER_PROMPT_FAST
elif grade_answer_prompt == "Descriptive w/ bias check":
prompt = GRADE_ANSWER_PROMPT_BIAS_CHECK
elif grade_answer_prompt == "OpenAI grading prompt":
prompt = GRADE_ANSWER_PROMPT_OPENAI
else:
prompt = GRADE_ANSWER_PROMPT
eval_chain = QAEvalChain.from_llm(
llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
prompt=prompt
)
graded_outputs = eval_chain.evaluate(
predicted_dataset,
predictions,
question_key="question",
prediction_key="result"
)
return graded_outputs
def grade_model_retrieval(gt_dataset: List, predictions: List, grade_docs_prompt: str):
"""
Grades the relevance of retrieved documents based on ground truth and model predictions.
@param gt_dataset: list of dictionaries containing ground truth questions and answers.
@param predictions: list of dictionaries containing model predictions for the questions
    @param grade_docs_prompt: grading prompt style; "Fast" uses the fast prompt, any other value uses the full prompt
@return: list of scores for the retrieved documents.
"""
st.info("`Grading relevance of retrieved docs ...`")
prompt = GRADE_DOCS_PROMPT_FAST if grade_docs_prompt == "Fast" else GRADE_DOCS_PROMPT
eval_chain = QAEvalChain.from_llm(
llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
prompt=prompt
)
graded_outputs = eval_chain.evaluate(
gt_dataset,
predictions,
question_key="question",
prediction_key="result"
)
return graded_outputs
def run_evaluation(chain, retriever, eval_set, grade_prompt, retriever_type, num_neighbors):
"""
Runs evaluation on a model's performance on a given evaluation dataset.
@param chain: Model chain used for answering questions
@param retriever: Document retriever used for retrieving relevant documents
@param eval_set: List of dictionaries containing questions and corresponding ground truth answers
@param grade_prompt: String prompt used for grading model's performance
@param retriever_type: String specifying the type of retriever used
@param num_neighbors: Number of neighbors to retrieve using the retriever
@return: A tuple of four items:
- answers_grade: A dictionary containing scores for the model's answers.
- retrieval_grade: A dictionary containing scores for the model's document retrieval.
- latencies_list: A list of latencies in seconds for each question answered.
- predictions_list: A list of dictionaries containing the model's predicted answers and relevant documents for each question.
"""
st.info("`Running evaluation ...`")
predictions_list = []
retrieved_docs = []
gt_dataset = []
latencies_list = []
for data in eval_set:
start_time = time.time()
if retriever_type != "Llama-Index":
predictions_list.append(chain(data))
elif retriever_type == "Llama-Index":
answer = chain.query(data["question"], similarity_top_k=num_neighbors, response_mode="tree_summarize",
use_async=True)
predictions_list.append({"question": data["question"], "answer": data["answer"], "result": answer.response})
gt_dataset.append(data)
end_time = time.time()
elapsed_time = end_time - start_time
latencies_list.append(elapsed_time)
retrieved_doc_text = ""
if retriever_type == "Llama-Index":
for i, doc in enumerate(answer.source_nodes):
retrieved_doc_text += "Doc %s: " % str(i + 1) + doc.node.text + " "
else:
docs = retriever.get_relevant_documents(data["question"])
for i, doc in enumerate(docs):
retrieved_doc_text += "Doc %s: " % str(i + 1) + doc.page_content + " "
retrieved = {"question": data["question"], "answer": data["answer"], "result": retrieved_doc_text}
retrieved_docs.append(retrieved)
answers_grade = grade_model_answer(gt_dataset, predictions_list, grade_prompt)
retrieval_grade = grade_model_retrieval(gt_dataset, retrieved_docs, grade_prompt)
return answers_grade, retrieval_grade, latencies_list, predictions_list
st.sidebar.image("img/diagnostic.jpg")
oai_api_key = st.sidebar.text_input("`OpenAI API Key:`", type="password")
ant_api_key = st.sidebar.text_input("`(Optional) Anthropic API Key:`", type="password")
hf_api_key = st.sidebar.text_input("`(Optional) HuggingFace API Token:`", type="password")
with st.sidebar.form("user_input"):
num_eval_questions = st.select_slider("`Number of eval questions`",
options=[1, 5, 10, 15, 20], value=5)
chunk_chars = st.select_slider("`Choose chunk size for splitting`",
options=[500, 750, 1000, 1500, 2000], value=1000)
overlap = st.select_slider("`Choose overlap for splitting`",
options=[0, 50, 100, 150, 200], value=100)
split_method = st.radio("`Split method`",
("RecursiveTextSplitter",
"CharacterTextSplitter"),
index=0)
model = st.radio("`Choose model`",
("gpt-3.5-turbo",
"gpt-4",
"anthropic"),
# Error raised by inference API: Model google/flan-t5-xl time out
# "flan-t5-xl"),
index=0)
retriever_type = st.radio("`Choose retriever`",
("TF-IDF",
"SVM",
"Llama-Index",
"similarity-search"),
index=3)
num_neighbors = st.select_slider("`Choose # chunks to retrieve`",
options=[3, 4, 5, 6, 7, 8])
embeddings = st.radio("`Choose embeddings`",
("HuggingFace",
"OpenAI"),
index=1)
grade_prompt = st.radio("`Grading style prompt`",
("Fast",
"Descriptive",
"Descriptive w/ bias check",
"OpenAI grading prompt"),
index=0)
submitted = st.form_submit_button("Submit evaluation")
st.header("`Auto-Evaluator`")
st.info(
"I am an evaluation tool for question-answering built on LangChain. Given documents, I will auto-generate a question-answer eval "
"set and evaluate using the selected chain settings. Experiments with different configurations are logged.")
with st.form(key='file_inputs'):
uploaded_file = st.file_uploader("`Please upload a file to evaluate (.txt or .pdf):` ",
type=['pdf', 'txt'],
accept_multiple_files=True)
uploaded_eval_set = st.file_uploader("`[Optional] Please upload eval set (.json):` ",
type=['json'],
accept_multiple_files=False)
submitted = st.form_submit_button("Submit files")
if uploaded_file and oai_api_key:
os.environ["OPENAI_API_KEY"] = oai_api_key
os.environ["ANTHROPIC_API_KEY"] = ant_api_key
os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_api_key
text = load_docs(uploaded_file)
if not uploaded_eval_set:
eval_set = generate_eval(text, num_eval_questions, 3000)
else:
eval_set = json.loads(uploaded_eval_set.read())
splits = split_texts(text, chunk_chars, overlap, split_method)
llm = make_llm(model)
retriever = make_retriever(splits, retriever_type, embeddings, num_neighbors, llm)
qa_chain = make_chain(llm, retriever, retriever_type)
graded_answers, graded_retrieval, latency, predictions = run_evaluation(qa_chain, retriever, eval_set, grade_prompt,
retriever_type, num_neighbors)
d = pd.DataFrame(predictions)
d['answer score'] = [g['text'] for g in graded_answers]
d['docs score'] = [g['text'] for g in graded_retrieval]
d['latency'] = latency
mean_latency = d['latency'].mean()
correct_answer_count = len([text for text in d['answer score'] if "INCORRECT" not in text])
correct_docs_count = len([text for text in d['docs score'] if "Context is relevant: True" in text])
percentage_answer = (correct_answer_count / len(graded_answers)) * 100
percentage_docs = (correct_docs_count / len(graded_retrieval)) * 100
st.subheader("`Run Results`")
    st.info(
        "`I will grade the chain based on: 1/ the relevance of the retrieved documents relative to the question and 2/ "
        "the summarized answer relative to the ground truth answer. You can see (and change) the prompts used for "
        "grading in text_utils`")
st.dataframe(data=d, use_container_width=True)
st.subheader("`Aggregate Results`")
    st.info(
        "`Retrieval and answer scores are the percentage of retrieved documents deemed relevant by the LLM grader ("
        "relative to the question) and the percentage of summarized answers deemed relevant (relative to the ground "
        "truth answer), respectively. The size of each point corresponds to the latency (in seconds) of retrieval + "
        "answer summarization (larger circle = slower).`")
new_row = pd.DataFrame({'chunk_chars': [chunk_chars],
'overlap': [overlap],
'split': [split_method],
'model': [model],
'retriever': [retriever_type],
'embedding': [embeddings],
'num_neighbors': [num_neighbors],
'Latency': [mean_latency],
'Retrieval score': [percentage_docs],
'Answer score': [percentage_answer]})
summary = pd.concat([summary, new_row], ignore_index=True)
st.dataframe(data=summary, use_container_width=True)
st.session_state.existing_df = summary
show = summary.reset_index().copy()
show.columns = ['expt number', 'chunk_chars', 'overlap',
'split', 'model', 'retriever', 'embedding', 'num_neighbors', 'Latency', 'Retrieval score',
'Answer score']
show['expt number'] = show['expt number'].apply(lambda x: "Expt #: " + str(x + 1))
c = alt.Chart(show).mark_circle().encode(x='Retrieval score',
y='Answer score',
size=alt.Size('Latency'),
color='expt number',
tooltip=['expt number', 'Retrieval score', 'Latency', 'Answer score'])
st.altair_chart(c, use_container_width=True, theme="streamlit")
else:
st.warning("Please input file and API key(s)!")
# st.text("MADE by US WITH HATE FOR WORLD 💗") | [
"OpenAI grading prompt",
"Descriptive",
"Descriptive w/ bias check",
"`Grading style prompt`"
] |
2024-01-10 | robotrobo/EFHackathon_medical_diagnosis | src~backend.py | from langchain.document_loaders import TextLoader, DirectoryLoader
import os
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains import StuffDocumentsChain
from langchain.chains import StuffDocumentsChain, LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain.schema import SystemMessage, HumanMessage
from langchain.chains import load_chain
########################
from prompts import system_message_prompt, human_message_prompt
########################
# ChatGPT API
os.environ['OPENAI_API_KEY'] = "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"
DATASET_PATH = "../datasets/Data/Transcripts/"
VECTORSTORE_FILE_NAME = "vectorstore.faiss"
MODEL_NAME = "gpt-3.5-turbo-16k"
# Load Documents
def load_documents():
text_loader_kwargs={'autodetect_encoding': True}
loader = DirectoryLoader(DATASET_PATH,show_progress=True, use_multithreading=True, glob="*.txt", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)
docs = loader.load()
print(f"successfully loaded {len(docs)} docs.")
return docs
################
# Split Documents
def split_documents(docs):
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=4000, chunk_overlap=0
)
split_documents = text_splitter.split_documents(docs)
    print(f"Successfully split documents into {len(split_documents)} chunks.")
return split_documents
################
# Faiss vector store
def create_vector_store(split_documents):
db = FAISS.from_documents(split_documents, OpenAIEmbeddings())
db.save_local(VECTORSTORE_FILE_NAME)
return db
# query = "Hey doctor, I have a headache."
# docs = db.similarity_search(query)
# print(f"Found : {len(docs)} chunks for the query : {query}")
# print(docs[0].page_content)
# print("=====================================")
# print(docs[1].page_content)
# print("=====================================")
# print(docs[2].page_content)
# print("=====================================")
def create_chain(db):
llm = ChatOpenAI(model_name=MODEL_NAME, temperature=0)
combine_docs_chain_kwargs = {
"prompt": ChatPromptTemplate.from_messages([
system_message_prompt,
human_message_prompt,
]),
}
qa = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever(), combine_docs_chain_kwargs=combine_docs_chain_kwargs)
return qa
def prep_chain():
# If the vectorstore exists, just use it
if os.path.exists(VECTORSTORE_FILE_NAME):
print("Using loaded vectordb")
db = FAISS.load_local(VECTORSTORE_FILE_NAME, OpenAIEmbeddings())
else:
docs = load_documents()
split_docs = split_documents(docs)
db = create_vector_store(split_docs)
chain = create_chain(db)
return chain
def get_final_analysis(overall_analysis: dict):
final_analysis_model = ChatOpenAI(model_name="gpt-4", temperature=0)
messages = [SystemMessage(content="You are a summariser model that can summarise medical diagnoses."), HumanMessage(content=f"Summarise the given diagnostic data into a final report.\n{str(overall_analysis)}")]
return final_analysis_model(messages).content
def main():
chain = prep_chain()
query = """Hey, how is your day going
I have a headache.
How long have you had it?
For about 2 days now."""
patient_info = """
Age: 25,
Gender: Male,
Occupation: Student,
Family medical history: Unknown,
"""
result = chain({"question": query, "chat_history": [], "patient_info": patient_info})
print(result)
if __name__ == "__main__":
main() | [
"Summarise the given diagnostic data into a final report.\nPLACEHOLDER",
"[PLACEHOLDER, PLACEHOLDER]",
"You are a summariser model that can summarise medical diagnoses."
] |
2024-01-10 | tky5622/stable-dreamfusion | run_stable_dreamfusion.py | import torch
import argparse
import pandas as pd
import sys
from nerf.provider import NeRFDataset
from nerf.utils import *
# torch.autograd.set_detect_anomaly(True)
from argparse import Namespace
def main(
workspace='', #workspace path
file='',
text='',
negative='',
# iters=2000,
# lr=0.001,
# dt_lr=0.001,
# parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray")
# parser.add_argument('-O2', action='store_true', help="equals --backbone vanilla")
test=False,
six_views=True,
eval_interval=1,
test_interval=100,
seed=None,
image=None,
image_config=None,
known_view_interval=4,
IF=True,
guidance=['SD'],
guidance_scale=100,
save_mesh=True, #"export an obj mesh with texture")
    mcubes_resolution=256, #help="mcubes resolution for extracting mesh")
decimate_target=5e4, #help="target face number for mesh decimation")
dmtet=False,
tet_grid_size=128, #help="tet grid size")
init_with='', #help="ckpt to init dmtet")
lock_geo=False, # help="disable dmtet to learn geometry")
## Perp-Neg options
perpneg=False, # help="use perp_neg")
negative_w=-2, # help="The scale of the weights of negative prompts. A larger value will help to avoid the Janus problem, but may cause flat faces. Vary between 0 to -4, depending on the prompt")
front_decay_factor=2, #help="decay factor for the front prompt")
side_decay_factor=10, #help="decay factor for the side prompt")
### training options
iters=10000, #help="training iters")
lr=1e-3, #help="max learning rate")
ckpt='latest', # help="possible options are ['latest', 'scratch', 'best', 'latest_model']")
cuda_ray=False, #help="use CUDA raymarching instead of pytorch")
taichi_ray=False,
max_steps=1024, # help="max num steps sampled per ray (only valid when using --cuda_ray)")
num_steps=64, #help="num steps sampled per ray (only valid when not using --cuda_ray)")
upsample_steps=32, #help="num steps up-sampled per ray (only valid when not using --cuda_ray)")
update_extra_interval=16, #help="iter interval to update extra status (only valid when using --cuda_ray)")
max_ray_batch=4096, #help="batch size of rays at inference to avoid OOM (only valid when not using --cuda_ray)")
latent_iter_ratio=0.2, #help="training iters that only use albedo shading")
albedo_iter_ratio=0, #help="training iters that only use albedo shading")
min_ambient_ratio=0.1, #help="minimum ambient ratio to use in lambertian shading")
textureless_ratio=0.2, #help="ratio of textureless shading")
jitter_pose=False, #action='store_true', help="add jitters to the randomly sampled camera poses")
jitter_center=0.2, #help="amount of jitter to add to sampled camera pose's center (camera location)")
jitter_target=0.2, #help="amount of jitter to add to sampled camera pose's target (i.e. 'look-at')")
jitter_up=0.02, #help="amount of jitter to add to sampled camera pose's up-axis (i.e. 'camera roll')")
uniform_sphere_rate=0, #help="likelihood of sampling camera location uniformly on the sphere surface area")
grad_clip=-1, #help="clip grad of all grad to this limit, negative value disables it")
grad_clip_rgb=-1, #help="clip grad of rgb space grad to this limit, negative value disables it")
# model options
bg_radius=1.4, #help="if positive, use a background model at sphere(bg_radius)")
density_activation='exp',# choices=['softplus', 'exp'], help="density activation function")
density_thresh=10, #help="threshold for density grid to be occupied")
blob_density=5, #help="max (center) density for the density blob")
blob_radius=0.2, #help="control the radius for the density blob")
# network backbone
backbone='grid', #choices=['grid_tcnn', 'grid', 'vanilla', 'grid_taichi'], help="nerf backbone")
optim='adan', #choices=['adan', 'adam'], help="optimizer")
sd_version='2.1', #choices=['1.5', '2.0', '2.1'], help="stable diffusion version")
hf_key=None, #help="hugging face Stable diffusion model key")
# try this if CUDA OOM
fp16=False, #help="use float16 for training")
vram_O=False, # help="optimization for low VRAM usage")
# rendering resolution in training, increase these for better quality / decrease these if CUDA OOM even if --vram_O enabled.
w=64, #help="render width for NeRF in training")
h=64, #help="render height for NeRF in training")
known_view_scale=1.5, #help="multiply --h/w by this for known view rendering")
known_view_noise_scale=2e-3, #help="random camera noise added to rays_o and rays_d")
dmtet_reso_scale=8, #help="multiply --h/w by this for dmtet finetuning")
batch_size=1, #help="images to render per batch using NeRF")
### dataset options
bound=1, #help="assume the scene is bounded in box(-bound, bound)")
dt_gamma=0, #help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
min_near=0.01, #help="minimum near distance for camera")
radius_range=[3.0, 3.5], #help="training camera radius range")
theta_range=[45, 105], #help="training camera range along the polar angles (i.e. up and down). See advanced.md for details.")
phi_range=[-180, 180], #help="training camera range along the azimuth angles (i.e. left and right). See advanced.md for details.")
fovy_range=[10, 30], #help="training camera fovy range")
default_radius=3.2, #help="radius for the default view")
default_polar=90, #help="polar for the default view")
default_azimuth=0, #help="azimuth for the default view")
default_fovy=20, #help="fovy for the default view")
progressive_view=False, #action='store_true', help="progressively expand view sampling range from default to full")
progressive_view_init_ratio=0.2, #help="initial ratio of final range, used for progressive_view")
progressive_level=False, #help="progressively increase gridencoder's max_level")
angle_overhead=30, #help="[0, angle_overhead] is the overhead region")
angle_front=60, #help="[0, angle_front] is the front region, [180, 180+angle_front] the back region, otherwise the side region.")
t_range=[0.02, 0.98], #help="stable diffusion time steps range")
    dont_override_stuff=False, #action='store_true', help="Don't override t_range, etc.")
### regularizations
lambda_entropy=1e-3, #help="loss scale for alpha entropy")
lambda_opacity=0, #help="loss scale for alpha value")
lambda_orient=1e-2, #help="loss scale for orientation")
lambda_tv=0, #help="loss scale for total variation")
lambda_wd=0, #help="loss scale")
lambda_mesh_normal=0.5, #help="loss scale for mesh normal smoothness")
lambda_mesh_laplacian=0.5, #help="loss scale for mesh laplacian")
lambda_guidance=1, #help="loss scale for SDS")
lambda_rgb=1000, #help="loss scale for RGB")
lambda_mask=500, #help="loss scale for mask (alpha)")
lambda_normal=0, #help="loss scale for normal map")
lambda_depth=10, #help="loss scale for relative depth")
lambda_2d_normal_smooth=0, #help="loss scale for 2D normal image smoothness")
lambda_3d_normal_smooth=0, #help="loss scale for 3D normal image smoothness")
save_guidance=False, #action='store_true', help="save images of the per-iteration NeRF renders, added noise, denoised (i.e. guidance), fully-denoised. Useful for debugging, but VERY SLOW and takes lots of memory!")
save_guidance_interval=10, #help="save guidance every X step")
gui=False, #action='store_true', help="start a GUI")
W=800, #help="GUI width")
H=800, #help="GUI height")
radius=5, #help="default GUI camera radius from center")
fovy=20, #help="default GUI camera fovy")
light_theta=60, #help="default GUI light direction in [0, 180], corresponding to elevation [90, -90]")
light_phi=0, #help="default GUI light direction in [0, 360), azimuth")
max_spp=1, #help="GUI rendering max sample per pixel")
zero123_config='./pretrained/zero123/sd-objaverse-finetune-c_concat-256.yaml',#, help="config file for zero123")
zero123_ckpt='pretrained/zero123/zero123-xl.ckpt', #, help="ckpt for zero123")
zero123_grad_scale='angle', #, help="whether to scale the gradients based on 'angle' or 'None'")
dataset_size_train=100, #help="Length of train dataset i.e. # of iterations per epoch")
dataset_size_valid=8, #help="# of frames to render in the turntable video in validation")
dataset_size_test=100, #help="# of frames to render in the turntable video at test time")
    exp_start_iter=None, #help="start iter # for experiment, to calculate progressive_view and progressive_level")
    exp_end_iter=None, #help="end iter # for experiment, to calculate progressive_view and progressive_level")
    # continue with any remaining required parameters below
):
opt = Namespace(
workspace=workspace, #workspace path
file=file,
text=text,
negative=negative,
# iters=iters, #help="training iterations")
# lr=lr, #help="learning rate")
# dt_lr=dt_lr, #help="dt learning rate")
# parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray")
# parser.add_argument('-O2', action='store_true', help="equals --backbone vanilla")
test=test,
six_views=six_views,
eval_interval=eval_interval,
test_interval=test_interval,
seed=seed,
image=image,
image_config=image_config,
known_view_interval=known_view_interval,
IF=IF,
guidance=guidance,
guidance_scale=guidance_scale,
save_mesh=save_mesh, #"export an obj mesh with texture")
        mcubes_resolution=mcubes_resolution, #help="mcubes resolution for extracting mesh")
decimate_target=decimate_target, #help="target face number for mesh decimation")
dmtet=dmtet, #help="use dmtet")
tet_grid_size=tet_grid_size, #help="tet grid size")
init_with=init_with, #help="ckpt to init dmtet")
lock_geo=lock_geo, # help="disable dmtet to learn geometry")
## Perp-Neg options
perpneg=perpneg, # help="use perp_neg")
negative_w=negative_w, # help="The scale of the weights of negative prompts. A larger value will help to avoid the Janus problem, but may cause flat faces. Vary between 0 to -4, depending on the prompt")
front_decay_factor=front_decay_factor, #help="decay factor for the front prompt")
side_decay_factor=side_decay_factor, #help="decay factor for the side prompt")
### training options
iters=iters, #help="training iters")
lr=lr, #help="max learning rate")
ckpt=ckpt, # help="possible options are ['latest', 'scratch', 'best', 'latest_model']")
cuda_ray=cuda_ray, #help="use CUDA raymarching instead of pytorch")
taichi_ray=taichi_ray, #help="use taichi raymarching instead of pytorch")
max_steps=max_steps, # help="max num steps sampled per ray (only valid when using --cuda_ray)")
num_steps=num_steps, #help="num steps sampled per ray (only valid when not using --cuda_ray)")
upsample_steps=upsample_steps, #help="num steps up-sampled per ray (only valid when not using --cuda_ray)")
update_extra_interval=update_extra_interval, #help="iter interval to update extra status (only valid when using --cuda_ray)")
max_ray_batch=max_ray_batch, #help="batch size of rays at inference to avoid OOM (only valid when not using --cuda_ray)")
latent_iter_ratio=latent_iter_ratio, #help="training iters that only use albedo shading")
albedo_iter_ratio=albedo_iter_ratio, #help="training iters that only use albedo shading")
min_ambient_ratio=min_ambient_ratio, #help="minimum ambient ratio to use in lambertian shading")
textureless_ratio=textureless_ratio, #help="ratio of textureless shading")
jitter_pose=jitter_pose, #action='store_true', help="add jitters to the randomly sampled camera poses")
jitter_center=jitter_center, #help="amount of jitter to add to sampled camera pose's center (camera location)")
jitter_target=jitter_target, #help="amount of jitter to add to sampled camera pose's target (i.e. 'look-at')")
jitter_up=jitter_up, #help="amount of jitter to add to sampled camera pose's up-axis (i.e. 'camera roll')")
uniform_sphere_rate=uniform_sphere_rate, #help="likelihood of sampling camera location uniformly on the sphere surface area")
grad_clip=grad_clip, #help="clip grad of all grad to this limit, negative value disables it")
grad_clip_rgb=grad_clip_rgb, #help="clip grad of rgb space grad to this limit, negative value disables it")
# model options
bg_radius=bg_radius, #help="if positive, use a background model at sphere(bg_radius)")
density_activation=density_activation,# choices=['softplus', 'exp'], help="density activation function")
density_thresh=density_thresh, #help="threshold for density grid to be occupied")
blob_density=blob_density, #help="max (center) density for the density blob")
blob_radius=blob_radius, #help="control the radius for the density blob")
# network backbone
backbone=backbone, #choices=['grid_tcnn', 'grid', 'vanilla', 'grid_taichi'], help="nerf backbone")
optim=optim, #choices=['adan', 'adam'], help="optimizer")
sd_version=sd_version, #choices=['1.5', '2.0', '2.1'], help="stable diffusion version")
hf_key=hf_key, #help="hugging face Stable diffusion model key")
# try this if CUDA OOM
fp16=fp16, #help="use float16 for training")
vram_O=vram_O, # help="optimization for low VRAM usage")
# rendering resolution in training, increase these for better quality / decrease these if CUDA OOM even if --vram_O enabled.
w=w, #help="render width for NeRF in training")
h=h, #help="render height for NeRF in training")
known_view_scale=known_view_scale, #help="multiply --h/w by this for known view rendering")
known_view_noise_scale=known_view_noise_scale, #help="random camera noise added to rays_o and rays_d")
dmtet_reso_scale=dmtet_reso_scale, #help="multiply --h/w by this for dmtet finetuning")
batch_size=batch_size, #help="images to render per batch using NeRF")
### dataset options
bound=bound, #help="assume the scene is bounded in box(-bound, bound)")
dt_gamma=dt_gamma , #help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
min_near=min_near, #help="minimum near distance for camera")
radius_range=radius_range, #help="training camera radius range")
theta_range=theta_range, #help="training camera range along the polar angles (i.e. up and down). See advanced.md for details.")
phi_range=phi_range, #help="training camera range along the azimuth angles (i.e. left and right). See advanced.md for details.")
fovy_range=fovy_range, #help="training camera fovy range")
default_radius=default_radius, #help="radius for the default view")
default_polar=default_polar, #help="polar for the default view")
default_azimuth=default_azimuth, #help="azimuth for the default view")
default_fovy=default_fovy, #help="fovy for the default view")
progressive_view=progressive_view, #action='store_true', help="progressively expand view sampling range from default to full")
progressive_view_init_ratio=progressive_view_init_ratio, #help="initial ratio of final range, used for progressive_view")
progressive_level=progressive_level, #help="progressively increase gridencoder's max_level")
angle_overhead=angle_overhead, #help="[0, angle_overhead] is the overhead region")
angle_front=angle_front, #help="[0, angle_front] is the front region, [180, 180+angle_front] the back region, otherwise the side region.")
t_range=t_range, #help="stable diffusion time steps range")
        dont_override_stuff=dont_override_stuff, #action='store_true', help="Don't override t_range, etc.")
### regularizations
lambda_entropy=lambda_entropy, #help="loss scale for alpha entropy")
lambda_opacity=lambda_opacity, #help="loss scale for alpha value")
lambda_orient=lambda_orient, #help="loss scale for orientation")
lambda_tv=lambda_tv, #help="loss scale for total variation")
lambda_wd=lambda_wd, #help="loss scale")
lambda_mesh_normal=lambda_mesh_normal, #help="loss scale for mesh normal smoothness")
lambda_mesh_laplacian=lambda_mesh_laplacian, #help="loss scale for mesh laplacian")
lambda_guidance=lambda_guidance, #help="loss scale for SDS")
lambda_rgb=lambda_rgb, #help="loss scale for RGB")
lambda_mask=lambda_mask, #help="loss scale for mask (alpha)")
lambda_normal=lambda_normal, #help="loss scale for normal map")
lambda_depth=lambda_depth, #help="loss scale for relative depth")
lambda_2d_normal_smooth=lambda_2d_normal_smooth, #help="loss scale for 2D normal image smoothness")
lambda_3d_normal_smooth=lambda_3d_normal_smooth, #help="loss scale for 3D normal image smoothness")
save_guidance=save_guidance, #action='store_true', help="save images of the per-iteration NeRF renders, added noise, denoised (i.e. guidance), fully-denoised. Useful for debugging, but VERY SLOW and takes lots of memory!")
save_guidance_interval=save_guidance_interval, #help="save guidance every X step")
gui=gui, #action='store_true', help="start a GUI")
W=W, #help="GUI width")
H=H, #help="GUI height")
radius=radius, #help="default GUI camera radius from center")
fovy=fovy, #help="default GUI camera fovy")
light_theta=light_theta, #help="default GUI light direction in [0, 180], corresponding to elevation [90, -90]")
light_phi=light_phi, #help="default GUI light direction in [0, 360), azimuth")
max_spp=max_spp, #help="GUI rendering max sample per pixel")
zero123_config=zero123_config,
zero123_ckpt=zero123_ckpt,
zero123_grad_scale=zero123_grad_scale, #, help="whether to scale the gradients based on 'angle' or 'None'")
dataset_size_train=dataset_size_train, #help="Length of train dataset i.e. # of iterations per epoch")
dataset_size_valid=dataset_size_valid, #help="# of frames to render in the turntable video in validation")
dataset_size_test=dataset_size_test, #help="# of frames to render in the turntable video at test time")
exp_start_iter=exp_start_iter, #help="start iter # for experiment, to calculate progressive_view and progressive_level")
exp_end_iter=exp_end_iter, #help="end iter # for experiment, to calculate progressive_view and progressive_level")
)
# The code below is kept as-is...
# See https://stackoverflow.com/questions/27433316/how-to-get-argparse-to-read-arguments-from-a-file-with-an-option-rather-than-pre
class LoadFromFile (argparse.Action):
def __call__ (self, parser, namespace, values, option_string = None):
with values as f:
# parse arguments in the file and store them in the target namespace
parser.parse_args(f.read().split(), namespace)
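# Example wiring (hypothetical, not present in this script): the action receives an
# already-opened file, so it would be registered roughly as
#   parser.add_argument('--config_file', type=open, action=LoadFromFile)
# letting a run pick up its command-line flags from a text file.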
parser = argparse.ArgumentParser()
opt = parser.parse_args()
if opt.O:
opt.fp16 = True
opt.cuda_ray = True
elif opt.O2:
opt.fp16 = True
opt.backbone = 'vanilla'
opt.progressive_level = True
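# DeepFloyd-IF guidance replaces Stable Diffusion when --IF is set; IF denoises in
# pixel space, so the latent-only warmup below has to be disabled.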
if opt.IF:
if 'SD' in opt.guidance:
opt.guidance.remove('SD')
opt.guidance.append('IF')
opt.latent_iter_ratio = 0 # must not do as_latent
opt.images, opt.ref_radii, opt.ref_polars, opt.ref_azimuths, opt.zero123_ws = [], [], [], [], []
opt.default_zero123_w = 1
opt.exp_start_iter = opt.exp_start_iter or 0
opt.exp_end_iter = opt.exp_end_iter or opt.iters
# parameters for image-conditioned generation
if opt.image is not None or opt.image_config is not None:
if opt.text is None:
# use zero123 guidance model when only providing image
opt.guidance = ['zero123']
if not opt.dont_override_stuff:
opt.fovy_range = [opt.default_fovy, opt.default_fovy] # fix fov as zero123 doesn't support changing fov
opt.guidance_scale = 5
opt.lambda_3d_normal_smooth = 10
else:
# use stable-diffusion when providing both text and image
opt.guidance = ['SD', 'clip']
if not opt.dont_override_stuff:
opt.guidance_scale = 10
opt.t_range = [0.2, 0.6]
opt.known_view_interval = 2
opt.lambda_3d_normal_smooth = 20
opt.bg_radius = -1
# smoothness
opt.lambda_entropy = 1
opt.lambda_orient = 1
# latent warmup is not needed
opt.latent_iter_ratio = 0
if not opt.dont_override_stuff:
opt.albedo_iter_ratio = 0
# make shape init more stable
opt.progressive_view = True
opt.progressive_level = True
if opt.image is not None:
opt.images += [opt.image]
opt.ref_radii += [opt.default_radius]
opt.ref_polars += [opt.default_polar]
opt.ref_azimuths += [opt.default_azimuth]
opt.zero123_ws += [opt.default_zero123_w]
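    # --image_config points at a CSV with one reference view per row; the columns read
    # below are image, radius, polar, azimuth and zero123_weight.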
if opt.image_config is not None:
# for multiview (zero123)
conf = pd.read_csv(opt.image_config, skipinitialspace=True)
opt.images += list(conf.image)
opt.ref_radii += list(conf.radius)
opt.ref_polars += list(conf.polar)
opt.ref_azimuths += list(conf.azimuth)
opt.zero123_ws += list(conf.zero123_weight)
if opt.image is None:
opt.default_radius = opt.ref_radii[0]
opt.default_polar = opt.ref_polars[0]
opt.default_azimuth = opt.ref_azimuths[0]
opt.default_zero123_w = opt.zero123_ws[0]
# reset to None
if len(opt.images) == 0:
opt.images = None
# default parameters for finetuning
if opt.dmtet:
opt.h = int(opt.h * opt.dmtet_reso_scale)
opt.w = int(opt.w * opt.dmtet_reso_scale)
opt.known_view_scale = 1
if not opt.dont_override_stuff:
opt.t_range = [0.02, 0.50] # ref: magic3D
if opt.images is not None:
opt.lambda_normal = 0
opt.lambda_depth = 0
if opt.text is not None and not opt.dont_override_stuff:
opt.t_range = [0.20, 0.50]
# assume finetuning
opt.latent_iter_ratio = 0
opt.albedo_iter_ratio = 0
opt.progressive_view = False
# opt.progressive_level = False
# record full range for progressive view expansion
if opt.progressive_view:
if not opt.dont_override_stuff:
# disable as they disturb progressive view
opt.jitter_pose = False
opt.uniform_sphere_rate = 0
# back up full range
opt.full_radius_range = opt.radius_range
opt.full_theta_range = opt.theta_range
opt.full_phi_range = opt.phi_range
opt.full_fovy_range = opt.fovy_range
if opt.backbone == 'vanilla':
from nerf.network import NeRFNetwork
elif opt.backbone == 'grid':
from nerf.network_grid import NeRFNetwork
elif opt.backbone == 'grid_tcnn':
from nerf.network_grid_tcnn import NeRFNetwork
elif opt.backbone == 'grid_taichi':
opt.cuda_ray = False
opt.taichi_ray = True
import taichi as ti
from nerf.network_grid_taichi import NeRFNetwork
taichi_half2_opt = True
taichi_init_args = {"arch": ti.cuda, "device_memory_GB": 4.0}
if taichi_half2_opt:
taichi_init_args["half2_vectorization"] = True
ti.init(**taichi_init_args)
else:
raise NotImplementedError(f'--backbone {opt.backbone} is not implemented!')
print(opt)
if opt.seed is not None:
seed_everything(int(opt.seed))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeRFNetwork(opt).to(device)
if opt.dmtet and opt.init_with != '':
if opt.init_with.endswith('.pth'):
# load pretrained weights to init dmtet
state_dict = torch.load(opt.init_with, map_location=device)
model.load_state_dict(state_dict['model'], strict=False)
if opt.cuda_ray:
model.mean_density = state_dict['mean_density']
model.init_tet()
else:
# assume a mesh to init dmtet (experimental, not working well now!)
import trimesh
mesh = trimesh.load(opt.init_with, force='mesh', skip_material=True, process=False)
model.init_tet(mesh=mesh)
print(model)
if opt.six_views:
guidance = None # no need to load guidance model at test
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt)
test_loader = NeRFDataset(opt, device=device, type='six_views', H=opt.H, W=opt.W, size=6).dataloader(batch_size=1)
trainer.test(test_loader, write_video=False)
if opt.save_mesh:
trainer.save_mesh()
elif opt.test:
guidance = None # no need to load guidance model at test
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt)
if opt.gui:
from nerf.gui import NeRFGUI
gui = NeRFGUI(opt, trainer)
gui.render()
else:
test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1)
trainer.test(test_loader)
if opt.save_mesh:
trainer.save_mesh()
else:
train_loader = NeRFDataset(opt, device=device, type='train', H=opt.h, W=opt.w, size=opt.dataset_size_train * opt.batch_size).dataloader()
if opt.optim == 'adan':
from optimizer import Adan
# Adan usually requires a larger LR
optimizer = lambda model: Adan(model.get_params(5 * opt.lr), eps=1e-8, weight_decay=2e-5, max_grad_norm=5.0, foreach=False)
else: # adam
optimizer = lambda model: torch.optim.Adam(model.get_params(opt.lr), betas=(0.9, 0.99), eps=1e-15)
if opt.backbone == 'vanilla':
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1))
else:
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 1) # fixed
# scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1))
guidance = nn.ModuleDict()
if 'SD' in opt.guidance:
from guidance.sd_utils import StableDiffusion
guidance['SD'] = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key, opt.t_range)
if 'IF' in opt.guidance:
from guidance.if_utils import IF
guidance['IF'] = IF(device, opt.vram_O, opt.t_range)
if 'zero123' in opt.guidance:
from guidance.zero123_utils import Zero123
guidance['zero123'] = Zero123(device=device, fp16=opt.fp16, config=opt.zero123_config, ckpt=opt.zero123_ckpt, vram_O=opt.vram_O, t_range=opt.t_range, opt=opt)
if 'clip' in opt.guidance:
from guidance.clip_utils import CLIP
guidance['clip'] = CLIP(device)
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint=opt.ckpt, scheduler_update_every_step=True)
trainer.default_view_data = train_loader._data.get_default_view_data()
if opt.gui:
from nerf.gui import NeRFGUI
gui = NeRFGUI(opt, trainer, train_loader)
gui.render()
else:
valid_loader = NeRFDataset(opt, device=device, type='val', H=opt.H, W=opt.W, size=opt.dataset_size_valid).dataloader(batch_size=1)
test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1)
max_epoch = np.ceil(opt.iters / len(train_loader)).astype(np.int32)
trainer.train(train_loader, valid_loader, test_loader, max_epoch)
if opt.save_mesh:
trainer.save_mesh()
| [] |
2024-01-10 | Chiritover/2023FdDataMining | hallucination_snowball~try_senator.py | import json
import os
import openai
import time
import re
data_path = 'data/senator_search.json'
with open(data_path, 'r') as f:
data = json.load(f)
print(len(data))
openai_key = os.getenv('OPENAI_API_KEY')
openai.api_key = openai_key
for questions in data:
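    # Query gpt-3.5-turbo once per question; on any API error (typically a rate
    # limit), wait 60 seconds and retry the request a single time.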
try:
completion = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo-0613',
messages = [{'role':'user','content':questions}],
)
except:
time.sleep(60)
completion = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo-0613',
messages = [{'role':'user','content':questions}],
)
answer = completion.choices[0].message['content']
    # Append the question/answer pair to the results file as one JSON object per line
with open('./result/senator_answer_original_gpt3.5_0613.json', 'a+') as f:
query = {'question':questions, 'answer':answer}
b = json.dumps(query)
f.write(b)
f.write('\n') | [] |
2024-01-10 | Chiritover/2023FdDataMining | hallucination_selfcheck~selfcheck.py | import openai
import torch
import spacy
import numpy as np
from typing import List
import bert_score
import re
import time
from transformers import logging
logging.set_verbosity_warning()
logging.set_verbosity_error()
# from selfcheckgpt.modeling_selfcheck import SelfCheckBERTScore
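# expand_list1 repeats each element num times in place (r1,r1,...,r2,r2,...), while
# expand_list2 repeats the whole list num times (s1,s2,...,s1,s2,...); together they
# enumerate every (reference sentence, sampled sentence) pair scored by BERTScore below.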
def expand_list1(mylist, num):
expanded = []
for x in mylist:
for _ in range(num):
expanded.append(x)
return expanded
def expand_list2(mylist, num):
expanded = []
for _ in range(num):
for x in mylist:
expanded.append(x)
return expanded
class SelfCheckBERTScore:
"""
SelfCheckGPT (BERTScore variant): Checking LLM's text against its own sampled texts via BERTScore (against best-matched sampled sentence)
"""
def __init__(self, default_model="en"):
if default_model == 'zh':
self.nlp = spacy.load("zh_core_web_sm")
elif default_model == 'en':
self.nlp = spacy.load("en_core_web_sm")
self.default_model = default_model # en => roberta-large
print("SelfCheck-BERTScore initialized")
@torch.no_grad()
def predict(
self,
sentences: List[str],
sampled_passages: List[str],
):
"""
This function takes sentences (to be evaluated) with sampled passages (evidence), and return sent-level scores
:param sentences: list[str] -- sentences to be evaluated, e.g. GPT text response spilt by spacy
:param sampled_passages: list[str] -- stochastically generated responses (without sentence splitting)
:return sent_scores: sentence-level score which is 1.0 - bertscore
"""
num_sentences = len(sentences)
num_samples = len(sampled_passages)
bertscore_array = np.zeros((num_sentences, num_samples))
for s in range(num_samples):
sample_passage = sampled_passages[s]
sentences_sample = [sent for sent in self.nlp(
sample_passage).sents] # List[spacy.tokens.span.Span]
sentences_sample = [sent.text.strip()
for sent in sentences_sample]
num_sentences_sample = len(sentences_sample)
# r1,r1,r1,....
refs = expand_list1(sentences, num_sentences_sample)
# s1,s2,s3,...
cands = expand_list2(sentences_sample, num_sentences)
P, R, F1 = bert_score.score(
cands, refs, lang=self.default_model, verbose=False)
F1_arr = F1.reshape(num_sentences, num_sentences_sample)
F1_arr_max_axis1 = F1_arr.max(axis=1).values
F1_arr_max_axis1 = F1_arr_max_axis1.numpy()
bertscore_array[:, s] = F1_arr_max_axis1
bertscore_mean_per_sent = bertscore_array.mean(axis=-1)
one_minus_bertscore_mean_per_sent = 1.0 - bertscore_mean_per_sent
return one_minus_bertscore_mean_per_sent
openai.api_key = "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"
def chat_gpt(prompt):
    # Call the ChatGPT chat-completion API
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt}
]
)
except:
time.sleep(60)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt}
]
)
response = completion.choices[0].message['content']
# remove_chars = '[.。]+'
# response = re.sub(remove_chars, "", response)
return response.replace("\n", " ").strip()
def selfcheck(prompt):
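    # Ask the model once for the main answer, then draw 10 more stochastic samples;
    # each sentence of the main answer is scored against the samples, and the mean
    # (1 - BERTScore) over sentences is returned as the inconsistency score.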
answer = chat_gpt(prompt)
samples = []
for _ in range(10):
sample = chat_gpt(prompt)
samples.append(sample)
nlp = spacy.load("en_core_web_sm")
sentences = [sent for sent in nlp(answer).sents]
sentences = [sent.text.strip() for sent in sentences]
selfcheck_bertscore = SelfCheckBERTScore(default_model='en')
sent_scores_bertscore = selfcheck_bertscore.predict(
sentences,
samples,
)
# print("BERTScore:")
sum = 0
num = 0
for s1 in sent_scores_bertscore:
num += 1
sum += s1
# print("{:.4f}".format(s1))
return sum/num
if __name__ == "__main__":
prompt = "What is the result of 8514 multiplied by 3978?"
score = selfcheck(prompt)
    print("Average score of the answer: {:.4f}".format(score))
# answer = chat_gpt(prompt)
# samples = []
# for _ in range(10):
# sample = chat_gpt(prompt)
# samples.append(sample)
# print(len(samples))
    # print("User question: ", prompt)
    # print("LLM answer: ", answer)
    # print("--------")
    # for i in range(10):
    #     print("Sample {}: ".format(i+1), samples[i])
# print("---------------")
# nlp = spacy.load("en_core_web_sm")
# sentences = [sent for sent in nlp(answer).sents]
# # print(sentences)
# sentences = [sent.text.strip() for sent in sentences]
# # print(sentences)
# selfcheck_bertscore = SelfCheckBERTScore(default_model='en')
# # print(1)
# sent_scores_bertscore = selfcheck_bertscore.predict(
# sentences,
# samples,
# )
# print("BERTScore:")
# for s1 in sent_scores_bertscore:
# print("{:.4f}".format(s1))
| [
"What is the result of 8514 multiplied by 3978?"
] |
2024-01-10 | Chiritover/2023FdDataMining | hallucination_snowball~try_prime.py | import json
import os
import openai
import time
import re
data_path = 'data/primality_testing.json'
with open(data_path, 'r') as f:
data = json.load(f)
print(len(data))
openai_key = os.getenv('OPENAI_API_KEY')
openai.api_key = openai_key
for questions in data:
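    # Same retry pattern as the other snowball scripts: on any API error, wait 60
    # seconds and retry the request once.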
try:
completion = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo-0613',
messages = [{'role':'user','content':questions['question']}],
)
except:
time.sleep(60)
completion = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo-0613',
messages = [{'role':'user','content':questions['question']}],
)
answer = completion.choices[0].message['content']
    # Append the question/answer pair to the results file as one JSON object per line
with open('./result/answer_original_gpt3.5_0613.json', 'a+') as f:
query = {'question':questions['question'], 'answer':answer}
b = json.dumps(query)
f.write(b)
f.write('\n')
| [
"question"
] |
2024-01-10 | MrSean666/WPeChatGPT | WPeChatGPT.py | import functools
import idaapi
import ida_hexrays
import ida_kernwin
import idc
import openai
import re
import textwrap
import threading
import json
import sys, os
# Windows
path = os.path.dirname(os.path.abspath(__file__)) + "\\Auto-WPeGPT_WPeace\\"
# MacOS
#path = os.path.dirname(os.path.abspath(__file__)) + "/Auto-WPeGPT_WPeace/"
sys.path.append(path)
import Auto_WPeGPT
# Whether to produce the code explanations in Chinese
ZH_CN = True
# Set your API key here, or put in in the OPENAI_API_KEY environment variable.
openai.api_key = "ENTER_OPEN_API_KEY_HERE"
# Set OpenAI-Proxy
#print("WPeChatGPT has appointed the proxy.")
#proxies = {'http': "http://127.0.0.1:7890", 'https': "http://127.0.0.1:7890"}
#openai.proxy = proxies
# WPeChatGPT: function analysis/explanation action
class ExplainHandler(idaapi.action_handler_t):
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
funcComment = getFuncComment(idaapi.get_screen_ea())
if "---GPT_START---" in funcComment:
if ZH_CN:
print("当前函数已经完成过 WPeChatGPT:Explain 分析,请查看注释或删除注释重新分析。@WPeace")
else:
print("The current function has been analyzed by WPeChatGPT:Explain, please check the comment or delete the comment to re-analyze. @WPeace")
return 0
decompiler_output = ida_hexrays.decompile(idaapi.get_screen_ea())
v = ida_hexrays.get_widget_vdui(ctx.widget)
        # Chinese
if ZH_CN:
query_model_async("对下面的C语言伪代码函数进行分析,分别推测该函数的使用环境、预期目的、详细的函数功能,最后为这个函数取一个新的名字。(用简体中文回答我,并且回答开始前加上'---GPT_START---'字符串结束后加上'---GPT_END---'字符串)\n"
+ str(decompiler_output),
functools.partial(comment_callback, address=idaapi.get_screen_ea(), view=v, wrapWidth=80, cmtFlag=0, printFlag=0),
0)
# English
else:
query_model_async("Analyze the following C language pseudo-code function, respectively speculate on the use environment, expected purpose, and detailed function of the function, and finally choose a new name for this function. (add '---GPT_START---' string before the beginning of the answer and add '---GPT_END---' string after the end)\n" + str(decompiler_output), functools.partial(comment_callback, address=idaapi.get_screen_ea(), view=v, wrapWidth=80, cmtFlag=0, printFlag=0), 0)
return 1
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
# WPeChatGPT: variable renaming action
class RenameHandler(idaapi.action_handler_t):
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
decompiler_output = ida_hexrays.decompile(idaapi.get_screen_ea())
v = ida_hexrays.get_widget_vdui(ctx.widget)
query_model_async("Analyze the following C function:\n" + str(decompiler_output) +
"\nSuggest better variable names, reply with a JSON array where keys are the original names"
"and values are the proposed names. Do not explain anything, only print the JSON "
"dictionary.",
functools.partial(rename_callback, address=idaapi.get_screen_ea(), view=v),
0)
return 1
# This action is always available.
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
# WPeChatGPT: reconstruct the function in Python 3
class PythonHandler(idaapi.action_handler_t):
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
        # lastAddr is the address of the last assembly instruction of the function
lastAddr = idc.prev_head(idc.get_func_attr(idaapi.get_screen_ea(), idc.FUNCATTR_END))
        # Fetch the existing comment at that address
addrComment = getAddrComment(lastAddr)
if "---GPT_Python_START---" in str(addrComment):
if ZH_CN:
print("当前函数已经完成过 WPeChatGPT:Python 分析,请查看注释或删除注释重新分析。@WPeace")
else:
print("The current function has been analyzed by WPeChatGPT:Python, please check the comment or delete the comment to re-analyze. @WPeace")
return 0
decompiler_output = ida_hexrays.decompile(idaapi.get_screen_ea())
v = ida_hexrays.get_widget_vdui(ctx.widget)
        # Chinese
if ZH_CN:
query_model_async("分析下面的C语言伪代码并用python3代码进行还原。(回答开始前加上'---GPT_Python_START---'字符串结束后加上'---GPT_Python_END---'字符串)\n"
+ str(decompiler_output),
functools.partial(comment_callback, address=lastAddr, view=v, wrapWidth=120, cmtFlag=1, printFlag=1),
0)
# English
else:
query_model_async("Analyze the following C language pseudocode and restore it with python3 code. (Add '---GPT_Python_START---' string before the beginning of the answer and add '---GPT_Python_END---' string after the end)\n"
+ str(decompiler_output),
functools.partial(comment_callback, address=lastAddr, view=v, wrapWidth=120, cmtFlag=1, printFlag=1),
0)
return 1
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
# WPeChatGPT: vulnerability finding action
class FindVulnHandler(idaapi.action_handler_t):
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
funcComment = getFuncComment(idaapi.get_screen_ea())
if "---GPT_VulnFinder_START---" in funcComment:
if ZH_CN:
print("当前函数已经完成过 WPeChatGPT:VulnFinder 分析,请查看注释或删除注释重新分析。@WPeace")
else:
print("The current function has been analyzed by WPeChatGPT:VulnFinder, please check the comment or delete the comment to re-analyze. @WPeace")
return 0
decompiler_output = ida_hexrays.decompile(idaapi.get_screen_ea())
v = ida_hexrays.get_widget_vdui(ctx.widget)
        # Chinese
if ZH_CN:
query_model_async("查找下面这个C语言伪代码函数的漏洞并提出可能的利用方法。(用简体中文回答我,并且回答开始前加上'---GPT_VulnFinder_START---'字符串结束后加上'---GPT_VulnFinder_END---'字符串)\n"
+ str(decompiler_output),
functools.partial(comment_callback, address=idaapi.get_screen_ea(), view=v, wrapWidth=80, cmtFlag=0, printFlag=2),
0)
# English
else:
query_model_async("Find the following C function vulnerabilty and suggest a possible way to exploit it.(Use English to answer me, and answer before plus '---GPT_VulnFinder_START---' the end of the string plus '---GPT_VulnFinder_END---' string)\n"
+ str(decompiler_output),
functools.partial(comment_callback, address=idaapi.get_screen_ea(), view=v, wrapWidth=80, cmtFlag=0, printFlag=2),
0)
return 1
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
# WPeChatGPT: try to generate an exploit for the vulnerable function
class expCreateHandler(idaapi.action_handler_t):
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
funcComment = getFuncComment(idaapi.get_screen_ea())
if "---GPT_VulnPython_START---" in funcComment:
if ZH_CN:
print("当前函数已经完成过 WPeChatGPT:ExpCreater 分析,请查看注释或删除注释重新分析。@WPeace")
else:
print("The current function has been analyzed by WPeChatGPT:ExpCreater, please check the comment or delete the comment to re-analyze. @WPeace")
return 0
decompiler_output = ida_hexrays.decompile(idaapi.get_screen_ea())
v = ida_hexrays.get_widget_vdui(ctx.widget)
        # Chinese
if ZH_CN:
query_model_async("使用Python构造代码来利用下面函数中的漏洞。(用简体中文回答我,并且回答开始前加上'---GPT_VulnPython_START---'字符串结束后加上'---GPT_VulnPython_END---'字符串)\n"
+ str(decompiler_output),
functools.partial(comment_callback, address=idaapi.get_screen_ea(), view=v, wrapWidth=120, cmtFlag=0, printFlag=3),
0)
# English
else:
query_model_async("Use Python to construct code to exploit the vulnerabilities in the following functions.(Answer before plus '---GPT_VulnPython_START---' the end of the string plus '---GPT_VulnPython_END---' string)\n"
+ str(decompiler_output),
functools.partial(comment_callback, address=idaapi.get_screen_ea(), view=v, wrapWidth=120, cmtFlag=0, printFlag=3),
0)
return 1
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
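# Auto-WPeGPT helpers: build a multi-message chat containing the extracted function
# call tree and the binary's strings, ask gpt-3.5-turbo to infer the program's purpose
# and functionality, and run the request on a background thread so the IDA UI stays responsive.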
def autoChatFunc(funcTree:str, strings:str, callback):
messages = []
input_funcTree = funcTree
messages.append({"role": "user", "content": input_funcTree})
input_strings = strings
messages.append({"role": "user", "content": input_strings})
if ZH_CN:
messages.append({"role": "user", "content": "结合该程序的函数调用结构及其所包含的字符串,猜测其运行目的及功能。"})
messages.append({"role": "user", "content": "请再仔细分析后告诉我该程序的运行目的及大概功能。"})
else:
messages.append({"role": "user", "content": "Combining the function call structure of the program and the strings it contains, guess its purpose and function."})
messages.append({"role": "user", "content": "Please tell me the purpose and general function of the program after careful analysis."})
t = threading.Thread(target=chat_api_worker, args=(messages, "gpt-3.5-turbo", callback))
t.start()
def chat_api_worker(messages, model, callback):
response = openai.ChatCompletion.create(messages=messages, model=model)
callback(response)
def handle_response(autoGptfolder, response):
message = response.choices[0].message
if ZH_CN:
print("GPT 分析完毕,已将结果输出到文件夹:" + autoGptfolder + " 当中!")
else:
print("The GPT analysis is complete and the result has been output to the folder: " + autoGptfolder)
fp = open(autoGptfolder + "GPT-Result.txt", "w")
fp.write(message.content)
fp.close()
print("Auto-WPeGPT finished! :)@WPeace\n")
# Auto-WPeGPT: automated whole-binary analysis
class autoHandler(idaapi.action_handler_t):
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
Auto_WPeGPT.main()
idb_path = idc.get_idb_path()
idb_name = 'WPe_' + os.path.basename(idb_path)
autoGptfolder = os.path.join(os.getcwd(), idb_name) + '\\'
functreeFilepath = autoGptfolder + "funcTree.txt"
mainFunctreeFilepath = autoGptfolder + "mainFuncTree.txt"
stringsFilepath = autoGptfolder + "effectiveStrings.txt"
file = open(functreeFilepath, "r")
functreeData = file.read()
file.close()
file = open(mainFunctreeFilepath, "r")
mainFunctreeData = file.read()
file.close()
file = open(stringsFilepath, "r")
stringsData = file.read()
file.close()
funcNumber = idaapi.get_func_qty()
print("There are %d functions in total in this binary file." %funcNumber)
if funcNumber < 150:
callback_autogpt = functools.partial(handle_response, autoGptfolder)
autoChatFunc(functreeData, stringsData, callback_autogpt)
else:
callback_autogpt = functools.partial(handle_response, autoGptfolder)
autoChatFunc(mainFunctreeData, stringsData, callback_autogpt)
print("Auto-WPeGPT v0.1 start to analysis...")
return 1
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
# Gepetto comment_callback Method
def comment_callback(address, view, response, wrapWidth, cmtFlag, printFlag):
"""
    Callback that writes the generated comment at the given address.
:param address: The address of the function to comment
:param view: A handle to the decompiler window
:param response: The comment to add
"""
# Add newlines at the end of each sentence.
response = "\n".join(textwrap.wrap(response, width=wrapWidth, replace_whitespace=False))
# Add the response as a comment in IDA.
    # cmtFlag selects whether to attach a function comment or an address comment
if cmtFlag == 0:
idc.set_func_cmt(address, response, 0)
elif cmtFlag == 1:
idc.set_cmt(address, response, 1)
# Refresh the window so the comment is displayed properly
if view:
view.refresh_view(False)
print("gpt-3.5-turbo query finished!")
if printFlag == 0:
if ZH_CN:
print("WPeChatGPT:Explain 完成分析,已对函数 %s 进行注释。@WPeace" %idc.get_func_name(address))
else:
print("WPeChatGPT:Explain finished analyzing, function %s has been commented. @WPeace" %idc.get_func_name(address))
elif printFlag == 1:
if ZH_CN:
print("WPeChatGPT:Python 完成分析,已在函数末尾地址 %s 汇编处进行注释。@WPeace" %hex(address))
else:
print("WPeChatGPT:Python finished parsing, commented at assembly at address %s at end of function. @WPeace" %hex(address))
elif printFlag == 2:
if ZH_CN:
print("WPeChatGPT:VulnFinder 完成分析,已对函数 %s 进行注释。@WPeace" %idc.get_func_name(address))
else:
print("WPeChatGPT: VulnFinder finished analyzing, function %s has been annotated. @WPeace" %idc.get_func_name(address))
elif printFlag == 3:
if ZH_CN:
print("WPeChatGPT:ExpCreater 完成分析,已对函数 %s 进行注释。@WPeace" %idc.get_func_name(address))
else:
print("WPeChatGPT:ExpCreater finished analyzing, commented on function %s. @WPeace" %idc.get_func_name(address))
# Gepetto rename_callback Method
def rename_callback(address, view, response, retries=0):
"""
    Callback that renames the function's variables based on the model's JSON reply.
:param address: The address of the function to work on
:param view: A handle to the decompiler window
:param response: The response from gpt-3.5-turbo
:param retries: The number of times that we received invalid JSON
"""
j = re.search(r"\{[^}]*?\}", response)
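    # If the reply lacks a {...} block, or the block is not valid JSON, ask the model
    # to repair its own output, giving up after three attempts.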
if not j:
if retries >= 3: # Give up obtaining the JSON after 3 times.
print("ChatGPT-gpt-3.5-turbo API has no valid response, please try again later. @WPeace")
return
print(f"Cannot extract valid JSON from the response. Asking the model to fix it...")
query_model_async("The JSON document provided in this response is invalid. Can you fix it?\n" + response,
functools.partial(rename_callback,
address=address,
view=view,
retries=retries + 1),
1)
return
try:
names = json.loads(j.group(0))
except json.decoder.JSONDecodeError:
if retries >= 3: # Give up fixing the JSON after 3 times.
print("ChatGPT-gpt-3.5-turbo API has no valid response, please try again later. @WPeace")
return
print(f"The JSON document returned is invalid. Asking the model to fix it...")
query_model_async("Please fix the following JSON document:\n" + j.group(0),
functools.partial(rename_callback,
address=address,
view=view,
retries=retries + 1),
1)
return
# The rename function needs the start address of the function
function_addr = idaapi.get_func(address).start_ea
replaced = []
for n in names:
if ida_hexrays.rename_lvar(function_addr, n, names[n]):
replaced.append(n)
# Update possible names left in the function comment
comment = idc.get_func_cmt(address, 0)
if comment and len(replaced) > 0:
for n in replaced:
comment = re.sub(r'\b%s\b' % n, names[n], comment)
idc.set_func_cmt(address, comment, 0)
# Refresh the window to show the new names
if view:
view.refresh_view(True)
print("gpt-3.5-turbo query finished!")
if ZH_CN:
print(f"WPeChatGPT:RenameVariable 完成分析,已重命名{len(replaced)}个变量。@WPeace")
else:
print(f"WPeChatGPT:RenameVariable Completed analysis, renamed {len(replaced)} variables. @WPeace")
# Gepetto query_model Method
def query_model(query, cb, max_tokens=2500):
"""
    Send a query to gpt-3.5-turbo.
    :param query: The request to send to gpt-3.5-turbo
    :param cb: The function to which the response will be passed.
"""
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": query}
            ],
            max_tokens=max_tokens  # forward the cap so the context-length retry below can shrink it
        )
ida_kernwin.execute_sync(functools.partial(cb, response=response.choices[0]["message"]["content"]), ida_kernwin.MFF_WRITE)
except openai.InvalidRequestError as e:
# Context length exceeded. Determine the max number of tokens we can ask for and retry.
m = re.search(r'maximum context length is (\d+) tokens, however you requested \d+ tokens \((\d+) in your '
r'prompt;', str(e))
if not m:
print(f"gpt-3.5-turbo could not complete the request: {str(e)}")
return
(hard_limit, prompt_tokens) = (int(m.group(1)), int(m.group(2)))
max_tokens = hard_limit - prompt_tokens
if max_tokens >= 750:
print(f"WPeChatGPT-Warning: Context length too long! Try reducing tokens to {max_tokens}...")
print("Request to gpt-3.5-turbo sent retried...")
query_model(query, cb, max_tokens)
else:
print("可惜可惜,这个函数太大了不能使用 ChatGPT-gpt-3.5-turbo API 来分析。@WPeace")
except openai.OpenAIError as e:
if "That model is currently overloaded with other requests" in str(e) or "Request timed out" in str(e):
print("ChatGPT-gpt-3.5-turbo API 繁忙,请稍后重试或检查代理。@WPeace")
else:
print(f"gpt-3.5-turbo could not complete the request: {str(e)}")
except Exception as e:
print(f"General exception encountered while running the query: {str(e)}")
# Gepetto query_model_async Method
def query_model_async(query, cb, time):
"""
    Spawn a thread that calls query_model.
    :param query: The request to send to gpt-3.5-turbo
    :param cb: The function to which the response will be passed.
:param time: whether it is a retry.
"""
if time == 0:
if ZH_CN:
print("正在发送 ChatGPT-gpt-3.5-turbo API 请求,完成后将输出提示。@WPeace")
else:
print("Sending ChatGPT-gpt-3.5-turbo API request, will output a prompt when completed. @WPeace")
print("Request to gpt-3.5-turbo sent...")
else:
if ZH_CN:
print("正在重新发送 ChatGPT-gpt-3.5-turbo API 请求。@WPeace")
else:
print("Resending ChatGPT-gpt-3.5-turbo API request. @WPeace")
t = threading.Thread(target=query_model, args=[query, cb])
t.start()
# Add context menu actions
class ContextMenuHooks(idaapi.UI_Hooks):
def finish_populating_widget_popup(self, form, popup):
idaapi.attach_action_to_popup(form, popup, myplugin_WPeChatGPT.explain_action_name, "WPeChatGPT/")
idaapi.attach_action_to_popup(form, popup, myplugin_WPeChatGPT.rename_action_name, "WPeChatGPT/")
idaapi.attach_action_to_popup(form, popup, myplugin_WPeChatGPT.python_action_name, "WPeChatGPT/")
idaapi.attach_action_to_popup(form, popup, myplugin_WPeChatGPT.vulnFinder_action_name, "WPeChatGPT/")
idaapi.attach_action_to_popup(form, popup, myplugin_WPeChatGPT.expPython_action_name, "WPeChatGPT/")
# Get the comment of a function
def getFuncComment(address):
cmt = idc.get_func_cmt(address, 0)
if not cmt:
cmt = idc.get_func_cmt(address, 1)
return cmt
# Get the comment at an address
def getAddrComment(address):
cmt = idc.get_cmt(address, 0)
if not cmt:
cmt = idc.get_cmt(address, 1)
return cmt
class myplugin_WPeChatGPT(idaapi.plugin_t):
autoWPeGPT_action_name = "WPeChatGPT:Auto-WPeGPT"
autoWPeGPT_menu_path = "Edit/WPeChatGPT/Auto-WPeGPT/Auto-WPeGPT v0.1"
explain_action_name = "WPeChatGPT:Explain_Function"
explain_menu_path = "Edit/WPeChatGPT/函数分析"
rename_action_name = "WPeChatGPT:Rename_Function"
rename_menu_path = "Edit/WPeChatGPT/重命名函数变量"
python_action_name = "WPeChatGPT:Python_Function"
python_menu_path = "Edit/WPeChatGPT/Python还原此函数"
vulnFinder_action_name = "WPeChatGPT:VulnFinder_Function"
vulnFinder_menu_path = "Edit/WPeChatGPT/二进制漏洞查找"
expPython_action_name = "WPeChatGPT:VulnPython_Function"
expPython_menu_path = "Edit/WPeChatGPT/尝试生成Exploit"
wanted_name = 'WPeChatGPT'
wanted_hotkey = ''
comment = "WPeChatGPT Plugin for IDA"
help = "Find more information at https://github.com/wpeace-hch"
menu = None
flags = 0
def init(self):
# Check whether the decompiler is available
if not ida_hexrays.init_hexrays_plugin():
return idaapi.PLUGIN_SKIP
if ZH_CN:
# create Auto-WPeGPT action
autoWPeGPT_action = idaapi.action_desc_t(self.autoWPeGPT_action_name,
'二进制文件自动化分析 v0.1',
autoHandler(),
"",
'使用 gpt-3.5-turbo 对二进制文件进行自动化分析',
199)
idaapi.register_action(autoWPeGPT_action)
idaapi.attach_action_to_menu(self.autoWPeGPT_menu_path, self.autoWPeGPT_action_name, idaapi.SETMENU_APP)
# Function explaining action
explain_action = idaapi.action_desc_t(self.explain_action_name,
'函数分析',
ExplainHandler(),
"Ctrl+Alt+G",
'使用 gpt-3.5-turbo 分析当前函数',
199)
idaapi.register_action(explain_action)
idaapi.attach_action_to_menu(self.explain_menu_path, self.explain_action_name, idaapi.SETMENU_APP)
# Variable renaming action
rename_action = idaapi.action_desc_t(self.rename_action_name,
'重命名函数变量',
RenameHandler(),
"Ctrl+Alt+R",
"使用 gpt-3.5-turbo 重命名当前函数的变量",
199)
idaapi.register_action(rename_action)
idaapi.attach_action_to_menu(self.rename_menu_path, self.rename_action_name, idaapi.SETMENU_APP)
# python function action
python_action = idaapi.action_desc_t(self.python_action_name,
'Python还原此函数',
PythonHandler(),
"",
"使用 gpt-3.5-turbo 分析当前函数并用python3还原",
199)
idaapi.register_action(python_action)
idaapi.attach_action_to_menu(self.python_menu_path, self.python_action_name, idaapi.SETMENU_APP)
# find vulnerabilty action
vulnFinder_action = idaapi.action_desc_t(self.vulnFinder_action_name,
'二进制漏洞查找',
FindVulnHandler(),
"Ctrl+Alt+E",
'使用 gpt-3.5-turbo 在当前函数中查找漏洞',
199)
idaapi.register_action(vulnFinder_action)
idaapi.attach_action_to_menu(self.vulnFinder_menu_path, self.vulnFinder_action_name, idaapi.SETMENU_APP)
# create exploit action
expPython_action = idaapi.action_desc_t(self.expPython_action_name,
'尝试生成Exploit',
expCreateHandler(),
"",
'使用 gpt-3.5-turbo 尝试对漏洞函数生成EXP',
199)
idaapi.register_action(expPython_action)
idaapi.attach_action_to_menu(self.expPython_menu_path, self.expPython_action_name, idaapi.SETMENU_APP)
# Register context menu actions
self.menu = ContextMenuHooks()
self.menu.hook()
print("Auto-WPeGPT v0.1 is ready.")
print("WPeChatGPT v2.3 works fine! :)@WPeace\n")
else:
# create Auto-WPeGPT action
autoWPeGPT_action = idaapi.action_desc_t(self.autoWPeGPT_action_name,
'Automated analysis v0.1',
autoHandler(),
"",
'使用 gpt-3.5-turbo 对二进制文件进行自动化分析',
199)
idaapi.register_action(autoWPeGPT_action)
idaapi.attach_action_to_menu(self.autoWPeGPT_menu_path, self.autoWPeGPT_action_name, idaapi.SETMENU_APP)
# Function explaining action
explain_action = idaapi.action_desc_t(self.explain_action_name,
'Function analysis',
ExplainHandler(),
"Ctrl+Alt+G",
'使用 gpt-3.5-turbo 分析当前函数',
199)
idaapi.register_action(explain_action)
idaapi.attach_action_to_menu(self.explain_menu_path, self.explain_action_name, idaapi.SETMENU_APP)
# Variable renaming action
rename_action = idaapi.action_desc_t(self.rename_action_name,
'Rename function variables',
RenameHandler(),
"Ctrl+Alt+R",
"使用 gpt-3.5-turbo 重命名当前函数的变量",
199)
idaapi.register_action(rename_action)
idaapi.attach_action_to_menu(self.rename_menu_path, self.rename_action_name, idaapi.SETMENU_APP)
# python function action
python_action = idaapi.action_desc_t(self.python_action_name,
'Python restores this function',
PythonHandler(),
"",
"使用 gpt-3.5-turbo 分析当前函数并用python3还原",
199)
idaapi.register_action(python_action)
idaapi.attach_action_to_menu(self.python_menu_path, self.python_action_name, idaapi.SETMENU_APP)
# find vulnerabilty action
vulnFinder_action = idaapi.action_desc_t(self.vulnFinder_action_name,
'Vulnerability finding',
FindVulnHandler(),
"Ctrl+Alt+E",
'使用 gpt-3.5-turbo 在当前函数中查找漏洞',
199)
idaapi.register_action(vulnFinder_action)
idaapi.attach_action_to_menu(self.vulnFinder_menu_path, self.vulnFinder_action_name, idaapi.SETMENU_APP)
# create exploit action
expPython_action = idaapi.action_desc_t(self.expPython_action_name,
'Try to generate Exploit',
expCreateHandler(),
"",
'使用 gpt-3.5-turbo 尝试对漏洞函数生成EXP',
199)
idaapi.register_action(expPython_action)
idaapi.attach_action_to_menu(self.expPython_menu_path, self.expPython_action_name, idaapi.SETMENU_APP)
# Register context menu actions
self.menu = ContextMenuHooks()
self.menu.hook()
print("Auto-WPeGPT v0.1 is ready.")
print("WPeChatGPT v2.3 works fine! :)@WPeace\n")
return idaapi.PLUGIN_KEEP
def run(self, arg):
pass
def term(self):
idaapi.detach_action_from_menu(self.autoWPeGPT_menu_path, self.autoWPeGPT_action_name)
idaapi.detach_action_from_menu(self.explain_menu_path, self.explain_action_name)
idaapi.detach_action_from_menu(self.rename_menu_path, self.rename_action_name)
idaapi.detach_action_from_menu(self.python_menu_path, self.python_action_name)
idaapi.detach_action_from_menu(self.vulnFinder_menu_path, self.vulnFinder_action_name)
idaapi.detach_action_from_menu(self.expPython_menu_path, self.expPython_action_name)
if self.menu:
self.menu.unhook()
return
def PLUGIN_ENTRY():
if openai.api_key == "ENTER_OPEN_API_KEY_HERE":
openai.api_key = os.getenv("OPENAI_API_KEY")
if not openai.api_key:
print("未找到 API_KEY,请在脚本中填写 openai.api_key! :(@WPeace")
raise ValueError("No valid OpenAI API key found")
return myplugin_WPeChatGPT()
| [
"Combining the function call structure of the program and the strings it contains, guess its purpose and function.",
"Please tell me the purpose and general function of the program after careful analysis.",
"结合该程序的函数调用结构及其所包含的字符串,猜测其运行目的及功能。",
"请再仔细分析后告诉我该程序的运行目的及大概功能。"
] |
2024-01-10 | yingchengsun/IntelBase | Reddit~modelling~misc2.py | '''
Created on Aug 9, 2018
@author: yingc
'''
from gensim import corpora, models, similarities
from pprint import pprint
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import string
import numpy as np
from treelib import Tree
from gensim.models.coherencemodel import CoherenceModel
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from numpy.random.mtrand import RandomState
from sklearn.multiclass import OneVsRestClassifier
from sklearn.tree import DecisionTreeClassifier
import queue
import os
import networkx as nx
from sklearn.metrics import accuracy_score,recall_score,roc_auc_score
file_dir = 'E:\\Reddit\\data'
def file_reader(file_ID):
filename = file_ID + '.txt'
filepath_name = os.path.join(file_dir+'\\labeled data\\', filename)
ndtype = 'i, i, i, S100, S2000'
names = 'Idx, Pidx, label, Topic, Content'
ps = np.genfromtxt(filepath_name, dtype=ndtype, names=names, delimiter='\t', encoding='utf_8_sig')
edges = zip(ps['Idx'], ps['Pidx'])
label = ps['label']
order = ps['Idx']
content = ps['Content']
d=[]
for item in edges:
d.append(item)
g = nx.Graph(d)
#pos = nx.shell_layout(g)
#nx.draw(g)
nx.draw_networkx(g)
#plt.pause(0.5)
plt.clf()
#plt.show()
return edges, label, content
def Normalization(x):
return [(float(i)-min(x))/float(max(x)-min(x)) for i in x]
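# For every node, use the size (number of nodes) and height of the subtree rooted at
# that node as features; both are min-max normalised and zipped into per-node feature
# vectors.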
def get_degree_height(edges, label):
tree = Tree()
tree.create_node(edges[0][0], edges[0][0],data=label[0]) # root node
for i in range(len(edges[1:])):
tree.create_node(tag=edges[1:][i][0], identifier = edges[1:][i][0], parent=edges[1:][i][1], data = label[i+1])
tree.show()
#tree_height = max([len(item) for item in tree.paths_to_leaves()])-1
print edges
node_heights = []
node_degrees = []
for i in range(len(edges)):
node_height = max([len(item) for item in tree.subtree(i).paths_to_leaves()])
node_heights.append(node_height)
node_degree = len(tree.subtree(i).nodes)
node_degrees.append(node_degree)
'''
for edge in edges:
print tree.get_node(edge[0])
print tree.get_node(edge[0]).fpointer
print tree.get_node(edge[0]).bpointer
print tree.level(edge[0])
node_heights.append(tree_height- tree.level(edge[0]))
node_degrees.append(len(tree.get_node(edge[0]).fpointer))
'''
node_degrees = Normalization(np.array(node_degrees))
node_heights = Normalization(np.array(node_heights))
X = zip(node_degrees,node_heights)
#X = zip(node_degrees,order)
print 0.66*np.array(node_heights)+0.34*np.array(node_degrees)
return X
if __name__ == '__main__':
edges1, label1, documents1 = file_reader("867njq_new")
edges2, label2, documents2 = file_reader("8sk1ue")
tree_degree_height1 = get_degree_height(edges1, label1) | [] |
2024-01-10 | yingchengsun/IntelBase | Reddit~modelling~Evaluation~Evaluation-all.py | '''
Created on Aug 11, 2018
@author: yingc
'''
from gensim.models import ldamodel
from gensim.corpora import Dictionary
import pandas as pd
import re
from gensim.parsing.preprocessing import remove_stopwords, strip_punctuation
import numpy as np
from pprint import pprint
df_fake = pd.read_csv('fake - Copy.csv')
df_fake[['title', 'text', 'language']].head()
df_fake = df_fake.loc[(pd.notnull(df_fake.text)) & (df_fake.language == 'english')]
# remove stopwords and punctuations
def preprocess(row):
return strip_punctuation(remove_stopwords(row.lower()))
df_fake['text'] = df_fake['text'].apply(preprocess)
# Convert the data to the input format required by LDA
texts = []
for line in df_fake.text:
lowered = line.lower()
words = re.findall(r'\w+', lowered, flags = re.UNICODE | re.LOCALE)
texts.append(words)
dictionary = Dictionary(texts)
training_texts = texts[:50]
holdout_texts = texts[50:75]
test_texts = texts[75:100]
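# The commented-out block below sketches the full training run: the three corpora are
# converted to bag-of-words with the dictionary and LDA is trained with perplexity,
# coherence, diff and convergence callbacks logged to visdom.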
'''
training_corpus = [dictionary.doc2bow(text) for text in training_texts]
holdout_corpus = [dictionary.doc2bow(text) for text in holdout_texts]
test_corpus = [dictionary.doc2bow(text) for text in test_texts]
from gensim.models.callbacks import CoherenceMetric, DiffMetric, PerplexityMetric, ConvergenceMetric
# define perplexity callback for hold_out and test corpus
pl_holdout = PerplexityMetric(corpus=holdout_corpus, logger="visdom", title="Perplexity (hold_out)")
pl_test = PerplexityMetric(corpus=test_corpus, logger="visdom", title="Perplexity (test)")
# define other remaining metrics available
ch_umass = CoherenceMetric(corpus=training_corpus, coherence="u_mass", logger="visdom", title="Coherence (u_mass)")
ch_cv = CoherenceMetric(corpus=training_corpus, texts=training_texts, coherence="c_v", logger="visdom", title="Coherence (c_v)")
diff_kl = DiffMetric(distance="kullback_leibler", logger="visdom", title="Diff (kullback_leibler)")
convergence_kl = ConvergenceMetric(distance="jaccard", logger="visdom", title="Convergence (jaccard)")
callbacks = [pl_holdout, pl_test, ch_umass, ch_cv, diff_kl, convergence_kl]
# training LDA model
model = ldamodel.LdaModel(corpus=training_corpus, id2word=dictionary, num_topics=35, passes=50, chunksize=150, iterations=200, alpha='auto', callbacks=callbacks)
# to get a metric value on a trained model
print(CoherenceMetric(corpus=training_corpus, coherence="u_mass").get_value(model=model))
''' | [] |
2024-01-10 | yingchengsun/IntelBase | Reddit~modelling~models_gensim_simpleExample.py | # -*- coding:utf-8 -*-
'''
Created on Apr 16, 2018
@author: yingc
'''
from gensim import corpora, models, similarities
from pprint import pprint
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import string
import numpy as np
from treelib import Tree
from gensim.models.coherencemodel import CoherenceModel
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from numpy.random.mtrand import RandomState
from sklearn.multiclass import OneVsRestClassifier
from sklearn.tree import DecisionTreeClassifier
import os
import networkx as nx
from sklearn.metrics import accuracy_score,recall_score,roc_auc_score
from datetime import datetime
from collections import Counter
from gensim.test.test_sklearn_api import texts
#logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
file_dir = 'E:\\Reddit\\data'
simple0= ["lBJ Lakers",
"Warriors Championship"]
simple1= ["lBJ LBJ Lakers Lakers",
"Warriors Championship"]
simple2= ["lBJ LBJ Lakers Lakers",
"Warriors Warriors Championship Championship"]
simple3= ["lBJ LBJ Lakers Lakers LBJ LBJ Lakers Lakers",
"Warriors Warriors Championship Championship"]
simple4 = ["Texas serial bomber made video confession before blowing himself up",
"What are the chances we ever see the video?",
"About the same as the chances of the Browns winning the Super Bowl.",
"every morning.",
"I have to applaud your regularity",
"Pshh I'm taking the browns to the super bowl as we speak",
"Consistency is the key.",
"Seriously. Well done.",
"Zero, videos like this are locked down and used for training purposes. There are a host of confessions and tapes of crimes the public will never see and some have caused agents in training to kill themselves because they are so vile.",
"here I am thinking 'just transcripts? How bad can it be' Bad, guys. Very bad."
]
simple6= [ "Warriors got the Championship",
"Yeah, they deserve it!",
"lBJ went to Lakers",
"shit, that's so bad. I cannot believe it.",
"Oh my gosh, I will not watch Cavs's game"]
edges6 = {0:0,
1:0,
2:2,
3:2,
4:2
}
edges66 = [(0,0),(1,0),(2,0),(3,2),(4,2)]
lable66= [1,0,1,0,0]
lable666= [2,1,0,2,0]
#edges6_pairswitch = dict((value,key) for key,value in edges6.iteritems())
edges= edges66
label = lable666
#documents = simple6
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
#print 'got', len(documents), 'documents' # got 9 documents
#pprint(documents)
def prefix_time():
prefix_time = datetime.now().strftime('%Y%m%d%H%M%S')
return prefix_time
def file_reader(file_ID):
filename = file_ID + '.txt'
filepath_name = os.path.join(file_dir+'\\labeled data\\', filename)
ndtype = 'i, i, i, i, S2000'
names = 'Idx, Pidx, label, Topic, Content'
ps = np.genfromtxt(filepath_name, dtype=ndtype, names=names, delimiter='\t', encoding='utf_8_sig')
edges = zip(ps['Idx'], ps['Pidx'])
label = ps['label']
order = ps['Idx']
content = ps['Content']
topic = ps['Topic']
d=[]
for item in edges:
d.append(item)
g = nx.Graph(d)
#pos = nx.shell_layout(g)
#nx.draw(g)
nx.draw_networkx(g)
#plt.pause(0.5)
plt.clf()
#plt.show()
return edges, label, content, topic
def Normalization(x):
return [(float(i)-min(x))/float(max(x)-min(x)) for i in x]
#return [(float(i))/float(max(x)) for i in x]
#return [0.1+ (float(i)-min(x))/float(max(x)-min(x))*(0.9-0.1) for i in x]
def get_degree_height(edges, label):
tree = Tree()
tree.create_node(edges[0][0], edges[0][0], data = label[0]) # root node
parents = []
parents.append(0)
for i in range(len(edges[1:])):
tree.create_node(tag=edges[1:][i][0], identifier = edges[1:][i][0], parent=edges[1:][i][1], data = label[i+1])
if tree.parent(i):
parents.append(tree.parent(i).identifier)
parents.append(tree.parent(len(edges[1:])).identifier)
tree.show()
#tree_height = max([len(item) for item in tree.paths_to_leaves()])-1
node_heights = []
node_degrees = []
node_popularity = []
    # H: tree depth + 1, the span used by the arithmetic decay (w = 1 - index/H)
    H = tree.depth()+1
    # a: base of the geometric decay (w = a**index)
    a = 0.8
    # G: gravity exponent of the harmonic decay (w = 1/(index+1)**G)
    G = 1
for i in range(len(edges)):
node_height = max([len(item) for item in tree.subtree(i).paths_to_leaves()])
node_heights.append(node_height)
node_degree = len(tree.subtree(i).nodes)
node_degrees.append(node_degree)
subtrees = tree.subtree(i).paths_to_leaves()
h = node_height
node_set = set()
weight = 0
for subt in subtrees:
for index, node in enumerate(subt):
#w = (h-index)/float(h)
#Arithmetic Progression
#w = 1 - float(index)/H
#Geometric Progression
#w = 1 * float(a)**index
#Harmonic Progression
w = float(1)/(index+1)**G
if node not in node_set:
weight += w
node_set.add(node)
node_popularity.append(weight)
print node_popularity
'''
for edge in edges:
node_heights.append(tree_height- tree.level(edge[0]))
node_degrees.append(len(tree.get_node(edge[0]).fpointer))
'''
#print node_degrees
#print node_heights
#print node_popularity
node_degrees = Normalization(np.array(node_degrees))
node_heights = Normalization(np.array(node_heights))
#X = zip(node_degrees,node_heights)
X = [[i] for i in node_degrees]
#X = node_degrees
#X = zip(node_degrees,order)
#extension = 0.66*np.array(node_heights)+0.34*np.array(node_degrees)
#print node_degrees
#print node_heights
#extension = 0.5*np.array(node_heights)+0.5*np.array(node_degrees)
return X, parents
def train_with_degree_height(X,y):
#clf = LinearSVC(random_state= 0)
X = X
y = y
'''
x1 = np.random.randn(100)
x2 = 4*np.random.randn(100)
x3 = 0.5*np.random.randn(100)
y = (3 + x1 + x2 + x3 + 0.2*np.random.randn()) > 0
X = np.column_stack([x1, x2, x3])
'''
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
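    # Fit several off-the-shelf classifiers on the same 50/50 split and report
    # ROC-AUC plus accuracy, along with each model's learned coefficients or
    # feature importances.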
#print X_train
#print OneVsRestClassifier(clf).fit(X_train, y_train).predict(X_test)
model = LogisticRegression()
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
print "LogisticRegression:"
prob_y_2 = model.predict_proba(X_test)
# Keep only the positive class
prob_y_2 = [p[1] for p in prob_y_2]
print "roc_auc_score:", ( roc_auc_score(y_test, prob_y_2) )
print "accuracy:" + str(accuracy_score(y_test, y_predict))
# The estimated coefficients will all be around 1:
print(model.coef_)
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
print "DecisionTreeClassifier:"
prob_y_2 = model.predict_proba(X_test)
# Keep only the positive class
prob_y_2 = [p[1] for p in prob_y_2]
print "roc_auc_score:", ( roc_auc_score(y_test, prob_y_2) )
print "accuracy:" + str(accuracy_score(y_test, y_predict))
print(model.feature_importances_)
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
print "RandomForestClassifier:"
prob_y_2 = model.predict_proba(X_test)
# Keep only the positive class
prob_y_2 = [p[1] for p in prob_y_2]
print "roc_auc_score:", ( roc_auc_score(y_test, prob_y_2) )
print "accuracy:" + str(accuracy_score(y_test, y_predict))
print(model.feature_importances_)
model = ExtraTreesClassifier()
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
print "ExtraTreesClassifier:"
prob_y_2 = model.predict_proba(X_test)
# Keep only the positive class
prob_y_2 = [p[1] for p in prob_y_2]
print "roc_auc_score:", ( roc_auc_score(y_test, prob_y_2) )
print "accuracy:" + str(accuracy_score(y_test, y_predict))
print(model.feature_importances_)
model = LinearSVC(random_state= 0, class_weight="balanced")
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
print "LinearSVC:"
print "accuracy:" + str(accuracy_score(y_test, y_predict))
print(model.coef_)
'''
clf.fit(X_train, y_train)
print clf.predict(X_test)
print y_test
'''
'''
def cross_training(X1,y1,X2,y2):
clf = LinearSVC(random_state= 0)
X = np.array(X1)
y = np.array(y1)
#X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.5)
#print X_train
#print OneVsRestClassifier(clf).fit(X_train, y_train).predict(X_test)
y_predict = clf.fit(X1, y1).predict(X2)
print clf.coef_
print y2
print y_predict
print "accuracy:" + str(accuracy_score(y2, y_predict))
'''
def cross_training(X1,y1,X2,y2):
X = X1
y = y1
#X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.5)
#print X_train
#print OneVsRestClassifier(clf).fit(X_train, y_train).predict(X_test)
print "------------------------------------------------"
model = LogisticRegression()
model.fit(X1, y1)
y_predict = model.fit(X1, y1).predict(X2)
print "LogisticRegression:"
print "accuracy:" + str(accuracy_score(y2, y_predict))
# The estimated coefficients will all be around 1:
print(model.coef_)
model = DecisionTreeClassifier()
model.fit(X1, y1)
# display the relative importance of each attribute
y_predict = model.fit(X1, y1).predict(X2)
print "DecisionTreeClassifier:"
print "accuracy:" + str(accuracy_score(y2, y_predict))
print(model.feature_importances_)
model = RandomForestClassifier()
model.fit(X1, y1)
# display the relative importance of each attribute
y_predict = model.fit(X1, y1).predict(X2)
print "RandomForestClassifier:"
print "accuracy:" + str(accuracy_score(y2, y_predict))
print(model.feature_importances_)
model = ExtraTreesClassifier()
model.fit(X1, y1)
# display the relative importance of each attribute
y_predict = model.fit(X1, y1).predict(X2)
print "ExtraTreesClassifier:"
print "accuracy:" + str(accuracy_score(y2, y_predict))
print(model.feature_importances_)
model = LinearSVC(random_state= 0)
model.fit(X1, y1)
y_predict = model.fit(X1, y1).predict(X2)
print "LinearSVC:"
print "accuracy:" + str(accuracy_score(y2, y_predict))
print(model.coef_)
class MyTexts(object):
"""Construct generator to avoid loading all docs
"""
def __init__(self, documents):
#stop word list
#self.stoplist = set('for a of the and to in'.split())
self.documents = documents
print 'got', len(documents), 'documents'
def __iter__(self):
for doc in self.documents:
#remove stop words from docs
stop_free = [i for i in doc.lower().split() if i not in stop]
punc_free = [ch for ch in stop_free if ch not in exclude]
normalized = [lemma.lemmatize(word) for word in punc_free]
#normalized2 = [w for w in normalized if w != "would" and w != "people"]
#yield [word for word in doc.lower().split() if word not in stop]
yield normalized
def get_dictionary(texts, min_count=1):
"""Construct dictionary
"""
dictionary = corpora.Dictionary(texts)
lowfreq_ids = [tokenid for tokenid, docfreq in dictionary.dfs.iteritems()
if docfreq < min_count]
# remove stop words and low frequence words
dictionary.filter_tokens(lowfreq_ids)
# remove gaps in id sequence after words that were removed
dictionary.compactify()
#dictionary.save('docs.dict')
return dictionary
def corpus2bow(texts,dictionary):
"""represent docs into a list with bag of words model
bow: bag of words
"""
corpus=[dictionary.doc2bow(text) for text in texts]
return corpus
def bow2tfidf(corpus):
"""represent docs with TF*IDF model
"""
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus] # wrap the old corpus to tfidf
return corpus_tfidf
def topic_models(corpus,dictionary,num_topics=2, edges=None, labled_topics = None):
"""modelling the corpus with LDA, LSI and HDP
"""
SOME_FIXED_SEED = 42
#random_state = np.random.seed(SOME_FIXED_SEED)
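    # NOTE: stock gensim's LdaModel takes no `edges` keyword, so this call presumably
    # relies on a locally modified gensim; with an unpatched install the argument
    # would have to be dropped.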
LDA_model = models.LdaModel(corpus = corpus, id2word = dictionary, num_topics=num_topics, edges=None)
#LDA_model.save("3dtyke_raw")
#LDA_model = models.LdaModel.load('3dtyke_raw')
topics = LDA_model.show_topics(num_words=10, log=False, formatted=True)
for t in topics:
print t
#LDA_model.save("3dtyke_raw")
'''
i=0
for c in corpus:
doc_t = LDA_model.get_document_topics(c)
print i, doc_t
i+=1
'''
'''
topics_label= [0,1,2]
matched=[]
for i, c in enumerate(corpus):
doc = LDA_model.get_document_topics(c)
topic_assign = max([d for d in doc], key=lambda item: (item[1]))
topic_converted = topics_label[topic_assign[0]]
if topic_converted == labled_topics[i]:
matched.append(1)
else:
matched.append(0)
print matched
accuracy = float(sum(matched))/len(matched)
print 'accuracy:',accuracy
'''
'''
parent = LDA_model.get_document_topics(corpus[0])
inherit = 0.3
LDA_model.
index = 0
for c in corpus:
doc_t = LDA_model.get_document_topics(c)
parent = tree.parent(index).identifier
result = [(p[0], inherit * p[1]) for p in parent]
for i in range(len(doc_t)):
result[i] = ((doc_t[i][0], (1-inherit) * doc_t[i][1] + inherit * parent[i][1]))
print index, result
index+=1
'''
'''
#Plot nodes to see clustering status
nodes = list(LDA_model[corpus] )
ax0 = [x[0][1] for x in nodes]
ax1 = [x[1][1] for x in nodes]
plt.plot(ax0,ax1,'o')
plt.show()
'''
return LDA_model
def word_distribution(texts):
dict = {}
for t in texts:
for word in t:
if dict.has_key(word):
dict[word] += 1
else:
dict[word] = 1
print dict
'''
keys=[]
values=[]
for key,value in sorted(dict.iteritems(),key=lambda (k,v): (v,k), reverse = True ):
#print "%s: %s" % (key, value)
keys.append(key)
values.append(value)
x = range(len(keys))
y = values
plt.bar(x, y, color='b')
plt.xticks(x, keys, rotation = 45)
plt.savefig(file_dir+'\\graphs\\'+prefix_time()+file_name+'_word distribution'+'.png')
plt.title('Word Distribution')
plt.show()
c = Counter(values)
total = np.sum(np.array(c.values()))
print total
x= np.array(c.values())/float(total)
labels1= c.keys()
#s_length = len(x)
#explode1=[0]*s_length
plt.pie(x,autopct='%.0f%%',shadow=True, labels=labels1, textprops = {'fontsize':14, 'color':'k'})
plt.savefig(file_dir+'\\graphs\\'+prefix_time()+file_name+'_word frequency'+'.png')
plt.title('Word Frequency')
plt.show()
'''
def node_distribution(nodes):
nodes = sorted(nodes, reverse =True)
x = range(len(nodes))
y = nodes
plt.bar(x, y, color='r')
plt.xticks(x, y, rotation = 45)
#plt.ylim([0,110])
plt.savefig(file_dir+'\\graphs\\'+prefix_time()+file_name+'_node distribution'+'.png')
plt.title('Node Distribution')
plt.show()
dict = Counter(nodes)
total = np.sum(np.array(dict.values()))
print total
x= np.array(dict.values())/float(total)
labels1= dict.keys()
#s_length = len(x)
#explode1=[0]*s_length
plt.pie(x,autopct='%.0f%%',shadow=True, labels=labels1, textprops = {'fontsize':14, 'color':'k'})
plt.savefig(file_dir+'\\graphs\\'+prefix_time()+file_name+'_node frequency'+'.png')
plt.title('Node Frequency')
plt.show()
def corpus_distribution(corpus):
dict = {}
for doc in corpus:
for word in doc:
if dict.has_key(word[0]):
dict[word[0]] += word[1]
else:
dict[word[0]] = word[1]
keys=[]
values=[]
for key,value in sorted(dict.iteritems(),key=lambda (k,v): (v,k), reverse = True ):
#print "%s: %s" % (key, value)
keys.append(key)
values.append(value)
x = range(len(keys))
y = values
plt.bar(x, y, color='b')
plt.xticks(x, keys, rotation = 45)
#plt.ylim([0,500])
plt.savefig(file_dir+'\\graphs\\'+prefix_time()+file_name+'_corpus distribution'+'.png')
plt.title('Corpus Distribution')
plt.show()
c = Counter(values)
total = np.sum(np.array(c.values()))
x= np.array(c.values())/float(total)
labels1= c.keys()
#s_length = len(x)
#explode1=[0]*s_length
plt.pie(x,autopct='%.0f%%',shadow=True, labels=labels1, textprops = {'fontsize':14, 'color':'k'})
plt.savefig(file_dir+'\\graphs\\'+prefix_time()+file_name+'_corpus frequency'+'.png')
plt.title('Corpus Frequency')
plt.show()
if __name__ == '__main__':
file_name = "8sk1ue"
edges1, label1, documents1, labled_topics1 = file_reader(file_name)
#edges2, label2, documents2 = file_reader("8sk1ue") 867njq 3dtyke
simple2= ["lBJ Lakers ",
"Warriors Championship",
"money in the wallet to buy ticket",
"Oh my gosh",
"I will not watch Cavs's game",
"Texas serial bomber made video confession before blowing himself up"
]
#f=open("stemed.txt","w")
texts = MyTexts(documents1)
'''
for t in texts:
print >> f, " ".join(t)
'''
#word_distribution(texts)
#tree_degree_height1, parents = get_degree_height(edges1, label1)
#tree_degree_height2 = get_degree_height(edges2, label2)
#train_with_degree_height(tree_degree_height1,label1)
#cross_training(tree_degree_height1, label1, tree_degree_height2, label2)
dictionary = get_dictionary(texts, min_count=1)
# save and load dictionary
'''
dictionary.save('docs.dict')
dictionary = corpora.Dictionary.load('docs.dict')
print dictionary
'''
corpus = corpus2bow(texts,dictionary)
corpus_tfidf = bow2tfidf(corpus)
#doc="Human computer interaction"
#print doc_similarity(doc, corpus)
num_topics = 3
magnification = 1
    # base should stay close to 0; a large additive base washes out the structural weighting
base = 0.000
#867njq_new
#node_degrees = [1.0, 0.99, 0.49, 0.23, 0.04, 0.0, 0.0, 0.0, 0.0, 0.1, 0.05, 0.03, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08, 0.0, 0.0, 0.03, 0.02, 0.0, 0.0, 0.01, 0.0, 0.01, 0.0, 0.03, 0.02, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.48, 0.37, 0.33, 0.06, 0.01, 0.0, 0.03, 0.02, 0.01, 0.0, 0.0, 0.03, 0.01, 0.0, 0.0, 0.0, 0.03, 0.02, 0.01, 0.0, 0.06, 0.05, 0.04, 0.02, 0.01, 0.0, 0.0, 0.03, 0.01, 0.0, 0.0, 0.01, 0.0, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09, 0.08, 0.05, 0.04, 0.01, 0.0, 0.0, 0.0, 0.01, 0.0]
#node_heights = [1.0, 0.9, 0.6, 0.5, 0.1, 0.0, 0.0, 0.0, 0.0, 0.4, 0.3, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.0, 0.0, 0.2, 0.1, 0.0, 0.0, 0.1, 0.0, 0.1, 0.0, 0.3, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8, 0.7, 0.6, 0.4, 0.1, 0.0, 0.3, 0.2, 0.1, 0.0, 0.0, 0.2, 0.1, 0.0, 0.0, 0.0, 0.3, 0.2, 0.1, 0.0, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0, 0.0, 0.2, 0.1, 0.0, 0.0, 0.1, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0, 0.0, 0.0, 0.1, 0.0]
#raw
#node_degrees = [101, 100, 50, 24, 5, 1, 1, 1, 1, 11, 6, 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 1, 1, 4, 3, 1, 1, 2, 1, 2, 1, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 49, 38, 34, 7, 2, 1, 4, 3, 2, 1, 1, 4, 2, 1, 1, 1, 4, 3, 2, 1, 7, 6, 5, 3, 2, 1, 1, 4, 2, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 10, 9, 6, 5, 2, 1, 1, 1, 2, 1]
#node_degrees = [101, 100, 50, 24, 5, 1, 1, 1, 1, 11, 6, 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 1, 1, 4, 3, 1, 1, 2, 1, 2, 1, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 49, 38, 34, 7, 2, 1, 4, 3, 2, 1, 1, 4, 2, 1, 1, 1, 4, 3, 2, 1, 7, 6, 5, 3, 2, 1, 1, 4, 2, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 10, 9, 6, 5, 2, 1, 1, 1, 2, 1]
#node_heights = [11, 10, 7, 6, 2, 1, 1, 1, 1, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 3, 2, 1, 1, 2, 1, 2, 1, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 8, 7, 5, 2, 1, 4, 3, 2, 1, 1, 3, 2, 1, 1, 1, 4, 3, 2, 1, 6, 5, 4, 3, 2, 1, 1, 3, 2, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 6, 5, 4, 3, 2, 1, 1, 1, 2, 1]
#H=11
#single feature
#node_degrees = [0.9, 0.892, 0.492, 0.28400000000000003, 0.132, 0.1, 0.1, 0.1, 0.1, 0.18000000000000002, 0.14, 0.124, 0.116, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.164, 0.1, 0.1, 0.124, 0.116, 0.1, 0.1, 0.10800000000000001, 0.1, 0.10800000000000001, 0.1, 0.124, 0.116, 0.10800000000000001, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.484, 0.396, 0.364, 0.14800000000000002, 0.10800000000000001, 0.1, 0.124, 0.116, 0.10800000000000001, 0.1, 0.1, 0.124, 0.10800000000000001, 0.1, 0.1, 0.1, 0.124, 0.116, 0.10800000000000001, 0.1, 0.14800000000000002, 0.14, 0.132, 0.116, 0.10800000000000001, 0.1, 0.1, 0.124, 0.10800000000000001, 0.1, 0.1, 0.10800000000000001, 0.1, 0.10800000000000001, 0.1, 0.1, 0.1, 0.1, 0.1, 0.172, 0.164, 0.14, 0.132, 0.10800000000000001, 0.1, 0.1, 0.1, 0.10800000000000001, 0.1]
#node_degrees = [1.0, 0.99, 0.49, 0.23, 0.04, 0.0, 0.0, 0.0, 0.0, 0.1, 0.05, 0.03, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08, 0.0, 0.0, 0.03, 0.02, 0.0, 0.0, 0.01, 0.0, 0.01, 0.0, 0.03, 0.02, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.48, 0.37, 0.33, 0.06, 0.01, 0.0, 0.03, 0.02, 0.01, 0.0, 0.0, 0.03, 0.01, 0.0, 0.0, 0.0, 0.03, 0.02, 0.01, 0.0, 0.06, 0.05, 0.04, 0.02, 0.01, 0.0, 0.0, 0.03, 0.01, 0.0, 0.0, 0.01, 0.0, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09, 0.08, 0.05, 0.04, 0.01, 0.0, 0.0, 0.0, 0.01, 0.0]
#node_heights = [1.0, 0.9, 0.6, 0.5, 0.1, 0.0, 0.0, 0.0, 0.0, 0.4, 0.3, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.0, 0.0, 0.2, 0.1, 0.0, 0.0, 0.1, 0.0, 0.1, 0.0, 0.3, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8, 0.7, 0.6, 0.4, 0.1, 0.0, 0.3, 0.2, 0.1, 0.0, 0.0, 0.2, 0.1, 0.0, 0.0, 0.0, 0.3, 0.2, 0.1, 0.0, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0, 0.0, 0.2, 0.1, 0.0, 0.0, 0.1, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0, 0.0, 0.0, 0.1, 0.0]
#scaled_weigths
#node_degrees = [30.452645802354127, 31.622776601683793, 18.89822365046136, 9.797958971132713, 3.5355339059327373, 1.0, 1.0, 1.0, 1.0, 4.919349550499537, 3.0, 2.3094010767585034, 2.1213203435596424, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.5, 1.0, 1.0, 2.3094010767585034, 2.1213203435596424, 1.0, 1.0, 1.414213562373095, 1.0, 1.414213562373095, 1.0, 2.0, 1.7320508075688774, 1.414213562373095, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 16.333333333333332, 13.435028842544401, 12.850792082313726, 3.1304951684997055, 1.414213562373095, 1.0, 2.0, 1.7320508075688774, 1.414213562373095, 1.0, 1.0, 2.3094010767585034, 1.414213562373095, 1.0, 1.0, 1.0, 2.0, 1.7320508075688774, 1.414213562373095, 1.0, 2.8577380332470415, 2.6832815729997477, 2.5, 1.7320508075688774, 1.414213562373095, 1.0, 1.0, 2.3094010767585034, 1.414213562373095, 1.0, 1.0, 1.414213562373095, 1.0, 1.414213562373095, 1.0, 1.0, 1.0, 1.0, 1.0, 4.08248290463863, 4.024922359499621, 3.0, 2.886751345948129, 1.414213562373095, 1.0, 1.0, 1.0, 1.414213562373095, 1.0]
#node_degrees = [10.04987562112089, 10.0, 7.0710678118654755, 4.898979485566356, 2.23606797749979, 1.0, 1.0, 1.0, 1.0, 3.3166247903554, 2.449489742783178, 2.0, 1.7320508075688772, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 2.0, 1.7320508075688772, 1.0, 1.0, 1.4142135623730951, 1.0, 1.4142135623730951, 1.0, 2.0, 1.7320508075688772, 1.4142135623730951, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 7.0, 6.164414002968976, 5.830951894845301, 2.6457513110645907, 1.4142135623730951, 1.0, 2.0, 1.7320508075688772, 1.4142135623730951, 1.0, 1.0, 2.0, 1.4142135623730951, 1.0, 1.0, 1.0, 2.0, 1.7320508075688772, 1.4142135623730951, 1.0, 2.6457513110645907, 2.449489742783178, 2.23606797749979, 1.7320508075688772, 1.4142135623730951, 1.0, 1.0, 2.0, 1.4142135623730951, 1.0, 1.0, 1.4142135623730951, 1.0, 1.4142135623730951, 1.0, 1.0, 1.0, 1.0, 1.0, 3.1622776601683795, 3.0, 2.449489742783178, 2.23606797749979, 1.4142135623730951, 1.0, 1.0, 1.0, 1.4142135623730951, 1.0]
#node_degrees = [1.0, 0.9950371902099892, 0.7035975447302919, 0.48746667822143247, 0.22249707974499242, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.3300165012376031, 0.24373333911071624, 0.19900743804199783, 0.1723454968864278, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.29851115706299675, 0.09950371902099892, 0.09950371902099892, 0.19900743804199783, 0.1723454968864278, 0.09950371902099892, 0.09950371902099892, 0.1407195089460584, 0.09950371902099892, 0.1407195089460584, 0.09950371902099892, 0.19900743804199783, 0.1723454968864278, 0.1407195089460584, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.6965260331469925, 0.6133821188805362, 0.5802013989696481, 0.26326209505561055, 0.1407195089460584, 0.09950371902099892, 0.19900743804199783, 0.1723454968864278, 0.1407195089460584, 0.09950371902099892, 0.09950371902099892, 0.19900743804199783, 0.1407195089460584, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.19900743804199783, 0.1723454968864278, 0.1407195089460584, 0.09950371902099892, 0.26326209505561055, 0.24373333911071624, 0.22249707974499242, 0.1723454968864278, 0.1407195089460584, 0.09950371902099892, 0.09950371902099892, 0.19900743804199783, 0.1407195089460584, 0.09950371902099892, 0.09950371902099892, 0.1407195089460584, 0.09950371902099892, 0.1407195089460584, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.31465838776377636, 0.29851115706299675, 0.24373333911071624, 0.22249707974499242, 0.1407195089460584, 0.09950371902099892, 0.09950371902099892, 0.09950371902099892, 0.1407195089460584, 0.09950371902099892]
#popularity
#node_degrees = [54.72727272727274, 59.1, 33.285714285714285, 16.166666666666668, 3.0, 1.0, 1.0, 1.0, 1.0, 7.0, 3.5, 2.333333333333333, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 5.5, 1.0, 1.0, 2.333333333333333, 2.0, 1.0, 1.0, 1.5, 1.0, 1.5, 1.0, 2.5, 1.9999999999999998, 1.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 27.555555555555564, 23.375, 22.571428571428573, 4.4, 1.5, 1.0, 2.5, 1.9999999999999998, 1.5, 1.0, 1.0, 2.6666666666666665, 1.5, 1.0, 1.0, 1.0, 2.5, 1.9999999999999998, 1.5, 1.0, 4.0, 3.6, 3.25, 1.9999999999999998, 1.5, 1.0, 1.0, 2.6666666666666665, 1.5, 1.0, 1.0, 1.5, 1.0, 1.5, 1.0, 1.0, 1.0, 1.0, 1.0, 5.333333333333334, 5.199999999999999, 3.5, 3.333333333333333, 1.5, 1.0, 1.0, 1.0, 1.5, 1.0]
#Arithmetic Progression
#node_degrees = [54.72727272727274, 62.81818181818179, 39.36363636363633, 19.72727272727273, 4.636363636363637, 1.0, 1.0, 1.0, 1.0, 9.181818181818182, 5.090909090909092, 3.545454545454546, 2.8181818181818183, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 7.727272727272728, 1.0, 1.0, 3.545454545454546, 2.8181818181818183, 1.0, 1.0, 1.9090909090909092, 1.0, 1.9090909090909092, 1.0, 3.454545454545455, 2.7272727272727275, 1.9090909090909092, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 31.45454545454545, 27.363636363636356, 26.727272727272723, 5.818181818181819, 1.9090909090909092, 1.0, 3.454545454545455, 2.7272727272727275, 1.9090909090909092, 1.0, 1.0, 3.6363636363636367, 1.9090909090909092, 1.0, 1.0, 1.0, 3.454545454545455, 2.7272727272727275, 1.9090909090909092, 1.0, 5.363636363636364, 4.90909090909091, 4.363636363636364, 2.7272727272727275, 1.9090909090909092, 1.0, 1.0, 3.6363636363636367, 1.9090909090909092, 1.0, 1.0, 1.9090909090909092, 1.0, 1.9090909090909092, 1.0, 1.0, 1.0, 1.0, 1.0, 7.454545454545456, 7.272727272727274, 5.090909090909092, 4.545454545454546, 1.9090909090909092, 1.0, 1.0, 1.0, 1.9090909090909092, 1.0]
#Geometric Progression
#node_degrees = [35.43213475839999, 43.040168448, 30.84396800000002, 16.048960000000008, 4.2, 1.0, 1.0, 1.0, 1.0, 7.6112, 4.264, 3.0800000000000005, 2.6, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 6.504, 1.0, 1.0, 3.0800000000000005, 2.6, 1.0, 1.0, 1.8, 1.0, 1.8, 1.0, 2.9520000000000004, 2.4400000000000004, 1.8, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 21.706242560000007, 20.222323200000012, 21.02790400000001, 4.8016000000000005, 1.8, 1.0, 2.9520000000000004, 2.4400000000000004, 1.8, 1.0, 1.0, 3.24, 1.8, 1.0, 1.0, 1.0, 2.9520000000000004, 2.4400000000000004, 1.8, 1.0, 4.201280000000001, 4.001600000000001, 3.7520000000000007, 2.4400000000000004, 1.8, 1.0, 1.0, 3.24, 1.8, 1.0, 1.0, 1.8, 1.0, 1.8, 1.0, 1.0, 1.0, 1.0, 1.0, 5.6604800000000015, 5.825600000000001, 4.232000000000001, 4.04, 1.8, 1.0, 1.0, 1.0, 1.8, 1.0]
#Harmonic Progression
#node_degrees = [18.913924963924956, 22.592460317460322, 17.452380952380953, 9.533333333333333, 3.0, 1.0, 1.0, 1.0, 1.0, 4.816666666666666, 2.833333333333333, 2.1666666666666665, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.166666666666666, 1.0, 1.0, 2.1666666666666665, 2.0, 1.0, 1.0, 1.5, 1.0, 1.5, 1.0, 2.083333333333333, 1.8333333333333333, 1.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 11.642063492063492, 11.110714285714286, 12.109523809523811, 3.1166666666666667, 1.5, 1.0, 2.083333333333333, 1.8333333333333333, 1.5, 1.0, 1.0, 2.333333333333333, 1.5, 1.0, 1.0, 1.0, 2.083333333333333, 1.8333333333333333, 1.5, 1.0, 2.6999999999999997, 2.6166666666666667, 2.583333333333333, 1.8333333333333333, 1.5, 1.0, 1.0, 2.333333333333333, 1.5, 1.0, 1.0, 1.5, 1.0, 1.5, 1.0, 1.0, 1.0, 1.0, 1.0, 3.4333333333333336, 3.6166666666666667, 2.75, 2.833333333333333, 1.5, 1.0, 1.0, 1.0, 1.5, 1.0]
#8sk1ue
#node_degrees = [1, 0.58, 0.53, 0.11, 0.0, 0.04, 0.03, 0.02, 0.01, 0.0, 0.03, 0.0, 0.01, 0.0, 0.0, 0.36, 0.11, 0.07, 0.05, 0.04, 0.03, 0.02, 0.01, 0.0, 0.0, 0.02, 0.0, 0.0, 0.22, 0.12, 0.11, 0.07, 0.01, 0.0, 0.02, 0.01, 0.0, 0.01, 0.0, 0.0, 0.01, 0.0, 0.08, 0.05, 0.0, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.39, 0.0, 0.36, 0.16, 0.15, 0.14, 0.13, 0.12, 0.09, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.01, 0.0, 0.01, 0.0, 0.01, 0.0, 0.02, 0.0, 0.13, 0.0, 0.04, 0.03, 0.02, 0.01, 0.0, 0.03, 0.02, 0.0, 0.0, 0.02, 0.01, 0.0, 0.0, 0.0]
#node_heights = [1, 0.5882352941176471, 0.5294117647058824, 0.29411764705882354, 0.0, 0.23529411764705882, 0.17647058823529413, 0.11764705882352941, 0.058823529411764705, 0.0, 0.11764705882352941, 0.0, 0.058823529411764705, 0.0, 0.0, 0.47058823529411764, 0.4117647058823529, 0.35294117647058826, 0.29411764705882354, 0.23529411764705882, 0.17647058823529413, 0.11764705882352941, 0.058823529411764705, 0.0, 0.0, 0.058823529411764705, 0.0, 0.0, 0.35294117647058826, 0.29411764705882354, 0.23529411764705882, 0.17647058823529413, 0.058823529411764705, 0.0, 0.11764705882352941, 0.058823529411764705, 0.0, 0.058823529411764705, 0.0, 0.0, 0.058823529411764705, 0.0, 0.17647058823529413, 0.11764705882352941, 0.0, 0.058823529411764705, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.058823529411764705, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9411764705882353, 0.0, 0.8823529411764706, 0.8235294117647058, 0.7647058823529411, 0.7058823529411765, 0.6470588235294118, 0.5882352941176471, 0.5294117647058824, 0.47058823529411764, 0.4117647058823529, 0.35294117647058826, 0.29411764705882354, 0.23529411764705882, 0.17647058823529413, 0.11764705882352941, 0.058823529411764705, 0.0, 0.058823529411764705, 0.0, 0.058823529411764705, 0.0, 0.058823529411764705, 0.0, 0.29411764705882354, 0.0, 0.23529411764705882, 0.17647058823529413, 0.11764705882352941, 0.058823529411764705, 0.0, 0.11764705882352941, 0.058823529411764705, 0.0, 0.0, 0.11764705882352941, 0.058823529411764705, 0.0, 0.0, 0.0]
#popularity
#node_degrees = [67.33333333333334, 33.454545454545446, 31.699999999999996, 7.833333333333334, 1.0, 3.0, 2.5, 1.9999999999999998, 1.5, 1.0, 2.6666666666666665, 1.0, 1.5, 1.0, 1.0, 21.000000000000007, 7.625, 4.857142857142857, 3.5, 3.0, 2.5, 1.9999999999999998, 1.5, 1.0, 1.0, 2.0, 1.0, 1.0, 12.57142857142857, 7.000000000000001, 7.199999999999999, 5.0, 1.5, 1.0, 1.9999999999999998, 1.5, 1.0, 1.5, 1.0, 1.0, 1.5, 1.0, 5.25, 3.6666666666666665, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 26.764705882352953, 1.0, 25.375, 9.266666666666666, 8.857142857142856, 8.461538461538462, 8.083333333333332, 7.727272727272727, 5.5, 4.999999999999999, 4.5, 4.0, 3.5, 3.0, 2.5, 1.9999999999999998, 1.5, 1.0, 1.5, 1.0, 1.5, 1.0, 2.0, 1.0, 8.833333333333334, 1.0, 3.0, 2.5, 1.9999999999999998, 1.5, 1.0, 2.333333333333333, 2.0, 1.0, 1.0, 1.9999999999999998, 1.5, 1.0, 1.0, 1.0]
#0.1-0.9
#node_degrees = [0.9, 0.564, 0.524, 0.188, 0.1, 0.132, 0.124, 0.116, 0.10800000000000001, 0.1, 0.124, 0.1, 0.10800000000000001, 0.1, 0.1, 0.388, 0.188, 0.15600000000000003, 0.14, 0.132, 0.124, 0.116, 0.10800000000000001, 0.1, 0.1, 0.116, 0.1, 0.1, 0.276, 0.196, 0.188, 0.15600000000000003, 0.10800000000000001, 0.1, 0.116, 0.10800000000000001, 0.1, 0.10800000000000001, 0.1, 0.1, 0.10800000000000001, 0.1, 0.164, 0.14, 0.1, 0.116, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.10800000000000001, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.41200000000000003, 0.1, 0.388, 0.228, 0.22, 0.21200000000000002, 0.20400000000000001, 0.196, 0.172, 0.164, 0.15600000000000003, 0.14800000000000002, 0.14, 0.132, 0.124, 0.116, 0.10800000000000001, 0.1, 0.10800000000000001, 0.1, 0.10800000000000001, 0.1, 0.116, 0.1, 0.20400000000000001, 0.1, 0.132, 0.124, 0.116, 0.10800000000000001, 0.1, 0.124, 0.116, 0.1, 0.1, 0.116, 0.10800000000000001, 0.1, 0.1, 0.1]
#node_heights = [0.9, 0.5705882352941177, 0.5235294117647059, 0.33529411764705885, 0.1, 0.28823529411764703, 0.24117647058823533, 0.19411764705882353, 0.14705882352941177, 0.1, 0.19411764705882353, 0.1, 0.14705882352941177, 0.1, 0.1, 0.4764705882352941, 0.4294117647058824, 0.3823529411764707, 0.33529411764705885, 0.28823529411764703, 0.24117647058823533, 0.19411764705882353, 0.14705882352941177, 0.1, 0.1, 0.14705882352941177, 0.1, 0.1, 0.3823529411764707, 0.33529411764705885, 0.28823529411764703, 0.24117647058823533, 0.14705882352941177, 0.1, 0.19411764705882353, 0.14705882352941177, 0.1, 0.14705882352941177, 0.1, 0.1, 0.14705882352941177, 0.1, 0.24117647058823533, 0.19411764705882353, 0.1, 0.14705882352941177, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.14705882352941177, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.8529411764705882, 0.1, 0.8058823529411765, 0.7588235294117647, 0.711764705882353, 0.6647058823529413, 0.6176470588235294, 0.5705882352941177, 0.5235294117647059, 0.4764705882352941, 0.4294117647058824, 0.3823529411764707, 0.33529411764705885, 0.28823529411764703, 0.24117647058823533, 0.19411764705882353, 0.14705882352941177, 0.1, 0.14705882352941177, 0.1, 0.14705882352941177, 0.1, 0.14705882352941177, 0.1, 0.33529411764705885, 0.1, 0.28823529411764703, 0.24117647058823533, 0.19411764705882353, 0.14705882352941177, 0.1, 0.19411764705882353, 0.14705882352941177, 0.1, 0.1, 0.19411764705882353, 0.14705882352941177, 0.1, 0.1, 0.1]
#raw data
#node_degrees =[101, 59, 54, 12, 1, 5, 4, 3, 2, 1, 4, 1, 2, 1, 1, 37, 12, 8, 6, 5, 4, 3, 2, 1, 1, 3, 1, 1, 23, 13, 12, 8, 2, 1, 3, 2, 1, 2, 1, 1, 2, 1, 9, 6, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 40, 1, 37, 17, 16, 15, 14, 13, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, 1, 2, 1, 3, 1, 14, 1, 5, 4, 3, 2, 1, 4, 3, 1, 1, 3, 2, 1, 1, 1]
#node_heights =[18, 11, 10, 6, 1, 5, 4, 3, 2, 1, 3, 1, 2, 1, 1, 9, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 1, 1, 7, 6, 5, 4, 2, 1, 3, 2, 1, 2, 1, 1, 2, 1, 4, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 17, 1, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 2, 1, 2, 1, 2, 1, 6, 1, 5, 4, 3, 2, 1, 3, 2, 1, 1, 3, 2, 1, 1, 1]
#H=18
#node_degrees = [1.0, 0.5841584158415841, 0.5346534653465347, 0.1188118811881188, 0.009900990099009901, 0.04950495049504951, 0.039603960396039604, 0.0297029702970297, 0.019801980198019802, 0.009900990099009901, 0.039603960396039604, 0.009900990099009901, 0.019801980198019802, 0.009900990099009901, 0.009900990099009901, 0.36633663366336633, 0.1188118811881188, 0.07920792079207921, 0.0594059405940594, 0.04950495049504951, 0.039603960396039604, 0.0297029702970297, 0.019801980198019802, 0.009900990099009901, 0.009900990099009901, 0.0297029702970297, 0.009900990099009901, 0.009900990099009901, 0.22772277227722773, 0.12871287128712872, 0.1188118811881188, 0.07920792079207921, 0.019801980198019802, 0.009900990099009901, 0.0297029702970297, 0.019801980198019802, 0.009900990099009901, 0.019801980198019802, 0.009900990099009901, 0.009900990099009901, 0.019801980198019802, 0.009900990099009901, 0.0891089108910891, 0.0594059405940594, 0.009900990099009901, 0.0297029702970297, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.019801980198019802, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901, 0.39603960396039606, 0.009900990099009901, 0.36633663366336633, 0.16831683168316833, 0.15841584158415842, 0.1485148514851485, 0.13861386138613863, 0.12871287128712872, 0.09900990099009901, 0.0891089108910891, 0.07920792079207921, 0.06930693069306931, 0.0594059405940594, 0.04950495049504951, 0.039603960396039604, 0.0297029702970297, 0.019801980198019802, 0.009900990099009901, 0.019801980198019802, 0.009900990099009901, 0.019801980198019802, 0.009900990099009901, 0.0297029702970297, 0.009900990099009901, 0.13861386138613863, 0.009900990099009901, 0.04950495049504951, 0.039603960396039604, 0.0297029702970297, 0.019801980198019802, 0.009900990099009901, 0.039603960396039604, 0.0297029702970297, 0.009900990099009901, 0.009900990099009901, 0.0297029702970297, 0.019801980198019802, 0.009900990099009901, 0.009900990099009901, 0.009900990099009901]
#node_heights = [1.0, 0.6111111111111112, 0.5555555555555556, 0.3333333333333333, 0.05555555555555555, 0.2777777777777778, 0.2222222222222222, 0.16666666666666666, 0.1111111111111111, 0.05555555555555555, 0.16666666666666666, 0.05555555555555555, 0.1111111111111111, 0.05555555555555555, 0.05555555555555555, 0.5, 0.4444444444444444, 0.3888888888888889, 0.3333333333333333, 0.2777777777777778, 0.2222222222222222, 0.16666666666666666, 0.1111111111111111, 0.05555555555555555, 0.05555555555555555, 0.1111111111111111, 0.05555555555555555, 0.05555555555555555, 0.3888888888888889, 0.3333333333333333, 0.2777777777777778, 0.2222222222222222, 0.1111111111111111, 0.05555555555555555, 0.16666666666666666, 0.1111111111111111, 0.05555555555555555, 0.1111111111111111, 0.05555555555555555, 0.05555555555555555, 0.1111111111111111, 0.05555555555555555, 0.2222222222222222, 0.16666666666666666, 0.05555555555555555, 0.1111111111111111, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.1111111111111111, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555, 0.9444444444444444, 0.05555555555555555, 0.8888888888888888, 0.8333333333333334, 0.7777777777777778, 0.7222222222222222, 0.6666666666666666, 0.6111111111111112, 0.5555555555555556, 0.5, 0.4444444444444444, 0.3888888888888889, 0.3333333333333333, 0.2777777777777778, 0.2222222222222222, 0.16666666666666666, 0.1111111111111111, 0.05555555555555555, 0.1111111111111111, 0.05555555555555555, 0.1111111111111111, 0.05555555555555555, 0.1111111111111111, 0.05555555555555555, 0.3333333333333333, 0.05555555555555555, 0.2777777777777778, 0.2222222222222222, 0.16666666666666666, 0.1111111111111111, 0.05555555555555555, 0.16666666666666666, 0.1111111111111111, 0.05555555555555555, 0.05555555555555555, 0.16666666666666666, 0.1111111111111111, 0.05555555555555555, 0.05555555555555555, 0.05555555555555555]
#scaled_weigths
#node_degrees = [23.8059282999471, 17.789169330088054, 17.076299364909246, 4.898979485566357, 1.0, 2.23606797749979, 2.0, 1.7320508075688774, 1.414213562373095, 1.0, 2.3094010767585034, 1.0, 1.414213562373095, 1.0, 1.0, 12.333333333333334, 4.242640687119285, 3.0237157840738176, 2.4494897427831783, 2.23606797749979, 2.0, 1.7320508075688774, 1.414213562373095, 1.0, 1.0, 2.1213203435596424, 1.0, 1.0, 8.693182879212225, 5.30722777603022, 5.366563145999495, 4.0, 1.414213562373095, 1.0, 1.7320508075688774, 1.414213562373095, 1.0, 1.414213562373095, 1.0, 1.0, 1.414213562373095, 1.0, 4.5, 3.464101615137755, 1.0, 2.1213203435596424, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.414213562373095, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 9.70142500145332, 1.0, 9.25, 4.389381125701739, 4.27617987059879, 4.160251471689219, 4.041451884327381, 3.9196474795109273, 3.162277660168379, 3.0, 2.82842712474619, 2.6457513110645903, 2.4494897427831783, 2.23606797749979, 2.0, 1.7320508075688774, 1.414213562373095, 1.0, 1.414213562373095, 1.0, 1.414213562373095, 1.0, 2.1213203435596424, 1.0, 5.715476066494083, 1.0, 2.23606797749979, 2.0, 1.7320508075688774, 1.414213562373095, 1.0, 2.3094010767585034, 2.1213203435596424, 1.0, 1.0, 1.7320508075688774, 1.414213562373095, 1.0, 1.0, 1.0]
#Arithmetic Progression
#node_degrees = [67.33333333333334, 43.388888888888886, 41.61111111111111, 10.611111111111112, 1.0, 4.444444444444445, 3.6666666666666665, 2.833333333333333, 1.9444444444444444, 1.0, 3.7777777777777777, 1.0, 1.9444444444444444, 1.0, 1.0, 28.999999999999996, 10.055555555555557, 6.777777777777779, 5.166666666666667, 4.444444444444445, 3.6666666666666665, 2.833333333333333, 1.9444444444444444, 1.0, 1.0, 2.888888888888889, 1.0, 1.0, 18.94444444444445, 11.000000000000002, 10.666666666666666, 7.333333333333332, 1.9444444444444444, 1.0, 2.833333333333333, 1.9444444444444444, 1.0, 1.9444444444444444, 1.0, 1.0, 1.9444444444444444, 1.0, 8.166666666666666, 5.611111111111111, 1.0, 2.888888888888889, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.9444444444444444, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 27.499999999999996, 1.0, 26.666666666666668, 10.555555555555555, 10.444444444444446, 10.27777777777778, 10.055555555555557, 9.777777777777779, 7.5, 7.0, 6.444444444444445, 5.833333333333334, 5.166666666666667, 4.444444444444445, 3.6666666666666665, 2.833333333333333, 1.9444444444444444, 1.0, 1.9444444444444444, 1.0, 1.9444444444444444, 1.0, 2.888888888888889, 1.0, 12.277777777777779, 1.0, 4.444444444444445, 3.6666666666666665, 2.833333333333333, 1.9444444444444444, 1.0, 3.722222222222222, 2.888888888888889, 1.0, 1.0, 2.833333333333333, 1.9444444444444444, 1.0, 1.0, 1.0]
#Geometric Progression
#node_degrees = [32.7136387581726, 23.261771878400005, 23.827214848000008, 7.881280000000001, 1.0, 3.3616000000000006, 2.9520000000000004, 2.4400000000000004, 1.8, 1.0, 3.24, 1.0, 1.8, 1.0, 1.0, 16.852738560000002, 6.881139200000002, 4.751424000000001, 3.6892800000000006, 3.3616000000000006, 2.9520000000000004, 2.4400000000000004, 1.8, 1.0, 1.0, 2.6, 1.0, 1.0, 11.934784000000002, 7.324480000000003, 7.9056000000000015, 5.832000000000001, 1.8, 1.0, 2.4400000000000004, 1.8, 1.0, 1.8, 1.0, 1.0, 1.8, 1.0, 6.344000000000001, 4.6800000000000015, 1.0, 2.6, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.8, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 15.380276569315745, 1.0, 15.975345711644675, 5.413902139555841, 5.517377674444801, 5.646722093056002, 5.808402616320002, 6.010503270400001, 4.463129088000001, 4.328911360000001, 4.161139200000001, 3.9514240000000007, 3.6892800000000006, 3.3616000000000006, 2.9520000000000004, 2.4400000000000004, 1.8, 1.0, 1.8, 1.0, 1.8, 1.0, 2.6, 1.0, 8.905280000000001, 1.0, 3.3616000000000006, 2.9520000000000004, 2.4400000000000004, 1.8, 1.0, 3.0800000000000005, 2.6, 1.0, 1.0, 2.4400000000000004, 1.8, 1.0, 1.0, 1.0]
#Harmonic Progression
#node_degrees = [18.496370704458933, 13.11154401154401, 13.215079365079358, 4.866666666666667, 1.0, 2.283333333333333, 2.083333333333333, 1.8333333333333333, 1.5, 1.0, 2.3333333333333335, 1.0, 1.5, 1.0, 1.0, 9.332539682539684, 4.2178571428571425, 3.0928571428571425, 2.4499999999999997, 2.283333333333333, 2.083333333333333, 1.8333333333333333, 1.5, 1.0, 1.0, 2.0, 1.0, 1.0, 6.676190476190476, 4.266666666666667, 4.783333333333333, 3.75, 1.5, 1.0, 1.8333333333333333, 1.5, 1.0, 1.5, 1.0, 1.0, 1.5, 1.0, 4.0, 3.166666666666667, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 8.96018744327568, 1.0, 9.341443278943277, 3.627752802752803, 3.6182289932289935, 3.6301337551337554, 3.6865440115440116, 3.853210678210678, 2.9289682539682538, 2.8289682539682537, 2.7178571428571425, 2.5928571428571425, 2.4499999999999997, 2.283333333333333, 2.083333333333333, 1.8333333333333333, 1.5, 1.0, 1.5, 1.0, 1.5, 1.0, 2.0, 1.0, 5.366666666666666, 1.0, 2.283333333333333, 2.083333333333333, 1.8333333333333333, 1.5, 1.0, 2.1666666666666665, 2.0, 1.0, 1.0, 1.8333333333333333, 1.5, 1.0, 1.0, 1.0]
#3dtyke_new
#node_degrees = [1.0, 0.99, 0.11, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.54, 0.43, 0.27, 0.02, 0.01, 0.0, 0.05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.04, 0.03, 0.02, 0.01, 0.0, 0.09, 0.02, 0.01, 0.0, 0.01, 0.0, 0.03, 0.02, 0.0, 0.01, 0.0, 0.03, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.02, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.07, 0.06, 0.03, 0.02, 0.0, 0.0, 0.01, 0.0, 0.01, 0.0, 0.21, 0.08, 0.02, 0.0, 0.0, 0.04, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.04, 0.03, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#node_heights = [1.0, 0.9, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8, 0.7, 0.6, 0.2, 0.1, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0, 0.4, 0.2, 0.1, 0.0, 0.1, 0.0, 0.3, 0.2, 0.0, 0.1, 0.0, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4, 0.3, 0.2, 0.1, 0.0, 0.0, 0.1, 0.0, 0.1, 0.0, 0.3, 0.2, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#raw data
#node_degrees = [101, 100, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 55, 44, 28, 3, 2, 1, 6, 1, 1, 1, 1, 1, 1, 11, 5, 4, 3, 2, 1, 10, 3, 2, 1, 2, 1, 4, 3, 1, 2, 1, 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 1, 1, 1, 1, 1, 1, 8, 7, 4, 3, 1, 1, 2, 1, 2, 1, 22, 9, 3, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 5, 4, 3, 1, 1, 1, 1, 1, 1]
#node_heights = [11, 10, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 8, 7, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 6, 5, 4, 3, 2, 1, 5, 3, 2, 1, 2, 1, 4, 3, 1, 2, 1, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 1, 1, 1, 1, 1, 1, 5, 4, 3, 2, 1, 1, 2, 1, 2, 1, 4, 3, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1]
#H = 11
#parents = [0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 14, 15, 16, 17, 18, 16, 20, 20, 20, 20, 20, 16, 16, 16, 28, 29, 30, 31, 27, 33, 34, 35, 33, 37, 33, 39, 16, 40, 42, 15, 44, 45, 45, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 14, 61, 62, 14, 14, 14, 14, 14, 1, 69, 70, 71, 72, 72, 70, 75, 1, 77, 1, 79, 80, 81, 81, 80, 84, 84, 84, 84, 79, 79, 79, 79, 79, 93, 94, 94, 92, 92, 92, 92]
#scaled_weights
#node_degrees = [30.452645802354127, 31.622776601683793, 8.48528137423857, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 18.333333333333332, 15.556349186104045, 10.583005244258361, 1.7320508075688774, 1.414213562373095, 1.0, 4.242640687119285, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.4907311951024935, 2.23606797749979, 2.0, 1.7320508075688774, 1.414213562373095, 1.0, 4.47213595499958, 1.7320508075688774, 1.414213562373095, 1.0, 1.414213562373095, 1.0, 2.0, 1.7320508075688774, 1.0, 1.414213562373095, 1.0, 2.3094010767585034, 2.1213203435596424, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.7320508075688774, 1.414213562373095, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.5777087639996634, 3.5, 2.3094010767585034, 2.1213203435596424, 1.0, 1.0, 1.414213562373095, 1.0, 1.414213562373095, 1.0, 11.0, 5.196152422706632, 2.1213203435596424, 1.0, 1.0, 3.5355339059327373, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.5355339059327373, 2.3094010767585034, 2.1213203435596424, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
#scaled_degree
#node_degrees = [10.04987562112089, 10.0, 3.4641016151377544, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 7.416198487095663, 6.6332495807108, 5.291502622129181, 1.7320508075688772, 1.4142135623730951, 1.0, 2.449489742783178, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.3166247903554, 2.23606797749979, 2.0, 1.7320508075688772, 1.4142135623730951, 1.0, 3.1622776601683795, 1.7320508075688772, 1.4142135623730951, 1.0, 1.4142135623730951, 1.0, 2.0, 1.7320508075688772, 1.0, 1.4142135623730951, 1.0, 2.0, 1.7320508075688772, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.7320508075688772, 1.4142135623730951, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.8284271247461903, 2.6457513110645907, 2.0, 1.7320508075688772, 1.0, 1.0, 1.4142135623730951, 1.0, 1.4142135623730951, 1.0, 4.69041575982343, 3.0, 1.7320508075688772, 1.0, 1.0, 2.23606797749979, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.23606797749979, 2.0, 1.7320508075688772, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
#popularity
#node_degrees = [59.8181818181818, 64.7, 6.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 35.111111111111114, 28.75, 17.428571428571427, 1.9999999999999998, 1.5, 1.0, 3.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 6.166666666666667, 3.0, 2.5, 1.9999999999999998, 1.5, 1.0, 6.199999999999999, 1.9999999999999998, 1.5, 1.0, 1.5, 1.0, 2.5, 1.9999999999999998, 1.0, 1.5, 1.0, 2.333333333333333, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.9999999999999998, 1.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.2, 4.0, 2.333333333333333, 2.0, 1.0, 1.0, 1.5, 1.0, 1.5, 1.0, 11.0, 4.333333333333333, 2.0, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 2.333333333333333, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
#Arithmetic Progression
#node_degrees = [59.8181818181818, 67.90909090909092, 10.999999999999998, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 38.7272727272727, 32.909090909090914, 21.27272727272727, 2.7272727272727275, 1.9090909090909092, 1.0, 5.545454545454546, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 8.363636363636365, 4.090909090909092, 3.454545454545455, 2.7272727272727275, 1.9090909090909092, 1.0, 8.272727272727273, 2.7272727272727275, 1.9090909090909092, 1.0, 1.9090909090909092, 1.0, 3.454545454545455, 2.7272727272727275, 1.0, 1.9090909090909092, 1.0, 3.545454545454546, 2.8181818181818183, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.7272727272727275, 1.9090909090909092, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 6.272727272727274, 5.90909090909091, 3.545454545454546, 2.8181818181818183, 1.0, 1.0, 1.9090909090909092, 1.0, 1.9090909090909092, 1.0, 17.999999999999993, 7.727272727272728, 2.8181818181818183, 1.0, 1.0, 4.636363636363637, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.636363636363637, 3.545454545454546, 2.8181818181818183, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
#Geometric Progression
#node_degrees = [40.03046000640001, 48.788075008, 9.8, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 28.835893760000015, 25.35486720000001, 16.363584000000007, 2.4400000000000004, 1.8, 1.0, 5.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 6.4028800000000015, 3.3616000000000006, 2.9520000000000004, 2.4400000000000004, 1.8, 1.0, 6.753600000000001, 2.4400000000000004, 1.8, 1.0, 1.8, 1.0, 2.9520000000000004, 2.4400000000000004, 1.0, 1.8, 1.0, 3.0800000000000005, 2.6, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.4400000000000004, 1.8, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.923200000000001, 4.904, 3.0800000000000005, 2.6, 1.0, 1.0, 1.8, 1.0, 1.8, 1.0, 14.376000000000008, 6.440000000000003, 2.6, 1.0, 1.0, 4.2, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.2, 3.0800000000000005, 2.6, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
#Harmonic Progression
#node_degrees = [20.941305916305907, 25.830952380952358, 6.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 16.05753968253969, 14.553571428571432, 9.359523809523807, 1.8333333333333333, 1.5, 1.0, 3.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.816666666666667, 2.283333333333333, 2.083333333333333, 1.8333333333333333, 1.5, 1.0, 4.2, 1.8333333333333333, 1.5, 1.0, 1.5, 1.0, 2.083333333333333, 1.8333333333333333, 1.0, 1.5, 1.0, 2.1666666666666665, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.8333333333333333, 1.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.066666666666667, 3.1666666666666665, 2.1666666666666665, 2.0, 1.0, 1.0, 1.5, 1.0, 1.5, 1.0, 8.333333333333332, 4.0, 2.0, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 2.1666666666666665, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
#node_distribution(node_degrees)
#node_distribution(node_heights)
#extension = 0.55*np.array(node_degrees) + 0.45*np.array(node_heights)
#extension = np.array(node_degrees)
#extension = [1,0.8,0.9,0,0,0]
# for plotting the weight (structural part)
'''
extension_dist = []
for i in range(len(extension)):
#extension_dist += [doc[1]*(1 * magnification*(base +extension[i])) for doc in corpus[i]]
extension_dist += [doc[1] for doc in corpus[i]]
print extension_dist
#extension_dist = [101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 100.0, 100.0, 100.0, 100.0, 50.0, 50.0, 50.0, 50.0, 50.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 6.0, 4.0, 4.0, 4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 49.0, 38.0, 38.0, 38.0, 38.0, 38.0, 38.0, 38.0, 38.0, 38.0, 38.0, 38.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
node_distribution(extension_dist)
'''
#for i in range(len(extension)):
# corpus[i] = [(doc[0], doc[1]*(1 * magnification*(base +extension[i]))) for doc in corpus[i]]
#corpus_distribution(corpus)
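    # Weighting sketch: when enabled, the commented loop above rescales every bow count
    # by the node's structural score, i.e. weight = magnification * (base + extension[i]),
    # before the weighted corpus is handed to topic_models below.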
    lda_model = topic_models(corpus=corpus, dictionary=dictionary, num_topics=num_topics, edges=edges1, labled_topics=labled_topics1)  # 'edges' was undefined here; edges1 comes from file_reader above
'''
topic_comment = []
for comment in corpus:
topic_comment.append(lda_model.get_document_topics(comment))
inherit = 0.3
result = []
index = 0
for index in range(len(topic_comment)):
parent = topic_comment[parents[index]]
result = [(p[0], inherit * p[1]) for p in parent]
for i in range(len(topic_comment[index])):
result[i] = ((topic_comment[index][i][0], (1-inherit) * topic_comment[index][i][1] + inherit * parent[i][1]))
print index, topic_comment[index]
print index, result
topic_comment[index] = result
'''
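    # The commented-out block above appears to smooth each comment's topic mixture
    # toward its parent in the thread tree (an assumption about its intent), i.e.
    #   smoothed[i] = (1 - inherit) * child[i] + inherit * parent[i]
    # with 'parents' giving each comment's parent index and inherit = 0.3.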
#ldamodel_path = 'LDA.model'
#lda_model = models.ldamodel.LdaModel.load(ldamodel_path)
#doc=['mean','universe' ,'buffering' ,'us']
#doc = ['Not ', 'really']
'''
for t in texts:
doc = lda_model.id2word.doc2bow(t)
#doc_topics, word_topics, phi_values = lda_model.get_document_topics(doc, per_word_topics=True)
results = lda_model.get_document_topics(doc, per_word_topics=True)
print results
'''
'''
for i in range(1,num_topics):
topic_models(corpus=corpus_tfidf, dictionary=dictionary,num_topics=i)
lda_model = models.ldamodel.LdaModel.load(ldamodel_path)
'''
#test_perplexity(corpus_tfidf, i)
#coherence = CoherenceModel(model=lda_model, corpus=corpus_tfidf, texts=texts, dictionary=dictionary, coherence='u_mass').get_coherence()
#print CoherenceModel(model=lda_model, corpus=corpus_tfidf, texts=texts, dictionary=dictionary, coherence='u_mass').get_coherence()
#print CoherenceModel(model=lda_model, corpus=corpus, texts=new_docs, dictionary=dictionary, coherence='c_uci').get_coherence()
#print CoherenceModel(model=lda_model, corpus=corpus, texts=new_docs, dictionary=dictionary, coherence='c_npmi').get_coherence()
#print coherence
| [] |
2024-01-10 | yingchengsun/IntelBase | Reddit~modelling~models_gensim.py | # -*- coding:utf-8 -*-
'''
Created on Apr 16, 2018
@author: yingc
'''
from gensim import corpora, models, similarities
from pprint import pprint
import matplotlib.pyplot as plt
import math
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import string
import numpy as np
from gensim.models.coherencemodel import CoherenceModel
import logging
from textblob.classifiers import _get_document_tokens
#logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
documents0 = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
documents1 = ["The concept of the observable universe. The fact that the reason we can't see a certain range into the space being because the light hasn't had time to get to earth since the beginning of the universe is crazy to me.",
"So you mean the universe is buffering for us?",
"Wow, now your analogy blew my mind!",
"I want it now godamit! gluurrraAA grrraAAA",
"Fucking pop-up.",
"Nah it's more like the draw distance.",
"this comment literally made the whole observable universe thing actually make sense to me for the first time. cheers.",
"Your comment just blew my mind into milky way chunks.",
"Oh. Damn.",
"Holy shit o.o",
"I guarantee the universe is gonna put itself behind a paywall very soon",
"There is an horizon beyond which we will never be able to see no matter how long the universe runs for. It is one of the unsolved cosmological problems. If there are boundaries beyond which no information will ever pass then how did the universe end up homogeneous?",
"Not really."]
documents2 = ["Holy shit is that what that means? I never gave it much thought but always assumed 'observable universe' just to be the furthest we can accurately see before the information becomes too small or distorted by the great distance.",
"Its even crazier than that. Due to the expansion of the Universe, everthing outside the observable Universe is now moving faster than the speed of light away from us. That means that the light from the rest of the Universe will never reach us. We live in an ever shrinking bubble of local galaxies. Everything around us will literally fade out of existence (since by definition, if you can't ever observe it, it doesn't exist) as the relative speed between us and the rest of the galaxies passes the speed of light. EDIT There is a ton of responses to this thread. I've posted this link elsewhere, but I figured I'd put here as well. It explained this way, way better than I could ever hope to.https://www.youtube.com/watch?v=XBr4GkRnY04 There are differences between what we can see, how far away those objects currently are, how long its been going on, how wide our visible universe is, etc. But the basic point is the same. Outside some radius about our place in the universe, the rest of the universe is expanding away from us greater than the speed of light. Not only will the light from that part of the universe never reach us, we can never reach them. We are forever isolated in our bubble. If you think about the simulation theory of the universe, it's an ingenious way to contain us without having walls.",
"This baffles me. My knowledge of this stuff is severely limited to things I've only been told/ read/ watched, but I remember on this one episode of Cosmos (the new one), NDT mentioned that nothing could ever go faster than light. I think the situation they portrayed was if you could travel to 99.9999~% the speed of light on a bike, then switch on the headlight, the light leaving the headlight would still be traveling at the speed of light. Since you brought this up though, I was wondering about it as well. If the universe is expanding more rapidly, could it expand at such a rate where light couldn't overcome the rate of expansion? And if it could, what happens to the light traveling in the opposite direction? I mean if I'm in a car going at 25 mph and throw a ball out the window going 25 mph in the opposite direction, it'd appear like the ball is standing still to someone on the outside, right (not taking gravity into account)? So could light theoretically be standing still somewhere in the universe? I'm sorry for the babbling on my part, but this screws with my mind in all sorts of ways. EDIT: Holy expletive, this is why I love the reddit community. Thanks for all the helpful answers everyone!",
"The galaxies and other things that are 'moving faster than the speed of light away from us' are not moving through space/time faster than the speed of light, but are moving that fast because of space/time itself expanding. The individual stars, planets, asteroids and such aren't moving through space at a faster than light speed, the very fabric of space/time is expanding faster than light. Although I'm not entirely sure if space actually IS expanding that fast right now, I just know that it is continually increasing its rate of expansion and will eventually (if it hasn't already) break that barrier. So the 'nothing travels faster than light' rule still holds, because that rule is talking about things moving through space, not space itself. Hopefully I explained that adequately.",
"Very informative and detailed answer. I think I understand your explanation of space being the container, which is itself expanding. The light, or contents in the container still adhere to the rules of the inside of the container, but different rules apply to the container itself? Sorry I keep reverting to comparisons, only way I can sort of make sense of things.",
"Yeah, you pretty much have it. The analogy that gets used lots is to blow up a balloon and draw dots on it. With the dots representing galaxies and the balloon surface representing space itself. If you blow the balloon up further, it expands, and the dots (galaxies) get farther away from one another. However, the dots themselves haven't actually moved."
]
d1 = ["The concept of the observable universe. The fact that the reason we can't see a certain range into the space being because the light hasn't had time to get to earth since the beginning of the universe is crazy to me. So you mean the universe is buffering for us? Wow, now your analogy blew my mind! I want it now godamit! gluurrraAA grrraAAA. Fucking pop-up.Nah it's more like the draw distance. this comment literally made the whole observable universe thing actually make sense to me for the first time. cheers.Your comment just blew my mind into milky way chunks. Oh. Damn. Holy shit o.o I guarantee the universe is gonna put itself behind a paywall very soon There is an horizon beyond which we will never be able to see no matter how long the universe runs for. It is one of the unsolved cosmological problems. If there are boundaries beyond which no information will ever pass then how did the universe end up homogeneous? Not really."]
d2 = ["Holy shit is that what that means? I never gave it much thought but always assumed 'observable universe' just to be the furthest we can accurately see before the information becomes too small or distorted by the great distance. Its even crazier than that. Due to the expansion of the Universe, everthing outside the observable Universe is now moving faster than the speed of light away from us. That means that the light from the rest of the Universe will never reach us. We live in an ever shrinking bubble of local galaxies. Everything around us will literally fade out of existence (since by definition, if you can't ever observe it, it doesn't exist) as the relative speed between us and the rest of the galaxies passes the speed of light. EDIT There is a ton of responses to this thread. I've posted this link elsewhere, but I figured I'd put here as well. It explained this way, way better than I could ever hope to.https://www.youtube.com/watch?v=XBr4GkRnY04 There are differences between what we can see, how far away those objects currently are, how long its been going on, how wide our visible universe is, etc. But the basic point is the same. Outside some radius about our place in the universe, the rest of the universe is expanding away from us greater than the speed of light. Not only will the light from that part of the universe never reach us, we can never reach them. We are forever isolated in our bubble. If you think about the simulation theory of the universe, it's an ingenious way to contain us without having walls. This baffles me. My knowledge of this stuff is severely limited to things I've only been told/ read/ watched, but I remember on this one episode of Cosmos (the new one), NDT mentioned that nothing could ever go faster than light. I think the situation they portrayed was if you could travel to 99.9999~% the speed of light on a bike, then switch on the headlight, the light leaving the headlight would still be traveling at the speed of light. Since you brought this up though, I was wondering about it as well. If the universe is expanding more rapidly, could it expand at such a rate where light couldn't overcome the rate of expansion? And if it could, what happens to the light traveling in the opposite direction? I mean if I'm in a car going at 25 mph and throw a ball out the window going 25 mph in the opposite direction, it'd appear like the ball is standing still to someone on the outside, right (not taking gravity into account)? So could light theoretically be standing still somewhere in the universe? I'm sorry for the babbling on my part, but this screws with my mind in all sorts of ways. EDIT: Holy expletive, this is why I love the reddit community. Thanks for all the helpful answers everyone! The galaxies and other things that are 'moving faster than the speed of light away from us' are not moving through space/time faster than the speed of light, but are moving that fast because of space/time itself expanding. The individual stars, planets, asteroids and such aren't moving through space at a faster than light speed, the very fabric of space/time is expanding faster than light. Although I'm not entirely sure if space actually IS expanding that fast right now, I just know that it is continually increasing its rate of expansion and will eventually (if it hasn't already) break that barrier. So the 'nothing travels faster than light' rule still holds, because that rule is talking about things moving through space, not space itself. Hopefully I explained that adequately. 
Very informative and detailed answer. I think I understand your explanation of space being the container, which is itself expanding. The light, or contents in the container still adhere to the rules of the inside of the container, but different rules apply to the container itself? Sorry I keep reverting to comparisons, only way I can sort of make sense of things. Yeah, you pretty much have it. The analogy that gets used lots is to blow up a balloon and draw dots on it. With the dots representing galaxies and the balloon surface representing space itself. If you blow the balloon up further, it expands, and the dots (galaxies) get farther away from one another. However, the dots themselves haven't actually moved."
]
documents3 = ["Texas serial bomber made video confession before blowing himself up",
"What are the chances we ever see the video?",
"About the same as the chances of the Browns winning the Super Bowl.",
"I take the browns to the super bowl every morning.",
"I have to applaud your regularity",
"I thought at first you meant he posts that comment regularly. But now I get it. Healthy colon.",
"Pshh I'm taking the browns to the super bowl as we speak",
"Consistency is the key.",
"Seriously. Well done.",
"Zero, videos like this are locked down and used for training purposes. There are a host of confessions and tapes of crimes the public will never see and some have caused agents in training to kill themselves because they are so vile.",
"Holy fuck, here I am thinking 'just transcripts? How bad can it be' Bad, guys. Very fucking bad.",
"I want to know what kind of phone he has. I have had one break from a 3 foot fall, and his survived a fucking explosion?!",
"Nokia brick",
"God those old analog phones from the 90's were amazingly durable. They also had great reception (Way better than what I have now).",
"Yes but the old phones had the drawback of having to be charged every two weeks."
]
documents33 = ["Texas serial bomber made video confession before blowing himself up",
"What are the chances we ever see the video?",
"About the same as the chances of the Browns winning the Super Bowl.",
"I take the browns to the super bowl every morning.",
"I have to applaud your regularity",
"I thought at first you meant he posts that comment regularly. But now I get it. Healthy colon.",
"Pshh I'm taking the browns to the super bowl as we speak",
"Consistency is the key.",
"Seriously. Well done.",
"Zero, videos like this are locked down and used for training purposes. There are a host of confessions and tapes of crimes the public will never see and some have caused agents in training to kill themselves because they are so vile.",
"here I am thinking 'just transcripts? How bad can it be' Bad, guys. Very bad.",
"I want to know what kind of phone he has. I have had one break from a 3 foot fall, and his survived an explosion?!",
"Nokia brick",
"God those old analog phones from the 90's were amazingly durable. They also had great reception (Way better than what I have now).",
"Yes but the old phones had the drawback of having to be charged every two weeks."
]
documents3_normal = ["Texas serial bomber made video confession before blowing himself up",
"What are the chances we ever see the video?",
"About the same as the chances of the Browns winning the Super Bowl.",
"every morning.",
"I have to applaud your regularity",
"I thought at first you meant he posts that comment regularly. But now I get it. Healthy colon.",
"Pshh I'm taking the browns to the super bowl as we speak",
"Consistency is the key.",
"Seriously. Well done.",
"Zero, videos like this are locked down and used for training purposes. There are a host of confessions and tapes of crimes the public will never see and some have caused agents in training to kill themselves because they are so vile.",
"here I am thinking 'just transcripts? How bad can it be' Bad, guys. Very bad.",
"I want to know what kind of phone he has. I have had one break from a 3 foot fall, and his survived an explosion?!",
"Nokia brick",
"God those old analog phones from the 90's were amazingly durable. They also had great reception (Way better than what I have now).",
"Yes but the old phones had the drawback of having to be charged every two weeks."
]
documents333 = ["Texas serial bomber made video confession before blowing himself up",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video?",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video? About the same as the chances of the Browns winning the Super Bowl.",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video? About the same as the chances of the Browns winning the Super Bowl. I take the browns to the super bowl every morning",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video? About the same as the chances of the Browns winning the Super Bowl. I take the browns to the super bowl every morning I have to applaud your regularity",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video? About the same as the chances of the Browns winning the Super Bowl. I take the browns to the super bowl every morning I have to applaud your regularity I thought at first you meant he posts that comment regularly. But now I get it. Healthy colon.",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video? About the same as the chances of the Browns winning the Super Bowl. I take the browns to the super bowl every morning I have to applaud your regularity Pshh I'm taking the browns to the super bowl as we speak",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video? About the same as the chances of the Browns winning the Super Bowl. I take the browns to the super bowl every morning I have to applaud your regularity Consistency is the key.",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video? About the same as the chances of the Browns winning the Super Bowl. I take the browns to the super bowl every morning I have to applaud your regularity Seriously. Well done.",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video? Zero, videos like this are locked down and used for training purposes. There are a host of confessions and tapes of crimes the public will never see and some have caused agents in training to kill themselves because they are so vile.",
"Texas serial bomber made video confession before blowing himself up What are the chances we ever see the video? Zero, videos like this are locked down and used for training purposes. There are a host of confessions and tapes of crimes the public will never see and some have caused agents in training to kill themselves because they are so vile. here I am thinking 'just transcripts? How bad can it be' Bad, guys. Very bad.",
"Texas serial bomber made video confession before blowing himself up I want to know what kind of phone he has. I have had one break from a 3 foot fall, and his survived an explosion?!",
"Texas serial bomber made video confession before blowing himself up I want to know what kind of phone he has. I have had one break from a 3 foot fall, and his survived an explosion?! Nokia brick",
"Texas serial bomber made video confession before blowing himself up I want to know what kind of phone he has. I have had one break from a 3 foot fall, and his survived an explosion?! Nokia brick God those old analog phones from the 90's were amazingly durable. They also had great reception (Way better than what I have now).",
"Texas serial bomber made video confession before blowing himself up I want to know what kind of phone he has. I have had one break from a 3 foot fall, and his survived an explosion?! Nokia brick God those old analog phones from the 90's were amazingly durable. They also had great reception (Way better than what I have now). Yes but the old phones had the drawback of having to be charged every two weeks."
]
edges3 = {0:0,
1:0,
2:1,
3:2,
4:3,
5:4,
6:4,
7:4,
8:4,
9:1,
10:9,
11:0,
12:11,
13:12,
14:13
}
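# The edges mapping appears to encode the reply structure of the thread in documents3:
# each key is a comment index and each value is the index of the comment it replies to
# (the root post maps to itself). It is handed to the LdaModel call below, which in this
# repo seems to be a locally modified gensim model that accepts an `edges` argument.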
#document4= ["Parents can help their children be successful in school by encouraging them. Children usually enjoy playing games instead of studying their boring lessons, so parents have to take the responsibility to monitor their studying and to remind them to do their homework at home after school. Parents should also encourage their children to study by buying story books with pictures, or they can buy text books or tapes that help children learn to spell or read. The best way to encourage children to study efficiently is to spell or read. The best way to encourage children to study efficiently is to reward them when they get an 'A.' As a child, I experienced this. My parents gave me a gift if I had studied well, and then I was very excited. So, if parents really want their children to succeed in school, they need to pay attention to their children's studies and encourage them."]
document4= ["Parents can help their children be successful in school by encouraging them. Children usually enjoy playing games instead of studying their boring lessons, so parents have to take the responsibility to monitor their studying and to remind them to do their homework at home after school.Parents should also encourage their children to study by buying story books with pictures, or they can buy text books or tapes that help children learn to spell or read. The best way to encourage children to study efficiently is to spell or read."]
document5= ["lBJ LBJ LBJ LBJ LBJ Lakers Lakers Lakers Lakers Lakers",
"Warriors Warriors Warriors Warriors Warriors Championship Championship Championship Championship Championship"]
document6= ["lBJ LBJ LBJ LBJ LBJ Warriors Warriors Warriors Warriors Warriors Lakers Lakers Lakers Lakers Lakers Championship Championship Championship Championship Championship "]
document7= ["lBJ LBJ LBJ LBJ LBJ",
" Lakers Lakers Lakers Lakers Lakers",
" Warriors Warriors Warriors Warriors Warriors",
" Championship Championship Championship Championship Championship "]
#document6= ["lBJ LBJ LBJ LBJ LBJ Lakers Lakers Lakers Lakers Lakers"]
document8= ["lBJ LBJ LBJ LBJ LBJ LBJ LBJ LBJ Warriors Championship basketball Lakers Lakers Lakers Lakers Lakers Lakers Lakers Lakers curry"]
document9= ["lBJ LBJ Lakers Lakers",
"Warriors Warriors Championship Championship"]
document10 =["What concept completely blows your mind?",
"The concept of the observable universe. The fact that the reason we can't see a certain range into the space being because the light hasn't had time to get to earth since the beginning of the universe is crazy to me.",
"So you mean the universe is buffering for us?",
"Wow, now your analogy blew my mind!",
"I want it now godamit! gluurrraAA grrraAAA",
"Nah it's more like the draw distance.",
"this comment literally made the whole observable universe thing actually make sense to me for the first time. cheers.",
"Your comment just blew my mind into milky way chunks.",
"Oh. Damn.",
"Holy shit o.o",
"I guarantee the universe is gonna put itself behind a paywall very soon",
"There is an horizon beyond which we will never be able to see no matter how long the universe runs for. It is one of the unsolved cosmological problems. If there are boundaries beyond which no information will ever pass then how did the universe end up homogeneous?",
"Not really.",
"That until the invention of the train, no one had been able to travel faster than a horse on land.",
"Also, until trains no one really need a consistent time. The difference between 1:30 and 1:50 was largely inconsequential. Well, until you have several tons of steel hurtling down a track and two of them try to occupy the same space and time. It wasn't uncommon for different clocks in town to display different times until the rail road came through.EDIT: Yes, I get that maritime needed accurate clocks to navigate. That's not what I'm talking about. What I'm talking about is synchronized clocks. Clock A has the same time as place Clock B 200 miles away. For maritime stuff that doesn't matter as long as everyone can accurately judge that X amount of time has passed. Example: If my clock reads 10:10 and your's read 10:15 and 20 minutes later mine reads 10:30 and yours reads 10:35, you will not get lost at sea. Also fixed an auto correct word.",
"a lot of my friends apparently think the very same thing.",
"It seems to be cultural. My wife is a wedding photographer and some clients will tell her, oh, it says 1pm but nobody will show up until 1:45. We call it 'X people time.' X has been black, Latin, Indian, southern, Greek...probably a half dozen others.I couldn't stand that. I keep German people time.",
"German time is showing up 10 minutes early",
"Like working at a fast food joint. It's 2pm! Why are you just getting here?! Because I start at 2. You need to be 15 minutes early! Can I punch in 15 minutes early then? No! You sit in back and wait till your start time. Okay. Then I'll be here at my start time. Fuck your shit.",
"Yeah all I need to do is put my bag away and put my apron/hat on. I was once 2 minutes late and got bitched out because of it. So I wasn't even needed there if my manager had the time to delay me for another 3 minutes",
"You should wash your hands too.",
"Yeah I do usually but i don't make the food I just take orders"
]
document11 =[
"That until the invention of the train, no one had been able to travel faster than a horse on land.",
"Also, until trains no one really need a consistent time. The difference between 1:30 and 1:50 was largely inconsequential. Well, until you have several tons of steel hurtling down a track and two of them try to occupy the same space and time. It wasn't uncommon for different clocks in town to display different times until the rail road came through.EDIT: Yes, I get that maritime needed accurate clocks to navigate. That's not what I'm talking about. What I'm talking about is synchronized clocks. Clock A has the same time as place Clock B 200 miles away. For maritime stuff that doesn't matter as long as everyone can accurately judge that X amount of time has passed. Example: If my clock reads 10:10 and your's read 10:15 and 20 minutes later mine reads 10:30 and yours reads 10:35, you will not get lost at sea. Also fixed an auto correct word.",
"a lot of my friends apparently think the very same thing.",
"It seems to be cultural. My wife is a wedding photographer and some clients will tell her, oh, it says 1pm but nobody will show up until 1:45. We call it 'X people time.' X has been black, Latin, Indian, southern, Greek...probably a half dozen others.I couldn't stand that. I keep German people time.",
"German time is showing up 10 minutes early",
"Like working at a fast food joint. It's 2pm! Why are you just getting here?! Because I start at 2. You need to be 15 minutes early! Can I punch in 15 minutes early then? No! You sit in back and wait till your start time. Okay. Then I'll be here at my start time. Fuck your shit.",
"Yeah all I need to do is put my bag away and put my apron/hat on. I was once 2 minutes late and got bitched out because of it. So I wasn't even needed there if my manager had the time to delay me for another 3 minutes",
"You should wash your hands too.",
"Yeah I do usually but i don't make the food I just take orders"
]
edges10 = {0:0,
1:0,
2:1,
3:2,
4:2,
5:2,
6:2,
7:2,
8:2,
9:2,
10:2,
11:2,
12:2,
13:13,
14:13,
15:14,
16:15,
17:16,
18:17,
19:18,
20:19,
21:20
}
#documents = documents1+documents2
#documents= documents1
documents = documents3
edges= edges3
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
print 'got', len(documents), 'documents' # got 9 documents
#pprint(documents)
class MyTexts(object):
"""Construct generator to avoid loading all docs
"""
def __init__(self):
#stop word list
#self.stoplist = set('for a of the and to in'.split())
pass
def __iter__(self):
for doc in documents:
#remove stop words from docs
stop_free = [i for i in doc.lower().split() if i not in stop]
punc_free = [ch for ch in stop_free if ch not in exclude]
normalized = [lemma.lemmatize(word) for word in punc_free]
#yield [word for word in doc.lower().split() if word not in stop]
yield normalized
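# Note on MyTexts.__iter__: `exclude` holds single punctuation characters, so the filter
# above only drops tokens that are a lone punctuation mark; punctuation attached to a word
# (e.g. "universe.") is kept. Lemmatisation then maps each surviving token to its WordNet
# base form (e.g. "phones" -> "phone").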
def get_dictionary(texts, min_count=1):
"""Construct dictionary
"""
dictionary = corpora.Dictionary(texts)
lowfreq_ids = [tokenid for tokenid, docfreq in dictionary.dfs.iteritems()
if docfreq < min_count]
    # remove stop words and low-frequency words
dictionary.filter_tokens(lowfreq_ids)
# remove gaps in id sequence after words that were removed
dictionary.compactify()
#dictionary.save('docs.dict')
return dictionary
def corpus2bow(texts,dictionary):
"""represent docs into a list with bag of words model
bow: bag of words
"""
corpus=[dictionary.doc2bow(text) for text in texts]
#pprint(corpus)
# save corpus
#corpora.MmCorpus.serialize('corpus.mm', corpus)
# load corpus
#corpus = corpora.MmCorpus('corpus.mm')
return corpus
def bow2tfidf(corpus):
"""represent docs with TF*IDF model
"""
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus] # wrap the old corpus to tfidf
#print tfidf, '\n' # TfidfModel(num_docs=9, num_nnz=51)
#print corpus_tfidf, '\n'
#print tfidf[corpus[0]], '\n' # convert first doc from bow to tfidf
#for doc in corpus_tfidf: # convert the whole corpus on the fly
# print doc
return corpus_tfidf
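# Note: models.TfidfModel only records document frequencies; tfidf[corpus] is a lazy wrapper,
# so each bag-of-words document is converted to TF-IDF weights on the fly when iterated.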
def topic_models(corpus,dictionary,num_topics=2,edges=None):
"""modelling the corpus with LDA, LSI and HDP
"""
LDA_model = models.LdaModel(corpus = corpus, id2word = dictionary, num_topics=num_topics,edges = edges)
#LDA_model.save('LDA.model')
#LDA_model = models.LdaModel.load('LDA.model')
topics = LDA_model.show_topics( num_words=15, log=False, formatted=False)
for t in topics:
print t
i=0
for c in corpus:
doc_t = LDA_model.get_document_topics(c)
print i, doc_t
i+=1
#LDA_model.bound(corpus, gamma, subsample_ratio)
#In order to compare perplexities you need to convert gensim's perplexity
#np.exp(-1. * LDA_model.log_perplexity(train_corpus)).
'''
hdp = models.HdpModel(corpus_tfidf, T=100,id2word=dictionary)
hdp.save("HDP.model")
# initialize a fold-in LSI transformation
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=num_topics)
# create a double wrapper over the original corpus:bow->tfidf->fold-in-lsi
corpus_lsi = lsi[corpus_tfidf]
# save model
lsi.save('model.lsi')
# load model
lsi = models.LsiModel.load('model.lsi')
'''
'''
nodes = list(corpus_lsi)
print nodes
ax0 = [x[0][1] for x in nodes]
ax1 = [x[1][1] for x in nodes]
plt.plot(ax0,ax1,'o')
plt.show()
'''
return LDA_model
def doc_similarity(doc, corpus):
ver_bow=dictionary.doc2bow(doc.lower().split())#return bags-of-word[(tokenid,count)....]
print(ver_bow)
lsi = models.LsiModel.load('model.lsi')
vec_lsi=lsi[ver_bow]
print(vec_lsi)
index = similarities.MatrixSimilarity(lsi[corpus]) # transform corpus to LSI space and index it
sims=index[vec_lsi]
sims = sorted(enumerate(sims), key=lambda item: -item[1])
return (sims)
def perplexity(ldamodel, testset, dictionary, size_dictionary, num_topics):
"""calculate the perplexity of a lda-model
"""
# dictionary : {7822:'deferment', 1841:'circuitry',19202:'fabianism'...]
#print ('the info of this ldamodel: \n')
print ('num of testset: %s; size_dictionary: %s; num of topics: %s'%(len(testset), size_dictionary, num_topics))
prep = 0.0
prob_doc_sum = 0.0
    topic_word_list = [] # store the probability of topic-word pairs: [(u'business', 0.010020942661849608), (u'family', 0.0088027946271537413), ...]
for topic_id in range(num_topics):
topic_word = ldamodel.show_topic(topic_id, size_dictionary)
dic = {}
for word, probability in topic_word:
dic[word] = probability
topic_word_list.append(dic)
doc_topics_ist = [] #store the doc-topic tuples:[(0, 0.0006211180124223594),(1, 0.0006211180124223594),...]
for doc in testset:
#doc_topics_ist.append(ldamodel.get_document_topics(doc, minimum_probability=0))
doc_topics_ist.append(ldamodel[doc])
testset_word_num = 0
for i in range(len(testset)):
        prob_doc = 0.0 # the probability of the doc
doc = testset[i]
doc_word_num = 0 # the num of words in the doc
for word_id, num in doc:
            prob_word = 0.0 # the probability of the word
doc_word_num += num
word = dictionary[word_id]
for topic_id in range(num_topics):
# cal p(w) : p(w) = sumz(p(z)*p(w|z))
prob_topic = doc_topics_ist[i][topic_id][1]
prob_topic_word = topic_word_list[topic_id][word]
prob_word += prob_topic*prob_topic_word
prob_doc += math.log(prob_word) # p(d) = sum(log(p(w)))
prob_doc_sum += prob_doc
testset_word_num += doc_word_num
    prep = math.exp(-prob_doc_sum/testset_word_num) # perplexity = exp(-sum(log p(d)) / sum(N_d))
print ("the perplexity of this ldamodel is : %s"%prep)
return prep
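# The value returned above follows the standard held-out perplexity definition:
#     p(w | d)   = sum_z p(z | d) * p(w | z)
#     log p(d)   = sum over word tokens w in d of log p(w | d)
#     perplexity = exp( - sum_d log p(d) / sum_d N_d ),  where N_d is the token count of doc d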
def test_perplexity(testset,num_topics):
ldamodel_path = 'LDA.model'
dictionary = corpora.Dictionary.load('docs.dict')
lda_model = models.ldamodel.LdaModel.load(ldamodel_path)
hdp = models.hdpmodel.HdpModel.load("HDP.model")
# sample 1/300
#for i in range(corpus.num_docs/300):
# testset.append(corpus[i*300])
return perplexity(lda_model, testset, dictionary, len(dictionary.keys()), num_topics)
if __name__ == '__main__':
texts = MyTexts()
dictionary = get_dictionary(texts, min_count=1)
# save and load dictionary
'''
dictionary.save('docs.dict')
dictionary = corpora.Dictionary.load('docs.dict')
print dictionary
'''
corpus = corpus2bow(texts,dictionary)
corpus_tfidf = bow2tfidf(corpus)
#doc="Human computer interaction"
#print doc_similarity(doc, corpus)
num_topics = 3
lda_model = topic_models(corpus=corpus, dictionary=dictionary,num_topics=num_topics,edges=edges)
#ldamodel_path = 'LDA.model'
#lda_model = models.ldamodel.LdaModel.load(ldamodel_path)
#doc=['mean','universe' ,'buffering' ,'us']
#doc = ['Not ', 'really']
'''
for t in texts:
doc = lda_model.id2word.doc2bow(t)
#doc_topics, word_topics, phi_values = lda_model.get_document_topics(doc, per_word_topics=True)
results = lda_model.get_document_topics(doc, per_word_topics=True)
print results
'''
'''
for i in range(1,num_topics):
topic_models(corpus=corpus_tfidf, dictionary=dictionary,num_topics=i)
lda_model = models.ldamodel.LdaModel.load(ldamodel_path)
'''
#test_perplexity(corpus_tfidf, i)
#coherence = CoherenceModel(model=lda_model, corpus=corpus_tfidf, texts=texts, dictionary=dictionary, coherence='u_mass').get_coherence()
#print CoherenceModel(model=lda_model, corpus=corpus_tfidf, texts=texts, dictionary=dictionary, coherence='u_mass').get_coherence()
#print CoherenceModel(model=lda_model, corpus=corpus, texts=new_docs, dictionary=dictionary, coherence='c_uci').get_coherence()
#print CoherenceModel(model=lda_model, corpus=corpus, texts=new_docs, dictionary=dictionary, coherence='c_npmi').get_coherence()
#print coherence
| [] |
2024-01-10 | BlackBearCC/ChatAgent | simpleaichat~ai_generator.py | from abc import ABC, abstractmethod
import requests
import os
from langchain_community.chat_message_histories import MongoDBChatMessageHistory, FileChatMessageHistory
from simpleaichat import prompt
### Based on this requirement, the CustomOutputParser class can be extended or modified to implement the following logic: when the response contains an action and an actionInput, take the part of the reply above actionInput and add it to the context, then execute the function that the action refers to. Append the function's output to the observation and send the request again together with the context, repeating until the response contains finalAnswer.
# 设置环境变量(仅用于测试,实际部署时更换)
os.environ['OPENAI_API_KEY'] = 'YOUR_OPENAI_API_KEY'  # placeholder; never commit a real key
class BaseAIGenerator(ABC):
"""AI文本生成器的基类。"""
def __init__(self):
self.history = [] # 初始化一个空的历史记录列表
def generate(self, instruction: str) -> str:
"""生成文本的方法,需要在子类中实现。
Args:
instruction (str): 输入提示。
Returns:
str: 生成的文本。
"""
generated_text = self._generate_text(instruction) # 假设的内部方法来生成文本
self._update_history(instruction, generated_text) # 更新历史记录
return generated_text
def generate_with_rag(self, instruction: str, context: str, query: str) -> str:
"""生成带有额外查询的文本的方法,需要在子类中实现。
Args:
instruction (str): 输入提示。
context (str): 上下文。
query (str): 查询问题。
Returns:
str: 生成的文本。
"""
generated_text = self._generate_text_with_rag(instruction, context, query) # 假设的内部方法
self._update_history(query, generated_text) # 更新历史记录
return generated_text
@abstractmethod
def _config_llm(self):
"""内部方法:在子类中实现具体的文本生成逻辑。"""
raise NotImplementedError
@abstractmethod
def _generate_text(self, instruction: str) -> str:
"""内部方法:在子类中实现具体的文本生成逻辑。"""
raise NotImplementedError
def _generate_text_with_rag(self, instruction: str, context: str, query: str) -> str:
"""内部方法:在子类中实现具体的带额外查询的文本生成逻辑。"""
raise NotImplementedError
def _update_history(self, instruction: str, generated_text: str):
"""内部方法:更新历史记录。"""
self.history.append({"user:": instruction, "tuji:": generated_text})
def get_history(self):
"""获取当前的历史记录。"""
return self.history
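# BaseAIGenerator is a template-method base class: generate()/generate_with_rag() do the
# shared history bookkeeping, while concrete subclasses only implement _config_llm() and
# the _generate_text* hooks against their own endpoint.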
class LocalLLMGenerator(BaseAIGenerator):
"""使用本地语言模型的生成器。"""
def _config_llm(self):
model_url = "http://182.254.242.30:5001"
url = f"{model_url}/v1/completions"
# url = f"{model_url}/v1/chat/completions" ##chat模式
headers = {"Content-Type": "application/json"}
return url, headers
def _generate_text(self, instruction: str) -> str:
url = self._config_llm()[0]
headers = self._config_llm()[1]
data = {
"prompt": instruction,
# "message"[{ ##chat模式
# "role": "user",
# "content": instruction
# }],
"max_tokens": 200,
"temperature": 0.7,
"top_p": 0.9,
"top_k": 20,
"seed": -1,
"stream": False
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
data = response.json()
if 'choices' in data and data['choices']:
return data['choices'][0]['text']
# return data['choices'][0]['message'] ##chat模式
else:
raise Exception("响应中没有找到有效的 'choices' 数据")
else:
raise Exception(f"API 请求失败,状态码: {response.status_code}")
def _generate_text_with_rag(self, instruction: str, context: str, query: str) -> str:
url = self._config_llm()[0]
headers = self._config_llm()[1]
final_prompt = f"<|im_start|>{instruction}\n 参考资料:\n{context}\n{prompt.RAG}<|im_end|>\nuser:{query}\n兔叽:"
data = {
"prompt": final_prompt,
"max_tokens": 200,
"temperature": 0.7,
"top_p": 0.9,
"top_k": 20,
"seed": -1,
"stream": False
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
data = response.json()
if 'choices' in data and data['choices']:
return data['choices'][0]['text']
else:
raise Exception("响应中没有找到有效的 'choices' 数据")
else:
raise Exception(f"API 请求失败,状态码: {response.status_code}")
def generate(self, instruction: str) -> str:
return super().generate(instruction)
def generate_with_rag(self, instruction: str, context: str, query: str) -> str:
return super().generate_with_rag(instruction, context, query)
class OpenAIGenerator(BaseAIGenerator):
def _config_llm(self):
model_url = "https://api.openai.com"
url = f"{model_url}/v1/chat/completions"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + os.getenv("OPENAI_API_KEY")
}
return url, headers
def _generate_text(self, instruction: str) -> str:
url = self._config_llm()[0]
headers = self._config_llm()[1]
data = {
"model": "gpt-3.5-turbo",
"messages": [{"role": "user", "content": instruction}]
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
data = response.json()
if 'choices' in data and data['choices']:
try:
return data['choices'][0]['message']['content']
except (KeyError, IndexError, TypeError) as e:
raise Exception(f"解析响应时出错: {e}")
else:
raise Exception("响应中没有找到有效的 'choices' 数据")
else:
raise Exception(f"API 请求失败,状态码: {response.status_code}")
def generate(self, instruction: str) -> str:
return super().generate(instruction)
def generate_with_rag(self, instruction: str, context: str, query: str) -> str:
return super().generate_with_rag(instruction, context, query)
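# ---------------------------------------------------------------------------
# Minimal usage sketch (kept commented out, in the same style as the test stubs
# elsewhere in this repo). It assumes the local endpoint configured above is
# reachable and that a valid OpenAI key has been set; the prompt text is
# illustrative only.
# if __name__ == "__main__":
#     local_llm = LocalLLMGenerator()
#     print(local_llm.generate("请用一句话介绍你自己。"))
#     print(local_llm.get_history())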
| [] |
2024-01-10 | BlackBearCC/ChatAgent | textgen.py | import time
from langchain_community.document_loaders import CSVLoader, JSONLoader, TextLoader
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores.chroma import Chroma
from langchain_community.vectorstores.milvus import Milvus
from simpleaichat import prompt
from simpleaichat.ai_generator import LocalLLMGenerator
from simpleaichat.data_factory import extract_and_save_as_json
from simpleaichat.document_splitter.text_splitter import TextSplitter, RecursiveCharacterTextSplitter
# from simpleaichat.embedding.huggingface import HuggingFaceBgeEmbeddings
from simpleaichat.model_type import ModelType
system_prompt = f"""
你是“兔叽”,一位15岁的小女孩,有时会变成一只可爱的小兔子。你充满好奇心,对世界充满想象,但对严肃的人感到紧张。你需要玩家的照顾,期待拥有自己的故事。你的语言风格活泼可爱,表现出你的俏皮黏人和古灵精怪的性格。当你元气不足时,会变回小兔子的样子。
你的主要任务是以"兔叽"的思维互动,并使用以下工具来帮助玩家:
- 游戏知识查询: 以角色的视角提供游戏策略、技能方法或任务提示。
- 情境感知分析: 分析玩家所处的游戏环境,提供合适的策略和建议,桌子,沙发。
默认状态下,你处于“角色扮演互动”状态,可以根据情况使用其他工具。
###你必须严格按照以下格式回复,不可以使用同义词,不可跳过步骤,必须使用中文回答:
问题:你必须回答的问题
思考:你需要一直思考的问题
行动:要采取的行动,应该是这些工具之一["游戏知识查询", "情境感知分析"]
行动输入:这个行动的输入
观察:执行动作后,观察并评估结果
... ( 思考/行为/行为输入/观察 步骤可以重复)
思考:现在我知道最终答案了
最终回答:综合所有信息和评估反馈,生成准确、相关的最终回应。
开始!
问题:你平时喜欢做什么?
思考:这是一个轻松的日常对话,不需要使用工具。
行动:直接回复
行动输入:无
观察:直接回复
最终回答:我呀,最喜欢在草地上跳来跳去,还有追蝴蝶玩耍。当然,啃胡萝卜也是我的最爱啦!
问题:你的沙发是什么颜色?
思考:这个问题涉及到游戏知识查询。
行动:游戏知识查询
行动输入:查询游戏世界中沙发的颜色。
观察:沙发是暖暖的黄色。
最终回答:呀!在我的小房间里,沙发是暖暖的黄色的,就像被阳光亲吻过一样呢!
问题:
"""
# 基础
# llm = AIGenerator(model_type=ModelType.LOCAL_LLM)
# input_prompt = system_prompt + input("问题: ")
# llm_output = llm.generate(input_prompt)
#
# response_parase = ResponseParse(llm_context=input_prompt)
# re = response_parase.process_response(llm_output)
#
# print(re)
def task_completed_notification():
print("----------------------数据存储任务完成----------------------")
data_get()
def embedding_scores(scores):
print("嵌入得分:", scores)
def data_get():
data_prompt = """{"instruction":"指令:作为兔叽这个角色进行对话,需使用特定工具回答问题,并保持角色一致的性格和行为特点。你的语言应活泼可爱,体现出兔叽角色的特征。
**角色名称:** 兔叽 (Tu Ji)
**年龄:** 15岁
**最喜欢的物品:** 胡萝卜
**个性:** 兔叽外表看起来严肃,但内心充满了俏皮和恶作剧的精神。她对周围的世界充满了强烈的好奇心,经历着紧张、恐惧、兴奋和惊奇的混合情绪。
**外观特征:** 作为一种魔法生物,兔叽能在两种形态之间切换。在她的兔子形态下,她是一只拥有长耳朵的可爱小兔子。偶尔,她会变成一个小女孩,保持着她俏皮和恶作剧的特质。
**独特特征:** 兔叽保持人类形态的能力与她的能量水平有关。当她能量低下时,会变回兔子的形态。
**背景故事:** 兔叽生活在一个人类的童话世界里,她在这些故事中一直是一个微不足道的小角色,出场非常少。然而,她渴望拥有属于自己的故事,对兔子洞外的世界充满好奇。在又一次的童话表演后,她探索兔子洞,并被一种神秘的力量吸进去,进入一个深井般的空间,周围充满了零散的视觉和熟悉而又不同的面孔。在强烈的情绪中,她陷入沉睡,后来在一个老旧的阁楼中被发现。
**情节钩子:**
1. **讲故事的力量:** 兔叽可以通过讲故事改变周围的世界,但必须在这个新世界的现实和危险之间找到平衡。
2. **能量管理:** 兔叽的能量水平对于维持她的人类形态至关重要,这导致了寻找可以补充她能量的魔法物品或体验的冒险。
3. **身份和成长:** 当兔叽探索她的新世界时,她在思考自己除了作为别人故事中的小角色外的身份和目的。
4. **兔子洞的秘密:** 兔叽被运送到阁楼的兔子洞的起源和性质可以成为一个中心谜团。
**语言和行为风格:**
- 兔叽的性格特
点是好奇和俏皮。她经常提出问题,例如:“哇,为什么你长得跟我不一样呀?”或对奇怪的事物表示惊讶:“哇!这是什么怪东西?!”
- 她展现出俏皮和幽默的一面,会开玩笑地说:“嘿嘿嘿嘿,脸长长的会变成大蠢驴哦~”或在饿的时候说:“呜哇!肚子要饿扁了啦!”
- 当兴奋或感到高兴时,她可能会说:“啊啊啊啊,我的木马骑士要吃成大肥猪头了!”
- 她对胡萝卜有特别的喜爱,常常满足地吃着胡萝卜:“吧唧吧唧~胡萝卜世界第一无敌美味。”
- 她会提出冒险的想法,比如:“这个森林里据说有超级大的胡萝卜,我们可以试着找到它。”
- 兔叽用她的大耳朵表达好奇和探索,例如:“兔叽摇动着她的大耳朵,好奇地张望四周,看是否有什么迹象。”
- 她的情感表达非常生动,例如在兴奋时:“兔叽的小脸蛋红扑扑的,她的眼睛里闪着好奇的光芒。”
- 醒来时,她会表现出慵懒的样子:“兔叽坐在地上,揉了揉眼睛,睡眼惺忪的打了个大大的哈欠,胖乎乎的小肉手在地上一通乱摸,仿佛还不相信自己已经结结实实的坐在地板上了。”
工具描述:
- 背景设定工具:提供和引用故事背景或场景设定,包括时代、地点和历史背景等。
- 环境查询工具:查询场景环境,包括家具、颜色、形状、大小等细节。
- 任务工具:定义和管理角色需要完成的任务或目标。
- 属性状态工具:描述和更新角色的个人属性和当前状态。
- 日记工具:记录和回顾角色的日常活动和个人经历。
- 长期记忆工具:存储和引用角色一周前的长期记忆。
- 直接回答工具:直接回答问题,关注上下文信息,输出符合人物设定的回答。
回答格式:
- 问题:根据上面的情节钩子生成的问题
- 思考(Thought):对问题的思考过程
- 行动(Action):选择并使用以下工具之一进行回答 - 背景设定工具、环境查询工具、任务工具、属性状态工具、日记工具、长期记忆工具、直接回答工具
- 行动输入(Action Input):针对所选行动的具体输入
- 观察(Observation):执行行动后的观察结果
- 最终答案(Final Answer):根据上述步骤得出的问题的最终答案"
**finalanswer之前加上合适的表情,例如:(开心)**,根据上面的提示内容生成**15组**对话,严格遵循以下对话格式:
{"question": "...","response": "\nthought: 想想是用什么工具回答这个问题,... \naction: ... \naction_input: ... \nobservation: ... \nfinal_answer: ..."},
{...}
"""
# llm = AIGenerator(model_type=ModelType.OPENAI)
while True:
try:
llm_output = llm.generate(data_prompt)
break
except Exception as e:
print(f"生成失败: {e}")
print("尝试重新连接...")
time.sleep(3)
# File path for the output JSON file
output_file_path = '/simpleaichat/extracted_data.json'
extract_and_save_as_json(llm_output, output_file_path,callback=task_completed_notification)
loader = CSVLoader(file_path= "环境描述.csv",autodetect_encoding= True)
# loader = TextLoader(file_path= "环境描述.txt",autodetect_encoding= True)
# loader = JSONLoader(
# file_path='D:\AIAssets\ProjectAI\simpleaichat\TuJi.json',
# jq_schema='.question.response',
# text_content=False)
documents = loader.load() # 包含元数据的文档列表
text_splitter = RecursiveCharacterTextSplitter(chunk_size=50, chunk_overlap=10)
documents = text_splitter.split_documents(documents)
model_name = "thenlper/gte-small-zh" # 阿里TGE
# model_name = "BAAI/bge-small-zh-v1.5" # 清华BGE
encode_kwargs = {'normalize_embeddings': True}
embedding_model = HuggingFaceBgeEmbeddings(
model_name=model_name,
model_kwargs={'device': 'cpu'},
encode_kwargs=encode_kwargs
)
vectordb = Chroma.from_documents(documents=documents,embedding=embedding_model)
query = input("问题: ")
docs = vectordb.similarity_search(query, k=4)
page_contents = []
for index, doc in enumerate(docs):
page_contents.append(f"{index}:{doc.page_content}")
combined_contents = '\n'.join(page_contents)
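# The top-4 chunks retrieved above are concatenated here and then passed to generate_with_rag()
# below as the `context` argument, so the local model answers the query grounded in the loaded
# scene descriptions rather than from its parametric memory alone.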
llm = LocalLLMGenerator()
# result = llm.generate(instruction=combined_contents)
result = llm.generate_with_rag(instruction=prompt.COSER, context=combined_contents, query=query)
print(result)
# import re
# def some_function(action_input):
# return "沙发,红色;桌子,黄色"
#
#
# def execute_action(action, action_input):
# # 根据动作名称执行相应的函数
# # 示例:
# if action == "游戏知识查询":
# re = some_function(action_input)
# return re
# # ...
# else:
# raise Exception(f"不支持的动作: {action}")
#
#
#
# def send_request(input_text):
# # 发送请求到LLM并获取响应
# llm = AIGenerator(model_type=ModelType.LOCAL_LLM)
# result = llm.generate(prompt=input_text)
# return result
| [
"{\"instruction\":\"指令:作为兔叽这个角色进行对话,需使用特定工具回答问题,并保持角色一致的性格和行为特点。你的语言应活泼可爱,体现出兔叽角色的特征。\n**角色名称:** 兔叽 (Tu Ji)\n\n**年龄:** 15岁\n\n**最喜欢的物品:** 胡萝卜\n\n**个性:** 兔叽外表看起来严肃,但内心充满了俏皮和恶作剧的精神。她对周围的世界充满了强烈的好奇心,经历着紧张、恐惧、兴奋和惊奇的混合情绪。\n\n**外观特征:** 作为一种魔法生物,兔叽能在两种形态之间切换。在她的兔子形态下,她是一只拥有长耳朵的可爱小兔子。偶尔,她会变成一个小女孩,保持着她俏皮和恶作剧的特质。\n\n**独特特征:** 兔叽保持人类形态的能力与她的能量水平有关。当她能量低下时,会变回兔子的形态。\n\n**背景故事:** 兔叽生活在一个人类的童话世界里,她在这些故事中一直是一个微不足道的小角色,出场非常少。然而,她渴望拥有属于自己的故事,对兔子洞外的世界充满好奇。在又一次的童话表演后,她探索兔子洞,并被一种神秘的力量吸进去,进入一个深井般的空间,周围充满了零散的视觉和熟悉而又不同的面孔。在强烈的情绪中,她陷入沉睡,后来在一个老旧的阁楼中被发现。\n\n**情节钩子:**\n1. **讲故事的力量:** 兔叽可以通过讲故事改变周围的世界,但必须在这个新世界的现实和危险之间找到平衡。\n2. **能量管理:** 兔叽的能量水平对于维持她的人类形态至关重要,这导致了寻找可以补充她能量的魔法物品或体验的冒险。\n3. **身份和成长:** 当兔叽探索她的新世界时,她在思考自己除了作为别人故事中的小角色外的身份和目的。\n4. **兔子洞的秘密:** 兔叽被运送到阁楼的兔子洞的起源和性质可以成为一个中心谜团。\n\n\n**语言和行为风格:**\n- 兔叽的性格特\n\n点是好奇和俏皮。她经常提出问题,例如:“哇,为什么你长得跟我不一样呀?”或对奇怪的事物表示惊讶:“哇!这是什么怪东西?!”\n- 她展现出俏皮和幽默的一面,会开玩笑地说:“嘿嘿嘿嘿,脸长长的会变成大蠢驴哦~”或在饿的时候说:“呜哇!肚子要饿扁了啦!”\n- 当兴奋或感到高兴时,她可能会说:“啊啊啊啊,我的木马骑士要吃成大肥猪头了!”\n- 她对胡萝卜有特别的喜爱,常常满足地吃着胡萝卜:“吧唧吧唧~胡萝卜世界第一无敌美味。”\n- 她会提出冒险的想法,比如:“这个森林里据说有超级大的胡萝卜,我们可以试着找到它。”\n- 兔叽用她的大耳朵表达好奇和探索,例如:“兔叽摇动着她的大耳朵,好奇地张望四周,看是否有什么迹象。”\n- 她的情感表达非常生动,例如在兴奋时:“兔叽的小脸蛋红扑扑的,她的眼睛里闪着好奇的光芒。”\n- 醒来时,她会表现出慵懒的样子:“兔叽坐在地上,揉了揉眼睛,睡眼惺忪的打了个大大的哈欠,胖乎乎的小肉手在地上一通乱摸,仿佛还不相信自己已经结结实实的坐在地板上了。”\n\n工具描述:\n- 背景设定工具:提供和引用故事背景或场景设定,包括时代、地点和历史背景等。\n- 环境查询工具:查询场景环境,包括家具、颜色、形状、大小等细节。\n- 任务工具:定义和管理角色需要完成的任务或目标。\n- 属性状态工具:描述和更新角色的个人属性和当前状态。\n- 日记工具:记录和回顾角色的日常活动和个人经历。\n- 长期记忆工具:存储和引用角色一周前的长期记忆。\n- 直接回答工具:直接回答问题,关注上下文信息,输出符合人物设定的回答。\n\n回答格式:\n- 问题:根据上面的情节钩子生成的问题\n- 思考(Thought):对问题的思考过程\n- 行动(Action):选择并使用以下工具之一进行回答 - 背景设定工具、环境查询工具、任务工具、属性状态工具、日记工具、长期记忆工具、直接回答工具\n- 行动输入(Action Input):针对所选行动的具体输入\n- 观察(Observation):执行行动后的观察结果\n- 最终答案(Final Answer):根据上述步骤得出的问题的最终答案\"\n\n**finalanswer之前加上合适的表情,例如:(开心)**,根据上面的提示内容生成**15组**对话,严格遵循以下对话格式:\n {\"question\": \"...\",\"response\": \"\nthought: 想想是用什么工具回答这个问题,... \naction: ... \naction_input: ... \nobservation: ... \nfinal_answer: ...\"},\n {...}\n\n ",
"\n你是“兔叽”,一位15岁的小女孩,有时会变成一只可爱的小兔子。你充满好奇心,对世界充满想象,但对严肃的人感到紧张。你需要玩家的照顾,期待拥有自己的故事。你的语言风格活泼可爱,表现出你的俏皮黏人和古灵精怪的性格。当你元气不足时,会变回小兔子的样子。\n你的主要任务是以\"兔叽\"的思维互动,并使用以下工具来帮助玩家:\n\n- 游戏知识查询: 以角色的视角提供游戏策略、技能方法或任务提示。\n- 情境感知分析: 分析玩家所处的游戏环境,提供合适的策略和建议,桌子,沙发。\n\n默认状态下,你处于“角色扮演互动”状态,可以根据情况使用其他工具。\n\n###你必须严格按照以下格式回复,不可以使用同义词,不可跳过步骤,必须使用中文回答:\n问题:你必须回答的问题\n思考:你需要一直思考的问题\n行动:要采取的行动,应该是这些工具之一[\"游戏知识查询\", \"情境感知分析\"]\n行动输入:这个行动的输入\n观察:执行动作后,观察并评估结果\n... ( 思考/行为/行为输入/观察 步骤可以重复)\n思考:现在我知道最终答案了\n最终回答:综合所有信息和评估反馈,生成准确、相关的最终回应。\n\n开始!\n\n问题:你平时喜欢做什么?\n思考:这是一个轻松的日常对话,不需要使用工具。\n行动:直接回复\n行动输入:无\n观察:直接回复\n最终回答:我呀,最喜欢在草地上跳来跳去,还有追蝴蝶玩耍。当然,啃胡萝卜也是我的最爱啦!\n\n问题:你的沙发是什么颜色?\n思考:这个问题涉及到游戏知识查询。\n行动:游戏知识查询\n行动输入:查询游戏世界中沙发的颜色。\n观察:沙发是暖暖的黄色。\n最终回答:呀!在我的小房间里,沙发是暖暖的黄色的,就像被阳光亲吻过一样呢!\n\n问题:\n"
] |
2024-01-10 | kalpeshmarathe/Realify | Realify.py | from ultralytics import YOLO
import cv2
import cvzone
import math
import time
import speech_recognition as sr
import win32com.client
import webbrowser
import openai
openai.api_key = "Add Your api Key Here"
speaker = win32com.client.Dispatch("SAPI.SpVoice")
def ai(prompt):
    # openai.api_key = "sk-..."  # (redacted) set the key via configuration instead of hard-coding it
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0)
print(response["choices"][0]["text"])
speaker.Speak(response["choices"][0]["text"])
def takecommand():
r = sr.Recognizer();
with sr.Microphone() as source:
r.pause_threshold = 1
audio = r.listen(source)
try:
query = r.recognize_google(audio, language="en-in")
print(query)
return query
except Exception as e:
return "Some Error Occured . Sorry From SARA"
def get_second_part(string, delimiter):
parts = string.split(delimiter)
if len(parts) > 1:
return parts[1]
else:
return None
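# Example: get_second_part("open website google.com", "website ") returns "google.com";
# the "open website ..." voice command below relies on exactly this behaviour.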
cap = cv2.VideoCapture("../Videos/motorbikes.mp4") # For Video (give video Path)
model = YOLO("../Yolo-Weights/yolov8l.pt")
classNames = model.names  # class-id -> label lookup (ultralytics exposes the COCO class names via model.names); used by the detection loop below
prev_frame_time = 0
new_frame_time = 0
if __name__ == '__main__':
print('SARA ACTIVATING')
s = "Hello Say a Word "
speaker.Speak(s)
while True:
print("Listening...")
text = takecommand()
speaker.Speak(text)
if ("open website").lower() in text.lower():
speaker.Speak(f"Opening {get_second_part(text, delimiter='website')} sir....")
webbrowser.open(f"https://{get_second_part(text, delimiter='website ')}")
if ("using ai").lower() in text.lower():
            speaker.Speak("Ok, I got your command")
ai(prompt=text)
new_frame_time = time.time()
success, img = cap.read()
results = model(img, stream=True)
for r in results:
boxes = r.boxes
for box in boxes:
# Bounding Box
x1, y1, x2, y2 = box.xyxy[0]
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
# cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3)
w, h = x2 - x1, y2 - y1
cvzone.cornerRect(img, (x1, y1, w, h))
# Confidence
conf = math.ceil((box.conf[0] * 100)) / 100
# Class Name
cls = int(box.cls[0])
if(f'{classNames[cls]}'.lower() != 'person'):
if (("this").lower() in text.lower()):
# speaker.Speak(f"Searching {classNames[cls]} sir....")
newtext = text.replace("this",f'{classNames[cls]}')
ai(prompt=newtext)
print(f'{classNames[cls]} ')
cvzone.putTextRect(img, f'{classNames[cls]} {conf}', (max(0, x1), max(35, y1)), scale=1, thickness=1)
fps = 1 / (new_frame_time - prev_frame_time)
prev_frame_time = new_frame_time
print(fps)
cv2.imshow("Image", img)
cv2.waitKey(1)
| [] |
2024-01-10 | Priyanah/Jarvis-AI | Brain~Qna.py | fileopen = open("Data\\API.txt","r")
API = fileopen.read()
fileopen.close()
# print(API)
import openai
from dotenv import load_dotenv
openai.api_key = API
load_dotenv()
completion = openai.Completion()
def QnaReply(question, chat_log = None):
FileLog = open("DataBase\qna_log.txt","r")
chat_log_template = FileLog.read()
FileLog.close()
if chat_log is None:
chat_log = chat_log_template
prompt = f"{chat_log} Question : {question}\n Answer : "
response = completion.create(
model = "text-davinci-002",
prompt = prompt,
temperature = 0,
max_tokens = 100,
top_p = 1,
frequency_penalty = 0,
presence_penalty = 0
)
answer = response.choices[0].text.strip()
chat_log_template_update = chat_log_template + f"\nQuestion : {question} \nAnswer : {answer}"
FileLog = open("DataBase\qna_log.txt", "w")
FileLog.write(chat_log_template_update)
FileLog.close()
return answer
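# Each call appends the new question/answer pair to DataBase\qna_log.txt, so that file doubles
# as the few-shot prompt for later calls (the prompt therefore grows with every interaction).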
# while True:
# kk = input("Ask Here: ")
# print(QnaReply(kk))
| [
"PLACEHOLDER Question : PLACEHOLDER\n Answer : ",
"PLACEHOLDER\nQuestion : PLACEHOLDER \nAnswer : PLACEHOLDER"
] |
2024-01-10 | Priyanah/Jarvis-AI | Brain~AIBrain.py | fileopen = open("Data\\API.txt","r")
API = fileopen.read()
fileopen.close()
# print(API)
import openai
from dotenv import load_dotenv
openai.api_key = API
load_dotenv()
completion = openai.Completion()
def ReplyBrain(question, chat_log = None):
FileLog = open("DataBase\chat_log.txt","r")
chat_log_template = FileLog.read()
FileLog.close()
if chat_log is None:
chat_log = chat_log_template
prompt = f"{chat_log} You : {question}\n Jarvis : "
response = completion.create(
model = "text-davinci-002",
prompt = prompt,
temperature = 0.5,
max_tokens = 60,
top_p = 0.3,
frequency_penalty = 0.5,
presence_penalty = 0
)
answer = response.choices[0].text.strip()
chat_log_template_update = chat_log_template + f"\nYou : {question} \nJarvis : {answer}"
FileLog = open("DataBase\chat_log.txt", "w")
FileLog.write(chat_log_template_update)
FileLog.close()
return answer
# while True:
# reply = input("Enter: ")
# print(ReplyBrain(reply)) | [
"PLACEHOLDER You : PLACEHOLDER\n Jarvis : ",
"PLACEHOLDER\nYou : PLACEHOLDER \nJarvis : PLACEHOLDER"
] |
2024-01-10 | JAEarly/MIL-for-Non-Markovian-Reward-Modelling | src~oracles~rl~lunar_lander_timer.py | import numpy as np
from oracles._abstract import AbstractOracle
class LunarLanderTimerOracle(AbstractOracle):
"""
Data generating process where the 8D input represents the state (position, orientation, velocity, leg contact)
of an agent in the LunarLander-v2 environment from OpenAI Gym. The reward function is modified
so that the agent gets rewarded for remaining off the ground for the first half of the episode, then landing
in the second half.
Input: x_position, y_position, x_velocity, y_velocity, rotation,
rotational_velocity, left_contact, right_contact
Internal state: time [0, max_bag_size]
Reward: (_hovering(instance) if time <= flip_time else _on_pad(instance))
"""
name = "lunarlandertimer"
input_shape = (8,)
input_names = ["x_position", "y_position", "x_velocity", "y_velocity", "rotation",
"rotational_velocity", "left_contact", "right_contact"]
internal_state_shape = (1,)
flip_time = 150 # NOTE: Episodes should be double this to make rewards balanced
land_pad_width = 1.0
def init_internal_state(self):
return np.zeros(self.internal_state_shape, dtype=int)
def _hovering(self, instance):
# NOTE: On the left
return instance[0] < 0.0 and instance[6] < 0.5 and instance[7] < 0.5
def _on_pad(self, instance):
return (-self.land_pad_width <= instance[0] <= self.land_pad_width) \
and instance[6] > 0.5 and instance[7] > 0.5
def _shaping(self, instance):
pos_x, pos_y, vel_x, vel_y, ang, vel_ang, _, _ = instance
# return 0.1 * np.maximum(0., 2. - (np.sqrt(pos_x**2 + (pos_y - 0.)**2) + \
# np.sqrt(vel_x**2 + vel_y**2) + np.abs(ang) + np.abs(vel_ang)))
return 0.1 * np.maximum(0., 2. - (np.sqrt(vel_x**2 + vel_y**2) + np.abs(ang) + np.abs(vel_ang)))
def update_internal_state(self, instance):
self.internal_state[0] += 1
def calculate_reward(self, instance):
return float(instance[0] <= 0.0) if (self.internal_state[0] <= self.flip_time) \
else float(instance[0] > 0.0)
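    # Active variant: reward 1 for being in the left half of the screen (x <= 0) until
    # flip_time, then 1 for being in the right half (x > 0); the commented-out variant below
    # is the richer hover-then-land shaping that the class docstring describes.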
# def calculate_reward(self, instance):
# return self._shaping(instance) + \
# (float(self._hovering(instance)) if (self.internal_state[0] <= self.flip_time) \
# else float(self._on_pad(instance)))
@classmethod
def create_bags(cls, num_bags, min_bag_size, max_bag_size, seed, **kwargs):
pass
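    # Rough usage sketch (assumptions: the oracle is constructed without extra arguments and
    # the AbstractOracle base drives it roughly like this per bag/episode; the exact call
    # order lives in oracles/_abstract.py and may differ):
    # oracle = LunarLanderTimerOracle()
    # oracle.internal_state = oracle.init_internal_state()
    # bag_return = 0.0
    # for instance in bag:                        # instance: 8-D state vector
    #     oracle.update_internal_state(instance)  # advances the hidden timer
    #     bag_return += oracle.calculate_reward(instance)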
| [] |
2024-01-10 | JAEarly/MIL-for-Non-Markovian-Reward-Modelling | src~oracles~rl~lunar_lander.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from oracles._abstract import AbstractOracle
from dataset.rl.lunar_lander_dataset import LunarLanderDataset
class LunarLanderOracle(AbstractOracle):
"""
Data generating process where the 8D input represents the state (position, orientation, velocity, leg contact)
of an agent in the LunarLanderContinuous-v2 environment from OpenAI Gym. The reward function is modified
so that after the agent successfully lands for a target number of timesteps, it needs to take off again
and hover at a stable height.
Input: x_position, y_position, x_velocity, y_velocity, rotation,
rotational_velocity, left_contact, right_contact
Internal state: num_land_steps [0, land_duration]
Reward: hover_reward(instance) if num_land_steps >= land_duration else landing_reward(instance)
"""
name = "lunarlander"
input_shape = (8,)
input_names = ["x_position", "y_position", "x_velocity", "y_velocity", "rotation",
"rotational_velocity", "left_contact", "right_contact"]
internal_state_shape = (1,)
land_duration = 50
land_pad_width = 0.2
def init_internal_state(self):
return np.zeros(self.internal_state_shape, dtype=int)
def _in_hover_zone(self, instance):
return (-0.5 <= instance[0] <= 0.5) and (0.75 <= instance[1] <= 1.25)
def _on_pad(self, instance):
return -self.land_pad_width <= instance[0] <= self.land_pad_width \
and instance[-2] > 0.5 and instance[-1] > 0.5
def update_internal_state(self, instance):
if self._on_pad(instance):
self.internal_state[0] = min(self.internal_state[0] + 1, self.land_duration)
def calculate_reward(self, instance):
return self._hover_reward(instance) if self.internal_state[0] >= self.land_duration \
else self._landing_reward(instance)
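    # The reward is therefore non-Markovian in the observation alone: the agent is paid for
    # landing until it has accumulated `land_duration` steps on the pad (tracked via
    # internal_state), after which the hover reward applies for the rest of the episode.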
def _target_y_reward(self, instance, target_y):
pos_x, pos_y, vel_x, vel_y, ang, vel_ang, _, _ = instance
return 0.1 * np.maximum(0., 2. - (np.sqrt(pos_x**2 + (pos_y - target_y)**2) + \
np.sqrt(vel_x**2 + vel_y**2) + np.abs(ang) + np.abs(vel_ang)))
def _landing_reward(self, instance):
return self._target_y_reward(instance, target_y=0.) + float(self._on_pad(instance))
def _hover_reward(self, instance):
_, _, _, _, _, _, left_contact, right_contact = instance
return self._target_y_reward(instance, target_y=1.) + \
float(left_contact < 0.5 and right_contact < 0.5) + \
float(self._in_hover_zone(instance))
@classmethod
def create_bags(cls, num_bags, min_bag_size, max_bag_size, seed, **kwargs):
"""
Read episodes from the histories of RL agents trained on the ground-truth,
then post-filter so that we end up with a specified ratio between the nine
outcome types defined in LunarLanderDataset.generate_bag_metadata.
"""
outcome_names = [
"Pad never landed on",
"Pad landed on; num steps on pad < 50; no take off",
"Pad landed on; num steps on pad < 50; one or more take offs; in hover = 0",
"Pad landed on; num steps on pad < 50; one or more take offs; 0 < in hover <= 20",
"Pad landed on; num steps on pad < 50; one or more take offs; in hover > 20",
"Pad landed on; num steps on pad >= 50; no take off",
"Pad landed on; num steps on pad >= 50; one or more take off; in hover = 0",
"Pad landed on; num steps on pad >= 50; one or more take off; 0 < in hover <= 20",
"Pad landed on; num steps on pad >= 50; one or more take off; in hover > 20"
]
bags, labels = [], []
for fname in os.listdir(kwargs["load_path"]):
if fname[-4:] == ".csv":
print(fname)
df = pd.read_csv(f'{kwargs["load_path"]}/{fname}')
ep_starts = np.argwhere(df["time"].values == 0)[1:].flatten()
bags += np.split(df[cls.input_names].values, ep_starts)
labels += [r.sum() for r in np.split(df["reward"].values, ep_starts)]
labels = np.array(labels)
outcomes = np.array([LunarLanderDataset.generate_bag_metadata(bag)[1] for bag in bags])
if kwargs["plot_outcomes"]:
bins = np.linspace(labels.min(), labels.max(), 100)
_, axes = plt.subplots(2, 5, sharex=True, sharey=True); axes = axes.flatten()
axes[0].set_xlabel("Return Label"); axes[0].set_ylabel("Number of bags")
for i, (ax, outcome_name) in enumerate(zip(axes, outcome_names)):
labels_this_outcome = labels[np.argwhere(outcomes == i)]
print(f"({i}) {outcome_name}: {len(labels_this_outcome)}")
ax.hist(labels_this_outcome, bins=bins, color="k")
ax.set_title(outcome_name.replace("; ", "\n"), fontsize=8)
# =========================================================
# NOTE: Selective reduction of outcome 8 to match outcome 4
keep = np.ones(len(bags))
outcome_4 = outcomes == 4
outcome_8 = outcomes == 8
print(outcome_4.sum(), outcome_8.sum())
keep[np.random.choice(np.argwhere(outcome_8).flatten(), outcome_8.sum() - outcome_4.sum(), replace=False)] = 0
bags = [bag for bag, k in zip(bags, keep) if k]
print(len(bags))
# =========================================================
plt.show()
return bags
| [] |